1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86 #include <drm/drm_gem_atomic_helper.h>
87 
88 #if defined(CONFIG_DRM_AMD_DC_DCN)
89 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
90 
91 #include "dcn/dcn_1_0_offset.h"
92 #include "dcn/dcn_1_0_sh_mask.h"
93 #include "soc15_hw_ip.h"
94 #include "vega10_ip_offset.h"
95 
96 #include "soc15_common.h"
97 #endif
98 
99 #include "modules/inc/mod_freesync.h"
100 #include "modules/power/power_helpers.h"
101 #include "modules/inc/mod_info_packet.h"
102 
103 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
105 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
107 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
109 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
111 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
113 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
115 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
117 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
119 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
121 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
123 
124 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
125 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
126 
127 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
128 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
129 
130 /* Number of bytes in PSP header for firmware. */
131 #define PSP_HEADER_BYTES 0x100
132 
133 /* Number of bytes in PSP footer for firmware. */
134 #define PSP_FOOTER_BYTES 0x100
135 
136 /**
137  * DOC: overview
138  *
139  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
140  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
141  * requests into DC requests, and DC responses into DRM responses.
142  *
143  * The root control structure is &struct amdgpu_display_manager.
144  */
145 
146 /* basic init/fini API */
147 static int amdgpu_dm_init(struct amdgpu_device *adev);
148 static void amdgpu_dm_fini(struct amdgpu_device *adev);
149 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
150 
151 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
152 {
153 	switch (link->dpcd_caps.dongle_type) {
154 	case DISPLAY_DONGLE_NONE:
155 		return DRM_MODE_SUBCONNECTOR_Native;
156 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
157 		return DRM_MODE_SUBCONNECTOR_VGA;
158 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
159 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
160 		return DRM_MODE_SUBCONNECTOR_DVID;
161 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
162 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
163 		return DRM_MODE_SUBCONNECTOR_HDMIA;
164 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
165 	default:
166 		return DRM_MODE_SUBCONNECTOR_Unknown;
167 	}
168 }
169 
170 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
171 {
172 	struct dc_link *link = aconnector->dc_link;
173 	struct drm_connector *connector = &aconnector->base;
174 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
175 
176 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
177 		return;
178 
179 	if (aconnector->dc_sink)
180 		subconnector = get_subconnector_type(link);
181 
182 	drm_object_property_set_value(&connector->base,
183 			connector->dev->mode_config.dp_subconnector_property,
184 			subconnector);
185 }
186 
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
194 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
195 /* removes and deallocates the drm structures, created by the above function */
196 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
197 
198 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
199 				struct drm_plane *plane,
200 				unsigned long possible_crtcs,
201 				const struct dc_plane_cap *plane_cap);
202 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
203 			       struct drm_plane *plane,
204 			       uint32_t link_index);
205 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
206 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
207 				    uint32_t link_index,
208 				    struct amdgpu_encoder *amdgpu_encoder);
209 static int amdgpu_dm_encoder_init(struct drm_device *dev,
210 				  struct amdgpu_encoder *aencoder,
211 				  uint32_t link_index);
212 
213 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
214 
215 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
216 
217 static int amdgpu_dm_atomic_check(struct drm_device *dev,
218 				  struct drm_atomic_state *state);
219 
220 static void handle_cursor_update(struct drm_plane *plane,
221 				 struct drm_plane_state *old_plane_state);
222 
223 static const struct drm_format_info *
224 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
225 
226 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
227 static void handle_hpd_rx_irq(void *param);
228 
229 static bool
230 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
231 				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
245 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
246 {
247 	if (crtc >= adev->mode_info.num_crtc)
248 		return 0;
249 	else {
250 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
251 
252 		if (acrtc->dm_irq_params.stream == NULL) {
253 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
254 				  crtc);
255 			return 0;
256 		}
257 
258 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
259 	}
260 }
261 
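/*
 * Query DC for the current scanout position of a CRTC and return it in the
 * packed register-style format expected by the base driver: vertical position
 * in the low word, horizontal position in the high word, and the vblank
 * start/end lines packed the same way in *vbl.
 */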
262 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
263 				  u32 *vbl, u32 *position)
264 {
265 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
266 
267 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
268 		return -EINVAL;
269 	else {
270 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
271 
272 		if (acrtc->dm_irq_params.stream ==  NULL) {
273 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
274 				  crtc);
275 			return 0;
276 		}
277 
278 		/*
279 		 * TODO rework base driver to use values directly.
280 		 * for now parse it back into reg-format
281 		 */
282 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
283 					 &v_blank_start,
284 					 &v_blank_end,
285 					 &h_position,
286 					 &v_position);
287 
288 		*position = v_position | (h_position << 16);
289 		*vbl = v_blank_start | (v_blank_end << 16);
290 	}
291 
292 	return 0;
293 }
294 
295 static bool dm_is_idle(void *handle)
296 {
297 	/* XXX todo */
298 	return true;
299 }
300 
301 static int dm_wait_for_idle(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static bool dm_check_soft_reset(void *handle)
308 {
309 	return false;
310 }
311 
312 static int dm_soft_reset(void *handle)
313 {
314 	/* XXX todo */
315 	return 0;
316 }
317 
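/*
 * Find the amdgpu_crtc whose OTG (output timing generator) instance matches
 * the given interrupt source. Falls back to the first CRTC, with a warning,
 * if the OTG instance is unknown (-1).
 */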
318 static struct amdgpu_crtc *
319 get_crtc_by_otg_inst(struct amdgpu_device *adev,
320 		     int otg_inst)
321 {
322 	struct drm_device *dev = adev_to_drm(adev);
323 	struct drm_crtc *crtc;
324 	struct amdgpu_crtc *amdgpu_crtc;
325 
326 	if (WARN_ON(otg_inst == -1))
327 		return adev->mode_info.crtcs[0];
328 
329 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
330 		amdgpu_crtc = to_amdgpu_crtc(crtc);
331 
332 		if (amdgpu_crtc->otg_inst == otg_inst)
333 			return amdgpu_crtc;
334 	}
335 
336 	return NULL;
337 }
338 
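/*
 * VRR state helpers: report whether variable refresh rate is currently
 * active (variable or fixed) for a CRTC, either from the IRQ-safe
 * parameters or from a DM CRTC state.
 */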
339 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
340 {
341 	return acrtc->dm_irq_params.freesync_config.state ==
342 		       VRR_STATE_ACTIVE_VARIABLE ||
343 	       acrtc->dm_irq_params.freesync_config.state ==
344 		       VRR_STATE_ACTIVE_FIXED;
345 }
346 
347 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
348 {
349 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
350 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
351 }
352 
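/*
 * DC needs its vmin/vmax timing adjusted whenever the VRR active state
 * changes between the old and new CRTC state, or when the new state uses
 * the fixed refresh rate (VRR_STATE_ACTIVE_FIXED) mode.
 */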
353 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
354 					      struct dm_crtc_state *new_state)
355 {
356 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
357 		return true;
358 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
359 		return true;
360 	else
361 		return false;
362 }
363 
364 /**
365  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
367  *
368  * Handles the pageflip interrupt by notifying all interested parties
369  * that the pageflip has been completed.
370  */
371 static void dm_pflip_high_irq(void *interrupt_params)
372 {
373 	struct amdgpu_crtc *amdgpu_crtc;
374 	struct common_irq_params *irq_params = interrupt_params;
375 	struct amdgpu_device *adev = irq_params->adev;
376 	unsigned long flags;
377 	struct drm_pending_vblank_event *e;
378 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
379 	bool vrr_active;
380 
381 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
382 
383 	/* IRQ could occur when in initial stage */
384 	/* TODO work and BO cleanup */
385 	if (amdgpu_crtc == NULL) {
386 		DC_LOG_PFLIP("CRTC is null, returning.\n");
387 		return;
388 	}
389 
390 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
391 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
398 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
399 		return;
400 	}
401 
402 	/* page flip completed. */
403 	e = amdgpu_crtc->event;
404 	amdgpu_crtc->event = NULL;
405 
406 	WARN_ON(!e);
407 
408 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
409 
410 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
411 	if (!vrr_active ||
412 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
413 				      &v_blank_end, &hpos, &vpos) ||
414 	    (vpos < v_blank_start)) {
415 		/* Update to correct count and vblank timestamp if racing with
416 		 * vblank irq. This also updates to the correct vblank timestamp
417 		 * even in VRR mode, as scanout is past the front-porch atm.
418 		 */
419 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
420 
421 		/* Wake up userspace by sending the pageflip event with proper
422 		 * count and timestamp of vblank of flip completion.
423 		 */
424 		if (e) {
425 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
426 
427 			/* Event sent, so done with vblank for this flip */
428 			drm_crtc_vblank_put(&amdgpu_crtc->base);
429 		}
430 	} else if (e) {
431 		/* VRR active and inside front-porch: vblank count and
432 		 * timestamp for pageflip event will only be up to date after
433 		 * drm_crtc_handle_vblank() has been executed from late vblank
434 		 * irq handler after start of back-porch (vline 0). We queue the
435 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
436 		 * updated timestamp and count, once it runs after us.
437 		 *
438 		 * We need to open-code this instead of using the helper
439 		 * drm_crtc_arm_vblank_event(), as that helper would
440 		 * call drm_crtc_accurate_vblank_count(), which we must
441 		 * not call in VRR mode while we are in front-porch!
442 		 */
443 
444 		/* sequence will be replaced by real count during send-out. */
445 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
446 		e->pipe = amdgpu_crtc->crtc_id;
447 
448 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
449 		e = NULL;
450 	}
451 
	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
457 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
458 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
459 
460 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
461 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
462 
463 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
464 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
465 		     vrr_active, (int) !e);
466 }
467 
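/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Fires after the end of the front-porch. Tracks the measured frame duration
 * for refresh-rate tracing and, in VRR mode, performs the core vblank
 * handling that dm_crtc_high_irq() defers, plus BTR processing on pre-DCE12
 * ASICs.
 */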
468 static void dm_vupdate_high_irq(void *interrupt_params)
469 {
470 	struct common_irq_params *irq_params = interrupt_params;
471 	struct amdgpu_device *adev = irq_params->adev;
472 	struct amdgpu_crtc *acrtc;
473 	struct drm_device *drm_dev;
474 	struct drm_vblank_crtc *vblank;
475 	ktime_t frame_duration_ns, previous_timestamp;
476 	unsigned long flags;
477 	int vrr_active;
478 
479 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
480 
481 	if (acrtc) {
482 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
483 		drm_dev = acrtc->base.dev;
484 		vblank = &drm_dev->vblank[acrtc->base.index];
485 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
486 		frame_duration_ns = vblank->time - previous_timestamp;
487 
488 		if (frame_duration_ns > 0) {
489 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
490 						frame_duration_ns,
491 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
492 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
493 		}
494 
495 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
496 			      acrtc->crtc_id,
497 			      vrr_active);
498 
		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * now that it is done after the front-porch. This will also
		 * deliver page-flip completion events that have been queued
		 * to us if a pageflip happened inside front-porch.
		 */
505 		if (vrr_active) {
506 			drm_crtc_handle_vblank(&acrtc->base);
507 
508 			/* BTR processing for pre-DCE12 ASICs */
509 			if (acrtc->dm_irq_params.stream &&
510 			    adev->family < AMDGPU_FAMILY_AI) {
511 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
512 				mod_freesync_handle_v_update(
513 				    adev->dm.freesync_module,
514 				    acrtc->dm_irq_params.stream,
515 				    &acrtc->dm_irq_params.vrr_params);
516 
517 				dc_stream_adjust_vmin_vmax(
518 				    adev->dm.dc,
519 				    acrtc->dm_irq_params.stream,
520 				    &acrtc->dm_irq_params.vrr_params.adjust);
521 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
522 			}
523 		}
524 	}
525 }
526 
527 /**
528  * dm_crtc_high_irq() - Handles CRTC interrupt
529  * @interrupt_params: used for determining the CRTC instance
530  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
532  * event handler.
533  */
534 static void dm_crtc_high_irq(void *interrupt_params)
535 {
536 	struct common_irq_params *irq_params = interrupt_params;
537 	struct amdgpu_device *adev = irq_params->adev;
538 	struct amdgpu_crtc *acrtc;
539 	unsigned long flags;
540 	int vrr_active;
541 
542 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
543 	if (!acrtc)
544 		return;
545 
546 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
547 
548 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
549 		      vrr_active, acrtc->dm_irq_params.active_planes);
550 
	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only then does vblank timestamping give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
557 	if (!vrr_active)
558 		drm_crtc_handle_vblank(&acrtc->base);
559 
	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
564 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
565 
566 	/* BTR updates need to happen before VUPDATE on Vega and above. */
567 	if (adev->family < AMDGPU_FAMILY_AI)
568 		return;
569 
570 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
571 
572 	if (acrtc->dm_irq_params.stream &&
573 	    acrtc->dm_irq_params.vrr_params.supported &&
574 	    acrtc->dm_irq_params.freesync_config.state ==
575 		    VRR_STATE_ACTIVE_VARIABLE) {
576 		mod_freesync_handle_v_update(adev->dm.freesync_module,
577 					     acrtc->dm_irq_params.stream,
578 					     &acrtc->dm_irq_params.vrr_params);
579 
580 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
581 					   &acrtc->dm_irq_params.vrr_params.adjust);
582 	}
583 
584 	/*
585 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
586 	 * In that case, pageflip completion interrupts won't fire and pageflip
587 	 * completion events won't get delivered. Prevent this by sending
588 	 * pending pageflip events from here if a flip is still pending.
589 	 *
590 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
591 	 * avoid race conditions between flip programming and completion,
592 	 * which could cause too early flip completion events.
593 	 */
594 	if (adev->family >= AMDGPU_FAMILY_RV &&
595 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
596 	    acrtc->dm_irq_params.active_planes == 0) {
597 		if (acrtc->event) {
598 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
599 			acrtc->event = NULL;
600 			drm_crtc_vblank_put(&acrtc->base);
601 		}
602 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
603 	}
604 
605 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
606 }
607 
608 #if defined(CONFIG_DRM_AMD_DC_DCN)
609 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
610 /**
611  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
612  * DCN generation ASICs
613  * @interrupt_params: interrupt parameters
614  *
615  * Used to set crc window/read out crc value at vertical line 0 position
616  */
617 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
618 {
619 	struct common_irq_params *irq_params = interrupt_params;
620 	struct amdgpu_device *adev = irq_params->adev;
621 	struct amdgpu_crtc *acrtc;
622 
623 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
624 
625 	if (!acrtc)
626 		return;
627 
628 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
629 }
630 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
631 
632 /**
633  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
634  * @adev: amdgpu_device pointer
635  * @notify: dmub notification structure
636  *
637  * Dmub AUX or SET_CONFIG command completion processing callback
 * Copies dmub notification to DM which is to be read by AUX command
 * issuing thread and also signals the event to wake up the thread.
640  */
641 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
642 					struct dmub_notification *notify)
643 {
644 	if (adev->dm.dmub_notify)
645 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
646 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
647 		complete(&adev->dm.dmub_aux_transfer_done);
648 }
649 
650 /**
651  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
652  * @adev: amdgpu_device pointer
653  * @notify: dmub notification structure
654  *
 * DMUB HPD interrupt processing callback. Gets the display index through the
 * link index and calls the helper to do the processing.
657  */
658 static void dmub_hpd_callback(struct amdgpu_device *adev,
659 			      struct dmub_notification *notify)
660 {
661 	struct amdgpu_dm_connector *aconnector;
662 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
663 	struct drm_connector *connector;
664 	struct drm_connector_list_iter iter;
665 	struct dc_link *link;
666 	uint8_t link_index = 0;
667 	struct drm_device *dev;
668 
669 	if (adev == NULL)
670 		return;
671 
672 	if (notify == NULL) {
673 		DRM_ERROR("DMUB HPD callback notification was NULL");
674 		return;
675 	}
676 
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
679 		return;
680 	}
681 
682 	link_index = notify->link_index;
683 	link = adev->dm.dc->links[link_index];
684 	dev = adev->dm.ddev;
685 
686 	drm_connector_list_iter_begin(dev, &iter);
687 	drm_for_each_connector_iter(connector, &iter) {
688 		aconnector = to_amdgpu_dm_connector(connector);
689 		if (link && aconnector->dc_link == link) {
690 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
691 			hpd_aconnector = aconnector;
692 			break;
693 		}
694 	}
695 	drm_connector_list_iter_end(&iter);
696 
697 	if (hpd_aconnector) {
698 		if (notify->type == DMUB_NOTIFICATION_HPD)
699 			handle_hpd_irq_helper(hpd_aconnector);
700 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
701 			handle_hpd_rx_irq(hpd_aconnector);
702 	}
703 }
704 
705 /**
706  * register_dmub_notify_callback - Sets callback for DMUB notify
707  * @adev: amdgpu_device pointer
708  * @type: Type of dmub notification
709  * @callback: Dmub interrupt callback function
710  * @dmub_int_thread_offload: offload indicator
711  *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator whether the callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if the callback is NULL or
 * the notification type is out of range.
716  */
717 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
718 					  enum dmub_notification_type type,
719 					  dmub_notify_interrupt_callback_t callback,
720 					  bool dmub_int_thread_offload)
721 {
	if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
		return false;

	adev->dm.dmub_callback[type] = callback;
	adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

	return true;
729 }
730 
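/*
 * Deferred work handler for DMUB HPD notifications that were offloaded from
 * the outbox interrupt. Dispatches the stored notification to the registered
 * callback and frees the work item.
 */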
731 static void dm_handle_hpd_work(struct work_struct *work)
732 {
733 	struct dmub_hpd_work *dmub_hpd_wrk;
734 
735 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
736 
737 	if (!dmub_hpd_wrk->dmub_notify) {
738 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
739 		return;
740 	}
741 
742 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
743 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
744 		dmub_hpd_wrk->dmub_notify);
745 	}
746 
747 	kfree(dmub_hpd_wrk->dmub_notify);
748 	kfree(dmub_hpd_wrk);
749 
750 }
751 
752 #define DMUB_TRACE_MAX_READ 64
753 /**
754  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
755  * @interrupt_params: used for determining the Outbox instance
756  *
 * Handles the Outbox interrupt by processing pending DMUB notifications
 * and reading entries from the DMUB trace log.
759  */
760 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
761 {
762 	struct dmub_notification notify;
763 	struct common_irq_params *irq_params = interrupt_params;
764 	struct amdgpu_device *adev = irq_params->adev;
765 	struct amdgpu_display_manager *dm = &adev->dm;
766 	struct dmcub_trace_buf_entry entry = { 0 };
767 	uint32_t count = 0;
768 	struct dmub_hpd_work *dmub_hpd_wrk;
769 	struct dc_link *plink = NULL;
770 
771 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
772 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
773 
774 		do {
775 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
777 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
778 				continue;
779 			}
780 			if (!dm->dmub_callback[notify.type]) {
781 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
782 				continue;
783 			}
784 			if (dm->dmub_thread_offload[notify.type] == true) {
785 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
786 				if (!dmub_hpd_wrk) {
787 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
788 					return;
789 				}
790 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
791 				if (!dmub_hpd_wrk->dmub_notify) {
792 					kfree(dmub_hpd_wrk);
793 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
794 					return;
795 				}
796 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
797 				if (dmub_hpd_wrk->dmub_notify)
798 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
799 				dmub_hpd_wrk->adev = adev;
800 				if (notify.type == DMUB_NOTIFICATION_HPD) {
801 					plink = adev->dm.dc->links[notify.link_index];
802 					if (plink) {
803 						plink->hpd_status =
804 							notify.hpd_status == DP_HPD_PLUG;
805 					}
806 				}
807 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
808 			} else {
809 				dm->dmub_callback[notify.type](adev, &notify);
810 			}
811 		} while (notify.pending_notification);
812 	}
813 
814 
815 	do {
816 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
817 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
818 							entry.param0, entry.param1);
819 
820 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
821 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
822 		} else
823 			break;
824 
825 		count++;
826 
827 	} while (count <= DMUB_TRACE_MAX_READ);
828 
829 	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
831 }
832 #endif /* CONFIG_DRM_AMD_DC_DCN */
833 
834 static int dm_set_clockgating_state(void *handle,
835 		  enum amd_clockgating_state state)
836 {
837 	return 0;
838 }
839 
840 static int dm_set_powergating_state(void *handle,
841 		  enum amd_powergating_state state)
842 {
843 	return 0;
844 }
845 
846 /* Prototypes of private functions */
static int dm_early_init(void *handle);
848 
849 /* Allocate memory for FBC compressed data  */
850 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
851 {
852 	struct drm_device *dev = connector->dev;
853 	struct amdgpu_device *adev = drm_to_adev(dev);
854 	struct dm_compressor_info *compressor = &adev->dm.compressor;
855 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
856 	struct drm_display_mode *mode;
857 	unsigned long max_size = 0;
858 
859 	if (adev->dm.dc->fbc_compressor == NULL)
860 		return;
861 
862 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
863 		return;
864 
865 	if (compressor->bo_ptr)
866 		return;
867 
868 
869 	list_for_each_entry(mode, &connector->modes, head) {
870 		if (max_size < mode->htotal * mode->vtotal)
871 			max_size = mode->htotal * mode->vtotal;
872 	}
873 
	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
889 
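/*
 * Audio component callback: return the ELD (EDID-Like Data) of the connector
 * driving the requested audio pin so the HDA driver can expose the display's
 * audio capabilities. Returns the ELD size in bytes, or 0 if no connector is
 * using the pin.
 */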
890 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
891 					  int pipe, bool *enabled,
892 					  unsigned char *buf, int max_bytes)
893 {
894 	struct drm_device *dev = dev_get_drvdata(kdev);
895 	struct amdgpu_device *adev = drm_to_adev(dev);
896 	struct drm_connector *connector;
897 	struct drm_connector_list_iter conn_iter;
898 	struct amdgpu_dm_connector *aconnector;
899 	int ret = 0;
900 
901 	*enabled = false;
902 
903 	mutex_lock(&adev->dm.audio_lock);
904 
905 	drm_connector_list_iter_begin(dev, &conn_iter);
906 	drm_for_each_connector_iter(connector, &conn_iter) {
907 		aconnector = to_amdgpu_dm_connector(connector);
908 		if (aconnector->audio_inst != port)
909 			continue;
910 
911 		*enabled = true;
912 		ret = drm_eld_size(connector->eld);
913 		memcpy(buf, connector->eld, min(max_bytes, ret));
914 
915 		break;
916 	}
917 	drm_connector_list_iter_end(&conn_iter);
918 
919 	mutex_unlock(&adev->dm.audio_lock);
920 
921 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
922 
923 	return ret;
924 }
925 
926 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
927 	.get_eld = amdgpu_dm_audio_component_get_eld,
928 };
929 
930 static int amdgpu_dm_audio_component_bind(struct device *kdev,
931 				       struct device *hda_kdev, void *data)
932 {
933 	struct drm_device *dev = dev_get_drvdata(kdev);
934 	struct amdgpu_device *adev = drm_to_adev(dev);
935 	struct drm_audio_component *acomp = data;
936 
937 	acomp->ops = &amdgpu_dm_audio_component_ops;
938 	acomp->dev = kdev;
939 	adev->dm.audio_component = acomp;
940 
941 	return 0;
942 }
943 
944 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
945 					  struct device *hda_kdev, void *data)
946 {
947 	struct drm_device *dev = dev_get_drvdata(kdev);
948 	struct amdgpu_device *adev = drm_to_adev(dev);
949 	struct drm_audio_component *acomp = data;
950 
951 	acomp->ops = NULL;
952 	acomp->dev = NULL;
953 	adev->dm.audio_component = NULL;
954 }
955 
956 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
957 	.bind	= amdgpu_dm_audio_component_bind,
958 	.unbind	= amdgpu_dm_audio_component_unbind,
959 };
960 
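/*
 * Register DM as a DRM audio component provider so the HDA driver can query
 * ELDs, and initialize the bookkeeping for every audio pin in the resource
 * pool. No-op when audio is disabled via the amdgpu_audio module parameter.
 */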
961 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
962 {
963 	int i, ret;
964 
965 	if (!amdgpu_audio)
966 		return 0;
967 
968 	adev->mode_info.audio.enabled = true;
969 
970 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
971 
972 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
973 		adev->mode_info.audio.pin[i].channels = -1;
974 		adev->mode_info.audio.pin[i].rate = -1;
975 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
976 		adev->mode_info.audio.pin[i].status_bits = 0;
977 		adev->mode_info.audio.pin[i].category_code = 0;
978 		adev->mode_info.audio.pin[i].connected = false;
979 		adev->mode_info.audio.pin[i].id =
980 			adev->dm.dc->res_pool->audios[i]->inst;
981 		adev->mode_info.audio.pin[i].offset = 0;
982 	}
983 
984 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
985 	if (ret < 0)
986 		return ret;
987 
988 	adev->dm.audio_registered = true;
989 
990 	return 0;
991 }
992 
993 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
994 {
995 	if (!amdgpu_audio)
996 		return;
997 
998 	if (!adev->mode_info.audio.enabled)
999 		return;
1000 
1001 	if (adev->dm.audio_registered) {
1002 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1003 		adev->dm.audio_registered = false;
1004 	}
1005 
1006 	/* TODO: Disable audio? */
1007 
1008 	adev->mode_info.audio.enabled = false;
1009 }
1010 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1012 {
1013 	struct drm_audio_component *acomp = adev->dm.audio_component;
1014 
1015 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1016 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1017 
1018 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1019 						 pin, -1);
1020 	}
1021 }
1022 
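/*
 * Bring up the DMUB hardware: copy the firmware instruction/constant data
 * (for backdoor loads), the BSS/data section and the VBIOS into the reserved
 * framebuffer windows, clear the mailbox, trace buffer and firmware state
 * regions, program the hardware parameters and wait for the firmware to
 * auto-load. Also initializes DMCU/ABM when present and creates the DC-side
 * DMUB service. Returns 0 on success or when DMUB is not supported.
 */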
1023 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1024 {
1025 	const struct dmcub_firmware_header_v1_0 *hdr;
1026 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1027 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1028 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1029 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1030 	struct abm *abm = adev->dm.dc->res_pool->abm;
1031 	struct dmub_srv_hw_params hw_params;
1032 	enum dmub_status status;
1033 	const unsigned char *fw_inst_const, *fw_bss_data;
1034 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1035 	bool has_hw_support;
1036 
1037 	if (!dmub_srv)
1038 		/* DMUB isn't supported on the ASIC. */
1039 		return 0;
1040 
1041 	if (!fb_info) {
1042 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1043 		return -EINVAL;
1044 	}
1045 
1046 	if (!dmub_fw) {
1047 		/* Firmware required for DMUB support. */
1048 		DRM_ERROR("No firmware provided for DMUB.\n");
1049 		return -EINVAL;
1050 	}
1051 
1052 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1053 	if (status != DMUB_STATUS_OK) {
1054 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1055 		return -EINVAL;
1056 	}
1057 
1058 	if (!has_hw_support) {
1059 		DRM_INFO("DMUB unsupported on ASIC\n");
1060 		return 0;
1061 	}
1062 
1063 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1064 	status = dmub_srv_hw_reset(dmub_srv);
1065 	if (status != DMUB_STATUS_OK)
1066 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1067 
1068 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1069 
1070 	fw_inst_const = dmub_fw->data +
1071 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1072 			PSP_HEADER_BYTES;
1073 
1074 	fw_bss_data = dmub_fw->data +
1075 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1076 		      le32_to_cpu(hdr->inst_const_bytes);
1077 
1078 	/* Copy firmware and bios info into FB memory. */
1079 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1080 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1081 
1082 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1083 
1084 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1085 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1086 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1087 	 * will be done by dm_dmub_hw_init
1088 	 */
1089 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1090 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1091 				fw_inst_const_size);
1092 	}
1093 
1094 	if (fw_bss_data_size)
1095 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1096 		       fw_bss_data, fw_bss_data_size);
1097 
1098 	/* Copy firmware bios info into FB memory. */
1099 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1100 	       adev->bios_size);
1101 
1102 	/* Reset regions that need to be reset. */
1103 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1104 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1105 
1106 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1107 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1108 
1109 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1110 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1111 
1112 	/* Initialize hardware. */
1113 	memset(&hw_params, 0, sizeof(hw_params));
1114 	hw_params.fb_base = adev->gmc.fb_start;
1115 	hw_params.fb_offset = adev->gmc.aper_base;
1116 
1117 	/* backdoor load firmware and trigger dmub running */
1118 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1119 		hw_params.load_inst_const = true;
1120 
1121 	if (dmcu)
1122 		hw_params.psp_version = dmcu->psp_version;
1123 
1124 	for (i = 0; i < fb_info->num_fb; ++i)
1125 		hw_params.fb[i] = &fb_info->fb[i];
1126 
1127 	switch (adev->ip_versions[DCE_HWIP][0]) {
1128 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1129 		hw_params.dpia_supported = true;
1130 #if defined(CONFIG_DRM_AMD_DC_DCN)
1131 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1132 #endif
1133 		break;
1134 	default:
1135 		break;
1136 	}
1137 
1138 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1139 	if (status != DMUB_STATUS_OK) {
1140 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1141 		return -EINVAL;
1142 	}
1143 
1144 	/* Wait for firmware load to finish. */
1145 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1146 	if (status != DMUB_STATUS_OK)
1147 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1148 
1149 	/* Init DMCU and ABM if available. */
1150 	if (dmcu && abm) {
1151 		dmcu->funcs->dmcu_init(dmcu);
1152 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1153 	}
1154 
1155 	if (!adev->dm.dc->ctx->dmub_srv)
1156 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1157 	if (!adev->dm.dc->ctx->dmub_srv) {
1158 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1159 		return -ENOMEM;
1160 	}
1161 
1162 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1163 		 adev->dm.dmcub_fw_version);
1164 
1165 	return 0;
1166 }
1167 
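/*
 * Resume-time counterpart of dm_dmub_hw_init(): if the DMUB hardware is
 * still initialized, just wait for the firmware to finish auto-loading,
 * otherwise perform the full hardware initialization again.
 */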
1168 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1169 {
1170 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1171 	enum dmub_status status;
1172 	bool init;
1173 
1174 	if (!dmub_srv) {
1175 		/* DMUB isn't supported on the ASIC. */
1176 		return;
1177 	}
1178 
1179 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1180 	if (status != DMUB_STATUS_OK)
1181 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1182 
1183 	if (status == DMUB_STATUS_OK && init) {
1184 		/* Wait for firmware load to finish. */
1185 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1186 		if (status != DMUB_STATUS_OK)
1187 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1188 	} else {
1189 		/* Perform the full hardware initialization. */
1190 		dm_dmub_hw_init(adev);
1191 	}
1192 }
1193 
1194 #if defined(CONFIG_DRM_AMD_DC_DCN)
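/*
 * Fill a dc_phy_addr_space_config from the GMC/MMHUB view of memory: the
 * system aperture covering VRAM and AGP, the framebuffer base/offset, and
 * the GART page table location, so DC can program HUBP address translation.
 */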
1195 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1196 {
1197 	uint64_t pt_base;
1198 	uint32_t logical_addr_low;
1199 	uint32_t logical_addr_high;
1200 	uint32_t agp_base, agp_bot, agp_top;
1201 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1202 
1203 	memset(pa_config, 0, sizeof(*pa_config));
1204 
1205 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1206 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1207 
1208 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (by 1) to get rid of the VM fault and hardware hang.
		 */
1215 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1216 	else
1217 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1218 
1219 	agp_base = 0;
1220 	agp_bot = adev->gmc.agp_start >> 24;
1221 	agp_top = adev->gmc.agp_end >> 24;
1222 
1223 
1224 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1225 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1226 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1227 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1228 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1229 	page_table_base.low_part = lower_32_bits(pt_base);
1230 
1231 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1232 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1233 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1235 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1236 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1237 
1238 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1239 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1240 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1241 
1242 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1243 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1244 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1245 
1246 	pa_config->is_hvm_enabled = 0;
1247 
1248 }
1249 #endif
1250 #if defined(CONFIG_DRM_AMD_DC_DCN)
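/*
 * Deferred vblank work: keeps a count of CRTCs with vblank interrupts
 * enabled, allows DC idle optimizations (MALL) only when that count drops
 * to zero, and disables PSR while vblank is enabled for a stream (or
 * re-enables it once vblank interrupts are no longer required).
 */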
1251 static void vblank_control_worker(struct work_struct *work)
1252 {
1253 	struct vblank_control_work *vblank_work =
1254 		container_of(work, struct vblank_control_work, work);
1255 	struct amdgpu_display_manager *dm = vblank_work->dm;
1256 
1257 	mutex_lock(&dm->dc_lock);
1258 
1259 	if (vblank_work->enable)
1260 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1262 		dm->active_vblank_irq_count--;
1263 
1264 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1265 
1266 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1267 
1268 	/* Control PSR based on vblank requirements from OS */
1269 	if (vblank_work->stream && vblank_work->stream->link) {
1270 		if (vblank_work->enable) {
1271 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1272 				amdgpu_dm_psr_disable(vblank_work->stream);
1273 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1274 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1275 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1276 			amdgpu_dm_psr_enable(vblank_work->stream);
1277 		}
1278 	}
1279 
1280 	mutex_unlock(&dm->dc_lock);
1281 
1282 	dc_stream_release(vblank_work->stream);
1283 
1284 	kfree(vblank_work);
1285 }
1286 
1287 #endif
1288 
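/*
 * Offloaded HPD_RX interrupt handler. Re-detects the sink and, while holding
 * the DC lock, services automated DP test requests or handles link loss for
 * non-eDP links before clearing the work queue's link-loss flag.
 */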
1289 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1290 {
1291 	struct hpd_rx_irq_offload_work *offload_work;
1292 	struct amdgpu_dm_connector *aconnector;
1293 	struct dc_link *dc_link;
1294 	struct amdgpu_device *adev;
1295 	enum dc_connection_type new_connection_type = dc_connection_none;
1296 	unsigned long flags;
1297 
1298 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1299 	aconnector = offload_work->offload_wq->aconnector;
1300 
1301 	if (!aconnector) {
1302 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1303 		goto skip;
1304 	}
1305 
1306 	adev = drm_to_adev(aconnector->base.dev);
1307 	dc_link = aconnector->dc_link;
1308 
1309 	mutex_lock(&aconnector->hpd_lock);
1310 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1311 		DRM_ERROR("KMS: Failed to detect connector\n");
1312 	mutex_unlock(&aconnector->hpd_lock);
1313 
1314 	if (new_connection_type == dc_connection_none)
1315 		goto skip;
1316 
1317 	if (amdgpu_in_reset(adev))
1318 		goto skip;
1319 
1320 	mutex_lock(&adev->dm.dc_lock);
1321 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1322 		dc_link_dp_handle_automated_test(dc_link);
1323 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1324 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1325 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1326 		dc_link_dp_handle_link_loss(dc_link);
1327 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1328 		offload_work->offload_wq->is_handling_link_loss = false;
1329 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1330 	}
1331 	mutex_unlock(&adev->dm.dc_lock);
1332 
1333 skip:
1334 	kfree(offload_work);
1335 
1336 }
1337 
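/*
 * Allocate one single-threaded HPD_RX offload workqueue per link so that
 * long-running interrupt work (link loss, automated test handling) can be
 * processed outside of the HPD_RX interrupt handler itself.
 */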
static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	/* Free the workqueues created before the failure, then the array. */
	while (--i >= 0)
		destroy_workqueue(hpd_rx_offload_wq[i].wq);
	kfree(hpd_rx_offload_wq);
	return NULL;
}
1364 
1365 struct amdgpu_stutter_quirk {
1366 	u16 chip_vendor;
1367 	u16 chip_device;
1368 	u16 subsys_vendor;
1369 	u16 subsys_device;
1370 	u8 revision;
1371 };
1372 
1373 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1374 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1375 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1376 	{ 0, 0, 0, 0, 0 },
1377 };
1378 
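/*
 * Returns true if the PCI device matches an entry in the stutter quirk
 * table above, in which case memory stutter mode is disabled as a
 * workaround.
 */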
1379 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1380 {
1381 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1382 
1383 	while (p && p->chip_device != 0) {
1384 		if (pdev->vendor == p->chip_vendor &&
1385 		    pdev->device == p->chip_device &&
1386 		    pdev->subsystem_vendor == p->subsys_vendor &&
1387 		    pdev->subsystem_device == p->subsys_device &&
1388 		    pdev->revision == p->revision) {
1389 			return true;
1390 		}
1391 		++p;
1392 	}
1393 	return false;
1394 }
1395 
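/*
 * Core DM initialization: set up locks and IRQ handling, create the DC
 * instance with the ASIC/feature flags derived from module parameters,
 * bring up DMUB, register DMUB notification callbacks when supported, and
 * finally create the DRM display structures (CRTCs, encoders, connectors).
 */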
1396 static int amdgpu_dm_init(struct amdgpu_device *adev)
1397 {
1398 	struct dc_init_data init_data;
1399 #ifdef CONFIG_DRM_AMD_DC_HDCP
1400 	struct dc_callback_init init_params;
1401 #endif
1402 	int r;
1403 
1404 	adev->dm.ddev = adev_to_drm(adev);
1405 	adev->dm.adev = adev;
1406 
1407 	/* Zero all the fields */
1408 	memset(&init_data, 0, sizeof(init_data));
1409 #ifdef CONFIG_DRM_AMD_DC_HDCP
1410 	memset(&init_params, 0, sizeof(init_params));
1411 #endif
1412 
1413 	mutex_init(&adev->dm.dc_lock);
1414 	mutex_init(&adev->dm.audio_lock);
1415 #if defined(CONFIG_DRM_AMD_DC_DCN)
1416 	spin_lock_init(&adev->dm.vblank_lock);
1417 #endif
1418 
	if (amdgpu_dm_irq_init(adev)) {
1420 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1421 		goto error;
1422 	}
1423 
1424 	init_data.asic_id.chip_family = adev->family;
1425 
1426 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1427 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1428 	init_data.asic_id.chip_id = adev->pdev->device;
1429 
1430 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1431 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1432 	init_data.asic_id.atombios_base_address =
1433 		adev->mode_info.atom_context->bios;
1434 
1435 	init_data.driver = adev;
1436 
1437 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1438 
1439 	if (!adev->dm.cgs_device) {
1440 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1441 		goto error;
1442 	}
1443 
1444 	init_data.cgs_device = adev->dm.cgs_device;
1445 
1446 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1447 
1448 	switch (adev->ip_versions[DCE_HWIP][0]) {
1449 	case IP_VERSION(2, 1, 0):
1450 		switch (adev->dm.dmcub_fw_version) {
1451 		case 0: /* development */
1452 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1453 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1454 			init_data.flags.disable_dmcu = false;
1455 			break;
1456 		default:
1457 			init_data.flags.disable_dmcu = true;
1458 		}
1459 		break;
1460 	case IP_VERSION(2, 0, 3):
1461 		init_data.flags.disable_dmcu = true;
1462 		break;
1463 	default:
1464 		break;
1465 	}
1466 
1467 	switch (adev->asic_type) {
1468 	case CHIP_CARRIZO:
1469 	case CHIP_STONEY:
1470 		init_data.flags.gpu_vm_support = true;
1471 		break;
1472 	default:
1473 		switch (adev->ip_versions[DCE_HWIP][0]) {
1474 		case IP_VERSION(1, 0, 0):
1475 		case IP_VERSION(1, 0, 1):
1476 			/* enable S/G on PCO and RV2 */
1477 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1478 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1479 				init_data.flags.gpu_vm_support = true;
1480 			break;
1481 		case IP_VERSION(2, 1, 0):
1482 		case IP_VERSION(3, 0, 1):
1483 		case IP_VERSION(3, 1, 2):
1484 		case IP_VERSION(3, 1, 3):
1485 		case IP_VERSION(3, 1, 5):
1486 		case IP_VERSION(3, 1, 6):
1487 			init_data.flags.gpu_vm_support = true;
1488 			break;
1489 		default:
1490 			break;
1491 		}
1492 		break;
1493 	}
1494 
1495 	if (init_data.flags.gpu_vm_support)
1496 		adev->mode_info.gpu_vm_support = true;
1497 
1498 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1499 		init_data.flags.fbc_support = true;
1500 
1501 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1502 		init_data.flags.multi_mon_pp_mclk_switch = true;
1503 
1504 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1505 		init_data.flags.disable_fractional_pwm = true;
1506 
1507 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1508 		init_data.flags.edp_no_power_sequencing = true;
1509 
1510 #ifdef CONFIG_DRM_AMD_DC_DCN
1511 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1512 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1513 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1514 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1515 #endif
1516 
1517 	init_data.flags.seamless_boot_edp_requested = false;
1518 
1519 	if (check_seamless_boot_capability(adev)) {
1520 		init_data.flags.seamless_boot_edp_requested = true;
1521 		init_data.flags.allow_seamless_boot_optimization = true;
1522 		DRM_INFO("Seamless boot condition check passed\n");
1523 	}
1524 
1525 	INIT_LIST_HEAD(&adev->dm.da_list);
1526 	/* Display Core create. */
1527 	adev->dm.dc = dc_create(&init_data);
1528 
1529 	if (adev->dm.dc) {
1530 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1531 	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1533 		goto error;
1534 	}
1535 
1536 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1537 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1538 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1539 	}
1540 
1541 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1543 	if (dm_should_disable_stutter(adev->pdev))
1544 		adev->dm.dc->debug.disable_stutter = true;
1545 
1546 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1547 		adev->dm.dc->debug.disable_stutter = true;
1548 
1549 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1550 		adev->dm.dc->debug.disable_dsc = true;
1551 		adev->dm.dc->debug.disable_dsc_edp = true;
1552 	}
1553 
1554 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1555 		adev->dm.dc->debug.disable_clock_gate = true;
1556 
1557 	r = dm_dmub_hw_init(adev);
1558 	if (r) {
1559 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1560 		goto error;
1561 	}
1562 
1563 	dc_hardware_init(adev->dm.dc);
1564 
1565 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1566 	if (!adev->dm.hpd_rx_offload_wq) {
1567 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1568 		goto error;
1569 	}
1570 
1571 #if defined(CONFIG_DRM_AMD_DC_DCN)
1572 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1573 		struct dc_phy_addr_space_config pa_config;
1574 
1575 		mmhub_read_system_context(adev, &pa_config);
1576 
1577 		// Call the DC init_memory func
1578 		dc_setup_system_context(adev->dm.dc, &pa_config);
1579 	}
1580 #endif
1581 
1582 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
1589 
1590 	amdgpu_dm_init_color_mod();
1591 
1592 #if defined(CONFIG_DRM_AMD_DC_DCN)
1593 	if (adev->dm.dc->caps.max_links > 0) {
1594 		adev->dm.vblank_control_workqueue =
1595 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1596 		if (!adev->dm.vblank_control_workqueue)
1597 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1598 	}
1599 #endif
1600 
1601 #ifdef CONFIG_DRM_AMD_DC_HDCP
1602 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1603 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1604 
1605 		if (!adev->dm.hdcp_workqueue)
1606 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1607 		else
1608 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1609 
1610 		dc_init_callbacks(adev->dm.dc, &init_params);
1611 	}
1612 #endif
1613 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1614 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1615 #endif
1616 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1617 		init_completion(&adev->dm.dmub_aux_transfer_done);
1618 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1619 		if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1621 			goto error;
1622 		}
1623 
1624 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1625 		if (!adev->dm.delayed_hpd_wq) {
1626 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1627 			goto error;
1628 		}
1629 
1630 		amdgpu_dm_outbox_init(adev);
1631 #if defined(CONFIG_DRM_AMD_DC_DCN)
1632 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1633 			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1635 			goto error;
1636 		}
1637 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1639 			goto error;
1640 		}
1641 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1642 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1643 			goto error;
1644 		}
1645 #endif /* CONFIG_DRM_AMD_DC_DCN */
1646 	}
1647 
1648 	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1651 		goto error;
1652 	}
1653 
1654 	/* create fake encoders for MST */
1655 	dm_dp_create_fake_mst_encoders(adev);
1656 
1657 	/* TODO: Add_display_info? */
1658 
1659 	/* TODO use dynamic cursor width */
1660 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1661 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1662 
	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

1670 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1671 
1672 	return 0;
1673 error:
1674 	amdgpu_dm_fini(adev);
1675 
1676 	return -EINVAL;
1677 }
1678 
1679 static int amdgpu_dm_early_fini(void *handle)
1680 {
1681 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1682 
1683 	amdgpu_dm_audio_fini(adev);
1684 
1685 	return 0;
1686 }
1687 
1688 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1689 {
1690 	int i;
1691 
1692 #if defined(CONFIG_DRM_AMD_DC_DCN)
1693 	if (adev->dm.vblank_control_workqueue) {
1694 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1695 		adev->dm.vblank_control_workqueue = NULL;
1696 	}
1697 #endif
1698 
1699 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1700 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1701 	}
1702 
1703 	amdgpu_dm_destroy_drm_device(&adev->dm);
1704 
1705 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1706 	if (adev->dm.crc_rd_wrk) {
1707 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1708 		kfree(adev->dm.crc_rd_wrk);
1709 		adev->dm.crc_rd_wrk = NULL;
1710 	}
1711 #endif
1712 #ifdef CONFIG_DRM_AMD_DC_HDCP
1713 	if (adev->dm.hdcp_workqueue) {
1714 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1715 		adev->dm.hdcp_workqueue = NULL;
1716 	}
1717 
1718 	if (adev->dm.dc)
1719 		dc_deinit_callbacks(adev->dm.dc);
1720 #endif
1721 
1722 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1723 
1724 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1725 		kfree(adev->dm.dmub_notify);
1726 		adev->dm.dmub_notify = NULL;
1727 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1728 		adev->dm.delayed_hpd_wq = NULL;
1729 	}
1730 
1731 	if (adev->dm.dmub_bo)
1732 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1733 				      &adev->dm.dmub_bo_gpu_addr,
1734 				      &adev->dm.dmub_bo_cpu_addr);
1735 
1736 	if (adev->dm.hpd_rx_offload_wq) {
1737 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1738 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1739 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1740 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1741 			}
1742 		}
1743 
1744 		kfree(adev->dm.hpd_rx_offload_wq);
1745 		adev->dm.hpd_rx_offload_wq = NULL;
1746 	}
1747 
1748 	/* DC Destroy TODO: Replace destroy DAL */
1749 	if (adev->dm.dc)
1750 		dc_destroy(&adev->dm.dc);
1751 	/*
	 * TODO: pageflip, vblank interrupt
1753 	 *
1754 	 * amdgpu_dm_irq_fini(adev);
1755 	 */
1756 
1757 	if (adev->dm.cgs_device) {
1758 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1759 		adev->dm.cgs_device = NULL;
1760 	}
1761 	if (adev->dm.freesync_module) {
1762 		mod_freesync_destroy(adev->dm.freesync_module);
1763 		adev->dm.freesync_module = NULL;
1764 	}
1765 
1766 	mutex_destroy(&adev->dm.audio_lock);
1767 	mutex_destroy(&adev->dm.dc_lock);
1768 
1769 	return;
1770 }
1771 
1772 static int load_dmcu_fw(struct amdgpu_device *adev)
1773 {
1774 	const char *fw_name_dmcu = NULL;
1775 	int r;
1776 	const struct dmcu_firmware_header_v1_0 *hdr;
1777 
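	/*
	 * Only Picasso, Raven2 and Navi12 load a standalone DMCU image here;
	 * the remaining supported ASICs return 0 because they either have no
	 * DMCU or rely on DMUB for that functionality instead.
	 */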
	switch (adev->asic_type) {
1779 #if defined(CONFIG_DRM_AMD_DC_SI)
1780 	case CHIP_TAHITI:
1781 	case CHIP_PITCAIRN:
1782 	case CHIP_VERDE:
1783 	case CHIP_OLAND:
1784 #endif
1785 	case CHIP_BONAIRE:
1786 	case CHIP_HAWAII:
1787 	case CHIP_KAVERI:
1788 	case CHIP_KABINI:
1789 	case CHIP_MULLINS:
1790 	case CHIP_TONGA:
1791 	case CHIP_FIJI:
1792 	case CHIP_CARRIZO:
1793 	case CHIP_STONEY:
1794 	case CHIP_POLARIS11:
1795 	case CHIP_POLARIS10:
1796 	case CHIP_POLARIS12:
1797 	case CHIP_VEGAM:
1798 	case CHIP_VEGA10:
1799 	case CHIP_VEGA12:
1800 	case CHIP_VEGA20:
1801 		return 0;
1802 	case CHIP_NAVI12:
1803 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1804 		break;
1805 	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1810 		else
1811 			return 0;
1812 		break;
1813 	default:
1814 		switch (adev->ip_versions[DCE_HWIP][0]) {
1815 		case IP_VERSION(2, 0, 2):
1816 		case IP_VERSION(2, 0, 3):
1817 		case IP_VERSION(2, 0, 0):
1818 		case IP_VERSION(2, 1, 0):
1819 		case IP_VERSION(3, 0, 0):
1820 		case IP_VERSION(3, 0, 2):
1821 		case IP_VERSION(3, 0, 3):
1822 		case IP_VERSION(3, 0, 1):
1823 		case IP_VERSION(3, 1, 2):
1824 		case IP_VERSION(3, 1, 3):
1825 		case IP_VERSION(3, 1, 5):
1826 		case IP_VERSION(3, 1, 6):
1827 			return 0;
1828 		default:
1829 			break;
1830 		}
1831 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1832 		return -EINVAL;
1833 	}
1834 
1835 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1836 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1837 		return 0;
1838 	}
1839 
1840 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1841 	if (r == -ENOENT) {
1842 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1843 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1844 		adev->dm.fw_dmcu = NULL;
1845 		return 0;
1846 	}
1847 	if (r) {
1848 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1849 			fw_name_dmcu);
1850 		return r;
1851 	}
1852 
1853 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1854 	if (r) {
1855 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1856 			fw_name_dmcu);
1857 		release_firmware(adev->dm.fw_dmcu);
1858 		adev->dm.fw_dmcu = NULL;
1859 		return r;
1860 	}
1861 
1862 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1863 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1864 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1865 	adev->firmware.fw_size +=
1866 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1867 
1868 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1869 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1870 	adev->firmware.fw_size +=
1871 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1872 
1873 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1874 
1875 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1876 
1877 	return 0;
1878 }
1879 
1880 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1881 {
1882 	struct amdgpu_device *adev = ctx;
1883 
1884 	return dm_read_reg(adev->dm.dc->ctx, address);
1885 }
1886 
1887 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1888 				     uint32_t value)
1889 {
1890 	struct amdgpu_device *adev = ctx;
1891 
1892 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1893 }
1894 
1895 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1896 {
1897 	struct dmub_srv_create_params create_params;
1898 	struct dmub_srv_region_params region_params;
1899 	struct dmub_srv_region_info region_info;
1900 	struct dmub_srv_fb_params fb_params;
1901 	struct dmub_srv_fb_info *fb_info;
1902 	struct dmub_srv *dmub_srv;
1903 	const struct dmcub_firmware_header_v1_0 *hdr;
1904 	const char *fw_name_dmub;
1905 	enum dmub_asic dmub_asic;
1906 	enum dmub_status status;
1907 	int r;
1908 
1909 	switch (adev->ip_versions[DCE_HWIP][0]) {
1910 	case IP_VERSION(2, 1, 0):
1911 		dmub_asic = DMUB_ASIC_DCN21;
1912 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1913 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1914 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1915 		break;
1916 	case IP_VERSION(3, 0, 0):
1917 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1918 			dmub_asic = DMUB_ASIC_DCN30;
1919 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1920 		} else {
1921 			dmub_asic = DMUB_ASIC_DCN30;
1922 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1923 		}
1924 		break;
1925 	case IP_VERSION(3, 0, 1):
1926 		dmub_asic = DMUB_ASIC_DCN301;
1927 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1928 		break;
1929 	case IP_VERSION(3, 0, 2):
1930 		dmub_asic = DMUB_ASIC_DCN302;
1931 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1932 		break;
1933 	case IP_VERSION(3, 0, 3):
1934 		dmub_asic = DMUB_ASIC_DCN303;
1935 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1936 		break;
1937 	case IP_VERSION(3, 1, 2):
1938 	case IP_VERSION(3, 1, 3):
1939 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1940 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1941 		break;
1942 	case IP_VERSION(3, 1, 5):
1943 		dmub_asic = DMUB_ASIC_DCN315;
1944 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1945 		break;
1946 	case IP_VERSION(3, 1, 6):
1947 		dmub_asic = DMUB_ASIC_DCN316;
1948 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1949 		break;
1950 	default:
1951 		/* ASIC doesn't support DMUB. */
1952 		return 0;
1953 	}
1954 
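	/*
	 * Fetch and validate the DMUB firmware. Failures here are treated as
	 * non-fatal: returning 0 leaves adev->dm.dmub_srv NULL, which simply
	 * disables DMUB support for this device.
	 */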
1955 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1956 	if (r) {
1957 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1958 		return 0;
1959 	}
1960 
1961 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1962 	if (r) {
1963 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1964 		return 0;
1965 	}
1966 
1967 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1968 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1969 
1970 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1971 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1972 			AMDGPU_UCODE_ID_DMCUB;
1973 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1974 			adev->dm.dmub_fw;
1975 		adev->firmware.fw_size +=
1976 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1977 
1978 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1979 			 adev->dm.dmcub_fw_version);
1980 	}
1981 
1982 
1983 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1984 	dmub_srv = adev->dm.dmub_srv;
1985 
1986 	if (!dmub_srv) {
1987 		DRM_ERROR("Failed to allocate DMUB service!\n");
1988 		return -ENOMEM;
1989 	}
1990 
1991 	memset(&create_params, 0, sizeof(create_params));
1992 	create_params.user_ctx = adev;
1993 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1994 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1995 	create_params.asic = dmub_asic;
1996 
1997 	/* Create the DMUB service. */
1998 	status = dmub_srv_create(dmub_srv, &create_params);
1999 	if (status != DMUB_STATUS_OK) {
2000 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2001 		return -EINVAL;
2002 	}
2003 
2004 	/* Calculate the size of all the regions for the DMUB service. */
2005 	memset(&region_params, 0, sizeof(region_params));
2006 
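	/*
	 * The PSP-signed DMUB image wraps the instruction/constant section in
	 * a PSP header and footer, which is presumably why both are subtracted
	 * from inst_const_size and fw_inst_const is advanced past the header
	 * below.
	 */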
2007 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2008 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2009 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2010 	region_params.vbios_size = adev->bios_size;
2011 	region_params.fw_bss_data = region_params.bss_data_size ?
2012 		adev->dm.dmub_fw->data +
2013 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2014 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2015 	region_params.fw_inst_const =
2016 		adev->dm.dmub_fw->data +
2017 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2018 		PSP_HEADER_BYTES;
2019 
2020 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2021 					   &region_info);
2022 
2023 	if (status != DMUB_STATUS_OK) {
2024 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2025 		return -EINVAL;
2026 	}
2027 
2028 	/*
2029 	 * Allocate a framebuffer based on the total size of all the regions.
2030 	 * TODO: Move this into GART.
2031 	 */
2032 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2033 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2034 				    &adev->dm.dmub_bo_gpu_addr,
2035 				    &adev->dm.dmub_bo_cpu_addr);
2036 	if (r)
2037 		return r;
2038 
2039 	/* Rebase the regions on the framebuffer address. */
2040 	memset(&fb_params, 0, sizeof(fb_params));
2041 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2042 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2043 	fb_params.region_info = &region_info;
2044 
2045 	adev->dm.dmub_fb_info =
2046 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2047 	fb_info = adev->dm.dmub_fb_info;
2048 
2049 	if (!fb_info) {
2050 		DRM_ERROR(
2051 			"Failed to allocate framebuffer info for DMUB service!\n");
2052 		return -ENOMEM;
2053 	}
2054 
2055 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2056 	if (status != DMUB_STATUS_OK) {
2057 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2058 		return -EINVAL;
2059 	}
2060 
2061 	return 0;
2062 }
2063 
2064 static int dm_sw_init(void *handle)
2065 {
2066 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2067 	int r;
2068 
2069 	r = dm_dmub_sw_init(adev);
2070 	if (r)
2071 		return r;
2072 
2073 	return load_dmcu_fw(adev);
2074 }
2075 
2076 static int dm_sw_fini(void *handle)
2077 {
2078 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2079 
2080 	kfree(adev->dm.dmub_fb_info);
2081 	adev->dm.dmub_fb_info = NULL;
2082 
2083 	if (adev->dm.dmub_srv) {
2084 		dmub_srv_destroy(adev->dm.dmub_srv);
2085 		adev->dm.dmub_srv = NULL;
2086 	}
2087 
2088 	release_firmware(adev->dm.dmub_fw);
2089 	adev->dm.dmub_fw = NULL;
2090 
2091 	release_firmware(adev->dm.fw_dmcu);
2092 	adev->dm.fw_dmcu = NULL;
2093 
2094 	return 0;
2095 }
2096 
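/*
 * Walk every connector and start MST topology management on those whose DC
 * link detected an MST branch device and whose MST manager has an AUX channel.
 */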
2097 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2098 {
2099 	struct amdgpu_dm_connector *aconnector;
2100 	struct drm_connector *connector;
2101 	struct drm_connector_list_iter iter;
2102 	int ret = 0;
2103 
2104 	drm_connector_list_iter_begin(dev, &iter);
2105 	drm_for_each_connector_iter(connector, &iter) {
2106 		aconnector = to_amdgpu_dm_connector(connector);
2107 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2108 		    aconnector->mst_mgr.aux) {
2109 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2110 					 aconnector,
2111 					 aconnector->base.base.id);
2112 
2113 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2114 			if (ret < 0) {
2115 				DRM_ERROR("DM_MST: Failed to start MST\n");
2116 				aconnector->dc_link->type =
2117 					dc_connection_single;
2118 				break;
2119 			}
2120 		}
2121 	}
2122 	drm_connector_list_iter_end(&iter);
2123 
2124 	return ret;
2125 }
2126 
2127 static int dm_late_init(void *handle)
2128 {
2129 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2130 
2131 	struct dmcu_iram_parameters params;
2132 	unsigned int linear_lut[16];
2133 	int i;
2134 	struct dmcu *dmcu = NULL;
2135 
2136 	dmcu = adev->dm.dc->res_pool->dmcu;
2137 
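	/* Program a default 16-entry linear backlight LUT spanning 0..0xFFFF. */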
2138 	for (i = 0; i < 16; i++)
2139 		linear_lut[i] = 0xFFFF * i / 15;
2140 
2141 	params.set = 0;
2142 	params.backlight_ramping_override = false;
2143 	params.backlight_ramping_start = 0xCCCC;
2144 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2145 	params.backlight_lut_array_size = 16;
2146 	params.backlight_lut_array = linear_lut;
2147 
	/* Min backlight level after ABM reduction; don't allow below 1%.
	 * 0xFFFF x 0.01 = 0x28F
	 */
2151 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on dmcub,
	 * the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
2156 	if (dmcu) {
2157 		if (!dmcu_load_iram(dmcu, params))
2158 			return -EINVAL;
2159 	} else if (adev->dm.dc->ctx->dmub_srv) {
2160 		struct dc_link *edp_links[MAX_NUM_EDP];
2161 		int edp_num;
2162 
2163 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2164 		for (i = 0; i < edp_num; i++) {
2165 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2166 				return -EINVAL;
2167 		}
2168 	}
2169 
2170 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2171 }
2172 
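/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a topology fails to resume, MST is torn down on that connector and a
 * hotplug event is generated so userspace can re-probe.
 */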
2173 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2174 {
2175 	struct amdgpu_dm_connector *aconnector;
2176 	struct drm_connector *connector;
2177 	struct drm_connector_list_iter iter;
2178 	struct drm_dp_mst_topology_mgr *mgr;
2179 	int ret;
2180 	bool need_hotplug = false;
2181 
2182 	drm_connector_list_iter_begin(dev, &iter);
2183 	drm_for_each_connector_iter(connector, &iter) {
2184 		aconnector = to_amdgpu_dm_connector(connector);
2185 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2186 		    aconnector->mst_port)
2187 			continue;
2188 
2189 		mgr = &aconnector->mst_mgr;
2190 
2191 		if (suspend) {
2192 			drm_dp_mst_topology_mgr_suspend(mgr);
2193 		} else {
2194 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2195 			if (ret < 0) {
2196 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2197 				need_hotplug = true;
2198 			}
2199 		}
2200 	}
2201 	drm_connector_list_iter_end(&iter);
2202 
2203 	if (need_hotplug)
2204 		drm_kms_helper_hotplug_event(dev);
2205 }
2206 
2207 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2208 {
2209 	int ret = 0;
2210 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver's dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed, and
	 * they should be passed to smu during boot up and on resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
2241 	switch (adev->ip_versions[DCE_HWIP][0]) {
2242 	case IP_VERSION(2, 0, 2):
2243 	case IP_VERSION(2, 0, 0):
2244 		break;
2245 	default:
2246 		return 0;
2247 	}
2248 
2249 	ret = amdgpu_dpm_write_watermarks_table(adev);
2250 	if (ret) {
2251 		DRM_ERROR("Failed to update WMTABLE!\n");
2252 		return ret;
2253 	}
2254 
2255 	return 0;
2256 }
2257 
2258 /**
2259  * dm_hw_init() - Initialize DC device
2260  * @handle: The base driver device containing the amdgpu_dm device.
2261  *
2262  * Initialize the &struct amdgpu_display_manager device. This involves calling
2263  * the initializers of each DM component, then populating the struct with them.
2264  *
2265  * Although the function implies hardware initialization, both hardware and
2266  * software are initialized here. Splitting them out to their relevant init
2267  * hooks is a future TODO item.
2268  *
2269  * Some notable things that are initialized here:
2270  *
2271  * - Display Core, both software and hardware
2272  * - DC modules that we need (freesync and color management)
2273  * - DRM software states
2274  * - Interrupt sources and handlers
2275  * - Vblank support
2276  * - Debug FS entries, if enabled
2277  */
2278 static int dm_hw_init(void *handle)
2279 {
2280 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2281 	/* Create DAL display manager */
2282 	amdgpu_dm_init(adev);
2283 	amdgpu_dm_hpd_init(adev);
2284 
2285 	return 0;
2286 }
2287 
2288 /**
2289  * dm_hw_fini() - Teardown DC device
2290  * @handle: The base driver device containing the amdgpu_dm device.
2291  *
2292  * Teardown components within &struct amdgpu_display_manager that require
2293  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2294  * were loaded. Also flush IRQ workqueues and disable them.
2295  */
2296 static int dm_hw_fini(void *handle)
2297 {
2298 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2299 
2300 	amdgpu_dm_hpd_fini(adev);
2301 
2302 	amdgpu_dm_irq_fini(adev);
2303 	amdgpu_dm_fini(adev);
2304 	return 0;
2305 }
2306 
2307 
2308 static int dm_enable_vblank(struct drm_crtc *crtc);
2309 static void dm_disable_vblank(struct drm_crtc *crtc);
2310 
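/*
 * Used around GPU reset: walks the streams of the given (cached) DC state and
 * enables or disables the pageflip and vblank interrupts of each CRTC that
 * still has planes attached.
 */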
2311 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2312 				 struct dc_state *state, bool enable)
2313 {
2314 	enum dc_irq_source irq_source;
2315 	struct amdgpu_crtc *acrtc;
2316 	int rc = -EBUSY;
2317 	int i = 0;
2318 
2319 	for (i = 0; i < state->stream_count; i++) {
2320 		acrtc = get_crtc_by_otg_inst(
2321 				adev, state->stream_status[i].primary_otg_inst);
2322 
2323 		if (acrtc && state->stream_status[i].plane_count != 0) {
2324 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2325 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2326 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2327 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2328 			if (rc)
2329 				DRM_WARN("Failed to %s pflip interrupts\n",
2330 					 enable ? "enable" : "disable");
2331 
2332 			if (enable) {
2333 				rc = dm_enable_vblank(&acrtc->base);
2334 				if (rc)
2335 					DRM_WARN("Failed to enable vblank interrupts\n");
2336 			} else {
2337 				dm_disable_vblank(&acrtc->base);
2338 			}
2339 
2340 		}
2341 	}
2342 
2343 }
2344 
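/*
 * Quiesce the display hardware by copying the current DC state, stripping all
 * planes and streams from the copy, and committing the resulting empty state.
 */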
2345 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2346 {
2347 	struct dc_state *context = NULL;
2348 	enum dc_status res = DC_ERROR_UNEXPECTED;
2349 	int i;
2350 	struct dc_stream_state *del_streams[MAX_PIPES];
2351 	int del_streams_count = 0;
2352 
2353 	memset(del_streams, 0, sizeof(del_streams));
2354 
2355 	context = dc_create_state(dc);
2356 	if (context == NULL)
2357 		goto context_alloc_fail;
2358 
2359 	dc_resource_state_copy_construct_current(dc, context);
2360 
2361 	/* First remove from context all streams */
2362 	for (i = 0; i < context->stream_count; i++) {
2363 		struct dc_stream_state *stream = context->streams[i];
2364 
2365 		del_streams[del_streams_count++] = stream;
2366 	}
2367 
2368 	/* Remove all planes for removed streams and then remove the streams */
2369 	for (i = 0; i < del_streams_count; i++) {
2370 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2371 			res = DC_FAIL_DETACH_SURFACES;
2372 			goto fail;
2373 		}
2374 
2375 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2376 		if (res != DC_OK)
2377 			goto fail;
2378 	}
2379 
2380 	res = dc_commit_state(dc, context);
2381 
2382 fail:
2383 	dc_release_state(context);
2384 
2385 context_alloc_fail:
2386 	return res;
2387 }
2388 
2389 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2390 {
2391 	int i;
2392 
2393 	if (dm->hpd_rx_offload_wq) {
2394 		for (i = 0; i < dm->dc->caps.max_links; i++)
2395 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2396 	}
2397 }
2398 
2399 static int dm_suspend(void *handle)
2400 {
2401 	struct amdgpu_device *adev = handle;
2402 	struct amdgpu_display_manager *dm = &adev->dm;
2403 	int ret = 0;
2404 
2405 	if (amdgpu_in_reset(adev)) {
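		/*
		 * Note: dc_lock is taken below and not released in this
		 * function; the reset branch of dm_resume() drops it after the
		 * cached DC state has been re-committed.
		 */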
2406 		mutex_lock(&dm->dc_lock);
2407 
2408 #if defined(CONFIG_DRM_AMD_DC_DCN)
2409 		dc_allow_idle_optimizations(adev->dm.dc, false);
2410 #endif
2411 
2412 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2413 
2414 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2415 
2416 		amdgpu_dm_commit_zero_streams(dm->dc);
2417 
2418 		amdgpu_dm_irq_suspend(adev);
2419 
2420 		hpd_rx_irq_work_suspend(dm);
2421 
2422 		return ret;
2423 	}
2424 
2425 	WARN_ON(adev->dm.cached_state);
2426 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2427 
2428 	s3_handle_mst(adev_to_drm(adev), true);
2429 
2430 	amdgpu_dm_irq_suspend(adev);
2431 
2432 	hpd_rx_irq_work_suspend(dm);
2433 
2434 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2435 
2436 	return 0;
2437 }
2438 
2439 struct amdgpu_dm_connector *
2440 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2441 					     struct drm_crtc *crtc)
2442 {
2443 	uint32_t i;
2444 	struct drm_connector_state *new_con_state;
2445 	struct drm_connector *connector;
2446 	struct drm_crtc *crtc_from_state;
2447 
2448 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2449 		crtc_from_state = new_con_state->crtc;
2450 
2451 		if (crtc_from_state == crtc)
2452 			return to_amdgpu_dm_connector(connector);
2453 	}
2454 
2455 	return NULL;
2456 }
2457 
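/*
 * Emulate a successful detection for forced connectors: drop any previous
 * sink, derive the DDC transaction type and signal from the connector type,
 * create a fresh sink and attempt to read a local EDID for it.
 */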
2458 static void emulated_link_detect(struct dc_link *link)
2459 {
2460 	struct dc_sink_init_data sink_init_data = { 0 };
2461 	struct display_sink_capability sink_caps = { 0 };
2462 	enum dc_edid_status edid_status;
2463 	struct dc_context *dc_ctx = link->ctx;
2464 	struct dc_sink *sink = NULL;
2465 	struct dc_sink *prev_sink = NULL;
2466 
2467 	link->type = dc_connection_none;
2468 	prev_sink = link->local_sink;
2469 
2470 	if (prev_sink)
2471 		dc_sink_release(prev_sink);
2472 
2473 	switch (link->connector_signal) {
2474 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2475 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2476 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2477 		break;
2478 	}
2479 
2480 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2481 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2482 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2483 		break;
2484 	}
2485 
2486 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2487 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2488 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2489 		break;
2490 	}
2491 
2492 	case SIGNAL_TYPE_LVDS: {
2493 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2494 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2495 		break;
2496 	}
2497 
2498 	case SIGNAL_TYPE_EDP: {
2499 		sink_caps.transaction_type =
2500 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2501 		sink_caps.signal = SIGNAL_TYPE_EDP;
2502 		break;
2503 	}
2504 
2505 	case SIGNAL_TYPE_DISPLAY_PORT: {
2506 		sink_caps.transaction_type =
2507 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2508 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2509 		break;
2510 	}
2511 
2512 	default:
2513 		DC_ERROR("Invalid connector type! signal:%d\n",
2514 			link->connector_signal);
2515 		return;
2516 	}
2517 
2518 	sink_init_data.link = link;
2519 	sink_init_data.sink_signal = sink_caps.signal;
2520 
2521 	sink = dc_sink_create(&sink_init_data);
2522 	if (!sink) {
2523 		DC_ERROR("Failed to create sink!\n");
2524 		return;
2525 	}
2526 
2527 	/* dc_sink_create returns a new reference */
2528 	link->local_sink = sink;
2529 
2530 	edid_status = dm_helpers_read_local_edid(
2531 			link->ctx,
2532 			link,
2533 			sink);
2534 
2535 	if (edid_status != EDID_OK)
2536 		DC_ERROR("Failed to read EDID");
2537 
2538 }
2539 
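/*
 * After GPU reset, re-commit every stream from the cached DC state with
 * force_full_update set on its planes so DC reprograms the full surface state.
 */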
2540 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2541 				     struct amdgpu_display_manager *dm)
2542 {
2543 	struct {
2544 		struct dc_surface_update surface_updates[MAX_SURFACES];
2545 		struct dc_plane_info plane_infos[MAX_SURFACES];
2546 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2547 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2548 		struct dc_stream_update stream_update;
	} *bundle;
2550 	int k, m;
2551 
2552 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2553 
2554 	if (!bundle) {
2555 		dm_error("Failed to allocate update bundle\n");
2556 		goto cleanup;
2557 	}
2558 
2559 	for (k = 0; k < dc_state->stream_count; k++) {
2560 		bundle->stream_update.stream = dc_state->streams[k];
2561 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
2565 			bundle->surface_updates[m].surface->force_full_update =
2566 				true;
2567 		}
2568 		dc_commit_updates_for_stream(
2569 			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
2571 			dc_state->streams[k], &bundle->stream_update, dc_state);
2572 	}
2573 
2574 cleanup:
2575 	kfree(bundle);
2576 
2577 	return;
2578 }
2579 
2580 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2581 {
2582 	struct dc_stream_state *stream_state;
2583 	struct amdgpu_dm_connector *aconnector = link->priv;
2584 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2585 	struct dc_stream_update stream_update;
2586 	bool dpms_off = true;
2587 
2588 	memset(&stream_update, 0, sizeof(stream_update));
2589 	stream_update.dpms_off = &dpms_off;
2590 
2591 	mutex_lock(&adev->dm.dc_lock);
2592 	stream_state = dc_stream_find_from_link(link);
2593 
2594 	if (stream_state == NULL) {
2595 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2596 		mutex_unlock(&adev->dm.dc_lock);
2597 		return;
2598 	}
2599 
2600 	stream_update.stream = stream_state;
2601 	acrtc_state->force_dpms_off = true;
2602 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2603 				     stream_state, &stream_update,
2604 				     stream_state->ctx->dc->current_state);
2605 	mutex_unlock(&adev->dm.dc_lock);
2606 }
2607 
2608 static int dm_resume(void *handle)
2609 {
2610 	struct amdgpu_device *adev = handle;
2611 	struct drm_device *ddev = adev_to_drm(adev);
2612 	struct amdgpu_display_manager *dm = &adev->dm;
2613 	struct amdgpu_dm_connector *aconnector;
2614 	struct drm_connector *connector;
2615 	struct drm_connector_list_iter iter;
2616 	struct drm_crtc *crtc;
2617 	struct drm_crtc_state *new_crtc_state;
2618 	struct dm_crtc_state *dm_new_crtc_state;
2619 	struct drm_plane *plane;
2620 	struct drm_plane_state *new_plane_state;
2621 	struct dm_plane_state *dm_new_plane_state;
2622 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2623 	enum dc_connection_type new_connection_type = dc_connection_none;
2624 	struct dc_state *dc_state;
2625 	int i, r, j;
2626 
2627 	if (amdgpu_in_reset(adev)) {
2628 		dc_state = dm->cached_dc_state;
2629 
2630 		/*
2631 		 * The dc->current_state is backed up into dm->cached_dc_state
2632 		 * before we commit 0 streams.
2633 		 *
2634 		 * DC will clear link encoder assignments on the real state
2635 		 * but the changes won't propagate over to the copy we made
2636 		 * before the 0 streams commit.
2637 		 *
2638 		 * DC expects that link encoder assignments are *not* valid
2639 		 * when committing a state, so as a workaround we can copy
2640 		 * off of the current state.
2641 		 *
2642 		 * We lose the previous assignments, but we had already
2643 		 * commit 0 streams anyway.
2644 		 */
2645 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2646 
2647 		if (dc_enable_dmub_notifications(adev->dm.dc))
2648 			amdgpu_dm_outbox_init(adev);
2649 
2650 		r = dm_dmub_hw_init(adev);
2651 		if (r)
2652 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2653 
2654 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2655 		dc_resume(dm->dc);
2656 
2657 		amdgpu_dm_irq_resume_early(adev);
2658 
2659 		for (i = 0; i < dc_state->stream_count; i++) {
2660 			dc_state->streams[i]->mode_changed = true;
2661 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2662 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2663 					= 0xffffffff;
2664 			}
2665 		}
2666 
2667 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2668 
2669 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2670 
2671 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2672 
2673 		dc_release_state(dm->cached_dc_state);
2674 		dm->cached_dc_state = NULL;
2675 
2676 		amdgpu_dm_irq_resume_late(adev);
2677 
2678 		mutex_unlock(&dm->dc_lock);
2679 
2680 		return 0;
2681 	}
2682 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2683 	dc_release_state(dm_state->context);
2684 	dm_state->context = dc_create_state(dm->dc);
2685 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2686 	dc_resource_state_construct(dm->dc, dm_state->context);
2687 
2688 	/* Re-enable outbox interrupts for DPIA. */
2689 	if (dc_enable_dmub_notifications(adev->dm.dc))
2690 		amdgpu_dm_outbox_init(adev);
2691 
2692 	/* Before powering on DC we need to re-initialize DMUB. */
2693 	dm_dmub_hw_resume(adev);
2694 
2695 	/* power on hardware */
2696 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2697 
2698 	/* program HPD filter */
2699 	dc_resume(dm->dc);
2700 
2701 	/*
2702 	 * early enable HPD Rx IRQ, should be done before set mode as short
2703 	 * pulse interrupts are used for MST
2704 	 */
2705 	amdgpu_dm_irq_resume_early(adev);
2706 
2707 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2708 	s3_handle_mst(ddev, false);
2709 
2710 	/* Do detection*/
2711 	drm_connector_list_iter_begin(ddev, &iter);
2712 	drm_for_each_connector_iter(connector, &iter) {
2713 		aconnector = to_amdgpu_dm_connector(connector);
2714 
2715 		/*
2716 		 * this is the case when traversing through already created
2717 		 * MST connectors, should be skipped
2718 		 */
2719 		if (aconnector->dc_link &&
2720 		    aconnector->dc_link->type == dc_connection_mst_branch)
2721 			continue;
2722 
2723 		mutex_lock(&aconnector->hpd_lock);
2724 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2725 			DRM_ERROR("KMS: Failed to detect connector\n");
2726 
2727 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2728 			emulated_link_detect(aconnector->dc_link);
2729 		else
2730 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2731 
2732 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2733 			aconnector->fake_enable = false;
2734 
2735 		if (aconnector->dc_sink)
2736 			dc_sink_release(aconnector->dc_sink);
2737 		aconnector->dc_sink = NULL;
2738 		amdgpu_dm_update_connector_after_detect(aconnector);
2739 		mutex_unlock(&aconnector->hpd_lock);
2740 	}
2741 	drm_connector_list_iter_end(&iter);
2742 
2743 	/* Force mode set in atomic commit */
2744 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2745 		new_crtc_state->active_changed = true;
2746 
2747 	/*
2748 	 * atomic_check is expected to create the dc states. We need to release
2749 	 * them here, since they were duplicated as part of the suspend
2750 	 * procedure.
2751 	 */
2752 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2753 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2754 		if (dm_new_crtc_state->stream) {
2755 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2756 			dc_stream_release(dm_new_crtc_state->stream);
2757 			dm_new_crtc_state->stream = NULL;
2758 		}
2759 	}
2760 
2761 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2762 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2763 		if (dm_new_plane_state->dc_state) {
2764 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2765 			dc_plane_state_release(dm_new_plane_state->dc_state);
2766 			dm_new_plane_state->dc_state = NULL;
2767 		}
2768 	}
2769 
2770 	drm_atomic_helper_resume(ddev, dm->cached_state);
2771 
2772 	dm->cached_state = NULL;
2773 
2774 	amdgpu_dm_irq_resume_late(adev);
2775 
2776 	amdgpu_dm_smu_write_watermarks_table(adev);
2777 
2778 	return 0;
2779 }
2780 
2781 /**
2782  * DOC: DM Lifecycle
2783  *
2784  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2785  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2786  * the base driver's device list to be initialized and torn down accordingly.
2787  *
2788  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2789  */
2790 
2791 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2792 	.name = "dm",
2793 	.early_init = dm_early_init,
2794 	.late_init = dm_late_init,
2795 	.sw_init = dm_sw_init,
2796 	.sw_fini = dm_sw_fini,
2797 	.early_fini = amdgpu_dm_early_fini,
2798 	.hw_init = dm_hw_init,
2799 	.hw_fini = dm_hw_fini,
2800 	.suspend = dm_suspend,
2801 	.resume = dm_resume,
2802 	.is_idle = dm_is_idle,
2803 	.wait_for_idle = dm_wait_for_idle,
2804 	.check_soft_reset = dm_check_soft_reset,
2805 	.soft_reset = dm_soft_reset,
2806 	.set_clockgating_state = dm_set_clockgating_state,
2807 	.set_powergating_state = dm_set_powergating_state,
2808 };
2809 
2810 const struct amdgpu_ip_block_version dm_ip_block =
2811 {
2812 	.type = AMD_IP_BLOCK_TYPE_DCE,
2813 	.major = 1,
2814 	.minor = 0,
2815 	.rev = 0,
2816 	.funcs = &amdgpu_dm_funcs,
2817 };
2818 
2819 
2820 /**
2821  * DOC: atomic
2822  *
2823  * *WIP*
2824  */
2825 
2826 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2827 	.fb_create = amdgpu_display_user_framebuffer_create,
2828 	.get_format_info = amd_get_format_info,
2829 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2830 	.atomic_check = amdgpu_dm_atomic_check,
2831 	.atomic_commit = drm_atomic_helper_commit,
2832 };
2833 
2834 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2835 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2836 };
2837 
2838 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2839 {
2840 	u32 max_cll, min_cll, max, min, q, r;
2841 	struct amdgpu_dm_backlight_caps *caps;
2842 	struct amdgpu_display_manager *dm;
2843 	struct drm_connector *conn_base;
2844 	struct amdgpu_device *adev;
2845 	struct dc_link *link = NULL;
2846 	static const u8 pre_computed_values[] = {
2847 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2848 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2849 	int i;
2850 
2851 	if (!aconnector || !aconnector->dc_link)
2852 		return;
2853 
2854 	link = aconnector->dc_link;
2855 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2856 		return;
2857 
2858 	conn_base = &aconnector->base;
2859 	adev = drm_to_adev(conn_base->dev);
2860 	dm = &adev->dm;
2861 	for (i = 0; i < dm->num_of_edps; i++) {
2862 		if (link == dm->backlight_link[i])
2863 			break;
2864 	}
2865 	if (i >= dm->num_of_edps)
2866 		return;
2867 	caps = &dm->backlight_caps[i];
2868 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2869 	caps->aux_support = false;
2870 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2871 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2872 
2873 	if (caps->ext_caps->bits.oled == 1 /*||
2874 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2875 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2876 		caps->aux_support = true;
2877 
2878 	if (amdgpu_backlight == 0)
2879 		caps->aux_support = false;
2880 	else if (amdgpu_backlight == 1)
2881 		caps->aux_support = true;
2882 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * Calculating this expression would require floating-point precision;
	 * to avoid that complexity, we take advantage of the fact that CV is
	 * divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified in
	 * pre_computed_values.
	 */
2898 	q = max_cll >> 5;
2899 	r = max_cll % 32;
2900 	max = (1 << q) * pre_computed_values[r];
2901 
2902 	// min luminance: maxLum * (CV/255)^2 / 100
2903 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2904 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2905 
2906 	caps->aux_max_input_signal = max;
2907 	caps->aux_min_input_signal = min;
2908 }
2909 
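/*
 * Reconcile the DRM connector with the DC sink found by the last detection:
 * forced/emulated sinks and MST sinks are handled specially; otherwise the
 * EDID property, freesync caps and backlight caps are updated to match the
 * new sink (or cleared on disconnect).
 */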
2910 void amdgpu_dm_update_connector_after_detect(
2911 		struct amdgpu_dm_connector *aconnector)
2912 {
2913 	struct drm_connector *connector = &aconnector->base;
2914 	struct drm_device *dev = connector->dev;
2915 	struct dc_sink *sink;
2916 
2917 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2919 		return;
2920 
2921 	sink = aconnector->dc_link->local_sink;
2922 	if (sink)
2923 		dc_sink_retain(sink);
2924 
2925 	/*
2926 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2927 	 * the connector sink is set to either fake or physical sink depends on link status.
2928 	 * Skip if already done during boot.
2929 	 */
2930 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2931 			&& aconnector->dc_em_sink) {
2932 
2933 		/*
2934 		 * For S3 resume with headless use eml_sink to fake stream
2935 		 * because on resume connector->sink is set to NULL
2936 		 */
2937 		mutex_lock(&dev->mode_config.mutex);
2938 
2939 		if (sink) {
2940 			if (aconnector->dc_sink) {
2941 				amdgpu_dm_update_freesync_caps(connector, NULL);
2942 				/*
2943 				 * retain and release below are used to
2944 				 * bump up refcount for sink because the link doesn't point
2945 				 * to it anymore after disconnect, so on next crtc to connector
2946 				 * reshuffle by UMD we will get into unwanted dc_sink release
2947 				 */
2948 				dc_sink_release(aconnector->dc_sink);
2949 			}
2950 			aconnector->dc_sink = sink;
2951 			dc_sink_retain(aconnector->dc_sink);
2952 			amdgpu_dm_update_freesync_caps(connector,
2953 					aconnector->edid);
2954 		} else {
2955 			amdgpu_dm_update_freesync_caps(connector, NULL);
2956 			if (!aconnector->dc_sink) {
2957 				aconnector->dc_sink = aconnector->dc_em_sink;
2958 				dc_sink_retain(aconnector->dc_sink);
2959 			}
2960 		}
2961 
2962 		mutex_unlock(&dev->mode_config.mutex);
2963 
2964 		if (sink)
2965 			dc_sink_release(sink);
2966 		return;
2967 	}
2968 
2969 	/*
2970 	 * TODO: temporary guard to look for proper fix
2971 	 * if this sink is MST sink, we should not do anything
2972 	 */
2973 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2974 		dc_sink_release(sink);
2975 		return;
2976 	}
2977 
2978 	if (aconnector->dc_sink == sink) {
2979 		/*
2980 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2981 		 * Do nothing!!
2982 		 */
2983 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2984 				aconnector->connector_id);
2985 		if (sink)
2986 			dc_sink_release(sink);
2987 		return;
2988 	}
2989 
2990 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2991 		aconnector->connector_id, aconnector->dc_sink, sink);
2992 
2993 	mutex_lock(&dev->mode_config.mutex);
2994 
2995 	/*
2996 	 * 1. Update status of the drm connector
2997 	 * 2. Send an event and let userspace tell us what to do
2998 	 */
2999 	if (sink) {
3000 		/*
3001 		 * TODO: check if we still need the S3 mode update workaround.
3002 		 * If yes, put it here.
3003 		 */
3004 		if (aconnector->dc_sink) {
3005 			amdgpu_dm_update_freesync_caps(connector, NULL);
3006 			dc_sink_release(aconnector->dc_sink);
3007 		}
3008 
3009 		aconnector->dc_sink = sink;
3010 		dc_sink_retain(aconnector->dc_sink);
3011 		if (sink->dc_edid.length == 0) {
3012 			aconnector->edid = NULL;
3013 			if (aconnector->dc_link->aux_mode) {
3014 				drm_dp_cec_unset_edid(
3015 					&aconnector->dm_dp_aux.aux);
3016 			}
3017 		} else {
3018 			aconnector->edid =
3019 				(struct edid *)sink->dc_edid.raw_edid;
3020 
3021 			if (aconnector->dc_link->aux_mode)
3022 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3023 						    aconnector->edid);
3024 		}
3025 
3026 		drm_connector_update_edid_property(connector, aconnector->edid);
3027 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3028 		update_connector_ext_caps(aconnector);
3029 	} else {
3030 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3031 		amdgpu_dm_update_freesync_caps(connector, NULL);
3032 		drm_connector_update_edid_property(connector, NULL);
3033 		aconnector->num_modes = 0;
3034 		dc_sink_release(aconnector->dc_sink);
3035 		aconnector->dc_sink = NULL;
3036 		aconnector->edid = NULL;
3037 #ifdef CONFIG_DRM_AMD_DC_HDCP
3038 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3039 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3040 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3041 #endif
3042 	}
3043 
3044 	mutex_unlock(&dev->mode_config.mutex);
3045 
3046 	update_subconnector_property(aconnector);
3047 
3048 	if (sink)
3049 		dc_sink_release(sink);
3050 }
3051 
3052 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3053 {
3054 	struct drm_connector *connector = &aconnector->base;
3055 	struct drm_device *dev = connector->dev;
3056 	enum dc_connection_type new_connection_type = dc_connection_none;
3057 	struct amdgpu_device *adev = drm_to_adev(dev);
3058 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3059 	struct dm_crtc_state *dm_crtc_state = NULL;
3060 
3061 	if (adev->dm.disable_hpd_irq)
3062 		return;
3063 
3064 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3065 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3066 					dm_con_state->base.state,
3067 					dm_con_state->base.crtc));
3068 	/*
3069 	 * In case of failure or MST no need to update connector status or notify the OS
3070 	 * since (for MST case) MST does this in its own context.
3071 	 */
3072 	mutex_lock(&aconnector->hpd_lock);
3073 
3074 #ifdef CONFIG_DRM_AMD_DC_HDCP
3075 	if (adev->dm.hdcp_workqueue) {
3076 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3077 		dm_con_state->update_hdcp = true;
3078 	}
3079 #endif
3080 	if (aconnector->fake_enable)
3081 		aconnector->fake_enable = false;
3082 
3083 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3084 		DRM_ERROR("KMS: Failed to detect connector\n");
3085 
3086 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3087 		emulated_link_detect(aconnector->dc_link);
3088 
3089 		drm_modeset_lock_all(dev);
3090 		dm_restore_drm_connector_state(dev, connector);
3091 		drm_modeset_unlock_all(dev);
3092 
3093 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3094 			drm_kms_helper_connector_hotplug_event(connector);
3095 
3096 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3097 		if (new_connection_type == dc_connection_none &&
3098 		    aconnector->dc_link->type == dc_connection_none &&
3099 		    dm_crtc_state)
3100 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3101 
3102 		amdgpu_dm_update_connector_after_detect(aconnector);
3103 
3104 		drm_modeset_lock_all(dev);
3105 		dm_restore_drm_connector_state(dev, connector);
3106 		drm_modeset_unlock_all(dev);
3107 
3108 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3109 			drm_kms_helper_connector_hotplug_event(connector);
3110 	}
3111 	mutex_unlock(&aconnector->hpd_lock);
3112 
3113 }
3114 
3115 static void handle_hpd_irq(void *param)
3116 {
3117 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3118 
3119 	handle_hpd_irq_helper(aconnector);
3120 
3121 }
3122 
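/*
 * Service MST sideband messages signalled by a DP short pulse: read the
 * ESI/sink-count DPCD registers, let the MST manager process them, ack the
 * handled IRQ bits back to the sink, and repeat until no new IRQ is reported
 * (bounded by max_process_count iterations).
 */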
3123 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3124 {
3125 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3126 	uint8_t dret;
3127 	bool new_irq_handled = false;
3128 	int dpcd_addr;
3129 	int dpcd_bytes_to_read;
3130 
3131 	const int max_process_count = 30;
3132 	int process_count = 0;
3133 
3134 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3135 
3136 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3137 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3138 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3139 		dpcd_addr = DP_SINK_COUNT;
3140 	} else {
3141 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3142 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3143 		dpcd_addr = DP_SINK_COUNT_ESI;
3144 	}
3145 
3146 	dret = drm_dp_dpcd_read(
3147 		&aconnector->dm_dp_aux.aux,
3148 		dpcd_addr,
3149 		esi,
3150 		dpcd_bytes_to_read);
3151 
3152 	while (dret == dpcd_bytes_to_read &&
3153 		process_count < max_process_count) {
3154 		uint8_t retry;
3155 		dret = 0;
3156 
3157 		process_count++;
3158 
3159 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3160 		/* handle HPD short pulse irq */
3161 		if (aconnector->mst_mgr.mst_state)
3162 			drm_dp_mst_hpd_irq(
3163 				&aconnector->mst_mgr,
3164 				esi,
3165 				&new_irq_handled);
3166 
3167 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3169 			const int ack_dpcd_bytes_to_write =
3170 				dpcd_bytes_to_read - 1;
3171 
3172 			for (retry = 0; retry < 3; retry++) {
3173 				uint8_t wret;
3174 
3175 				wret = drm_dp_dpcd_write(
3176 					&aconnector->dm_dp_aux.aux,
3177 					dpcd_addr + 1,
3178 					&esi[1],
3179 					ack_dpcd_bytes_to_write);
3180 				if (wret == ack_dpcd_bytes_to_write)
3181 					break;
3182 			}
3183 
3184 			/* check if there is new irq to be handled */
3185 			dret = drm_dp_dpcd_read(
3186 				&aconnector->dm_dp_aux.aux,
3187 				dpcd_addr,
3188 				esi,
3189 				dpcd_bytes_to_read);
3190 
3191 			new_irq_handled = false;
3192 		} else {
3193 			break;
3194 		}
3195 	}
3196 
3197 	if (process_count == max_process_count)
3198 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3199 }
3200 
3201 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3202 							union hpd_irq_data hpd_irq_data)
3203 {
3204 	struct hpd_rx_irq_offload_work *offload_work =
3205 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3206 
3207 	if (!offload_work) {
3208 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3209 		return;
3210 	}
3211 
3212 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3213 	offload_work->data = hpd_irq_data;
3214 	offload_work->offload_wq = offload_wq;
3215 
3216 	queue_work(offload_wq->wq, &offload_work->work);
3217 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3218 }
3219 
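/*
 * HPD RX (short pulse) handler. Automated-test requests and link loss are
 * deferred to the per-link offload workqueue, MST sideband messages are
 * handled inline, and for non-MST connectors a downstream port status change
 * triggers a fresh detection and a hotplug event.
 */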
3220 static void handle_hpd_rx_irq(void *param)
3221 {
3222 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3223 	struct drm_connector *connector = &aconnector->base;
3224 	struct drm_device *dev = connector->dev;
3225 	struct dc_link *dc_link = aconnector->dc_link;
3226 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3227 	bool result = false;
3228 	enum dc_connection_type new_connection_type = dc_connection_none;
3229 	struct amdgpu_device *adev = drm_to_adev(dev);
3230 	union hpd_irq_data hpd_irq_data;
3231 	bool link_loss = false;
3232 	bool has_left_work = false;
3233 	int idx = aconnector->base.index;
3234 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3235 
3236 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3237 
3238 	if (adev->dm.disable_hpd_irq)
3239 		return;
3240 
3241 	/*
3242 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3243 	 * conflict, after implement i2c helper, this mutex should be
3244 	 * retired.
3245 	 */
3246 	mutex_lock(&aconnector->hpd_lock);
3247 
3248 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3249 						&link_loss, true, &has_left_work);
3250 
3251 	if (!has_left_work)
3252 		goto out;
3253 
3254 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3255 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3256 		goto out;
3257 	}
3258 
3259 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3260 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3261 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3262 			dm_handle_mst_sideband_msg(aconnector);
3263 			goto out;
3264 		}
3265 
3266 		if (link_loss) {
3267 			bool skip = false;
3268 
3269 			spin_lock(&offload_wq->offload_lock);
3270 			skip = offload_wq->is_handling_link_loss;
3271 
3272 			if (!skip)
3273 				offload_wq->is_handling_link_loss = true;
3274 
3275 			spin_unlock(&offload_wq->offload_lock);
3276 
3277 			if (!skip)
3278 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3279 
3280 			goto out;
3281 		}
3282 	}
3283 
3284 out:
3285 	if (result && !is_mst_root_connector) {
3286 		/* Downstream Port status changed. */
3287 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3288 			DRM_ERROR("KMS: Failed to detect connector\n");
3289 
3290 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3291 			emulated_link_detect(dc_link);
3292 
3293 			if (aconnector->fake_enable)
3294 				aconnector->fake_enable = false;
3295 
3296 			amdgpu_dm_update_connector_after_detect(aconnector);
3297 
3298 
3299 			drm_modeset_lock_all(dev);
3300 			dm_restore_drm_connector_state(dev, connector);
3301 			drm_modeset_unlock_all(dev);
3302 
3303 			drm_kms_helper_connector_hotplug_event(connector);
3304 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3305 
3306 			if (aconnector->fake_enable)
3307 				aconnector->fake_enable = false;
3308 
3309 			amdgpu_dm_update_connector_after_detect(aconnector);
3310 
3311 
3312 			drm_modeset_lock_all(dev);
3313 			dm_restore_drm_connector_state(dev, connector);
3314 			drm_modeset_unlock_all(dev);
3315 
3316 			drm_kms_helper_connector_hotplug_event(connector);
3317 		}
3318 	}
3319 #ifdef CONFIG_DRM_AMD_DC_HDCP
3320 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3321 		if (adev->dm.hdcp_workqueue)
3322 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3323 	}
3324 #endif
3325 
3326 	if (dc_link->type != dc_connection_mst_branch)
3327 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3328 
3329 	mutex_unlock(&aconnector->hpd_lock);
3330 }
3331 
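/*
 * Hook every connector's HPD and HPD RX interrupt sources up to
 * handle_hpd_irq()/handle_hpd_rx_irq(), run in low-IRQ (deferred) context,
 * and record which connector owns each hpd_rx offload queue.
 */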
3332 static void register_hpd_handlers(struct amdgpu_device *adev)
3333 {
3334 	struct drm_device *dev = adev_to_drm(adev);
3335 	struct drm_connector *connector;
3336 	struct amdgpu_dm_connector *aconnector;
3337 	const struct dc_link *dc_link;
3338 	struct dc_interrupt_params int_params = {0};
3339 
3340 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3341 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3342 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
3345 
3346 		aconnector = to_amdgpu_dm_connector(connector);
3347 		dc_link = aconnector->dc_link;
3348 
3349 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3350 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3351 			int_params.irq_source = dc_link->irq_source_hpd;
3352 
3353 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3354 					handle_hpd_irq,
3355 					(void *) aconnector);
3356 		}
3357 
3358 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3359 
3360 			/* Also register for DP short pulse (hpd_rx). */
3361 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3362 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3363 
3364 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3365 					handle_hpd_rx_irq,
3366 					(void *) aconnector);
3367 
3368 			if (adev->dm.hpd_rx_offload_wq)
3369 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3370 					aconnector;
3371 		}
3372 	}
3373 }
3374 
3375 #if defined(CONFIG_DRM_AMD_DC_SI)
3376 /* Register IRQ sources and initialize IRQ callbacks */
3377 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3378 {
3379 	struct dc *dc = adev->dm.dc;
3380 	struct common_irq_params *c_irq_params;
3381 	struct dc_interrupt_params int_params = {0};
3382 	int r;
3383 	int i;
3384 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3385 
3386 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3387 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3388 
3389 	/*
3390 	 * Actions of amdgpu_irq_add_id():
3391 	 * 1. Register a set() function with base driver.
3392 	 *    Base driver will call set() function to enable/disable an
3393 	 *    interrupt in DC hardware.
3394 	 * 2. Register amdgpu_dm_irq_handler().
3395 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3396 	 *    coming from DC hardware.
3397 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3398 	 *    for acknowledging and handling. */
3399 
3400 	/* Use VBLANK interrupt */
3401 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3402 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3403 		if (r) {
3404 			DRM_ERROR("Failed to add crtc irq id!\n");
3405 			return r;
3406 		}
3407 
3408 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3409 		int_params.irq_source =
3410 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3411 
3412 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3413 
3414 		c_irq_params->adev = adev;
3415 		c_irq_params->irq_src = int_params.irq_source;
3416 
3417 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3418 				dm_crtc_high_irq, c_irq_params);
3419 	}
3420 
3421 	/* Use GRPH_PFLIP interrupt */
3422 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3423 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3424 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3425 		if (r) {
3426 			DRM_ERROR("Failed to add page flip irq id!\n");
3427 			return r;
3428 		}
3429 
3430 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3431 		int_params.irq_source =
3432 			dc_interrupt_to_irq_source(dc, i, 0);
3433 
3434 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3435 
3436 		c_irq_params->adev = adev;
3437 		c_irq_params->irq_src = int_params.irq_source;
3438 
3439 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3440 				dm_pflip_high_irq, c_irq_params);
3441 
3442 	}
3443 
3444 	/* HPD */
3445 	r = amdgpu_irq_add_id(adev, client_id,
3446 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3447 	if (r) {
3448 		DRM_ERROR("Failed to add hpd irq id!\n");
3449 		return r;
3450 	}
3451 
3452 	register_hpd_handlers(adev);
3453 
3454 	return 0;
3455 }
3456 #endif
3457 
3458 /* Register IRQ sources and initialize IRQ callbacks */
3459 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3460 {
3461 	struct dc *dc = adev->dm.dc;
3462 	struct common_irq_params *c_irq_params;
3463 	struct dc_interrupt_params int_params = {0};
3464 	int r;
3465 	int i;
3466 	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3467 
3468 	if (adev->family >= AMDGPU_FAMILY_AI)
3469 		client_id = SOC15_IH_CLIENTID_DCE;
3470 
3471 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3472 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3473 
3474 	/*
3475 	 * Actions of amdgpu_irq_add_id():
3476 	 * 1. Register a set() function with base driver.
3477 	 *    Base driver will call set() function to enable/disable an
3478 	 *    interrupt in DC hardware.
3479 	 * 2. Register amdgpu_dm_irq_handler().
3480 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3481 	 *    coming from DC hardware.
3482 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3483 	 *    for acknowledging and handling. */
3484 
3485 	/* Use VBLANK interrupt */
3486 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3487 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3488 		if (r) {
3489 			DRM_ERROR("Failed to add crtc irq id!\n");
3490 			return r;
3491 		}
3492 
3493 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3494 		int_params.irq_source =
3495 			dc_interrupt_to_irq_source(dc, i, 0);
3496 
3497 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3498 
3499 		c_irq_params->adev = adev;
3500 		c_irq_params->irq_src = int_params.irq_source;
3501 
3502 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3503 				dm_crtc_high_irq, c_irq_params);
3504 	}
3505 
3506 	/* Use VUPDATE interrupt */
3507 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3508 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3509 		if (r) {
3510 			DRM_ERROR("Failed to add vupdate irq id!\n");
3511 			return r;
3512 		}
3513 
3514 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3515 		int_params.irq_source =
3516 			dc_interrupt_to_irq_source(dc, i, 0);
3517 
3518 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3519 
3520 		c_irq_params->adev = adev;
3521 		c_irq_params->irq_src = int_params.irq_source;
3522 
3523 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3524 				dm_vupdate_high_irq, c_irq_params);
3525 	}
3526 
3527 	/* Use GRPH_PFLIP interrupt */
3528 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3529 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3530 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3531 		if (r) {
3532 			DRM_ERROR("Failed to add page flip irq id!\n");
3533 			return r;
3534 		}
3535 
3536 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3537 		int_params.irq_source =
3538 			dc_interrupt_to_irq_source(dc, i, 0);
3539 
3540 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3541 
3542 		c_irq_params->adev = adev;
3543 		c_irq_params->irq_src = int_params.irq_source;
3544 
3545 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3546 				dm_pflip_high_irq, c_irq_params);
3547 
3548 	}
3549 
3550 	/* HPD */
3551 	r = amdgpu_irq_add_id(adev, client_id,
3552 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3553 	if (r) {
3554 		DRM_ERROR("Failed to add hpd irq id!\n");
3555 		return r;
3556 	}
3557 
3558 	register_hpd_handlers(adev);
3559 
3560 	return 0;
3561 }
3562 
3563 #if defined(CONFIG_DRM_AMD_DC_DCN)
3564 /* Register IRQ sources and initialize IRQ callbacks */
3565 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3566 {
3567 	struct dc *dc = adev->dm.dc;
3568 	struct common_irq_params *c_irq_params;
3569 	struct dc_interrupt_params int_params = {0};
3570 	int r;
3571 	int i;
3572 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3573 	static const unsigned int vrtl_int_srcid[] = {
3574 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3575 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3576 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3577 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3578 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3579 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3580 	};
3581 #endif
3582 
3583 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3584 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3585 
3586 	/*
3587 	 * Actions of amdgpu_irq_add_id():
3588 	 * 1. Register a set() function with base driver.
3589 	 *    Base driver will call set() function to enable/disable an
3590 	 *    interrupt in DC hardware.
3591 	 * 2. Register amdgpu_dm_irq_handler().
3592 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3593 	 *    coming from DC hardware.
3594 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3595 	 *    for acknowledging and handling.
3596 	 */
3597 
3598 	/* Use VSTARTUP interrupt */
3599 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3600 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3601 			i++) {
3602 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3603 
3604 		if (r) {
3605 			DRM_ERROR("Failed to add crtc irq id!\n");
3606 			return r;
3607 		}
3608 
3609 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3610 		int_params.irq_source =
3611 			dc_interrupt_to_irq_source(dc, i, 0);
3612 
3613 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3614 
3615 		c_irq_params->adev = adev;
3616 		c_irq_params->irq_src = int_params.irq_source;
3617 
3618 		amdgpu_dm_irq_register_interrupt(
3619 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3620 	}
3621 
3622 	/* Use otg vertical line interrupt */
3623 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3624 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3625 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3626 				vrtl_int_srcid[i], &adev->vline0_irq);
3627 
3628 		if (r) {
3629 			DRM_ERROR("Failed to add vline0 irq id!\n");
3630 			return r;
3631 		}
3632 
3633 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3634 		int_params.irq_source =
3635 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3636 
3637 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3638 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3639 			break;
3640 		}
3641 
3642 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3643 					- DC_IRQ_SOURCE_DC1_VLINE0];
3644 
3645 		c_irq_params->adev = adev;
3646 		c_irq_params->irq_src = int_params.irq_source;
3647 
3648 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3649 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3650 	}
3651 #endif
3652 
3653 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3654 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3655 	 * to trigger at end of each vblank, regardless of state of the lock,
3656 	 * matching DCE behaviour.
3657 	 */
3658 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3659 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3660 	     i++) {
3661 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3662 
3663 		if (r) {
3664 			DRM_ERROR("Failed to add vupdate irq id!\n");
3665 			return r;
3666 		}
3667 
3668 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3669 		int_params.irq_source =
3670 			dc_interrupt_to_irq_source(dc, i, 0);
3671 
3672 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3673 
3674 		c_irq_params->adev = adev;
3675 		c_irq_params->irq_src = int_params.irq_source;
3676 
3677 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3678 				dm_vupdate_high_irq, c_irq_params);
3679 	}
3680 
3681 	/* Use GRPH_PFLIP interrupt */
3682 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3683 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3684 			i++) {
3685 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3686 		if (r) {
3687 			DRM_ERROR("Failed to add page flip irq id!\n");
3688 			return r;
3689 		}
3690 
3691 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3692 		int_params.irq_source =
3693 			dc_interrupt_to_irq_source(dc, i, 0);
3694 
3695 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3696 
3697 		c_irq_params->adev = adev;
3698 		c_irq_params->irq_src = int_params.irq_source;
3699 
3700 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3701 				dm_pflip_high_irq, c_irq_params);
3702 
3703 	}
3704 
3705 	/* HPD */
3706 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3707 			&adev->hpd_irq);
3708 	if (r) {
3709 		DRM_ERROR("Failed to add hpd irq id!\n");
3710 		return r;
3711 	}
3712 
3713 	register_hpd_handlers(adev);
3714 
3715 	return 0;
3716 }
3717 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3718 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3719 {
3720 	struct dc *dc = adev->dm.dc;
3721 	struct common_irq_params *c_irq_params;
3722 	struct dc_interrupt_params int_params = {0};
3723 	int r, i;
3724 
3725 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3726 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3727 
3728 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3729 			&adev->dmub_outbox_irq);
3730 	if (r) {
3731 		DRM_ERROR("Failed to add outbox irq id!\n");
3732 		return r;
3733 	}
3734 
3735 	if (dc->ctx->dmub_srv) {
3736 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3737 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3738 		int_params.irq_source =
3739 		dc_interrupt_to_irq_source(dc, i, 0);
3740 
3741 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3742 
3743 		c_irq_params->adev = adev;
3744 		c_irq_params->irq_src = int_params.irq_source;
3745 
3746 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3747 				dm_dmub_outbox1_low_irq, c_irq_params);
3748 	}
3749 
3750 	return 0;
3751 }
3752 #endif
3753 
3754 /*
3755  * Acquires the lock for the atomic state object and returns
3756  * the new atomic state.
3757  *
3758  * This should only be called during atomic check.
3759  */
3760 int dm_atomic_get_state(struct drm_atomic_state *state,
3761 			struct dm_atomic_state **dm_state)
3762 {
3763 	struct drm_device *dev = state->dev;
3764 	struct amdgpu_device *adev = drm_to_adev(dev);
3765 	struct amdgpu_display_manager *dm = &adev->dm;
3766 	struct drm_private_state *priv_state;
3767 
3768 	if (*dm_state)
3769 		return 0;
3770 
3771 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3772 	if (IS_ERR(priv_state))
3773 		return PTR_ERR(priv_state);
3774 
3775 	*dm_state = to_dm_atomic_state(priv_state);
3776 
3777 	return 0;
3778 }
3779 
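/*
 * Find the DM private object state already contained in this atomic
 * state, or NULL if the DM private object is not part of it. Unlike
 * dm_atomic_get_state() this takes no locks.
 */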
3780 static struct dm_atomic_state *
3781 dm_atomic_get_new_state(struct drm_atomic_state *state)
3782 {
3783 	struct drm_device *dev = state->dev;
3784 	struct amdgpu_device *adev = drm_to_adev(dev);
3785 	struct amdgpu_display_manager *dm = &adev->dm;
3786 	struct drm_private_obj *obj;
3787 	struct drm_private_state *new_obj_state;
3788 	int i;
3789 
3790 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3791 		if (obj->funcs == dm->atomic_obj.funcs)
3792 			return to_dm_atomic_state(new_obj_state);
3793 	}
3794 
3795 	return NULL;
3796 }
3797 
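/*
 * Duplicate the DM private state, including a copy of the current DC
 * state, so an atomic commit can modify it independently.
 */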
3798 static struct drm_private_state *
3799 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3800 {
3801 	struct dm_atomic_state *old_state, *new_state;
3802 
3803 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3804 	if (!new_state)
3805 		return NULL;
3806 
3807 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3808 
3809 	old_state = to_dm_atomic_state(obj->state);
3810 
3811 	if (old_state && old_state->context)
3812 		new_state->context = dc_copy_state(old_state->context);
3813 
3814 	if (!new_state->context) {
3815 		kfree(new_state);
3816 		return NULL;
3817 	}
3818 
3819 	return &new_state->base;
3820 }
3821 
3822 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3823 				    struct drm_private_state *state)
3824 {
3825 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3826 
3827 	if (dm_state && dm_state->context)
3828 		dc_release_state(dm_state->context);
3829 
3830 	kfree(dm_state);
3831 }
3832 
3833 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3834 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3835 	.atomic_destroy_state = dm_atomic_destroy_state,
3836 };
3837 
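/*
 * Set up DRM mode config limits, the DM atomic private object that holds
 * the global DC state, the modeset properties and the audio component.
 */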
3838 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3839 {
3840 	struct dm_atomic_state *state;
3841 	int r;
3842 
3843 	adev->mode_info.mode_config_initialized = true;
3844 
3845 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3846 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3847 
3848 	adev_to_drm(adev)->mode_config.max_width = 16384;
3849 	adev_to_drm(adev)->mode_config.max_height = 16384;
3850 
3851 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3852 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3853 	/* indicates support for immediate flip */
3854 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3855 
3856 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3857 
3858 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3859 	if (!state)
3860 		return -ENOMEM;
3861 
3862 	state->context = dc_create_state(adev->dm.dc);
3863 	if (!state->context) {
3864 		kfree(state);
3865 		return -ENOMEM;
3866 	}
3867 
3868 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3869 
3870 	drm_atomic_private_obj_init(adev_to_drm(adev),
3871 				    &adev->dm.atomic_obj,
3872 				    &state->base,
3873 				    &dm_atomic_state_funcs);
3874 
3875 	r = amdgpu_display_modeset_create_props(adev);
3876 	if (r) {
3877 		dc_release_state(state->context);
3878 		kfree(state);
3879 		return r;
3880 	}
3881 
3882 	r = amdgpu_dm_audio_init(adev);
3883 	if (r) {
3884 		dc_release_state(state->context);
3885 		kfree(state);
3886 		return r;
3887 	}
3888 
3889 	return 0;
3890 }
3891 
3892 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3893 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3894 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3895 
3896 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3897 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3898 
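/*
 * Fill in the backlight caps for the given eDP index, using the ranges
 * reported by ACPI when available and driver defaults otherwise.
 */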
3899 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3900 					    int bl_idx)
3901 {
3902 #if defined(CONFIG_ACPI)
3903 	struct amdgpu_dm_backlight_caps caps;
3904 
3905 	memset(&caps, 0, sizeof(caps));
3906 
3907 	if (dm->backlight_caps[bl_idx].caps_valid)
3908 		return;
3909 
3910 	amdgpu_acpi_get_backlight_caps(&caps);
3911 	if (caps.caps_valid) {
3912 		dm->backlight_caps[bl_idx].caps_valid = true;
3913 		if (caps.aux_support)
3914 			return;
3915 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3916 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3917 	} else {
3918 		dm->backlight_caps[bl_idx].min_input_signal =
3919 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3920 		dm->backlight_caps[bl_idx].max_input_signal =
3921 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3922 	}
3923 #else
3924 	if (dm->backlight_caps[bl_idx].aux_support)
3925 		return;
3926 
3927 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3928 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3929 #endif
3930 }
3931 
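/*
 * Derive the brightness range from the backlight caps: millinits for
 * AUX-controlled panels, a 16-bit PWM range otherwise. Returns 0 when
 * no caps were provided.
 */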
3932 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3933 				unsigned int *min, unsigned int *max)
3934 {
3935 	if (!caps)
3936 		return 0;
3937 
3938 	if (caps->aux_support) {
3939 		// Firmware limits are in nits, DC API wants millinits.
3940 		*max = 1000 * caps->aux_max_input_signal;
3941 		*min = 1000 * caps->aux_min_input_signal;
3942 	} else {
3943 		// Firmware limits are 8-bit, PWM control is 16-bit.
3944 		*max = 0x101 * caps->max_input_signal;
3945 		*min = 0x101 * caps->min_input_signal;
3946 	}
3947 	return 1;
3948 }
3949 
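/* Rescale a 0..AMDGPU_MAX_BL_LEVEL user value onto the panel's native range. */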
3950 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3951 					uint32_t brightness)
3952 {
3953 	unsigned int min, max;
3954 
3955 	if (!get_brightness_range(caps, &min, &max))
3956 		return brightness;
3957 
3958 	// Rescale 0..255 to min..max
3959 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3960 				       AMDGPU_MAX_BL_LEVEL);
3961 }
3962 
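/* Rescale a native backlight value back onto the 0..AMDGPU_MAX_BL_LEVEL user scale. */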
3963 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3964 				      uint32_t brightness)
3965 {
3966 	unsigned int min, max;
3967 
3968 	if (!get_brightness_range(caps, &min, &max))
3969 		return brightness;
3970 
3971 	if (brightness < min)
3972 		return 0;
3973 	// Rescale min..max to 0..255
3974 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3975 				 max - min);
3976 }
3977 
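/*
 * Apply a new backlight level to the given eDP link, either over DPCD
 * AUX (in nits) or via the regular DC backlight interface, and cache
 * the value that was actually applied.
 */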
3978 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3979 					 int bl_idx,
3980 					 u32 user_brightness)
3981 {
3982 	struct amdgpu_dm_backlight_caps caps;
3983 	struct dc_link *link;
3984 	u32 brightness;
3985 	bool rc;
3986 
3987 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3988 	caps = dm->backlight_caps[bl_idx];
3989 
3990 	dm->brightness[bl_idx] = user_brightness;
3991 	/* update scratch register */
3992 	if (bl_idx == 0)
3993 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3994 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3995 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3996 
3997 	/* Change brightness based on AUX property */
3998 	if (caps.aux_support) {
3999 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
4000 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4001 		if (!rc)
4002 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4003 	} else {
4004 		rc = dc_link_set_backlight_level(link, brightness, 0);
4005 		if (!rc)
4006 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4007 	}
4008 
4009 	if (rc)
4010 		dm->actual_brightness[bl_idx] = user_brightness;
4011 }
4012 
4013 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4014 {
4015 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4016 	int i;
4017 
4018 	for (i = 0; i < dm->num_of_edps; i++) {
4019 		if (bd == dm->backlight_dev[i])
4020 			break;
4021 	}
4022 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4023 		i = 0;
4024 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4025 
4026 	return 0;
4027 }
4028 
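/*
 * Query the current backlight level for the given eDP link and convert
 * it to the user scale, falling back to the cached brightness if the
 * hardware readback fails.
 */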
4029 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4030 					 int bl_idx)
4031 {
4032 	struct amdgpu_dm_backlight_caps caps;
4033 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4034 
4035 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4036 	caps = dm->backlight_caps[bl_idx];
4037 
4038 	if (caps.aux_support) {
4039 		u32 avg, peak;
4040 		bool rc;
4041 
4042 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4043 		if (!rc)
4044 			return dm->brightness[bl_idx];
4045 		return convert_brightness_to_user(&caps, avg);
4046 	} else {
4047 		int ret = dc_link_get_backlight_level(link);
4048 
4049 		if (ret == DC_ERROR_UNEXPECTED)
4050 			return dm->brightness[bl_idx];
4051 		return convert_brightness_to_user(&caps, ret);
4052 	}
4053 }
4054 
4055 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4056 {
4057 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4058 	int i;
4059 
4060 	for (i = 0; i < dm->num_of_edps; i++) {
4061 		if (bd == dm->backlight_dev[i])
4062 			break;
4063 	}
4064 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4065 		i = 0;
4066 	return amdgpu_dm_backlight_get_level(dm, i);
4067 }
4068 
4069 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4070 	.options = BL_CORE_SUSPENDRESUME,
4071 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4072 	.update_status	= amdgpu_dm_backlight_update_status,
4073 };
4074 
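/*
 * Register a backlight class device for the next eDP link and start it
 * at maximum brightness.
 */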
4075 static void
4076 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4077 {
4078 	char bl_name[16];
4079 	struct backlight_properties props = { 0 };
4080 
4081 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4082 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4083 
4084 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4085 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4086 	props.type = BACKLIGHT_RAW;
4087 
4088 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4089 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4090 
4091 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4092 								       adev_to_drm(dm->adev)->dev,
4093 								       dm,
4094 								       &amdgpu_dm_backlight_ops,
4095 								       &props);
4096 
4097 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4098 		DRM_ERROR("DM: Backlight registration failed!\n");
4099 	else
4100 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4101 }
4102 #endif
4103 
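/*
 * Allocate and initialize one DRM plane of the requested type. Primary
 * planes are tied to their matching CRTC; overlay/underlay planes are
 * exposed to all CRTCs.
 */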
4104 static int initialize_plane(struct amdgpu_display_manager *dm,
4105 			    struct amdgpu_mode_info *mode_info, int plane_id,
4106 			    enum drm_plane_type plane_type,
4107 			    const struct dc_plane_cap *plane_cap)
4108 {
4109 	struct drm_plane *plane;
4110 	unsigned long possible_crtcs;
4111 	int ret = 0;
4112 
4113 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4114 	if (!plane) {
4115 		DRM_ERROR("KMS: Failed to allocate plane\n");
4116 		return -ENOMEM;
4117 	}
4118 	plane->type = plane_type;
4119 
4120 	/*
4121 	 * HACK: IGT tests expect that the primary plane for a CRTC
4122 	 * can only have one possible CRTC. Only expose support for
4123 	 * all CRTCs on planes that will not be used as a primary plane
4124 	 * for a CRTC, i.e. overlay or underlay planes.
4125 	 */
4126 	possible_crtcs = 1 << plane_id;
4127 	if (plane_id >= dm->dc->caps.max_streams)
4128 		possible_crtcs = 0xff;
4129 
4130 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4131 
4132 	if (ret) {
4133 		DRM_ERROR("KMS: Failed to initialize plane\n");
4134 		kfree(plane);
4135 		return ret;
4136 	}
4137 
4138 	if (mode_info)
4139 		mode_info->planes[plane_id] = plane;
4140 
4141 	return ret;
4142 }
4143 
4144 
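/*
 * If the link drives an eDP or LVDS panel, make sure a backlight device
 * exists and bind this link to the next backlight slot.
 */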
4145 static void register_backlight_device(struct amdgpu_display_manager *dm,
4146 				      struct dc_link *link)
4147 {
4148 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4149 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4150 
4151 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4152 	    link->type != dc_connection_none) {
4153 		/*
4154 		 * Even if registration fails, we should continue with
4155 		 * DM initialization because not having backlight control
4156 		 * is better than a black screen.
4157 		 */
4158 		if (!dm->backlight_dev[dm->num_of_edps])
4159 			amdgpu_dm_register_backlight_device(dm);
4160 
4161 		if (dm->backlight_dev[dm->num_of_edps]) {
4162 			dm->backlight_link[dm->num_of_edps] = link;
4163 			dm->num_of_edps++;
4164 		}
4165 	}
4166 #endif
4167 }
4168 
4169 
4170 /*
4171  * In this architecture, the association
4172  * connector -> encoder -> crtc
4173  * is not really required. The crtc and connector will hold the
4174  * display_index as an abstraction to use with the DAL component.
4175  *
4176  * Returns 0 on success
4177  */
4178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4179 {
4180 	struct amdgpu_display_manager *dm = &adev->dm;
4181 	int32_t i;
4182 	struct amdgpu_dm_connector *aconnector = NULL;
4183 	struct amdgpu_encoder *aencoder = NULL;
4184 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4185 	uint32_t link_cnt;
4186 	int32_t primary_planes;
4187 	enum dc_connection_type new_connection_type = dc_connection_none;
4188 	const struct dc_plane_cap *plane;
4189 	bool psr_feature_enabled = false;
4190 
4191 	dm->display_indexes_num = dm->dc->caps.max_streams;
4192 	/* Update the actual used number of crtc */
4193 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4194 
4195 	link_cnt = dm->dc->caps.max_links;
4196 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4197 		DRM_ERROR("DM: Failed to initialize mode config\n");
4198 		return -EINVAL;
4199 	}
4200 
4201 	/* There is one primary plane per CRTC */
4202 	primary_planes = dm->dc->caps.max_streams;
4203 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4204 
4205 	/*
4206 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4207 	 * Order is reversed to match iteration order in atomic check.
4208 	 */
4209 	for (i = (primary_planes - 1); i >= 0; i--) {
4210 		plane = &dm->dc->caps.planes[i];
4211 
4212 		if (initialize_plane(dm, mode_info, i,
4213 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4214 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4215 			goto fail;
4216 		}
4217 	}
4218 
4219 	/*
4220 	 * Initialize overlay planes, index starting after primary planes.
4221 	 * These planes have a higher DRM index than the primary planes since
4222 	 * they should be considered as having a higher z-order.
4223 	 * Order is reversed to match iteration order in atomic check.
4224 	 *
4225 	 * Only support DCN for now, and only expose one so we don't encourage
4226 	 * userspace to use up all the pipes.
4227 	 */
4228 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4229 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4230 
4231 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4232 			continue;
4233 
4234 		if (!plane->blends_with_above || !plane->blends_with_below)
4235 			continue;
4236 
4237 		if (!plane->pixel_format_support.argb8888)
4238 			continue;
4239 
4240 		if (initialize_plane(dm, NULL, primary_planes + i,
4241 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4242 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4243 			goto fail;
4244 		}
4245 
4246 		/* Only create one overlay plane. */
4247 		break;
4248 	}
4249 
4250 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4251 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4252 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4253 			goto fail;
4254 		}
4255 
4256 #if defined(CONFIG_DRM_AMD_DC_DCN)
4257 	/* Use Outbox interrupt */
4258 	switch (adev->ip_versions[DCE_HWIP][0]) {
4259 	case IP_VERSION(3, 0, 0):
4260 	case IP_VERSION(3, 1, 2):
4261 	case IP_VERSION(3, 1, 3):
4262 	case IP_VERSION(3, 1, 5):
4263 	case IP_VERSION(3, 1, 6):
4264 	case IP_VERSION(2, 1, 0):
4265 		if (register_outbox_irq_handlers(dm->adev)) {
4266 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4267 			goto fail;
4268 		}
4269 		break;
4270 	default:
4271 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4272 			      adev->ip_versions[DCE_HWIP][0]);
4273 	}
4274 
4275 	/* Determine whether to enable PSR support by default. */
4276 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4277 		switch (adev->ip_versions[DCE_HWIP][0]) {
4278 		case IP_VERSION(3, 1, 2):
4279 		case IP_VERSION(3, 1, 3):
4280 		case IP_VERSION(3, 1, 5):
4281 		case IP_VERSION(3, 1, 6):
4282 			psr_feature_enabled = true;
4283 			break;
4284 		default:
4285 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4286 			break;
4287 		}
4288 	}
4289 #endif
4290 
4291 	/* Disable vblank IRQs aggressively for power-saving. */
4292 	adev_to_drm(adev)->vblank_disable_immediate = true;
4293 
4294 	/* loops over all connectors on the board */
4295 	for (i = 0; i < link_cnt; i++) {
4296 		struct dc_link *link = NULL;
4297 
4298 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4299 			DRM_ERROR(
4300 				"KMS: Cannot support more than %d display indexes\n",
4301 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4302 			continue;
4303 		}
4304 
4305 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4306 		if (!aconnector)
4307 			goto fail;
4308 
4309 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4310 		if (!aencoder)
4311 			goto fail;
4312 
4313 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4314 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4315 			goto fail;
4316 		}
4317 
4318 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4319 			DRM_ERROR("KMS: Failed to initialize connector\n");
4320 			goto fail;
4321 		}
4322 
4323 		link = dc_get_link_at_index(dm->dc, i);
4324 
4325 		if (!dc_link_detect_sink(link, &new_connection_type))
4326 			DRM_ERROR("KMS: Failed to detect connector\n");
4327 
4328 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4329 			emulated_link_detect(link);
4330 			amdgpu_dm_update_connector_after_detect(aconnector);
4331 
4332 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4333 			amdgpu_dm_update_connector_after_detect(aconnector);
4334 			register_backlight_device(dm, link);
4335 			if (dm->num_of_edps)
4336 				update_connector_ext_caps(aconnector);
4337 			if (psr_feature_enabled)
4338 				amdgpu_dm_set_psr_caps(link);
4339 
4340 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4341 			 * PSR is also supported.
4342 			 */
4343 			if (link->psr_settings.psr_feature_enabled)
4344 				adev_to_drm(adev)->vblank_disable_immediate = false;
4345 		}
4346 
4347 
4348 	}
4349 
4350 	/* Software is initialized. Now we can register interrupt handlers. */
4351 	switch (adev->asic_type) {
4352 #if defined(CONFIG_DRM_AMD_DC_SI)
4353 	case CHIP_TAHITI:
4354 	case CHIP_PITCAIRN:
4355 	case CHIP_VERDE:
4356 	case CHIP_OLAND:
4357 		if (dce60_register_irq_handlers(dm->adev)) {
4358 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4359 			goto fail;
4360 		}
4361 		break;
4362 #endif
4363 	case CHIP_BONAIRE:
4364 	case CHIP_HAWAII:
4365 	case CHIP_KAVERI:
4366 	case CHIP_KABINI:
4367 	case CHIP_MULLINS:
4368 	case CHIP_TONGA:
4369 	case CHIP_FIJI:
4370 	case CHIP_CARRIZO:
4371 	case CHIP_STONEY:
4372 	case CHIP_POLARIS11:
4373 	case CHIP_POLARIS10:
4374 	case CHIP_POLARIS12:
4375 	case CHIP_VEGAM:
4376 	case CHIP_VEGA10:
4377 	case CHIP_VEGA12:
4378 	case CHIP_VEGA20:
4379 		if (dce110_register_irq_handlers(dm->adev)) {
4380 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4381 			goto fail;
4382 		}
4383 		break;
4384 	default:
4385 #if defined(CONFIG_DRM_AMD_DC_DCN)
4386 		switch (adev->ip_versions[DCE_HWIP][0]) {
4387 		case IP_VERSION(1, 0, 0):
4388 		case IP_VERSION(1, 0, 1):
4389 		case IP_VERSION(2, 0, 2):
4390 		case IP_VERSION(2, 0, 3):
4391 		case IP_VERSION(2, 0, 0):
4392 		case IP_VERSION(2, 1, 0):
4393 		case IP_VERSION(3, 0, 0):
4394 		case IP_VERSION(3, 0, 2):
4395 		case IP_VERSION(3, 0, 3):
4396 		case IP_VERSION(3, 0, 1):
4397 		case IP_VERSION(3, 1, 2):
4398 		case IP_VERSION(3, 1, 3):
4399 		case IP_VERSION(3, 1, 5):
4400 		case IP_VERSION(3, 1, 6):
4401 			if (dcn10_register_irq_handlers(dm->adev)) {
4402 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4403 				goto fail;
4404 			}
4405 			break;
4406 		default:
4407 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4408 					adev->ip_versions[DCE_HWIP][0]);
4409 			goto fail;
4410 		}
4411 #endif
4412 		break;
4413 	}
4414 
4415 	return 0;
4416 fail:
4417 	kfree(aencoder);
4418 	kfree(aconnector);
4419 
4420 	return -EINVAL;
4421 }
4422 
4423 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4424 {
4425 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4427 }
4428 
4429 /******************************************************************************
4430  * amdgpu_display_funcs functions
4431  *****************************************************************************/
4432 
4433 /*
4434  * dm_bandwidth_update - program display watermarks
4435  *
4436  * @adev: amdgpu_device pointer
4437  *
4438  * Calculate and program the display watermarks and line buffer allocation.
4439  */
4440 static void dm_bandwidth_update(struct amdgpu_device *adev)
4441 {
4442 	/* TODO: implement later */
4443 }
4444 
4445 static const struct amdgpu_display_funcs dm_display_funcs = {
4446 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4447 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4448 	.backlight_set_level = NULL, /* never called for DC */
4449 	.backlight_get_level = NULL, /* never called for DC */
4450 	.hpd_sense = NULL, /* called unconditionally */
4451 	.hpd_set_polarity = NULL, /* called unconditionally */
4452 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4453 	.page_flip_get_scanoutpos =
4454 		dm_crtc_get_scanoutpos, /* called unconditionally */
4455 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4456 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4457 };
4458 
4459 #if defined(CONFIG_DEBUG_KERNEL_DC)
4460 
4461 static ssize_t s3_debug_store(struct device *device,
4462 			      struct device_attribute *attr,
4463 			      const char *buf,
4464 			      size_t count)
4465 {
4466 	int ret;
4467 	int s3_state;
4468 	struct drm_device *drm_dev = dev_get_drvdata(device);
4469 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4470 
4471 	ret = kstrtoint(buf, 0, &s3_state);
4472 
4473 	if (ret == 0) {
4474 		if (s3_state) {
4475 			dm_resume(adev);
4476 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4477 		} else
4478 			dm_suspend(adev);
4479 	}
4480 
4481 	return ret == 0 ? count : 0;
4482 }
4483 
4484 DEVICE_ATTR_WO(s3_debug);
4485 
4486 #endif
4487 
4488 static int dm_early_init(void *handle)
4489 {
4490 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4491 
4492 	switch (adev->asic_type) {
4493 #if defined(CONFIG_DRM_AMD_DC_SI)
4494 	case CHIP_TAHITI:
4495 	case CHIP_PITCAIRN:
4496 	case CHIP_VERDE:
4497 		adev->mode_info.num_crtc = 6;
4498 		adev->mode_info.num_hpd = 6;
4499 		adev->mode_info.num_dig = 6;
4500 		break;
4501 	case CHIP_OLAND:
4502 		adev->mode_info.num_crtc = 2;
4503 		adev->mode_info.num_hpd = 2;
4504 		adev->mode_info.num_dig = 2;
4505 		break;
4506 #endif
4507 	case CHIP_BONAIRE:
4508 	case CHIP_HAWAII:
4509 		adev->mode_info.num_crtc = 6;
4510 		adev->mode_info.num_hpd = 6;
4511 		adev->mode_info.num_dig = 6;
4512 		break;
4513 	case CHIP_KAVERI:
4514 		adev->mode_info.num_crtc = 4;
4515 		adev->mode_info.num_hpd = 6;
4516 		adev->mode_info.num_dig = 7;
4517 		break;
4518 	case CHIP_KABINI:
4519 	case CHIP_MULLINS:
4520 		adev->mode_info.num_crtc = 2;
4521 		adev->mode_info.num_hpd = 6;
4522 		adev->mode_info.num_dig = 6;
4523 		break;
4524 	case CHIP_FIJI:
4525 	case CHIP_TONGA:
4526 		adev->mode_info.num_crtc = 6;
4527 		adev->mode_info.num_hpd = 6;
4528 		adev->mode_info.num_dig = 7;
4529 		break;
4530 	case CHIP_CARRIZO:
4531 		adev->mode_info.num_crtc = 3;
4532 		adev->mode_info.num_hpd = 6;
4533 		adev->mode_info.num_dig = 9;
4534 		break;
4535 	case CHIP_STONEY:
4536 		adev->mode_info.num_crtc = 2;
4537 		adev->mode_info.num_hpd = 6;
4538 		adev->mode_info.num_dig = 9;
4539 		break;
4540 	case CHIP_POLARIS11:
4541 	case CHIP_POLARIS12:
4542 		adev->mode_info.num_crtc = 5;
4543 		adev->mode_info.num_hpd = 5;
4544 		adev->mode_info.num_dig = 5;
4545 		break;
4546 	case CHIP_POLARIS10:
4547 	case CHIP_VEGAM:
4548 		adev->mode_info.num_crtc = 6;
4549 		adev->mode_info.num_hpd = 6;
4550 		adev->mode_info.num_dig = 6;
4551 		break;
4552 	case CHIP_VEGA10:
4553 	case CHIP_VEGA12:
4554 	case CHIP_VEGA20:
4555 		adev->mode_info.num_crtc = 6;
4556 		adev->mode_info.num_hpd = 6;
4557 		adev->mode_info.num_dig = 6;
4558 		break;
4559 	default:
4560 #if defined(CONFIG_DRM_AMD_DC_DCN)
4561 		switch (adev->ip_versions[DCE_HWIP][0]) {
4562 		case IP_VERSION(2, 0, 2):
4563 		case IP_VERSION(3, 0, 0):
4564 			adev->mode_info.num_crtc = 6;
4565 			adev->mode_info.num_hpd = 6;
4566 			adev->mode_info.num_dig = 6;
4567 			break;
4568 		case IP_VERSION(2, 0, 0):
4569 		case IP_VERSION(3, 0, 2):
4570 			adev->mode_info.num_crtc = 5;
4571 			adev->mode_info.num_hpd = 5;
4572 			adev->mode_info.num_dig = 5;
4573 			break;
4574 		case IP_VERSION(2, 0, 3):
4575 		case IP_VERSION(3, 0, 3):
4576 			adev->mode_info.num_crtc = 2;
4577 			adev->mode_info.num_hpd = 2;
4578 			adev->mode_info.num_dig = 2;
4579 			break;
4580 		case IP_VERSION(1, 0, 0):
4581 		case IP_VERSION(1, 0, 1):
4582 		case IP_VERSION(3, 0, 1):
4583 		case IP_VERSION(2, 1, 0):
4584 		case IP_VERSION(3, 1, 2):
4585 		case IP_VERSION(3, 1, 3):
4586 		case IP_VERSION(3, 1, 5):
4587 		case IP_VERSION(3, 1, 6):
4588 			adev->mode_info.num_crtc = 4;
4589 			adev->mode_info.num_hpd = 4;
4590 			adev->mode_info.num_dig = 4;
4591 			break;
4592 		default:
4593 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4594 					adev->ip_versions[DCE_HWIP][0]);
4595 			return -EINVAL;
4596 		}
4597 #endif
4598 		break;
4599 	}
4600 
4601 	amdgpu_dm_set_irq_funcs(adev);
4602 
4603 	if (adev->mode_info.funcs == NULL)
4604 		adev->mode_info.funcs = &dm_display_funcs;
4605 
4606 	/*
4607 	 * Note: Do NOT change adev->audio_endpt_rreg and
4608 	 * adev->audio_endpt_wreg because they are initialised in
4609 	 * amdgpu_device_init()
4610 	 */
4611 #if defined(CONFIG_DEBUG_KERNEL_DC)
4612 	device_create_file(
4613 		adev_to_drm(adev)->dev,
4614 		&dev_attr_s3_debug);
4615 #endif
4616 
4617 	return 0;
4618 }
4619 
4620 static bool modeset_required(struct drm_crtc_state *crtc_state,
4621 			     struct dc_stream_state *new_stream,
4622 			     struct dc_stream_state *old_stream)
4623 {
4624 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4625 }
4626 
4627 static bool modereset_required(struct drm_crtc_state *crtc_state)
4628 {
4629 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4630 }
4631 
4632 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4633 {
4634 	drm_encoder_cleanup(encoder);
4635 	kfree(encoder);
4636 }
4637 
4638 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4639 	.destroy = amdgpu_dm_encoder_destroy,
4640 };
4641 
4642 
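/*
 * Look up the per-format scaling limits (in 1/1000 units) that DC
 * reports for this framebuffer format, normalizing a cap of 1 (no
 * scaling allowed) to 1000.
 */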
4643 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4644 					 struct drm_framebuffer *fb,
4645 					 int *min_downscale, int *max_upscale)
4646 {
4647 	struct amdgpu_device *adev = drm_to_adev(dev);
4648 	struct dc *dc = adev->dm.dc;
4649 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4650 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4651 
4652 	switch (fb->format->format) {
4653 	case DRM_FORMAT_P010:
4654 	case DRM_FORMAT_NV12:
4655 	case DRM_FORMAT_NV21:
4656 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4657 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4658 		break;
4659 
4660 	case DRM_FORMAT_XRGB16161616F:
4661 	case DRM_FORMAT_ARGB16161616F:
4662 	case DRM_FORMAT_XBGR16161616F:
4663 	case DRM_FORMAT_ABGR16161616F:
4664 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4665 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4666 		break;
4667 
4668 	default:
4669 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4670 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4671 		break;
4672 	}
4673 
4674 	/*
4675 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4676 	 * scaling factor of 1.0 == 1000 units.
4677 	 */
4678 	if (*max_upscale == 1)
4679 		*max_upscale = 1000;
4680 
4681 	if (*min_downscale == 1)
4682 		*min_downscale = 1000;
4683 }
4684 
4685 
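/*
 * Translate the DRM plane state's src/dst rectangles into DC scaling
 * info and validate the resulting scale factors against the plane caps.
 */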
4686 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4687 				const struct drm_plane_state *state,
4688 				struct dc_scaling_info *scaling_info)
4689 {
4690 	int scale_w, scale_h, min_downscale, max_upscale;
4691 
4692 	memset(scaling_info, 0, sizeof(*scaling_info));
4693 
4694 	/* Source is fixed-point 16.16; ignore the fractional part for now. */
4695 	scaling_info->src_rect.x = state->src_x >> 16;
4696 	scaling_info->src_rect.y = state->src_y >> 16;
4697 
4698 	/*
4699 	 * For reasons we don't (yet) fully understand a non-zero
4700 	 * src_y coordinate into an NV12 buffer can cause a
4701 	 * system hang on DCN1x.
4702 	 * To avoid hangs (and maybe be overly cautious)
4703 	 * let's reject both non-zero src_x and src_y.
4704 	 *
4705 	 * We currently know of only one use-case to reproduce a
4706 	 * scenario with non-zero src_x and src_y for NV12, which
4707 	 * is to gesture the YouTube Android app into full screen
4708 	 * on ChromeOS.
4709 	 */
4710 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4711 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4712 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4713 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4714 		return -EINVAL;
4715 
4716 	scaling_info->src_rect.width = state->src_w >> 16;
4717 	if (scaling_info->src_rect.width == 0)
4718 		return -EINVAL;
4719 
4720 	scaling_info->src_rect.height = state->src_h >> 16;
4721 	if (scaling_info->src_rect.height == 0)
4722 		return -EINVAL;
4723 
4724 	scaling_info->dst_rect.x = state->crtc_x;
4725 	scaling_info->dst_rect.y = state->crtc_y;
4726 
4727 	if (state->crtc_w == 0)
4728 		return -EINVAL;
4729 
4730 	scaling_info->dst_rect.width = state->crtc_w;
4731 
4732 	if (state->crtc_h == 0)
4733 		return -EINVAL;
4734 
4735 	scaling_info->dst_rect.height = state->crtc_h;
4736 
4737 	/* DRM doesn't specify clipping on destination output. */
4738 	scaling_info->clip_rect = scaling_info->dst_rect;
4739 
4740 	/* Validate scaling per-format with DC plane caps */
4741 	if (state->plane && state->plane->dev && state->fb) {
4742 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4743 					     &min_downscale, &max_upscale);
4744 	} else {
4745 		min_downscale = 250;
4746 		max_upscale = 16000;
4747 	}
4748 
4749 	scale_w = scaling_info->dst_rect.width * 1000 /
4750 		  scaling_info->src_rect.width;
4751 
4752 	if (scale_w < min_downscale || scale_w > max_upscale)
4753 		return -EINVAL;
4754 
4755 	scale_h = scaling_info->dst_rect.height * 1000 /
4756 		  scaling_info->src_rect.height;
4757 
4758 	if (scale_h < min_downscale || scale_h > max_upscale)
4759 		return -EINVAL;
4760 
4761 	/*
4762 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4763 	 * assume reasonable defaults based on the format.
4764 	 */
4765 
4766 	return 0;
4767 }
4768 
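/* Decode legacy GFX8 tiling flags into DC tiling parameters. */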
4769 static void
4770 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4771 				 uint64_t tiling_flags)
4772 {
4773 	/* Fill GFX8 params */
4774 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4775 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4776 
4777 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4778 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4779 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4780 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4781 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4782 
4783 		/* XXX fix me for VI */
4784 		tiling_info->gfx8.num_banks = num_banks;
4785 		tiling_info->gfx8.array_mode =
4786 				DC_ARRAY_2D_TILED_THIN1;
4787 		tiling_info->gfx8.tile_split = tile_split;
4788 		tiling_info->gfx8.bank_width = bankw;
4789 		tiling_info->gfx8.bank_height = bankh;
4790 		tiling_info->gfx8.tile_aspect = mtaspect;
4791 		tiling_info->gfx8.tile_mode =
4792 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4793 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4794 			== DC_ARRAY_1D_TILED_THIN1) {
4795 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4796 	}
4797 
4798 	tiling_info->gfx8.pipe_config =
4799 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4800 }
4801 
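/* Seed GFX9+ tiling parameters from the device's GB_ADDR_CONFIG fields. */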
4802 static void
4803 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4804 				  union dc_tiling_info *tiling_info)
4805 {
4806 	tiling_info->gfx9.num_pipes =
4807 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4808 	tiling_info->gfx9.num_banks =
4809 		adev->gfx.config.gb_addr_config_fields.num_banks;
4810 	tiling_info->gfx9.pipe_interleave =
4811 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4812 	tiling_info->gfx9.num_shader_engines =
4813 		adev->gfx.config.gb_addr_config_fields.num_se;
4814 	tiling_info->gfx9.max_compressed_frags =
4815 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4816 	tiling_info->gfx9.num_rb_per_se =
4817 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4818 	tiling_info->gfx9.shaderEnable = 1;
4819 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4820 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4821 }
4822 
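/*
 * Check with DC whether the requested DCC parameters can actually be
 * supported for this format, surface size and swizzle mode.
 */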
4823 static int
4824 validate_dcc(struct amdgpu_device *adev,
4825 	     const enum surface_pixel_format format,
4826 	     const enum dc_rotation_angle rotation,
4827 	     const union dc_tiling_info *tiling_info,
4828 	     const struct dc_plane_dcc_param *dcc,
4829 	     const struct dc_plane_address *address,
4830 	     const struct plane_size *plane_size)
4831 {
4832 	struct dc *dc = adev->dm.dc;
4833 	struct dc_dcc_surface_param input;
4834 	struct dc_surface_dcc_cap output;
4835 
4836 	memset(&input, 0, sizeof(input));
4837 	memset(&output, 0, sizeof(output));
4838 
4839 	if (!dcc->enable)
4840 		return 0;
4841 
4842 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4843 	    !dc->cap_funcs.get_dcc_compression_cap)
4844 		return -EINVAL;
4845 
4846 	input.format = format;
4847 	input.surface_size.width = plane_size->surface_size.width;
4848 	input.surface_size.height = plane_size->surface_size.height;
4849 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4850 
4851 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4852 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4853 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4854 		input.scan = SCAN_DIRECTION_VERTICAL;
4855 
4856 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4857 		return -EINVAL;
4858 
4859 	if (!output.capable)
4860 		return -EINVAL;
4861 
4862 	if (dcc->independent_64b_blks == 0 &&
4863 	    output.grph.rgb.independent_64b_blks != 0)
4864 		return -EINVAL;
4865 
4866 	return 0;
4867 }
4868 
4869 static bool
4870 modifier_has_dcc(uint64_t modifier)
4871 {
4872 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4873 }
4874 
4875 static unsigned int
4876 modifier_gfx9_swizzle_mode(uint64_t modifier)
4877 {
4878 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4879 		return 0;
4880 
4881 	return AMD_FMT_MOD_GET(TILE, modifier);
4882 }
4883 
4884 static const struct drm_format_info *
4885 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4886 {
4887 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4888 }
4889 
4890 static void
4891 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4892 				    union dc_tiling_info *tiling_info,
4893 				    uint64_t modifier)
4894 {
4895 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4896 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4897 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4898 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4899 
4900 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4901 
4902 	if (!IS_AMD_FMT_MOD(modifier))
4903 		return;
4904 
4905 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4906 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4907 
4908 	if (adev->family >= AMDGPU_FAMILY_NV) {
4909 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4910 	} else {
4911 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4912 
4913 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4914 	}
4915 }
4916 
4917 enum dm_micro_swizzle {
4918 	MICRO_SWIZZLE_Z = 0,
4919 	MICRO_SWIZZLE_S = 1,
4920 	MICRO_SWIZZLE_D = 2,
4921 	MICRO_SWIZZLE_R = 3
4922 };
4923 
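/*
 * Validate a format/modifier combination against the plane's modifier
 * list and the per-family micro-tile and DCC restrictions.
 */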
4924 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4925 					  uint32_t format,
4926 					  uint64_t modifier)
4927 {
4928 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4929 	const struct drm_format_info *info = drm_format_info(format);
4930 	int i;
4931 
4932 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4933 
4934 	if (!info)
4935 		return false;
4936 
4937 	/*
4938 	 * We always have to allow these modifiers:
4939 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4940 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4941 	 */
4942 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4943 	    modifier == DRM_FORMAT_MOD_INVALID) {
4944 		return true;
4945 	}
4946 
4947 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4948 	for (i = 0; i < plane->modifier_count; i++) {
4949 		if (modifier == plane->modifiers[i])
4950 			break;
4951 	}
4952 	if (i == plane->modifier_count)
4953 		return false;
4954 
4955 	/*
4956 	 * For D swizzle the canonical modifier depends on the bpp, so check
4957 	 * it here.
4958 	 */
4959 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4960 	    adev->family >= AMDGPU_FAMILY_NV) {
4961 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4962 			return false;
4963 	}
4964 
4965 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4966 	    info->cpp[0] < 8)
4967 		return false;
4968 
4969 	if (modifier_has_dcc(modifier)) {
4970 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4971 		if (info->cpp[0] != 4)
4972 			return false;
4973 		/* We support multi-planar formats, but not when combined with
4974 		 * additional DCC metadata planes. */
4975 		if (info->num_planes > 1)
4976 			return false;
4977 	}
4978 
4979 	return true;
4980 }
4981 
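/*
 * Append a modifier to a dynamically grown array. On allocation failure
 * the array is freed and set to NULL so callers can detect the error.
 */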
4982 static void
4983 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4984 {
4985 	if (!*mods)
4986 		return;
4987 
4988 	if (*cap - *size < 1) {
4989 		uint64_t new_cap = *cap * 2;
4990 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4991 
4992 		if (!new_mods) {
4993 			kfree(*mods);
4994 			*mods = NULL;
4995 			return;
4996 		}
4997 
4998 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4999 		kfree(*mods);
5000 		*mods = new_mods;
5001 		*cap = new_cap;
5002 	}
5003 
5004 	(*mods)[*size] = mod;
5005 	*size += 1;
5006 }
5007 
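/* Advertise the tiling and DCC modifiers supported on GFX9 (Vega / Raven). */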
5008 static void
5009 add_gfx9_modifiers(const struct amdgpu_device *adev,
5010 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5011 {
5012 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5013 	int pipe_xor_bits = min(8, pipes +
5014 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5015 	int bank_xor_bits = min(8 - pipe_xor_bits,
5016 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5017 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5018 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5019 
5020 
5021 	if (adev->family == AMDGPU_FAMILY_RV) {
5022 		/* Raven2 and later */
5023 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5024 
5025 		/*
5026 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5027 		 * doesn't support _D on DCN
5028 		 */
5029 
5030 		if (has_constant_encode) {
5031 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5032 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5033 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5034 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5035 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5036 				    AMD_FMT_MOD_SET(DCC, 1) |
5037 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5038 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5039 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5040 		}
5041 
5042 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5043 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5044 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5045 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5046 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5047 			    AMD_FMT_MOD_SET(DCC, 1) |
5048 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5049 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5050 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5051 
5052 		if (has_constant_encode) {
5053 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5055 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5056 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5057 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5058 				    AMD_FMT_MOD_SET(DCC, 1) |
5059 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5060 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5061 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5062 
5063 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5064 				    AMD_FMT_MOD_SET(RB, rb) |
5065 				    AMD_FMT_MOD_SET(PIPE, pipes));
5066 		}
5067 
5068 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5069 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5070 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5071 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5072 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5073 			    AMD_FMT_MOD_SET(DCC, 1) |
5074 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5075 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5076 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5077 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5078 			    AMD_FMT_MOD_SET(RB, rb) |
5079 			    AMD_FMT_MOD_SET(PIPE, pipes));
5080 	}
5081 
5082 	/*
5083 	 * Only supported for 64bpp on Raven, will be filtered on format in
5084 	 * dm_plane_format_mod_supported.
5085 	 */
5086 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5088 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5089 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5090 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5091 
5092 	if (adev->family == AMDGPU_FAMILY_RV) {
5093 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5094 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5095 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5096 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5097 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5098 	}
5099 
5100 	/*
5101 	 * Only supported for 64bpp on Raven, will be filtered on format in
5102 	 * dm_plane_format_mod_supported.
5103 	 */
5104 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5105 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5106 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5107 
5108 	if (adev->family == AMDGPU_FAMILY_RV) {
5109 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5111 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5112 	}
5113 }
5114 
5115 static void
5116 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5117 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5118 {
5119 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5120 
5121 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5122 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5123 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5124 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5125 		    AMD_FMT_MOD_SET(DCC, 1) |
5126 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5127 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5128 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5129 
5130 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5131 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5132 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5133 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5134 		    AMD_FMT_MOD_SET(DCC, 1) |
5135 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5136 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5137 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5138 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5139 
5140 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5141 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5142 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5143 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5144 
5145 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5147 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5148 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5149 
5150 
5151 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5152 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5154 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5155 
5156 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5157 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5158 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5159 }
5160 
5161 static void
5162 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5163 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5164 {
5165 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5166 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5167 
5168 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5169 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5170 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5171 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5172 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5173 		    AMD_FMT_MOD_SET(DCC, 1) |
5174 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5178 
5179 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5180 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5181 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5182 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5183 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5184 		    AMD_FMT_MOD_SET(DCC, 1) |
5185 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5186 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5187 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5188 
5189 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5190 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5191 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5192 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5193 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5194 		    AMD_FMT_MOD_SET(DCC, 1) |
5195 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5196 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5197 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5198 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5199 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5200 
5201 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5202 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5203 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5204 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5205 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5206 		    AMD_FMT_MOD_SET(DCC, 1) |
5207 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5208 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5209 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5210 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5211 
5212 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5213 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5214 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5215 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5216 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5217 
5218 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5219 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5220 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5221 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5222 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5223 
5224 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5225 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5226 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5227 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5228 
5229 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5230 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5231 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5232 }
5233 
5234 static int
5235 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5236 {
5237 	uint64_t size = 0, capacity = 128;
5238 	*mods = NULL;
5239 
5240 	/* We have not hooked up any pre-GFX9 modifiers. */
5241 	if (adev->family < AMDGPU_FAMILY_AI)
5242 		return 0;
5243 
5244 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5245 
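	/* Cursor planes only get the linear modifier; INVALID terminates the list. */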
5246 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5247 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5248 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5249 		return *mods ? 0 : -ENOMEM;
5250 	}
5251 
5252 	switch (adev->family) {
5253 	case AMDGPU_FAMILY_AI:
5254 	case AMDGPU_FAMILY_RV:
5255 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5256 		break;
5257 	case AMDGPU_FAMILY_NV:
5258 	case AMDGPU_FAMILY_VGH:
5259 	case AMDGPU_FAMILY_YC:
5260 	case AMDGPU_FAMILY_GC_10_3_6:
5261 	case AMDGPU_FAMILY_GC_10_3_7:
5262 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5263 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5264 		else
5265 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5266 		break;
5267 	}
5268 
5269 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5270 
5271 	/* INVALID marks the end of the list. */
5272 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5273 
5274 	if (!*mods)
5275 		return -ENOMEM;
5276 
5277 	return 0;
5278 }
5279 
5280 static int
5281 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5282 					  const struct amdgpu_framebuffer *afb,
5283 					  const enum surface_pixel_format format,
5284 					  const enum dc_rotation_angle rotation,
5285 					  const struct plane_size *plane_size,
5286 					  union dc_tiling_info *tiling_info,
5287 					  struct dc_plane_dcc_param *dcc,
5288 					  struct dc_plane_address *address,
5289 					  const bool force_disable_dcc)
5290 {
5291 	const uint64_t modifier = afb->base.modifier;
5292 	int ret = 0;
5293 
5294 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5295 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5296 
5297 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
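		/* DCC metadata is described by the framebuffer's second plane (offsets[1]/pitches[1]). */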
5298 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5299 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5300 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5301 
5302 		dcc->enable = 1;
5303 		dcc->meta_pitch = afb->base.pitches[1];
5304 		dcc->independent_64b_blks = independent_64b_blks;
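		/*
		 * gfx10.3 (RB+) hubp supports more fine-grained DCC independent
		 * block settings than earlier DCN, hence the extra cases below.
		 */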
5305 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5306 			if (independent_64b_blks && independent_128b_blks)
5307 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5308 			else if (independent_128b_blks)
5309 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5310 			else if (independent_64b_blks && !independent_128b_blks)
5311 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5312 			else
5313 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5314 		} else {
5315 			if (independent_64b_blks)
5316 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5317 			else
5318 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5319 		}
5320 
5321 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5322 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5323 	}
5324 
5325 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5326 	if (ret)
5327 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5328 
5329 	return ret;
5330 }
5331 
5332 static int
5333 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5334 			     const struct amdgpu_framebuffer *afb,
5335 			     const enum surface_pixel_format format,
5336 			     const enum dc_rotation_angle rotation,
5337 			     const uint64_t tiling_flags,
5338 			     union dc_tiling_info *tiling_info,
5339 			     struct plane_size *plane_size,
5340 			     struct dc_plane_dcc_param *dcc,
5341 			     struct dc_plane_address *address,
5342 			     bool tmz_surface,
5343 			     bool force_disable_dcc)
5344 {
5345 	const struct drm_framebuffer *fb = &afb->base;
5346 	int ret;
5347 
5348 	memset(tiling_info, 0, sizeof(*tiling_info));
5349 	memset(plane_size, 0, sizeof(*plane_size));
5350 	memset(dcc, 0, sizeof(*dcc));
5351 	memset(address, 0, sizeof(*address));
5352 
5353 	address->tmz_surface = tmz_surface;
5354 
5355 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
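		/* Graphics (RGB) formats use a single plane: one address and one pitch. */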
5356 		uint64_t addr = afb->address + fb->offsets[0];
5357 
5358 		plane_size->surface_size.x = 0;
5359 		plane_size->surface_size.y = 0;
5360 		plane_size->surface_size.width = fb->width;
5361 		plane_size->surface_size.height = fb->height;
5362 		plane_size->surface_pitch =
5363 			fb->pitches[0] / fb->format->cpp[0];
5364 
5365 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5366 		address->grph.addr.low_part = lower_32_bits(addr);
5367 		address->grph.addr.high_part = upper_32_bits(addr);
5368 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
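		/* Semi-planar video formats (e.g. NV12/P010) carry separate luma and chroma planes. */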
5369 		uint64_t luma_addr = afb->address + fb->offsets[0];
5370 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5371 
5372 		plane_size->surface_size.x = 0;
5373 		plane_size->surface_size.y = 0;
5374 		plane_size->surface_size.width = fb->width;
5375 		plane_size->surface_size.height = fb->height;
5376 		plane_size->surface_pitch =
5377 			fb->pitches[0] / fb->format->cpp[0];
5378 
5379 		plane_size->chroma_size.x = 0;
5380 		plane_size->chroma_size.y = 0;
5381 		/* TODO: set these based on surface format */
5382 		plane_size->chroma_size.width = fb->width / 2;
5383 		plane_size->chroma_size.height = fb->height / 2;
5384 
5385 		plane_size->chroma_pitch =
5386 			fb->pitches[1] / fb->format->cpp[1];
5387 
5388 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5389 		address->video_progressive.luma_addr.low_part =
5390 			lower_32_bits(luma_addr);
5391 		address->video_progressive.luma_addr.high_part =
5392 			upper_32_bits(luma_addr);
5393 		address->video_progressive.chroma_addr.low_part =
5394 			lower_32_bits(chroma_addr);
5395 		address->video_progressive.chroma_addr.high_part =
5396 			upper_32_bits(chroma_addr);
5397 	}
5398 
5399 	if (adev->family >= AMDGPU_FAMILY_AI) {
5400 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5401 								rotation, plane_size,
5402 								tiling_info, dcc,
5403 								address,
5404 								force_disable_dcc);
5405 		if (ret)
5406 			return ret;
5407 	} else {
5408 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5409 	}
5410 
5411 	return 0;
5412 }
5413 
5414 static void
5415 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5416 			       bool *per_pixel_alpha, bool *global_alpha,
5417 			       int *global_alpha_value)
5418 {
5419 	*per_pixel_alpha = false;
5420 	*global_alpha = false;
5421 	*global_alpha_value = 0xff;
5422 
5423 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5424 		return;
5425 
5426 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5427 		static const uint32_t alpha_formats[] = {
5428 			DRM_FORMAT_ARGB8888,
5429 			DRM_FORMAT_RGBA8888,
5430 			DRM_FORMAT_ABGR8888,
5431 		};
5432 		uint32_t format = plane_state->fb->format->format;
5433 		unsigned int i;
5434 
5435 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5436 			if (format == alpha_formats[i]) {
5437 				*per_pixel_alpha = true;
5438 				break;
5439 			}
5440 		}
5441 	}
5442 
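	/* drm_plane_state.alpha is 16 bits wide; DC takes an 8-bit global alpha. */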
5443 	if (plane_state->alpha < 0xffff) {
5444 		*global_alpha = true;
5445 		*global_alpha_value = plane_state->alpha >> 8;
5446 	}
5447 }
5448 
5449 static int
5450 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5451 			    const enum surface_pixel_format format,
5452 			    enum dc_color_space *color_space)
5453 {
5454 	bool full_range;
5455 
5456 	*color_space = COLOR_SPACE_SRGB;
5457 
5458 	/* DRM color properties only affect non-RGB formats. */
5459 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5460 		return 0;
5461 
5462 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5463 
5464 	switch (plane_state->color_encoding) {
5465 	case DRM_COLOR_YCBCR_BT601:
5466 		if (full_range)
5467 			*color_space = COLOR_SPACE_YCBCR601;
5468 		else
5469 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5470 		break;
5471 
5472 	case DRM_COLOR_YCBCR_BT709:
5473 		if (full_range)
5474 			*color_space = COLOR_SPACE_YCBCR709;
5475 		else
5476 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5477 		break;
5478 
5479 	case DRM_COLOR_YCBCR_BT2020:
5480 		if (full_range)
5481 			*color_space = COLOR_SPACE_2020_YCBCR;
5482 		else
5483 			return -EINVAL;
5484 		break;
5485 
5486 	default:
5487 		return -EINVAL;
5488 	}
5489 
5490 	return 0;
5491 }
5492 
5493 static int
5494 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5495 			    const struct drm_plane_state *plane_state,
5496 			    const uint64_t tiling_flags,
5497 			    struct dc_plane_info *plane_info,
5498 			    struct dc_plane_address *address,
5499 			    bool tmz_surface,
5500 			    bool force_disable_dcc)
5501 {
5502 	const struct drm_framebuffer *fb = plane_state->fb;
5503 	const struct amdgpu_framebuffer *afb =
5504 		to_amdgpu_framebuffer(plane_state->fb);
5505 	int ret;
5506 
5507 	memset(plane_info, 0, sizeof(*plane_info));
5508 
5509 	switch (fb->format->format) {
5510 	case DRM_FORMAT_C8:
5511 		plane_info->format =
5512 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5513 		break;
5514 	case DRM_FORMAT_RGB565:
5515 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5516 		break;
5517 	case DRM_FORMAT_XRGB8888:
5518 	case DRM_FORMAT_ARGB8888:
5519 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5520 		break;
5521 	case DRM_FORMAT_XRGB2101010:
5522 	case DRM_FORMAT_ARGB2101010:
5523 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5524 		break;
5525 	case DRM_FORMAT_XBGR2101010:
5526 	case DRM_FORMAT_ABGR2101010:
5527 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5528 		break;
5529 	case DRM_FORMAT_XBGR8888:
5530 	case DRM_FORMAT_ABGR8888:
5531 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5532 		break;
5533 	case DRM_FORMAT_NV21:
5534 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5535 		break;
5536 	case DRM_FORMAT_NV12:
5537 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5538 		break;
5539 	case DRM_FORMAT_P010:
5540 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5541 		break;
5542 	case DRM_FORMAT_XRGB16161616F:
5543 	case DRM_FORMAT_ARGB16161616F:
5544 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5545 		break;
5546 	case DRM_FORMAT_XBGR16161616F:
5547 	case DRM_FORMAT_ABGR16161616F:
5548 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5549 		break;
5550 	case DRM_FORMAT_XRGB16161616:
5551 	case DRM_FORMAT_ARGB16161616:
5552 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5553 		break;
5554 	case DRM_FORMAT_XBGR16161616:
5555 	case DRM_FORMAT_ABGR16161616:
5556 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5557 		break;
5558 	default:
5559 		DRM_ERROR(
5560 			"Unsupported screen format %p4cc\n",
5561 			&fb->format->format);
5562 		return -EINVAL;
5563 	}
5564 
5565 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5566 	case DRM_MODE_ROTATE_0:
5567 		plane_info->rotation = ROTATION_ANGLE_0;
5568 		break;
5569 	case DRM_MODE_ROTATE_90:
5570 		plane_info->rotation = ROTATION_ANGLE_90;
5571 		break;
5572 	case DRM_MODE_ROTATE_180:
5573 		plane_info->rotation = ROTATION_ANGLE_180;
5574 		break;
5575 	case DRM_MODE_ROTATE_270:
5576 		plane_info->rotation = ROTATION_ANGLE_270;
5577 		break;
5578 	default:
5579 		plane_info->rotation = ROTATION_ANGLE_0;
5580 		break;
5581 	}
5582 
5583 	plane_info->visible = true;
5584 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5585 
5586 	plane_info->layer_index = 0;
5587 
5588 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5589 					  &plane_info->color_space);
5590 	if (ret)
5591 		return ret;
5592 
5593 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5594 					   plane_info->rotation, tiling_flags,
5595 					   &plane_info->tiling_info,
5596 					   &plane_info->plane_size,
5597 					   &plane_info->dcc, address, tmz_surface,
5598 					   force_disable_dcc);
5599 	if (ret)
5600 		return ret;
5601 
5602 	fill_blending_from_plane_state(
5603 		plane_state, &plane_info->per_pixel_alpha,
5604 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5605 
5606 	return 0;
5607 }
5608 
5609 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5610 				    struct dc_plane_state *dc_plane_state,
5611 				    struct drm_plane_state *plane_state,
5612 				    struct drm_crtc_state *crtc_state)
5613 {
5614 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5615 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5616 	struct dc_scaling_info scaling_info;
5617 	struct dc_plane_info plane_info;
5618 	int ret;
5619 	bool force_disable_dcc = false;
5620 
5621 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5622 	if (ret)
5623 		return ret;
5624 
5625 	dc_plane_state->src_rect = scaling_info.src_rect;
5626 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5627 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5628 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5629 
5630 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5631 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5632 					  afb->tiling_flags,
5633 					  &plane_info,
5634 					  &dc_plane_state->address,
5635 					  afb->tmz_surface,
5636 					  force_disable_dcc);
5637 	if (ret)
5638 		return ret;
5639 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
5643 	dc_plane_state->plane_size = plane_info.plane_size;
5644 	dc_plane_state->rotation = plane_info.rotation;
5645 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5646 	dc_plane_state->stereo_format = plane_info.stereo_format;
5647 	dc_plane_state->tiling_info = plane_info.tiling_info;
5648 	dc_plane_state->visible = plane_info.visible;
5649 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5650 	dc_plane_state->global_alpha = plane_info.global_alpha;
5651 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5652 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5654 	dc_plane_state->flip_int_enabled = true;
5655 
5656 	/*
5657 	 * Always set input transfer function, since plane state is refreshed
5658 	 * every time.
5659 	 */
5660 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5661 	if (ret)
5662 		return ret;
5663 
5664 	return 0;
5665 }
5666 
5667 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5668 					   const struct dm_connector_state *dm_state,
5669 					   struct dc_stream_state *stream)
5670 {
5671 	enum amdgpu_rmx_type rmx_type;
5672 
	struct rect src = { 0 }; /* viewport in composition space */
5674 	struct rect dst = { 0 }; /* stream addressable area */
5675 
5676 	/* no mode. nothing to be done */
5677 	if (!mode)
5678 		return;
5679 
5680 	/* Full screen scaling by default */
5681 	src.width = mode->hdisplay;
5682 	src.height = mode->vdisplay;
5683 	dst.width = stream->timing.h_addressable;
5684 	dst.height = stream->timing.v_addressable;
5685 
5686 	if (dm_state) {
5687 		rmx_type = dm_state->scaling;
5688 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5689 			if (src.width * dst.height <
5690 					src.height * dst.width) {
5691 				/* height needs less upscaling/more downscaling */
5692 				dst.width = src.width *
5693 						dst.height / src.height;
5694 			} else {
5695 				/* width needs less upscaling/more downscaling */
5696 				dst.height = src.height *
5697 						dst.width / src.width;
5698 			}
5699 		} else if (rmx_type == RMX_CENTER) {
5700 			dst = src;
5701 		}
5702 
5703 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5704 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5705 
5706 		if (dm_state->underscan_enable) {
5707 			dst.x += dm_state->underscan_hborder / 2;
5708 			dst.y += dm_state->underscan_vborder / 2;
5709 			dst.width -= dm_state->underscan_hborder;
5710 			dst.height -= dm_state->underscan_vborder;
5711 		}
5712 	}
5713 
5714 	stream->src = src;
5715 	stream->dst = dst;
5716 
5717 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5718 		      dst.x, dst.y, dst.width, dst.height);
5719 
5720 }
5721 
5722 static enum dc_color_depth
5723 convert_color_depth_from_display_info(const struct drm_connector *connector,
5724 				      bool is_y420, int requested_bpc)
5725 {
5726 	uint8_t bpc;
5727 
5728 	if (is_y420) {
5729 		bpc = 8;
5730 
5731 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5732 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5733 			bpc = 16;
5734 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5735 			bpc = 12;
5736 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5737 			bpc = 10;
5738 	} else {
5739 		bpc = (uint8_t)connector->display_info.bpc;
5740 		/* Assume 8 bpc by default if no bpc is specified. */
5741 		bpc = bpc ? bpc : 8;
5742 	}
5743 
5744 	if (requested_bpc > 0) {
5745 		/*
5746 		 * Cap display bpc based on the user requested value.
5747 		 *
		 * The value for state->max_bpc may not be correctly updated
5749 		 * depending on when the connector gets added to the state
5750 		 * or if this was called outside of atomic check, so it
5751 		 * can't be used directly.
5752 		 */
5753 		bpc = min_t(u8, bpc, requested_bpc);
5754 
5755 		/* Round down to the nearest even number. */
5756 		bpc = bpc - (bpc & 1);
5757 	}
5758 
5759 	switch (bpc) {
5760 	case 0:
5761 		/*
5762 		 * Temporary Work around, DRM doesn't parse color depth for
5763 		 * EDID revision before 1.4
5764 		 * TODO: Fix edid parsing
5765 		 */
5766 		return COLOR_DEPTH_888;
5767 	case 6:
5768 		return COLOR_DEPTH_666;
5769 	case 8:
5770 		return COLOR_DEPTH_888;
5771 	case 10:
5772 		return COLOR_DEPTH_101010;
5773 	case 12:
5774 		return COLOR_DEPTH_121212;
5775 	case 14:
5776 		return COLOR_DEPTH_141414;
5777 	case 16:
5778 		return COLOR_DEPTH_161616;
5779 	default:
5780 		return COLOR_DEPTH_UNDEFINED;
5781 	}
5782 }
5783 
5784 static enum dc_aspect_ratio
5785 get_aspect_ratio(const struct drm_display_mode *mode_in)
5786 {
5787 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5788 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5789 }
5790 
5791 static enum dc_color_space
5792 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5793 {
5794 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5795 
5796 	switch (dc_crtc_timing->pixel_encoding)	{
5797 	case PIXEL_ENCODING_YCBCR422:
5798 	case PIXEL_ENCODING_YCBCR444:
5799 	case PIXEL_ENCODING_YCBCR420:
5800 	{
5801 		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec: use YCbCr709 above it and
		 * YCbCr601 below it
5805 		 */
5806 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5807 			if (dc_crtc_timing->flags.Y_ONLY)
5808 				color_space =
5809 					COLOR_SPACE_YCBCR709_LIMITED;
5810 			else
5811 				color_space = COLOR_SPACE_YCBCR709;
5812 		} else {
5813 			if (dc_crtc_timing->flags.Y_ONLY)
5814 				color_space =
5815 					COLOR_SPACE_YCBCR601_LIMITED;
5816 			else
5817 				color_space = COLOR_SPACE_YCBCR601;
5818 		}
5819 
5820 	}
5821 	break;
5822 	case PIXEL_ENCODING_RGB:
5823 		color_space = COLOR_SPACE_SRGB;
5824 		break;
5825 
5826 	default:
5827 		WARN_ON(1);
5828 		break;
5829 	}
5830 
5831 	return color_space;
5832 }
5833 
5834 static bool adjust_colour_depth_from_display_info(
5835 	struct dc_crtc_timing *timing_out,
5836 	const struct drm_display_info *info)
5837 {
5838 	enum dc_color_depth depth = timing_out->display_color_depth;
5839 	int normalized_clk;
5840 	do {
5841 		normalized_clk = timing_out->pix_clk_100hz / 10;
5842 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5843 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5844 			normalized_clk /= 2;
5845 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5846 		switch (depth) {
5847 		case COLOR_DEPTH_888:
5848 			break;
5849 		case COLOR_DEPTH_101010:
5850 			normalized_clk = (normalized_clk * 30) / 24;
5851 			break;
5852 		case COLOR_DEPTH_121212:
5853 			normalized_clk = (normalized_clk * 36) / 24;
5854 			break;
5855 		case COLOR_DEPTH_161616:
5856 			normalized_clk = (normalized_clk * 48) / 24;
5857 			break;
5858 		default:
5859 			/* The above depths are the only ones valid for HDMI. */
5860 			return false;
5861 		}
5862 		if (normalized_clk <= info->max_tmds_clock) {
5863 			timing_out->display_color_depth = depth;
5864 			return true;
5865 		}
5866 	} while (--depth > COLOR_DEPTH_666);
5867 	return false;
5868 }
5869 
5870 static void fill_stream_properties_from_drm_display_mode(
5871 	struct dc_stream_state *stream,
5872 	const struct drm_display_mode *mode_in,
5873 	const struct drm_connector *connector,
5874 	const struct drm_connector_state *connector_state,
5875 	const struct dc_stream_state *old_stream,
5876 	int requested_bpc)
5877 {
5878 	struct dc_crtc_timing *timing_out = &stream->timing;
5879 	const struct drm_display_info *info = &connector->display_info;
5880 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5881 	struct hdmi_vendor_infoframe hv_frame;
5882 	struct hdmi_avi_infoframe avi_frame;
5883 
5884 	memset(&hv_frame, 0, sizeof(hv_frame));
5885 	memset(&avi_frame, 0, sizeof(avi_frame));
5886 
5887 	timing_out->h_border_left = 0;
5888 	timing_out->h_border_right = 0;
5889 	timing_out->v_border_top = 0;
5890 	timing_out->v_border_bottom = 0;
5891 	/* TODO: un-hardcode */
5892 	if (drm_mode_is_420_only(info, mode_in)
5893 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5894 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5895 	else if (drm_mode_is_420_also(info, mode_in)
5896 			&& aconnector->force_yuv420_output)
5897 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5898 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5899 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5900 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5901 	else
5902 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5903 
5904 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5905 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5906 		connector,
5907 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5908 		requested_bpc);
5909 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5910 	timing_out->hdmi_vic = 0;
5911 
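	/*
	 * If an old stream is provided (scaling enabled, refresh rate
	 * unchanged), reuse its vic and sync polarities.
	 */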
	if (old_stream) {
5913 		timing_out->vic = old_stream->timing.vic;
5914 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5915 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5916 	} else {
5917 		timing_out->vic = drm_match_cea_mode(mode_in);
5918 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5919 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5920 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5921 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5922 	}
5923 
5924 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5925 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5926 		timing_out->vic = avi_frame.video_code;
5927 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5928 		timing_out->hdmi_vic = hv_frame.vic;
5929 	}
5930 
5931 	if (is_freesync_video_mode(mode_in, aconnector)) {
5932 		timing_out->h_addressable = mode_in->hdisplay;
5933 		timing_out->h_total = mode_in->htotal;
5934 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5935 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5936 		timing_out->v_total = mode_in->vtotal;
5937 		timing_out->v_addressable = mode_in->vdisplay;
5938 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5939 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5940 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5941 	} else {
5942 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5943 		timing_out->h_total = mode_in->crtc_htotal;
5944 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5945 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5946 		timing_out->v_total = mode_in->crtc_vtotal;
5947 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5948 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5949 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5950 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5951 	}
5952 
5953 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5954 
5955 	stream->output_color_space = get_output_color_space(timing_out);
5956 
5957 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5958 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5959 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5960 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5961 		    drm_mode_is_420_also(info, mode_in) &&
5962 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5963 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5964 			adjust_colour_depth_from_display_info(timing_out, info);
5965 		}
5966 	}
5967 }
5968 
5969 static void fill_audio_info(struct audio_info *audio_info,
5970 			    const struct drm_connector *drm_connector,
5971 			    const struct dc_sink *dc_sink)
5972 {
5973 	int i = 0;
5974 	int cea_revision = 0;
5975 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5976 
5977 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5978 	audio_info->product_id = edid_caps->product_id;
5979 
5980 	cea_revision = drm_connector->display_info.cea_rev;
5981 
5982 	strscpy(audio_info->display_name,
5983 		edid_caps->display_name,
5984 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5985 
5986 	if (cea_revision >= 3) {
5987 		audio_info->mode_count = edid_caps->audio_mode_count;
5988 
5989 		for (i = 0; i < audio_info->mode_count; ++i) {
5990 			audio_info->modes[i].format_code =
5991 					(enum audio_format_code)
5992 					(edid_caps->audio_modes[i].format_code);
5993 			audio_info->modes[i].channel_count =
5994 					edid_caps->audio_modes[i].channel_count;
5995 			audio_info->modes[i].sample_rates.all =
5996 					edid_caps->audio_modes[i].sample_rate;
5997 			audio_info->modes[i].sample_size =
5998 					edid_caps->audio_modes[i].sample_size;
5999 		}
6000 	}
6001 
6002 	audio_info->flags.all = edid_caps->speaker_flags;
6003 
	/* TODO: We only check for progressive mode; check for interlaced mode too */
6005 	if (drm_connector->latency_present[0]) {
6006 		audio_info->video_latency = drm_connector->video_latency[0];
6007 		audio_info->audio_latency = drm_connector->audio_latency[0];
6008 	}
6009 
6010 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6011 
6012 }
6013 
6014 static void
6015 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6016 				      struct drm_display_mode *dst_mode)
6017 {
6018 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6019 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6020 	dst_mode->crtc_clock = src_mode->crtc_clock;
6021 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6022 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6023 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6024 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6025 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6026 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6027 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6028 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6029 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6030 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6031 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6032 }
6033 
6034 static void
6035 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6036 					const struct drm_display_mode *native_mode,
6037 					bool scale_enabled)
6038 {
6039 	if (scale_enabled) {
6040 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6041 	} else if (native_mode->clock == drm_mode->clock &&
6042 			native_mode->htotal == drm_mode->htotal &&
6043 			native_mode->vtotal == drm_mode->vtotal) {
6044 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6045 	} else {
		/* neither scaling nor an amdgpu-inserted mode; nothing to patch */
6047 	}
6048 }
6049 
6050 static struct dc_sink *
6051 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6052 {
6053 	struct dc_sink_init_data sink_init_data = { 0 };
6054 	struct dc_sink *sink = NULL;
6055 	sink_init_data.link = aconnector->dc_link;
6056 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6057 
6058 	sink = dc_sink_create(&sink_init_data);
6059 	if (!sink) {
6060 		DRM_ERROR("Failed to create sink!\n");
6061 		return NULL;
6062 	}
6063 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6064 
6065 	return sink;
6066 }
6067 
6068 static void set_multisync_trigger_params(
6069 		struct dc_stream_state *stream)
6070 {
6071 	struct dc_stream_state *master = NULL;
6072 
6073 	if (stream->triggered_crtc_reset.enabled) {
6074 		master = stream->triggered_crtc_reset.event_source;
6075 		stream->triggered_crtc_reset.event =
6076 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6077 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6078 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6079 	}
6080 }
6081 
6082 static void set_master_stream(struct dc_stream_state *stream_set[],
6083 			      int stream_count)
6084 {
6085 	int j, highest_rfr = 0, master_stream = 0;
6086 
6087 	for (j = 0;  j < stream_count; j++) {
6088 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6089 			int refresh_rate = 0;
6090 
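			/*
			 * Refresh rate in Hz: pixel clock divided by the total
			 * pixel count per frame (pix_clk_100hz is in 100 Hz units).
			 */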
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6093 			if (refresh_rate > highest_rfr) {
6094 				highest_rfr = refresh_rate;
6095 				master_stream = j;
6096 			}
6097 		}
6098 	}
6099 	for (j = 0;  j < stream_count; j++) {
6100 		if (stream_set[j])
6101 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6102 	}
6103 }
6104 
6105 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6106 {
6107 	int i = 0;
6108 	struct dc_stream_state *stream;
6109 
6110 	if (context->stream_count < 2)
6111 		return;
6112 	for (i = 0; i < context->stream_count ; i++) {
6113 		if (!context->streams[i])
6114 			continue;
6115 		/*
6116 		 * TODO: add a function to read AMD VSDB bits and set
6117 		 * crtc_sync_master.multi_sync_enabled flag
6118 		 * For now it's set to false
6119 		 */
6120 	}
6121 
6122 	set_master_stream(context->streams, context->stream_count);
6123 
6124 	for (i = 0; i < context->stream_count ; i++) {
6125 		stream = context->streams[i];
6126 
6127 		if (!stream)
6128 			continue;
6129 
6130 		set_multisync_trigger_params(stream);
6131 	}
6132 }
6133 
6134 #if defined(CONFIG_DRM_AMD_DC_DCN)
6135 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6136 							struct dc_sink *sink, struct dc_stream_state *stream,
6137 							struct dsc_dec_dpcd_caps *dsc_caps)
6138 {
6139 	stream->timing.flags.DSC = 0;
6140 	dsc_caps->is_dsc_supported = false;
6141 
6142 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6143 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6144 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6145 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6146 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6147 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6148 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6149 				dsc_caps);
6150 	}
6151 }
6152 
6153 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6154 				    struct dc_sink *sink, struct dc_stream_state *stream,
6155 				    struct dsc_dec_dpcd_caps *dsc_caps,
6156 				    uint32_t max_dsc_target_bpp_limit_override)
6157 {
6158 	const struct dc_link_settings *verified_link_cap = NULL;
6159 	uint32_t link_bw_in_kbps;
6160 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6161 	struct dc *dc = sink->ctx->dc;
6162 	struct dc_dsc_bw_range bw_range = {0};
6163 	struct dc_dsc_config dsc_cfg = {0};
6164 
6165 	verified_link_cap = dc_link_get_link_cap(stream->link);
6166 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
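	/*
	 * bpp_x16 values are in 1/16 bpp units; start from an 8 bpp target
	 * and clamp to the panel's advertised maximum.
	 */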
6167 	edp_min_bpp_x16 = 8 * 16;
6168 	edp_max_bpp_x16 = 8 * 16;
6169 
6170 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6171 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6172 
6173 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6174 		edp_min_bpp_x16 = edp_max_bpp_x16;
6175 
6176 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6177 				dc->debug.dsc_min_slice_height_override,
6178 				edp_min_bpp_x16, edp_max_bpp_x16,
6179 				dsc_caps,
6180 				&stream->timing,
6181 				&bw_range)) {
6182 
6183 		if (bw_range.max_kbps < link_bw_in_kbps) {
6184 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6185 					dsc_caps,
6186 					dc->debug.dsc_min_slice_height_override,
6187 					max_dsc_target_bpp_limit_override,
6188 					0,
6189 					&stream->timing,
6190 					&dsc_cfg)) {
6191 				stream->timing.dsc_cfg = dsc_cfg;
6192 				stream->timing.flags.DSC = 1;
6193 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6194 			}
6195 			return;
6196 		}
6197 	}
6198 
6199 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6200 				dsc_caps,
6201 				dc->debug.dsc_min_slice_height_override,
6202 				max_dsc_target_bpp_limit_override,
6203 				link_bw_in_kbps,
6204 				&stream->timing,
6205 				&dsc_cfg)) {
6206 		stream->timing.dsc_cfg = dsc_cfg;
6207 		stream->timing.flags.DSC = 1;
6208 	}
6209 }
6210 
6211 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6212 										struct dc_sink *sink, struct dc_stream_state *stream,
6213 										struct dsc_dec_dpcd_caps *dsc_caps)
6214 {
6215 	struct drm_connector *drm_connector = &aconnector->base;
6216 	uint32_t link_bandwidth_kbps;
6217 	uint32_t max_dsc_target_bpp_limit_override = 0;
6218 	struct dc *dc = sink->ctx->dc;
6219 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6220 	uint32_t dsc_max_supported_bw_in_kbps;
6221 
6222 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6223 							dc_link_get_link_cap(aconnector->dc_link));
6224 
6225 	if (stream->link && stream->link->local_sink)
6226 		max_dsc_target_bpp_limit_override =
6227 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6228 
6229 	/* Set DSC policy according to dsc_clock_en */
6230 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6231 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6232 
6233 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6234 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6235 
6236 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6237 
6238 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6239 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6240 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6241 						dsc_caps,
6242 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6243 						max_dsc_target_bpp_limit_override,
6244 						link_bandwidth_kbps,
6245 						&stream->timing,
6246 						&stream->timing.dsc_cfg)) {
6247 				stream->timing.flags.DSC = 1;
6248 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6249 								 __func__, drm_connector->name);
6250 			}
6251 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6252 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6253 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6254 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6255 
6256 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6257 					max_supported_bw_in_kbps > 0 &&
6258 					dsc_max_supported_bw_in_kbps > 0)
6259 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6260 						dsc_caps,
6261 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6262 						max_dsc_target_bpp_limit_override,
6263 						dsc_max_supported_bw_in_kbps,
6264 						&stream->timing,
6265 						&stream->timing.dsc_cfg)) {
6266 					stream->timing.flags.DSC = 1;
6267 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6268 									 __func__, drm_connector->name);
6269 				}
6270 		}
6271 	}
6272 
6273 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6274 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6275 		stream->timing.flags.DSC = 1;
6276 
6277 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6278 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6279 
6280 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6281 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6282 
6283 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6284 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6285 }
6286 #endif /* CONFIG_DRM_AMD_DC_DCN */
6287 
6288 /**
6289  * DOC: FreeSync Video
6290  *
6291  * When a userspace application wants to play a video, the content follows a
6292  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS,
6294  * respectively:
6295  *
6296  * - TV/NTSC (23.976 FPS)
6297  * - Cinema (24 FPS)
6298  * - TV/PAL (25 FPS)
6299  * - TV/NTSC (29.97 FPS)
6300  * - TV/NTSC (30 FPS)
6301  * - Cinema HFR (48 FPS)
6302  * - TV/PAL (50 FPS)
6303  * - Commonly used (60 FPS)
6304  * - Multiples of 24 (48,72,96,120 FPS)
6305  *
 * The list of standard video formats is not huge and can be added to the
 * connector's mode list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
6309  * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modeset that is compatible with a FreeSync mode and differs only in the
 * refresh rate, DC will skip the full update and avoid any blink during the
 * transition. For example, a video player can switch from 60Hz to 30Hz for
 * playing TV/NTSC content when it goes full screen, without causing any
 * display blink. The same concept applies to an explicit mode setting change.
6317  */
6318 static struct drm_display_mode *
6319 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6320 			  bool use_probed_modes)
6321 {
6322 	struct drm_display_mode *m, *m_pref = NULL;
6323 	u16 current_refresh, highest_refresh;
6324 	struct list_head *list_head = use_probed_modes ?
6325 						    &aconnector->base.probed_modes :
6326 						    &aconnector->base.modes;
6327 
6328 	if (aconnector->freesync_vid_base.clock != 0)
6329 		return &aconnector->freesync_vid_base;
6330 
6331 	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
6333 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6334 			m_pref = m;
6335 			break;
6336 		}
6337 	}
6338 
6339 	if (!m_pref) {
6340 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6341 		m_pref = list_first_entry_or_null(
6342 			&aconnector->base.modes, struct drm_display_mode, head);
6343 		if (!m_pref) {
6344 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6345 			return NULL;
6346 		}
6347 	}
6348 
6349 	highest_refresh = drm_mode_vrefresh(m_pref);
6350 
6351 	/*
6352 	 * Find the mode with highest refresh rate with same resolution.
6353 	 * For some monitors, preferred mode is not the mode with highest
6354 	 * supported refresh rate.
6355 	 */
	list_for_each_entry(m, list_head, head) {
6357 		current_refresh  = drm_mode_vrefresh(m);
6358 
6359 		if (m->hdisplay == m_pref->hdisplay &&
6360 		    m->vdisplay == m_pref->vdisplay &&
6361 		    highest_refresh < current_refresh) {
6362 			highest_refresh = current_refresh;
6363 			m_pref = m;
6364 		}
6365 	}
6366 
6367 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6368 	return m_pref;
6369 }
6370 
6371 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6372 				   struct amdgpu_dm_connector *aconnector)
6373 {
6374 	struct drm_display_mode *high_mode;
6375 	int timing_diff;
6376 
6377 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6378 	if (!high_mode || !mode)
6379 		return false;
6380 
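	/*
	 * A FreeSync video mode must match the base mode in everything except
	 * vtotal: only the vertical front porch is stretched, so the vsync
	 * offsets shift by the same delta.
	 */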
6381 	timing_diff = high_mode->vtotal - mode->vtotal;
6382 
6383 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6384 	    high_mode->hdisplay != mode->hdisplay ||
6385 	    high_mode->vdisplay != mode->vdisplay ||
6386 	    high_mode->hsync_start != mode->hsync_start ||
6387 	    high_mode->hsync_end != mode->hsync_end ||
6388 	    high_mode->htotal != mode->htotal ||
6389 	    high_mode->hskew != mode->hskew ||
6390 	    high_mode->vscan != mode->vscan ||
6391 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6392 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6393 		return false;
6394 	else
6395 		return true;
6396 }
6397 
6398 static struct dc_stream_state *
6399 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6400 		       const struct drm_display_mode *drm_mode,
6401 		       const struct dm_connector_state *dm_state,
6402 		       const struct dc_stream_state *old_stream,
6403 		       int requested_bpc)
6404 {
6405 	struct drm_display_mode *preferred_mode = NULL;
6406 	struct drm_connector *drm_connector;
6407 	const struct drm_connector_state *con_state =
6408 		dm_state ? &dm_state->base : NULL;
6409 	struct dc_stream_state *stream = NULL;
6410 	struct drm_display_mode mode = *drm_mode;
6411 	struct drm_display_mode saved_mode;
6412 	struct drm_display_mode *freesync_mode = NULL;
6413 	bool native_mode_found = false;
6414 	bool recalculate_timing = false;
6415 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6416 	int mode_refresh;
6417 	int preferred_refresh = 0;
6418 #if defined(CONFIG_DRM_AMD_DC_DCN)
6419 	struct dsc_dec_dpcd_caps dsc_caps;
6420 #endif
6421 	struct dc_sink *sink = NULL;
6422 
6423 	memset(&saved_mode, 0, sizeof(saved_mode));
6424 
6425 	if (aconnector == NULL) {
6426 		DRM_ERROR("aconnector is NULL!\n");
6427 		return stream;
6428 	}
6429 
6430 	drm_connector = &aconnector->base;
6431 
6432 	if (!aconnector->dc_sink) {
6433 		sink = create_fake_sink(aconnector);
6434 		if (!sink)
6435 			return stream;
6436 	} else {
6437 		sink = aconnector->dc_sink;
6438 		dc_sink_retain(sink);
6439 	}
6440 
6441 	stream = dc_create_stream_for_sink(sink);
6442 
6443 	if (stream == NULL) {
6444 		DRM_ERROR("Failed to create stream for sink!\n");
6445 		goto finish;
6446 	}
6447 
6448 	stream->dm_stream_context = aconnector;
6449 
6450 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6451 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6452 
6453 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6454 		/* Search for preferred mode */
6455 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6456 			native_mode_found = true;
6457 			break;
6458 		}
6459 	}
6460 	if (!native_mode_found)
6461 		preferred_mode = list_first_entry_or_null(
6462 				&aconnector->base.modes,
6463 				struct drm_display_mode,
6464 				head);
6465 
6466 	mode_refresh = drm_mode_vrefresh(&mode);
6467 
6468 	if (preferred_mode == NULL) {
6469 		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set the mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode,
		 * and the mode list may not have been filled in yet.
6474 		 */
6475 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6476 	} else {
6477 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6478 		if (recalculate_timing) {
6479 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6480 			drm_mode_copy(&saved_mode, &mode);
6481 			drm_mode_copy(&mode, freesync_mode);
6482 		} else {
6483 			decide_crtc_timing_for_drm_display_mode(
6484 				&mode, preferred_mode, scale);
6485 
6486 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6487 		}
6488 	}
6489 
6490 	if (recalculate_timing)
6491 		drm_mode_set_crtcinfo(&saved_mode, 0);
6492 	else if (!dm_state)
6493 		drm_mode_set_crtcinfo(&mode, 0);
6494 
	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
6499 	if (!scale || mode_refresh != preferred_refresh)
6500 		fill_stream_properties_from_drm_display_mode(
6501 			stream, &mode, &aconnector->base, con_state, NULL,
6502 			requested_bpc);
6503 	else
6504 		fill_stream_properties_from_drm_display_mode(
6505 			stream, &mode, &aconnector->base, con_state, old_stream,
6506 			requested_bpc);
6507 
6508 #if defined(CONFIG_DRM_AMD_DC_DCN)
6509 	/* SST DSC determination policy */
6510 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6511 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6512 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6513 #endif
6514 
6515 	update_stream_scaling_settings(&mode, dm_state, stream);
6516 
6517 	fill_audio_info(
6518 		&stream->audio_info,
6519 		drm_connector,
6520 		sink);
6521 
6522 	update_stream_signal(stream, sink);
6523 
6524 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6525 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6526 
6527 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Should decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
6532 		stream->use_vsc_sdp_for_colorimetry = false;
6533 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6534 			stream->use_vsc_sdp_for_colorimetry =
6535 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6536 		} else {
6537 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6538 				stream->use_vsc_sdp_for_colorimetry = true;
6539 		}
6540 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6541 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6542 
6543 	}
6544 finish:
6545 	dc_sink_release(sink);
6546 
6547 	return stream;
6548 }
6549 
6550 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6551 {
6552 	drm_crtc_cleanup(crtc);
6553 	kfree(crtc);
6554 }
6555 
6556 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6557 				  struct drm_crtc_state *state)
6558 {
6559 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6560 
	/* TODO: Destroy dc_stream objects when the stream object is flattened */
6562 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
6570 }
6571 
6572 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6573 {
6574 	struct dm_crtc_state *state;
6575 
6576 	if (crtc->state)
6577 		dm_crtc_destroy_state(crtc, crtc->state);
6578 
6579 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6580 	if (WARN_ON(!state))
6581 		return;
6582 
6583 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6584 }
6585 
6586 static struct drm_crtc_state *
6587 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6588 {
6589 	struct dm_crtc_state *state, *cur;
6590 
6591 	cur = to_dm_crtc_state(crtc->state);
6592 
6593 	if (WARN_ON(!crtc->state))
6594 		return NULL;
6595 
6596 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6597 	if (!state)
6598 		return NULL;
6599 
6600 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6601 
6602 	if (cur->stream) {
6603 		state->stream = cur->stream;
6604 		dc_stream_retain(state->stream);
6605 	}
6606 
6607 	state->active_planes = cur->active_planes;
6608 	state->vrr_infopacket = cur->vrr_infopacket;
6609 	state->abm_level = cur->abm_level;
6610 	state->vrr_supported = cur->vrr_supported;
6611 	state->freesync_config = cur->freesync_config;
6612 	state->cm_has_degamma = cur->cm_has_degamma;
6613 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6614 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream after the stream object is flattened */
6616 
6617 	return &state->base;
6618 }
6619 
6620 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6621 {
6622 	crtc_debugfs_init(crtc);
6623 
6624 	return 0;
6625 }
6626 
6627 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6628 {
6629 	enum dc_irq_source irq_source;
6630 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6631 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6632 	int rc;
6633 
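	/* VUPDATE interrupt sources are laid out per OTG, so offset by this CRTC's OTG instance. */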
6634 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6635 
6636 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6637 
6638 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6639 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6640 	return rc;
6641 }
6642 
6643 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6644 {
6645 	enum dc_irq_source irq_source;
6646 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6647 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6648 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6649 #if defined(CONFIG_DRM_AMD_DC_DCN)
6650 	struct amdgpu_display_manager *dm = &adev->dm;
6651 	struct vblank_control_work *work;
6652 #endif
6653 	int rc = 0;
6654 
6655 	if (enable) {
6656 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6657 		if (amdgpu_dm_vrr_active(acrtc_state))
6658 			rc = dm_set_vupdate_irq(crtc, true);
6659 	} else {
6660 		/* vblank irq off -> vupdate irq off */
6661 		rc = dm_set_vupdate_irq(crtc, false);
6662 	}
6663 
6664 	if (rc)
6665 		return rc;
6666 
6667 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6668 
6669 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6670 		return -EBUSY;
6671 
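	/* Don't queue the deferred vblank work while a GPU reset is in progress. */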
6672 	if (amdgpu_in_reset(adev))
6673 		return 0;
6674 
6675 #if defined(CONFIG_DRM_AMD_DC_DCN)
6676 	if (dm->vblank_control_workqueue) {
6677 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6678 		if (!work)
6679 			return -ENOMEM;
6680 
6681 		INIT_WORK(&work->work, vblank_control_worker);
6682 		work->dm = dm;
6683 		work->acrtc = acrtc;
6684 		work->enable = enable;
6685 
6686 		if (acrtc_state->stream) {
6687 			dc_stream_retain(acrtc_state->stream);
6688 			work->stream = acrtc_state->stream;
6689 		}
6690 
6691 		queue_work(dm->vblank_control_workqueue, &work->work);
6692 	}
6693 #endif
6694 
6695 	return 0;
6696 }
6697 
6698 static int dm_enable_vblank(struct drm_crtc *crtc)
6699 {
6700 	return dm_set_vblank(crtc, true);
6701 }
6702 
6703 static void dm_disable_vblank(struct drm_crtc *crtc)
6704 {
6705 	dm_set_vblank(crtc, false);
6706 }
6707 
/* Implement only the options currently available for the driver */
6709 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6710 	.reset = dm_crtc_reset_state,
6711 	.destroy = amdgpu_dm_crtc_destroy,
6712 	.set_config = drm_atomic_helper_set_config,
6713 	.page_flip = drm_atomic_helper_page_flip,
6714 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6715 	.atomic_destroy_state = dm_crtc_destroy_state,
6716 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6717 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6718 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6719 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6720 	.enable_vblank = dm_enable_vblank,
6721 	.disable_vblank = dm_disable_vblank,
6722 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6723 	.late_register = amdgpu_dm_crtc_late_register,
6724 };
6725 
6726 static enum drm_connector_status
6727 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6728 {
6729 	bool connected;
6730 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6731 
	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 *    makes it a bad place for *any* MST-related activity.
	 */
6738 
6739 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6740 	    !aconnector->fake_enable)
6741 		connected = (aconnector->dc_sink != NULL);
6742 	else
6743 		connected = (aconnector->base.force == DRM_FORCE_ON);
6744 
6745 	update_subconnector_property(aconnector);
6746 
6747 	return (connected ? connector_status_connected :
6748 			connector_status_disconnected);
6749 }
6750 
6751 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6752 					    struct drm_connector_state *connector_state,
6753 					    struct drm_property *property,
6754 					    uint64_t val)
6755 {
6756 	struct drm_device *dev = connector->dev;
6757 	struct amdgpu_device *adev = drm_to_adev(dev);
6758 	struct dm_connector_state *dm_old_state =
6759 		to_dm_connector_state(connector->state);
6760 	struct dm_connector_state *dm_new_state =
6761 		to_dm_connector_state(connector_state);
6762 
6763 	int ret = -EINVAL;
6764 
6765 	if (property == dev->mode_config.scaling_mode_property) {
6766 		enum amdgpu_rmx_type rmx_type;
6767 
6768 		switch (val) {
6769 		case DRM_MODE_SCALE_CENTER:
6770 			rmx_type = RMX_CENTER;
6771 			break;
6772 		case DRM_MODE_SCALE_ASPECT:
6773 			rmx_type = RMX_ASPECT;
6774 			break;
6775 		case DRM_MODE_SCALE_FULLSCREEN:
6776 			rmx_type = RMX_FULL;
6777 			break;
6778 		case DRM_MODE_SCALE_NONE:
6779 		default:
6780 			rmx_type = RMX_OFF;
6781 			break;
6782 		}
6783 
6784 		if (dm_old_state->scaling == rmx_type)
6785 			return 0;
6786 
6787 		dm_new_state->scaling = rmx_type;
6788 		ret = 0;
6789 	} else if (property == adev->mode_info.underscan_hborder_property) {
6790 		dm_new_state->underscan_hborder = val;
6791 		ret = 0;
6792 	} else if (property == adev->mode_info.underscan_vborder_property) {
6793 		dm_new_state->underscan_vborder = val;
6794 		ret = 0;
6795 	} else if (property == adev->mode_info.underscan_property) {
6796 		dm_new_state->underscan_enable = val;
6797 		ret = 0;
6798 	} else if (property == adev->mode_info.abm_level_property) {
6799 		dm_new_state->abm_level = val;
6800 		ret = 0;
6801 	}
6802 
6803 	return ret;
6804 }
6805 
6806 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6807 					    const struct drm_connector_state *state,
6808 					    struct drm_property *property,
6809 					    uint64_t *val)
6810 {
6811 	struct drm_device *dev = connector->dev;
6812 	struct amdgpu_device *adev = drm_to_adev(dev);
6813 	struct dm_connector_state *dm_state =
6814 		to_dm_connector_state(state);
6815 	int ret = -EINVAL;
6816 
6817 	if (property == dev->mode_config.scaling_mode_property) {
6818 		switch (dm_state->scaling) {
6819 		case RMX_CENTER:
6820 			*val = DRM_MODE_SCALE_CENTER;
6821 			break;
6822 		case RMX_ASPECT:
6823 			*val = DRM_MODE_SCALE_ASPECT;
6824 			break;
6825 		case RMX_FULL:
6826 			*val = DRM_MODE_SCALE_FULLSCREEN;
6827 			break;
6828 		case RMX_OFF:
6829 		default:
6830 			*val = DRM_MODE_SCALE_NONE;
6831 			break;
6832 		}
6833 		ret = 0;
6834 	} else if (property == adev->mode_info.underscan_hborder_property) {
6835 		*val = dm_state->underscan_hborder;
6836 		ret = 0;
6837 	} else if (property == adev->mode_info.underscan_vborder_property) {
6838 		*val = dm_state->underscan_vborder;
6839 		ret = 0;
6840 	} else if (property == adev->mode_info.underscan_property) {
6841 		*val = dm_state->underscan_enable;
6842 		ret = 0;
6843 	} else if (property == adev->mode_info.abm_level_property) {
6844 		*val = dm_state->abm_level;
6845 		ret = 0;
6846 	}
6847 
6848 	return ret;
6849 }
6850 
6851 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6852 {
6853 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6854 
6855 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6856 }
6857 
6858 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6859 {
6860 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6861 	const struct dc_link *link = aconnector->dc_link;
6862 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6863 	struct amdgpu_display_manager *dm = &adev->dm;
6864 	int i;
6865 
	/*
	 * Call only if mst_mgr was initialized earlier, since it's not done
	 * for all connector types.
	 */
6870 	if (aconnector->mst_mgr.dev)
6871 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6872 
6873 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6874 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6875 	for (i = 0; i < dm->num_of_edps; i++) {
6876 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6877 			backlight_device_unregister(dm->backlight_dev[i]);
6878 			dm->backlight_dev[i] = NULL;
6879 		}
6880 	}
6881 #endif
6882 
6883 	if (aconnector->dc_em_sink)
6884 		dc_sink_release(aconnector->dc_em_sink);
6885 	aconnector->dc_em_sink = NULL;
6886 	if (aconnector->dc_sink)
6887 		dc_sink_release(aconnector->dc_sink);
6888 	aconnector->dc_sink = NULL;
6889 
6890 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6891 	drm_connector_unregister(connector);
6892 	drm_connector_cleanup(connector);
6893 	if (aconnector->i2c) {
6894 		i2c_del_adapter(&aconnector->i2c->base);
6895 		kfree(aconnector->i2c);
6896 	}
6897 	kfree(aconnector->dm_dp_aux.aux.name);
6898 
6899 	kfree(connector);
6900 }
6901 
6902 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6903 {
6904 	struct dm_connector_state *state =
6905 		to_dm_connector_state(connector->state);
6906 
6907 	if (connector->state)
6908 		__drm_atomic_helper_connector_destroy_state(connector->state);
6909 
6910 	kfree(state);
6911 
6912 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6913 
6914 	if (state) {
6915 		state->scaling = RMX_OFF;
6916 		state->underscan_enable = false;
6917 		state->underscan_hborder = 0;
6918 		state->underscan_vborder = 0;
6919 		state->base.max_requested_bpc = 8;
6920 		state->vcpi_slots = 0;
6921 		state->pbn = 0;
6922 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6923 			state->abm_level = amdgpu_dm_abm_level;
6924 
6925 		__drm_atomic_helper_connector_reset(connector, &state->base);
6926 	}
6927 }
6928 
6929 struct drm_connector_state *
6930 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6931 {
6932 	struct dm_connector_state *state =
6933 		to_dm_connector_state(connector->state);
6934 
6935 	struct dm_connector_state *new_state =
6936 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6937 
6938 	if (!new_state)
6939 		return NULL;
6940 
6941 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6942 
6943 	new_state->freesync_capable = state->freesync_capable;
6944 	new_state->abm_level = state->abm_level;
6945 	new_state->scaling = state->scaling;
6946 	new_state->underscan_enable = state->underscan_enable;
6947 	new_state->underscan_hborder = state->underscan_hborder;
6948 	new_state->underscan_vborder = state->underscan_vborder;
6949 	new_state->vcpi_slots = state->vcpi_slots;
6950 	new_state->pbn = state->pbn;
6951 	return &new_state->base;
6952 }
6953 
6954 static int
6955 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6956 {
6957 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6958 		to_amdgpu_dm_connector(connector);
6959 	int r;
6960 
6961 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6962 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6963 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6964 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6965 		if (r)
6966 			return r;
6967 	}
6968 
6969 #if defined(CONFIG_DEBUG_FS)
6970 	connector_debugfs_init(amdgpu_dm_connector);
6971 #endif
6972 
6973 	return 0;
6974 }
6975 
6976 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6977 	.reset = amdgpu_dm_connector_funcs_reset,
6978 	.detect = amdgpu_dm_connector_detect,
6979 	.fill_modes = drm_helper_probe_single_connector_modes,
6980 	.destroy = amdgpu_dm_connector_destroy,
6981 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6982 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6983 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6984 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6985 	.late_register = amdgpu_dm_connector_late_register,
6986 	.early_unregister = amdgpu_dm_connector_unregister
6987 };
6988 
6989 static int get_modes(struct drm_connector *connector)
6990 {
6991 	return amdgpu_dm_connector_get_modes(connector);
6992 }
6993 
6994 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6995 {
6996 	struct dc_sink_init_data init_params = {
6997 			.link = aconnector->dc_link,
6998 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6999 	};
7000 	struct edid *edid;
7001 
7002 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7004 				aconnector->base.name);
7005 
7006 		aconnector->base.force = DRM_FORCE_OFF;
7007 		aconnector->base.override_edid = false;
7008 		return;
7009 	}
7010 
7011 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7012 
7013 	aconnector->edid = edid;
7014 
7015 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7016 		aconnector->dc_link,
7017 		(uint8_t *)edid,
7018 		(edid->extensions + 1) * EDID_LENGTH,
7019 		&init_params);
7020 
7021 	if (aconnector->base.force == DRM_FORCE_ON) {
7022 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7023 		aconnector->dc_link->local_sink :
7024 		aconnector->dc_em_sink;
7025 		dc_sink_retain(aconnector->dc_sink);
7026 	}
7027 }
7028 
7029 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7030 {
7031 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7032 
	/*
	 * In case of a headless boot with force on for a DP managed connector,
	 * these settings have to be != 0 to get the initial modeset.
	 */
7037 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7038 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7039 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7040 	}
7041 
7042 
7043 	aconnector->base.override_edid = true;
7044 	create_eml_sink(aconnector);
7045 }
7046 
7047 struct dc_stream_state *
7048 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7049 				const struct drm_display_mode *drm_mode,
7050 				const struct dm_connector_state *dm_state,
7051 				const struct dc_stream_state *old_stream)
7052 {
7053 	struct drm_connector *connector = &aconnector->base;
7054 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7055 	struct dc_stream_state *stream;
7056 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7057 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7058 	enum dc_status dc_result = DC_OK;
7059 
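	/*
	 * Retry stream creation at successively lower color depths
	 * (e.g. 10 -> 8 -> 6 bpc) until DC validation passes.
	 */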
7060 	do {
7061 		stream = create_stream_for_sink(aconnector, drm_mode,
7062 						dm_state, old_stream,
7063 						requested_bpc);
7064 		if (stream == NULL) {
7065 			DRM_ERROR("Failed to create stream for sink!\n");
7066 			break;
7067 		}
7068 
7069 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7070 
7071 		if (dc_result != DC_OK) {
7072 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7073 				      drm_mode->hdisplay,
7074 				      drm_mode->vdisplay,
7075 				      drm_mode->clock,
7076 				      dc_result,
7077 				      dc_status_to_str(dc_result));
7078 
7079 			dc_stream_release(stream);
7080 			stream = NULL;
7081 			requested_bpc -= 2; /* lower bpc to retry validation */
7082 		}
7083 
7084 	} while (stream == NULL && requested_bpc >= 6);
7085 
7086 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7087 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7088 
7089 		aconnector->force_yuv420_output = true;
7090 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7091 						dm_state, old_stream);
7092 		aconnector->force_yuv420_output = false;
7093 	}
7094 
7095 	return stream;
7096 }
7097 
7098 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7099 				   struct drm_display_mode *mode)
7100 {
7101 	int result = MODE_ERROR;
7102 	struct dc_sink *dc_sink;
7103 	/* TODO: Unhardcode stream count */
7104 	struct dc_stream_state *stream;
7105 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7106 
7107 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7108 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7109 		return result;
7110 
7111 	/*
	 * Only run this the first time mode_valid is called, to initialize
	 * EDID mgmt.
7114 	 */
7115 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7116 		!aconnector->dc_em_sink)
7117 		handle_edid_mgmt(aconnector);
7118 
7119 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7120 
7121 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7122 				aconnector->base.force != DRM_FORCE_ON) {
7123 		DRM_ERROR("dc_sink is NULL!\n");
7124 		goto fail;
7125 	}
7126 
7127 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7128 	if (stream) {
7129 		dc_stream_release(stream);
7130 		result = MODE_OK;
7131 	}
7132 
7133 fail:
	/* TODO: error handling */
7135 	return result;
7136 }
7137 
7138 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7139 				struct dc_info_packet *out)
7140 {
7141 	struct hdmi_drm_infoframe frame;
7142 	unsigned char buf[30]; /* 26 + 4 */
7143 	ssize_t len;
7144 	int ret, i;
7145 
7146 	memset(out, 0, sizeof(*out));
7147 
7148 	if (!state->hdr_output_metadata)
7149 		return 0;
7150 
7151 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7152 	if (ret)
7153 		return ret;
7154 
7155 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7156 	if (len < 0)
7157 		return (int)len;
7158 
7159 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7160 	if (len != 30)
7161 		return -EINVAL;
7162 
7163 	/* Prepare the infopacket for DC. */
7164 	switch (state->connector->connector_type) {
7165 	case DRM_MODE_CONNECTOR_HDMIA:
7166 		out->hb0 = 0x87; /* type */
7167 		out->hb1 = 0x01; /* version */
7168 		out->hb2 = 0x1A; /* length */
7169 		out->sb[0] = buf[3]; /* checksum */
7170 		i = 1;
7171 		break;
7172 
7173 	case DRM_MODE_CONNECTOR_DisplayPort:
7174 	case DRM_MODE_CONNECTOR_eDP:
7175 		out->hb0 = 0x00; /* sdp id, zero */
7176 		out->hb1 = 0x87; /* type */
7177 		out->hb2 = 0x1D; /* payload len - 1 */
7178 		out->hb3 = (0x13 << 2); /* sdp version */
7179 		out->sb[0] = 0x01; /* version */
7180 		out->sb[1] = 0x1A; /* length */
7181 		i = 2;
7182 		break;
7183 
7184 	default:
7185 		return -EINVAL;
7186 	}
7187 
7188 	memcpy(&out->sb[i], &buf[4], 26);
7189 	out->valid = true;
7190 
7191 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7192 		       sizeof(out->sb), false);
7193 
7194 	return 0;
7195 }
7196 
7197 static int
7198 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7199 				 struct drm_atomic_state *state)
7200 {
7201 	struct drm_connector_state *new_con_state =
7202 		drm_atomic_get_new_connector_state(state, conn);
7203 	struct drm_connector_state *old_con_state =
7204 		drm_atomic_get_old_connector_state(state, conn);
7205 	struct drm_crtc *crtc = new_con_state->crtc;
7206 	struct drm_crtc_state *new_crtc_state;
7207 	int ret;
7208 
7209 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7210 
7211 	if (!crtc)
7212 		return 0;
7213 
7214 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7215 		struct dc_info_packet hdr_infopacket;
7216 
7217 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7218 		if (ret)
7219 			return ret;
7220 
7221 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7222 		if (IS_ERR(new_crtc_state))
7223 			return PTR_ERR(new_crtc_state);
7224 
7225 		/*
7226 		 * DC considers the stream backends changed if the
7227 		 * static metadata changes. Forcing the modeset also
7228 		 * gives a simple way for userspace to switch from
7229 		 * 8bpc to 10bpc when setting the metadata to enter
7230 		 * or exit HDR.
7231 		 *
7232 		 * Changing the static metadata after it's been
7233 		 * set is permissible, however. So only force a
7234 		 * modeset if we're entering or exiting HDR.
7235 		 */
7236 		new_crtc_state->mode_changed =
7237 			!old_con_state->hdr_output_metadata ||
7238 			!new_con_state->hdr_output_metadata;
7239 	}
7240 
7241 	return 0;
7242 }
7243 
7244 static const struct drm_connector_helper_funcs
7245 amdgpu_dm_connector_helper_funcs = {
7246 	/*
	 * If a second, bigger display is hotplugged in FB console mode, its higher
	 * resolution modes will be filtered out by drm_mode_validate_size() and end
	 * up missing after the user starts lightdm. So we need to renew the modes
	 * list in the get_modes callback, not just return the modes count.
7251 	 */
7252 	.get_modes = get_modes,
7253 	.mode_valid = amdgpu_dm_connector_mode_valid,
7254 	.atomic_check = amdgpu_dm_connector_atomic_check,
7255 };
7256 
7257 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7258 {
7259 }
7260 
7261 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7262 {
7263 	struct drm_atomic_state *state = new_crtc_state->state;
7264 	struct drm_plane *plane;
7265 	int num_active = 0;
7266 
7267 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7268 		struct drm_plane_state *new_plane_state;
7269 
7270 		/* Cursor planes are "fake". */
7271 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7272 			continue;
7273 
7274 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7275 
7276 		if (!new_plane_state) {
7277 			/*
			 * The plane is enabled on the CRTC and hasn't changed
7279 			 * state. This means that it previously passed
7280 			 * validation and is therefore enabled.
7281 			 */
7282 			num_active += 1;
7283 			continue;
7284 		}
7285 
7286 		/* We need a framebuffer to be considered enabled. */
7287 		num_active += (new_plane_state->fb != NULL);
7288 	}
7289 
7290 	return num_active;
7291 }
7292 
7293 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7294 					 struct drm_crtc_state *new_crtc_state)
7295 {
7296 	struct dm_crtc_state *dm_new_crtc_state =
7297 		to_dm_crtc_state(new_crtc_state);
7298 
7299 	dm_new_crtc_state->active_planes = 0;
7300 
7301 	if (!dm_new_crtc_state->stream)
7302 		return;
7303 
7304 	dm_new_crtc_state->active_planes =
7305 		count_crtc_active_planes(new_crtc_state);
7306 }
7307 
7308 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7309 				       struct drm_atomic_state *state)
7310 {
7311 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7312 									  crtc);
7313 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7314 	struct dc *dc = adev->dm.dc;
7315 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7316 	int ret = -EINVAL;
7317 
7318 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7319 
7320 	dm_update_crtc_active_planes(crtc, crtc_state);
7321 
7322 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7323 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7324 		return ret;
7325 	}
7326 
7327 	/*
7328 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7329 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7330 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7331 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7332 	 */
7333 	if (crtc_state->enable &&
7334 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7335 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7336 		return -EINVAL;
7337 	}
7338 
7339 	/* In some use cases, like reset, no stream is attached */
7340 	if (!dm_crtc_state->stream)
7341 		return 0;
7342 
7343 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7344 		return 0;
7345 
7346 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7347 	return ret;
7348 }
7349 
7350 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7351 				      const struct drm_display_mode *mode,
7352 				      struct drm_display_mode *adjusted_mode)
7353 {
7354 	return true;
7355 }
7356 
7357 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7358 	.disable = dm_crtc_helper_disable,
7359 	.atomic_check = dm_crtc_helper_atomic_check,
7360 	.mode_fixup = dm_crtc_helper_mode_fixup,
7361 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7362 };
7363 
7364 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7365 {
7366 
7367 }
7368 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7389 
7390 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7391 					  struct drm_crtc_state *crtc_state,
7392 					  struct drm_connector_state *conn_state)
7393 {
7394 	struct drm_atomic_state *state = crtc_state->state;
7395 	struct drm_connector *connector = conn_state->connector;
7396 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7397 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7398 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7399 	struct drm_dp_mst_topology_mgr *mst_mgr;
7400 	struct drm_dp_mst_port *mst_port;
7401 	enum dc_color_depth color_depth;
7402 	int clock, bpp = 0;
7403 	bool is_y420 = false;
7404 
7405 	if (!aconnector->port || !aconnector->dc_sink)
7406 		return 0;
7407 
7408 	mst_port = aconnector->port;
7409 	mst_mgr = &aconnector->mst_port->mst_mgr;
7410 
7411 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7412 		return 0;
7413 
7414 	if (!state->duplicated) {
7415 		int max_bpc = conn_state->max_requested_bpc;
7416 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7417 				aconnector->force_yuv420_output;
7418 		color_depth = convert_color_depth_from_display_info(connector,
7419 								    is_y420,
7420 								    max_bpc);
7421 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7422 		clock = adjusted_mode->clock;
7423 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7424 	}
7425 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7426 									   mst_mgr,
7427 									   mst_port,
7428 									   dm_new_connector_state->pbn,
7429 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7430 	if (dm_new_connector_state->vcpi_slots < 0) {
7431 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7432 		return dm_new_connector_state->vcpi_slots;
7433 	}
7434 	return 0;
7435 }
7436 
7437 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7438 	.disable = dm_encoder_helper_disable,
7439 	.atomic_check = dm_encoder_helper_atomic_check
7440 };
7441 
7442 #if defined(CONFIG_DRM_AMD_DC_DCN)
7443 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7444 					    struct dc_state *dc_state,
7445 					    struct dsc_mst_fairness_vars *vars)
7446 {
7447 	struct dc_stream_state *stream = NULL;
7448 	struct drm_connector *connector;
7449 	struct drm_connector_state *new_con_state;
7450 	struct amdgpu_dm_connector *aconnector;
7451 	struct dm_connector_state *dm_conn_state;
7452 	int i, j;
7453 	int vcpi, pbn_div, pbn, slot_num = 0;
7454 
7455 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7456 
7457 		aconnector = to_amdgpu_dm_connector(connector);
7458 
7459 		if (!aconnector->port)
7460 			continue;
7461 
7462 		if (!new_con_state || !new_con_state->crtc)
7463 			continue;
7464 
7465 		dm_conn_state = to_dm_connector_state(new_con_state);
7466 
7467 		for (j = 0; j < dc_state->stream_count; j++) {
7468 			stream = dc_state->streams[j];
7469 			if (!stream)
7470 				continue;
7471 
7472 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7473 				break;
7474 
7475 			stream = NULL;
7476 		}
7477 
7478 		if (!stream)
7479 			continue;
7480 
7481 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7483 		for (j = 0; j < dc_state->stream_count; j++) {
7484 			if (vars[j].aconnector == aconnector) {
7485 				pbn = vars[j].pbn;
7486 				break;
7487 			}
7488 		}
7489 
7490 		if (j == dc_state->stream_count)
7491 			continue;
7492 
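		/*
		 * Each MST time slot carries pbn_div worth of PBN, so round up to
		 * reserve enough slots for the stream's full bandwidth.
		 */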
7493 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7494 
7495 		if (stream->timing.flags.DSC != 1) {
7496 			dm_conn_state->pbn = pbn;
7497 			dm_conn_state->vcpi_slots = slot_num;
7498 
7499 			drm_dp_mst_atomic_enable_dsc(state,
7500 						     aconnector->port,
7501 						     dm_conn_state->pbn,
7502 						     0,
7503 						     false);
7504 			continue;
7505 		}
7506 
7507 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7508 						    aconnector->port,
7509 						    pbn, pbn_div,
7510 						    true);
7511 		if (vcpi < 0)
7512 			return vcpi;
7513 
7514 		dm_conn_state->pbn = pbn;
7515 		dm_conn_state->vcpi_slots = vcpi;
7516 	}
7517 	return 0;
7518 }
7519 #endif
7520 
7521 static void dm_drm_plane_reset(struct drm_plane *plane)
7522 {
7523 	struct dm_plane_state *amdgpu_state = NULL;
7524 
7525 	if (plane->state)
7526 		plane->funcs->atomic_destroy_state(plane, plane->state);
7527 
7528 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7529 	WARN_ON(amdgpu_state == NULL);
7530 
7531 	if (amdgpu_state)
7532 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7533 }
7534 
7535 static struct drm_plane_state *
7536 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7537 {
7538 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7539 
7540 	old_dm_plane_state = to_dm_plane_state(plane->state);
7541 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7542 	if (!dm_plane_state)
7543 		return NULL;
7544 
7545 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7546 
7547 	if (old_dm_plane_state->dc_state) {
7548 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7549 		dc_plane_state_retain(dm_plane_state->dc_state);
7550 	}
7551 
7552 	return &dm_plane_state->base;
7553 }
7554 
7555 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7556 				struct drm_plane_state *state)
7557 {
7558 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7559 
7560 	if (dm_plane_state->dc_state)
7561 		dc_plane_state_release(dm_plane_state->dc_state);
7562 
7563 	drm_atomic_helper_plane_destroy_state(plane, state);
7564 }
7565 
7566 static const struct drm_plane_funcs dm_plane_funcs = {
7567 	.update_plane	= drm_atomic_helper_update_plane,
7568 	.disable_plane	= drm_atomic_helper_disable_plane,
7569 	.destroy	= drm_primary_helper_destroy,
7570 	.reset = dm_drm_plane_reset,
7571 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7572 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7573 	.format_mod_supported = dm_plane_format_mod_supported,
7574 };
7575 
7576 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7577 				      struct drm_plane_state *new_state)
7578 {
7579 	struct amdgpu_framebuffer *afb;
7580 	struct drm_gem_object *obj;
7581 	struct amdgpu_device *adev;
7582 	struct amdgpu_bo *rbo;
7583 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7584 	uint32_t domain;
7585 	int r;
7586 
7587 	if (!new_state->fb) {
7588 		DRM_DEBUG_KMS("No FB bound\n");
7589 		return 0;
7590 	}
7591 
7592 	afb = to_amdgpu_framebuffer(new_state->fb);
7593 	obj = new_state->fb->obj[0];
7594 	rbo = gem_to_amdgpu_bo(obj);
7595 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7596 
7597 	r = amdgpu_bo_reserve(rbo, true);
7598 	if (r) {
7599 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7600 		return r;
7601 	}
7602 
7603 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7604 	if (r) {
7605 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7606 		goto error_unlock;
7607 	}
7608 
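	/* Cursor BOs are pinned to VRAM; other planes may also use GTT where the ASIC supports scanout from it. */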
7609 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7610 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7611 	else
7612 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7613 
7614 	r = amdgpu_bo_pin(rbo, domain);
7615 	if (unlikely(r != 0)) {
7616 		if (r != -ERESTARTSYS)
7617 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7618 		goto error_unlock;
7619 	}
7620 
7621 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7622 	if (unlikely(r != 0)) {
7623 		DRM_ERROR("%p bind failed\n", rbo);
7624 		goto error_unpin;
7625 	}
7626 
7627 	r = drm_gem_plane_helper_prepare_fb(plane, new_state);
7628 	if (unlikely(r != 0))
7629 		goto error_unpin;
7630 
7631 	amdgpu_bo_unreserve(rbo);
7632 
7633 	afb->address = amdgpu_bo_gpu_offset(rbo);
7634 
7635 	amdgpu_bo_ref(rbo);
7636 
	/*
7638 	 * We don't do surface updates on planes that have been newly created,
7639 	 * but we also don't have the afb->address during atomic check.
7640 	 *
7641 	 * Fill in buffer attributes depending on the address here, but only on
7642 	 * newly created planes since they're not being used by DC yet and this
7643 	 * won't modify global state.
7644 	 */
7645 	dm_plane_state_old = to_dm_plane_state(plane->state);
7646 	dm_plane_state_new = to_dm_plane_state(new_state);
7647 
7648 	if (dm_plane_state_new->dc_state &&
7649 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7650 		struct dc_plane_state *plane_state =
7651 			dm_plane_state_new->dc_state;
7652 		bool force_disable_dcc = !plane_state->dcc.enable;
7653 
7654 		fill_plane_buffer_attributes(
7655 			adev, afb, plane_state->format, plane_state->rotation,
7656 			afb->tiling_flags,
7657 			&plane_state->tiling_info, &plane_state->plane_size,
7658 			&plane_state->dcc, &plane_state->address,
7659 			afb->tmz_surface, force_disable_dcc);
7660 	}
7661 
7662 	return 0;
7663 
7664 error_unpin:
7665 	amdgpu_bo_unpin(rbo);
7666 
7667 error_unlock:
7668 	amdgpu_bo_unreserve(rbo);
7669 	return r;
7670 }
7671 
7672 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7673 				       struct drm_plane_state *old_state)
7674 {
7675 	struct amdgpu_bo *rbo;
7676 	int r;
7677 
7678 	if (!old_state->fb)
7679 		return;
7680 
7681 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7682 	r = amdgpu_bo_reserve(rbo, false);
7683 	if (unlikely(r)) {
7684 		DRM_ERROR("failed to reserve rbo before unpin\n");
7685 		return;
7686 	}
7687 
7688 	amdgpu_bo_unpin(rbo);
7689 	amdgpu_bo_unreserve(rbo);
7690 	amdgpu_bo_unref(&rbo);
7691 }
7692 
7693 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7694 				       struct drm_crtc_state *new_crtc_state)
7695 {
7696 	struct drm_framebuffer *fb = state->fb;
7697 	int min_downscale, max_upscale;
7698 	int min_scale = 0;
7699 	int max_scale = INT_MAX;
7700 
7701 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7702 	if (fb && state->crtc) {
7703 		/* Validate viewport to cover the case when only the position changes */
7704 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7705 			int viewport_width = state->crtc_w;
7706 			int viewport_height = state->crtc_h;
7707 
7708 			if (state->crtc_x < 0)
7709 				viewport_width += state->crtc_x;
7710 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7711 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7712 
7713 			if (state->crtc_y < 0)
7714 				viewport_height += state->crtc_y;
7715 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7716 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7717 
7718 			if (viewport_width < 0 || viewport_height < 0) {
7719 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7720 				return -EINVAL;
7721 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7722 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7723 				return -EINVAL;
7724 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7725 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7726 				return -EINVAL;
7727 			}
7728 
7729 		}
7730 
7731 		/* Get min/max allowed scaling factors from plane caps. */
7732 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7733 					     &min_downscale, &max_upscale);
7734 		/*
7735 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7736 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7737 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7738 		 */
7739 		min_scale = (1000 << 16) / max_upscale;
7740 		max_scale = (1000 << 16) / min_downscale;
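		/*
		 * Illustrative example: if DC reports max_upscale = 16000 (16x in
		 * DC's 1.0 == 1000 scale) and min_downscale = 250 (1/4x), this
		 * gives min_scale = 4096 (1/16 in 16.16 fixed point) and
		 * max_scale = 262144 (4.0 in 16.16 fixed point).
		 */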
7741 	}
7742 
7743 	return drm_atomic_helper_check_plane_state(
7744 		state, new_crtc_state, min_scale, max_scale, true, true);
7745 }
7746 
7747 static int dm_plane_atomic_check(struct drm_plane *plane,
7748 				 struct drm_atomic_state *state)
7749 {
7750 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7751 										 plane);
7752 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7753 	struct dc *dc = adev->dm.dc;
7754 	struct dm_plane_state *dm_plane_state;
7755 	struct dc_scaling_info scaling_info;
7756 	struct drm_crtc_state *new_crtc_state;
7757 	int ret;
7758 
7759 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7760 
7761 	dm_plane_state = to_dm_plane_state(new_plane_state);
7762 
7763 	if (!dm_plane_state->dc_state)
7764 		return 0;
7765 
7766 	new_crtc_state =
7767 		drm_atomic_get_new_crtc_state(state,
7768 					      new_plane_state->crtc);
7769 	if (!new_crtc_state)
7770 		return -EINVAL;
7771 
7772 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7773 	if (ret)
7774 		return ret;
7775 
7776 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7777 	if (ret)
7778 		return ret;
7779 
7780 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7781 		return 0;
7782 
7783 	return -EINVAL;
7784 }
7785 
7786 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7787 				       struct drm_atomic_state *state)
7788 {
7789 	/* Only support async updates on cursor planes. */
7790 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7791 		return -EINVAL;
7792 
7793 	return 0;
7794 }
7795 
7796 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7797 					 struct drm_atomic_state *state)
7798 {
7799 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7800 									   plane);
7801 	struct drm_plane_state *old_state =
7802 		drm_atomic_get_old_plane_state(state, plane);
7803 
7804 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7805 
7806 	swap(plane->state->fb, new_state->fb);
7807 
7808 	plane->state->src_x = new_state->src_x;
7809 	plane->state->src_y = new_state->src_y;
7810 	plane->state->src_w = new_state->src_w;
7811 	plane->state->src_h = new_state->src_h;
7812 	plane->state->crtc_x = new_state->crtc_x;
7813 	plane->state->crtc_y = new_state->crtc_y;
7814 	plane->state->crtc_w = new_state->crtc_w;
7815 	plane->state->crtc_h = new_state->crtc_h;
7816 
7817 	handle_cursor_update(plane, old_state);
7818 }
7819 
7820 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7821 	.prepare_fb = dm_plane_helper_prepare_fb,
7822 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7823 	.atomic_check = dm_plane_atomic_check,
7824 	.atomic_async_check = dm_plane_atomic_async_check,
7825 	.atomic_async_update = dm_plane_atomic_async_update
7826 };
7827 
7828 /*
7829  * TODO: these are currently initialized to rgb formats only.
7830  * For future use cases we should either initialize them dynamically based on
7831  * plane capabilities, or initialize this array to all formats, so internal drm
7832  * check will succeed, and let DC implement proper check
7833  */
7834 static const uint32_t rgb_formats[] = {
7835 	DRM_FORMAT_XRGB8888,
7836 	DRM_FORMAT_ARGB8888,
7837 	DRM_FORMAT_RGBA8888,
7838 	DRM_FORMAT_XRGB2101010,
7839 	DRM_FORMAT_XBGR2101010,
7840 	DRM_FORMAT_ARGB2101010,
7841 	DRM_FORMAT_ABGR2101010,
7842 	DRM_FORMAT_XRGB16161616,
7843 	DRM_FORMAT_XBGR16161616,
7844 	DRM_FORMAT_ARGB16161616,
7845 	DRM_FORMAT_ABGR16161616,
7846 	DRM_FORMAT_XBGR8888,
7847 	DRM_FORMAT_ABGR8888,
7848 	DRM_FORMAT_RGB565,
7849 };
7850 
7851 static const uint32_t overlay_formats[] = {
7852 	DRM_FORMAT_XRGB8888,
7853 	DRM_FORMAT_ARGB8888,
7854 	DRM_FORMAT_RGBA8888,
7855 	DRM_FORMAT_XBGR8888,
7856 	DRM_FORMAT_ABGR8888,
7857 	DRM_FORMAT_RGB565
7858 };
7859 
7860 static const u32 cursor_formats[] = {
7861 	DRM_FORMAT_ARGB8888
7862 };
7863 
7864 static int get_plane_formats(const struct drm_plane *plane,
7865 			     const struct dc_plane_cap *plane_cap,
7866 			     uint32_t *formats, int max_formats)
7867 {
7868 	int i, num_formats = 0;
7869 
7870 	/*
7871 	 * TODO: Query support for each group of formats directly from
7872 	 * DC plane caps. This will require adding more formats to the
7873 	 * caps list.
7874 	 */
7875 
7876 	switch (plane->type) {
7877 	case DRM_PLANE_TYPE_PRIMARY:
7878 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7879 			if (num_formats >= max_formats)
7880 				break;
7881 
7882 			formats[num_formats++] = rgb_formats[i];
7883 		}
7884 
7885 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7886 			formats[num_formats++] = DRM_FORMAT_NV12;
7887 		if (plane_cap && plane_cap->pixel_format_support.p010)
7888 			formats[num_formats++] = DRM_FORMAT_P010;
7889 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7890 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7891 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7892 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7893 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7894 		}
7895 		break;
7896 
7897 	case DRM_PLANE_TYPE_OVERLAY:
7898 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7899 			if (num_formats >= max_formats)
7900 				break;
7901 
7902 			formats[num_formats++] = overlay_formats[i];
7903 		}
7904 		break;
7905 
7906 	case DRM_PLANE_TYPE_CURSOR:
7907 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7908 			if (num_formats >= max_formats)
7909 				break;
7910 
7911 			formats[num_formats++] = cursor_formats[i];
7912 		}
7913 		break;
7914 	}
7915 
7916 	return num_formats;
7917 }
7918 
7919 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7920 				struct drm_plane *plane,
7921 				unsigned long possible_crtcs,
7922 				const struct dc_plane_cap *plane_cap)
7923 {
7924 	uint32_t formats[32];
7925 	int num_formats;
7926 	int res = -EPERM;
7927 	unsigned int supported_rotations;
7928 	uint64_t *modifiers = NULL;
7929 
7930 	num_formats = get_plane_formats(plane, plane_cap, formats,
7931 					ARRAY_SIZE(formats));
7932 
7933 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7934 	if (res)
7935 		return res;
7936 
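	/* Without a modifier list from DC, fall back to legacy (implicit-modifier) framebuffers. */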
7937 	if (modifiers == NULL)
7938 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7939 
7940 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7941 				       &dm_plane_funcs, formats, num_formats,
7942 				       modifiers, plane->type, NULL);
7943 	kfree(modifiers);
7944 	if (res)
7945 		return res;
7946 
7947 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7948 	    plane_cap && plane_cap->per_pixel_alpha) {
7949 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7950 					  BIT(DRM_MODE_BLEND_PREMULTI);
7951 
7952 		drm_plane_create_alpha_property(plane);
7953 		drm_plane_create_blend_mode_property(plane, blend_caps);
7954 	}
7955 
7956 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7957 	    plane_cap &&
7958 	    (plane_cap->pixel_format_support.nv12 ||
7959 	     plane_cap->pixel_format_support.p010)) {
7960 		/* This only affects YUV formats. */
7961 		drm_plane_create_color_properties(
7962 			plane,
7963 			BIT(DRM_COLOR_YCBCR_BT601) |
7964 			BIT(DRM_COLOR_YCBCR_BT709) |
7965 			BIT(DRM_COLOR_YCBCR_BT2020),
7966 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7967 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7968 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7969 	}
7970 
7971 	supported_rotations =
7972 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7973 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7974 
7975 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7976 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7977 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7978 						   supported_rotations);
7979 
7980 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7981 
7982 	/* Create (reset) the plane state */
7983 	if (plane->funcs->reset)
7984 		plane->funcs->reset(plane);
7985 
7986 	return 0;
7987 }
7988 
7989 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7990 			       struct drm_plane *plane,
7991 			       uint32_t crtc_index)
7992 {
7993 	struct amdgpu_crtc *acrtc = NULL;
7994 	struct drm_plane *cursor_plane;
7995 
7996 	int res = -ENOMEM;
7997 
7998 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7999 	if (!cursor_plane)
8000 		goto fail;
8001 
8002 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
8004 
8005 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8006 	if (!acrtc)
8007 		goto fail;
8008 
8009 	res = drm_crtc_init_with_planes(
8010 			dm->ddev,
8011 			&acrtc->base,
8012 			plane,
8013 			cursor_plane,
8014 			&amdgpu_dm_crtc_funcs, NULL);
8015 
8016 	if (res)
8017 		goto fail;
8018 
8019 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8020 
8021 	/* Create (reset) the plane state */
8022 	if (acrtc->base.funcs->reset)
8023 		acrtc->base.funcs->reset(&acrtc->base);
8024 
8025 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8026 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8027 
8028 	acrtc->crtc_id = crtc_index;
8029 	acrtc->base.enabled = false;
8030 	acrtc->otg_inst = -1;
8031 
8032 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8033 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8034 				   true, MAX_COLOR_LUT_ENTRIES);
8035 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8036 
8037 	return 0;
8038 
8039 fail:
8040 	kfree(acrtc);
8041 	kfree(cursor_plane);
8042 	return res;
8043 }
8044 
8045 
8046 static int to_drm_connector_type(enum signal_type st)
8047 {
8048 	switch (st) {
8049 	case SIGNAL_TYPE_HDMI_TYPE_A:
8050 		return DRM_MODE_CONNECTOR_HDMIA;
8051 	case SIGNAL_TYPE_EDP:
8052 		return DRM_MODE_CONNECTOR_eDP;
8053 	case SIGNAL_TYPE_LVDS:
8054 		return DRM_MODE_CONNECTOR_LVDS;
8055 	case SIGNAL_TYPE_RGB:
8056 		return DRM_MODE_CONNECTOR_VGA;
8057 	case SIGNAL_TYPE_DISPLAY_PORT:
8058 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8059 		return DRM_MODE_CONNECTOR_DisplayPort;
8060 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8061 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8062 		return DRM_MODE_CONNECTOR_DVID;
8063 	case SIGNAL_TYPE_VIRTUAL:
8064 		return DRM_MODE_CONNECTOR_VIRTUAL;
8065 
8066 	default:
8067 		return DRM_MODE_CONNECTOR_Unknown;
8068 	}
8069 }
8070 
8071 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8072 {
8073 	struct drm_encoder *encoder;
8074 
8075 	/* There is only one encoder per connector */
8076 	drm_connector_for_each_possible_encoder(connector, encoder)
8077 		return encoder;
8078 
8079 	return NULL;
8080 }
8081 
8082 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8083 {
8084 	struct drm_encoder *encoder;
8085 	struct amdgpu_encoder *amdgpu_encoder;
8086 
8087 	encoder = amdgpu_dm_connector_to_encoder(connector);
8088 
8089 	if (encoder == NULL)
8090 		return;
8091 
8092 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8093 
8094 	amdgpu_encoder->native_mode.clock = 0;
8095 
8096 	if (!list_empty(&connector->probed_modes)) {
8097 		struct drm_display_mode *preferred_mode = NULL;
8098 
8099 		list_for_each_entry(preferred_mode,
8100 				    &connector->probed_modes,
8101 				    head) {
8102 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8103 				amdgpu_encoder->native_mode = *preferred_mode;
8104 
8105 			break;
8106 		}
8107 
8108 	}
8109 }
8110 
8111 static struct drm_display_mode *
8112 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8113 			     char *name,
8114 			     int hdisplay, int vdisplay)
8115 {
8116 	struct drm_device *dev = encoder->dev;
8117 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8118 	struct drm_display_mode *mode = NULL;
8119 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8120 
8121 	mode = drm_mode_duplicate(dev, native_mode);
8122 
8123 	if (mode == NULL)
8124 		return NULL;
8125 
8126 	mode->hdisplay = hdisplay;
8127 	mode->vdisplay = vdisplay;
8128 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8129 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8130 
8131 	return mode;
8132 
8133 }
8134 
8135 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8136 						 struct drm_connector *connector)
8137 {
8138 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8139 	struct drm_display_mode *mode = NULL;
8140 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8141 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8142 				to_amdgpu_dm_connector(connector);
8143 	int i;
8144 	int n;
8145 	struct mode_size {
8146 		char name[DRM_DISPLAY_MODE_LEN];
8147 		int w;
8148 		int h;
8149 	} common_modes[] = {
8150 		{  "640x480",  640,  480},
8151 		{  "800x600",  800,  600},
8152 		{ "1024x768", 1024,  768},
8153 		{ "1280x720", 1280,  720},
8154 		{ "1280x800", 1280,  800},
8155 		{"1280x1024", 1280, 1024},
8156 		{ "1440x900", 1440,  900},
8157 		{"1680x1050", 1680, 1050},
8158 		{"1600x1200", 1600, 1200},
8159 		{"1920x1080", 1920, 1080},
8160 		{"1920x1200", 1920, 1200}
8161 	};
8162 
8163 	n = ARRAY_SIZE(common_modes);
8164 
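	/*
	 * Only add common modes that are smaller than the native mode and not
	 * already present in the probed list.
	 */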
8165 	for (i = 0; i < n; i++) {
8166 		struct drm_display_mode *curmode = NULL;
8167 		bool mode_existed = false;
8168 
8169 		if (common_modes[i].w > native_mode->hdisplay ||
8170 		    common_modes[i].h > native_mode->vdisplay ||
8171 		   (common_modes[i].w == native_mode->hdisplay &&
8172 		    common_modes[i].h == native_mode->vdisplay))
8173 			continue;
8174 
8175 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8176 			if (common_modes[i].w == curmode->hdisplay &&
8177 			    common_modes[i].h == curmode->vdisplay) {
8178 				mode_existed = true;
8179 				break;
8180 			}
8181 		}
8182 
8183 		if (mode_existed)
8184 			continue;
8185 
8186 		mode = amdgpu_dm_create_common_mode(encoder,
8187 				common_modes[i].name, common_modes[i].w,
8188 				common_modes[i].h);
8189 		if (!mode)
8190 			continue;
8191 
8192 		drm_mode_probed_add(connector, mode);
8193 		amdgpu_dm_connector->num_modes++;
8194 	}
8195 }
8196 
8197 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8198 {
8199 	struct drm_encoder *encoder;
8200 	struct amdgpu_encoder *amdgpu_encoder;
8201 	const struct drm_display_mode *native_mode;
8202 
8203 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8204 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8205 		return;
8206 
8207 	encoder = amdgpu_dm_connector_to_encoder(connector);
8208 	if (!encoder)
8209 		return;
8210 
8211 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8212 
8213 	native_mode = &amdgpu_encoder->native_mode;
8214 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8215 		return;
8216 
8217 	drm_connector_set_panel_orientation_with_quirk(connector,
8218 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8219 						       native_mode->hdisplay,
8220 						       native_mode->vdisplay);
8221 }
8222 
8223 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8224 					      struct edid *edid)
8225 {
8226 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8227 			to_amdgpu_dm_connector(connector);
8228 
8229 	if (edid) {
8230 		/* empty probed_modes */
8231 		INIT_LIST_HEAD(&connector->probed_modes);
8232 		amdgpu_dm_connector->num_modes =
8233 				drm_add_edid_modes(connector, edid);
8234 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list could have a higher, preferred
		 * resolution; for example, 3840x2160 in the base EDID
		 * preferred timing and 4096x2160 in a later DID
		 * extension block.
		 */
8243 		drm_mode_sort(&connector->probed_modes);
8244 		amdgpu_dm_get_native_mode(connector);
8245 
8246 		/* Freesync capabilities are reset by calling
8247 		 * drm_add_edid_modes() and need to be
8248 		 * restored here.
8249 		 */
8250 		amdgpu_dm_update_freesync_caps(connector, edid);
8251 
8252 		amdgpu_set_panel_orientation(connector);
8253 	} else {
8254 		amdgpu_dm_connector->num_modes = 0;
8255 	}
8256 }
8257 
8258 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8259 			      struct drm_display_mode *mode)
8260 {
8261 	struct drm_display_mode *m;
8262 
8263 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8264 		if (drm_mode_equal(m, mode))
8265 			return true;
8266 	}
8267 
8268 	return false;
8269 }
8270 
8271 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8272 {
8273 	const struct drm_display_mode *m;
8274 	struct drm_display_mode *new_mode;
8275 	uint i;
8276 	uint32_t new_modes_count = 0;
8277 
8278 	/* Standard FPS values
8279 	 *
8280 	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
8288 	 * 48,72,96,120 - Multiples of 24
8289 	 */
8290 	static const uint32_t common_rates[] = {
8291 		23976, 24000, 25000, 29970, 30000,
8292 		48000, 50000, 60000, 72000, 96000, 120000
8293 	};
8294 
8295 	/*
8296 	 * Find mode with highest refresh rate with the same resolution
8297 	 * as the preferred mode. Some monitors report a preferred mode
8298 	 * with lower resolution than the highest refresh rate supported.
8299 	 */
8300 
8301 	m = get_highest_refresh_rate_mode(aconnector, true);
8302 	if (!m)
8303 		return 0;
8304 
8305 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8306 		uint64_t target_vtotal, target_vtotal_diff;
8307 		uint64_t num, den;
8308 
8309 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8310 			continue;
8311 
8312 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8313 		    common_rates[i] > aconnector->max_vfreq * 1000)
8314 			continue;
8315 
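		/*
		 * Stretch vtotal so the mode refreshes at the target rate:
		 * vtotal = pixel clock / (rate * htotal). Illustrative example:
		 * a 148500 kHz, htotal 2200 mode retargeted to 30000 mHz gives
		 * 148500000000 / (30000 * 2200) = 2250.
		 */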
8316 		num = (unsigned long long)m->clock * 1000 * 1000;
8317 		den = common_rates[i] * (unsigned long long)m->htotal;
8318 		target_vtotal = div_u64(num, den);
8319 		target_vtotal_diff = target_vtotal - m->vtotal;
8320 
8321 		/* Check for illegal modes */
8322 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8323 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8324 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8325 			continue;
8326 
8327 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8328 		if (!new_mode)
8329 			goto out;
8330 
8331 		new_mode->vtotal += (u16)target_vtotal_diff;
8332 		new_mode->vsync_start += (u16)target_vtotal_diff;
8333 		new_mode->vsync_end += (u16)target_vtotal_diff;
8334 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8335 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8336 
8337 		if (!is_duplicate_mode(aconnector, new_mode)) {
8338 			drm_mode_probed_add(&aconnector->base, new_mode);
8339 			new_modes_count += 1;
8340 		} else
8341 			drm_mode_destroy(aconnector->base.dev, new_mode);
8342 	}
8343  out:
8344 	return new_modes_count;
8345 }
8346 
8347 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8348 						   struct edid *edid)
8349 {
8350 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8351 		to_amdgpu_dm_connector(connector);
8352 
8353 	if (!edid)
8354 		return;
8355 
8356 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8357 		amdgpu_dm_connector->num_modes +=
8358 			add_fs_modes(amdgpu_dm_connector);
8359 }
8360 
8361 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8362 {
8363 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8364 			to_amdgpu_dm_connector(connector);
8365 	struct drm_encoder *encoder;
8366 	struct edid *edid = amdgpu_dm_connector->edid;
8367 
8368 	encoder = amdgpu_dm_connector_to_encoder(connector);
8369 
8370 	if (!drm_edid_is_valid(edid)) {
8371 		amdgpu_dm_connector->num_modes =
8372 				drm_add_modes_noedid(connector, 640, 480);
8373 	} else {
8374 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8375 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8376 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8377 	}
8378 	amdgpu_dm_fbc_init(connector);
8379 
8380 	return amdgpu_dm_connector->num_modes;
8381 }
8382 
8383 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8384 				     struct amdgpu_dm_connector *aconnector,
8385 				     int connector_type,
8386 				     struct dc_link *link,
8387 				     int link_index)
8388 {
8389 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8390 
8391 	/*
8392 	 * Some of the properties below require access to state, like bpc.
8393 	 * Allocate some default initial connector state with our reset helper.
8394 	 */
8395 	if (aconnector->base.funcs->reset)
8396 		aconnector->base.funcs->reset(&aconnector->base);
8397 
8398 	aconnector->connector_id = link_index;
8399 	aconnector->dc_link = link;
8400 	aconnector->base.interlace_allowed = false;
8401 	aconnector->base.doublescan_allowed = false;
8402 	aconnector->base.stereo_allowed = false;
8403 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8404 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8405 	aconnector->audio_inst = -1;
8406 	mutex_init(&aconnector->hpd_lock);
8407 
	/*
	 * Configure HPD hot-plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
8412 	switch (connector_type) {
8413 	case DRM_MODE_CONNECTOR_HDMIA:
8414 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8415 		aconnector->base.ycbcr_420_allowed =
8416 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8417 		break;
8418 	case DRM_MODE_CONNECTOR_DisplayPort:
8419 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8420 		link->link_enc = link_enc_cfg_get_link_enc(link);
8421 		ASSERT(link->link_enc);
8422 		if (link->link_enc)
8423 			aconnector->base.ycbcr_420_allowed =
8424 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8425 		break;
8426 	case DRM_MODE_CONNECTOR_DVID:
8427 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8428 		break;
8429 	default:
8430 		break;
8431 	}
8432 
8433 	drm_object_attach_property(&aconnector->base.base,
8434 				dm->ddev->mode_config.scaling_mode_property,
8435 				DRM_MODE_SCALE_NONE);
8436 
8437 	drm_object_attach_property(&aconnector->base.base,
8438 				adev->mode_info.underscan_property,
8439 				UNDERSCAN_OFF);
8440 	drm_object_attach_property(&aconnector->base.base,
8441 				adev->mode_info.underscan_hborder_property,
8442 				0);
8443 	drm_object_attach_property(&aconnector->base.base,
8444 				adev->mode_info.underscan_vborder_property,
8445 				0);
8446 
8447 	if (!aconnector->mst_port)
8448 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8449 
8450 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8451 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8452 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8453 
8454 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8455 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8456 		drm_object_attach_property(&aconnector->base.base,
8457 				adev->mode_info.abm_level_property, 0);
8458 	}
8459 
8460 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8461 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8462 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8463 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8464 
8465 		if (!aconnector->mst_port)
8466 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8467 
8468 #ifdef CONFIG_DRM_AMD_DC_HDCP
8469 		if (adev->dm.hdcp_workqueue)
8470 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8471 #endif
8472 	}
8473 }
8474 
8475 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8476 			      struct i2c_msg *msgs, int num)
8477 {
8478 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8479 	struct ddc_service *ddc_service = i2c->ddc_service;
8480 	struct i2c_command cmd;
8481 	int i;
8482 	int result = -EIO;
8483 
8484 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8485 
8486 	if (!cmd.payloads)
8487 		return result;
8488 
8489 	cmd.number_of_payloads = num;
8490 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8491 	cmd.speed = 100;
8492 
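	/*
	 * Translate each i2c_msg into a DC i2c_payload; the I2C_M_RD flag
	 * selects the transfer direction.
	 */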
8493 	for (i = 0; i < num; i++) {
8494 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8495 		cmd.payloads[i].address = msgs[i].addr;
8496 		cmd.payloads[i].length = msgs[i].len;
8497 		cmd.payloads[i].data = msgs[i].buf;
8498 	}
8499 
8500 	if (dc_submit_i2c(
8501 			ddc_service->ctx->dc,
8502 			ddc_service->ddc_pin->hw_info.ddc_channel,
8503 			&cmd))
8504 		result = num;
8505 
8506 	kfree(cmd.payloads);
8507 	return result;
8508 }
8509 
8510 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8511 {
8512 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8513 }
8514 
8515 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8516 	.master_xfer = amdgpu_dm_i2c_xfer,
8517 	.functionality = amdgpu_dm_i2c_func,
8518 };
8519 
8520 static struct amdgpu_i2c_adapter *
8521 create_i2c(struct ddc_service *ddc_service,
8522 	   int link_index,
8523 	   int *res)
8524 {
8525 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8526 	struct amdgpu_i2c_adapter *i2c;
8527 
8528 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8529 	if (!i2c)
8530 		return NULL;
8531 	i2c->base.owner = THIS_MODULE;
8532 	i2c->base.class = I2C_CLASS_DDC;
8533 	i2c->base.dev.parent = &adev->pdev->dev;
8534 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8535 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8536 	i2c_set_adapdata(&i2c->base, i2c);
8537 	i2c->ddc_service = ddc_service;
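	/* Record the DDC channel so amdgpu_dm_i2c_xfer() can route transactions. */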
8538 	if (i2c->ddc_service->ddc_pin)
8539 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8540 
8541 	return i2c;
8542 }
8543 
8544 
8545 /*
8546  * Note: this function assumes that dc_link_detect() was called for the
8547  * dc_link which will be represented by this aconnector.
8548  */
8549 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8550 				    struct amdgpu_dm_connector *aconnector,
8551 				    uint32_t link_index,
8552 				    struct amdgpu_encoder *aencoder)
8553 {
8554 	int res = 0;
8555 	int connector_type;
8556 	struct dc *dc = dm->dc;
8557 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8558 	struct amdgpu_i2c_adapter *i2c;
8559 
8560 	link->priv = aconnector;
8561 
8562 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8563 
8564 	i2c = create_i2c(link->ddc, link->link_index, &res);
8565 	if (!i2c) {
8566 		DRM_ERROR("Failed to create i2c adapter data\n");
8567 		return -ENOMEM;
8568 	}
8569 
8570 	aconnector->i2c = i2c;
8571 	res = i2c_add_adapter(&i2c->base);
8572 
8573 	if (res) {
8574 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8575 		goto out_free;
8576 	}
8577 
8578 	connector_type = to_drm_connector_type(link->connector_signal);
8579 
8580 	res = drm_connector_init_with_ddc(
8581 			dm->ddev,
8582 			&aconnector->base,
8583 			&amdgpu_dm_connector_funcs,
8584 			connector_type,
8585 			&i2c->base);
8586 
8587 	if (res) {
8588 		DRM_ERROR("connector_init failed\n");
8589 		aconnector->connector_id = -1;
8590 		goto out_free;
8591 	}
8592 
8593 	drm_connector_helper_add(
8594 			&aconnector->base,
8595 			&amdgpu_dm_connector_helper_funcs);
8596 
8597 	amdgpu_dm_connector_init_helper(
8598 		dm,
8599 		aconnector,
8600 		connector_type,
8601 		link,
8602 		link_index);
8603 
8604 	drm_connector_attach_encoder(
8605 		&aconnector->base, &aencoder->base);
8606 
8607 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8608 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8609 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8610 
8611 out_free:
8612 	if (res) {
8613 		kfree(i2c);
8614 		aconnector->i2c = NULL;
8615 	}
8616 	return res;
8617 }
8618 
8619 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8620 {
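	/* Return a bitmask with one bit set for each available CRTC. */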
8621 	switch (adev->mode_info.num_crtc) {
8622 	case 1:
8623 		return 0x1;
8624 	case 2:
8625 		return 0x3;
8626 	case 3:
8627 		return 0x7;
8628 	case 4:
8629 		return 0xf;
8630 	case 5:
8631 		return 0x1f;
8632 	case 6:
8633 	default:
8634 		return 0x3f;
8635 	}
8636 }
8637 
8638 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8639 				  struct amdgpu_encoder *aencoder,
8640 				  uint32_t link_index)
8641 {
8642 	struct amdgpu_device *adev = drm_to_adev(dev);
8643 
8644 	int res = drm_encoder_init(dev,
8645 				   &aencoder->base,
8646 				   &amdgpu_dm_encoder_funcs,
8647 				   DRM_MODE_ENCODER_TMDS,
8648 				   NULL);
8649 
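	/* Allow this encoder to be routed to any of the available CRTCs. */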
8650 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8651 
8652 	if (!res)
8653 		aencoder->encoder_id = link_index;
8654 	else
8655 		aencoder->encoder_id = -1;
8656 
8657 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8658 
8659 	return res;
8660 }
8661 
8662 static void manage_dm_interrupts(struct amdgpu_device *adev,
8663 				 struct amdgpu_crtc *acrtc,
8664 				 bool enable)
8665 {
8666 	/*
8667 	 * We have no guarantee that the frontend index maps to the same
8668 	 * backend index - some even map to more than one.
8669 	 *
8670 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8671 	 */
8672 	int irq_type =
8673 		amdgpu_display_crtc_idx_to_irq_type(
8674 			adev,
8675 			acrtc->crtc_id);
8676 
8677 	if (enable) {
8678 		drm_crtc_vblank_on(&acrtc->base);
8679 		amdgpu_irq_get(
8680 			adev,
8681 			&adev->pageflip_irq,
8682 			irq_type);
8683 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8684 		amdgpu_irq_get(
8685 			adev,
8686 			&adev->vline0_irq,
8687 			irq_type);
8688 #endif
8689 	} else {
8690 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8691 		amdgpu_irq_put(
8692 			adev,
8693 			&adev->vline0_irq,
8694 			irq_type);
8695 #endif
8696 		amdgpu_irq_put(
8697 			adev,
8698 			&adev->pageflip_irq,
8699 			irq_type);
8700 		drm_crtc_vblank_off(&acrtc->base);
8701 	}
8702 }
8703 
8704 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8705 				      struct amdgpu_crtc *acrtc)
8706 {
8707 	int irq_type =
8708 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8709 
	/*
	 * This reads the current state for the IRQ and forcibly reapplies
	 * the setting to hardware.
	 */
8714 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8715 }
8716 
8717 static bool
8718 is_scaling_state_different(const struct dm_connector_state *dm_state,
8719 			   const struct dm_connector_state *old_dm_state)
8720 {
8721 	if (dm_state->scaling != old_dm_state->scaling)
8722 		return true;
8723 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8724 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8725 			return true;
8726 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8727 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8728 			return true;
8729 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8730 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8731 		return true;
8732 	return false;
8733 }
8734 
8735 #ifdef CONFIG_DRM_AMD_DC_HDCP
8736 static bool is_content_protection_different(struct drm_connector_state *state,
8737 					    const struct drm_connector_state *old_state,
8738 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8739 {
8740 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8741 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8742 
8743 	/* Handle: Type0/1 change */
8744 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8745 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8746 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8747 		return true;
8748 	}
8749 
	/* CP is being re-enabled, ignore this
8751 	 *
8752 	 * Handles:	ENABLED -> DESIRED
8753 	 */
8754 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8755 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8756 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8757 		return false;
8758 	}
8759 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8761 	 *
8762 	 * Handles:	UNDESIRED -> ENABLED
8763 	 */
8764 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8765 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8766 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8767 
8768 	/* Stream removed and re-enabled
8769 	 *
8770 	 * Can sometimes overlap with the HPD case,
8771 	 * thus set update_hdcp to false to avoid
8772 	 * setting HDCP multiple times.
8773 	 *
8774 	 * Handles:	DESIRED -> DESIRED (Special case)
8775 	 */
8776 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8777 		state->crtc && state->crtc->enabled &&
8778 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8779 		dm_con_state->update_hdcp = false;
8780 		return true;
8781 	}
8782 
8783 	/* Hot-plug, headless s3, dpms
8784 	 *
8785 	 * Only start HDCP if the display is connected/enabled.
8786 	 * update_hdcp flag will be set to false until the next
8787 	 * HPD comes in.
8788 	 *
8789 	 * Handles:	DESIRED -> DESIRED (Special case)
8790 	 */
8791 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8792 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8793 		dm_con_state->update_hdcp = false;
8794 		return true;
8795 	}
8796 
8797 	/*
8798 	 * Handles:	UNDESIRED -> UNDESIRED
8799 	 *		DESIRED -> DESIRED
8800 	 *		ENABLED -> ENABLED
8801 	 */
8802 	if (old_state->content_protection == state->content_protection)
8803 		return false;
8804 
8805 	/*
8806 	 * Handles:	UNDESIRED -> DESIRED
8807 	 *		DESIRED -> UNDESIRED
8808 	 *		ENABLED -> UNDESIRED
8809 	 */
8810 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8811 		return true;
8812 
8813 	/*
8814 	 * Handles:	DESIRED -> ENABLED
8815 	 */
8816 	return false;
8817 }
8818 
8819 #endif
8820 static void remove_stream(struct amdgpu_device *adev,
8821 			  struct amdgpu_crtc *acrtc,
8822 			  struct dc_stream_state *stream)
8823 {
	/* This is the update-mode case: the CRTC no longer drives this stream. */
8825 
8826 	acrtc->otg_inst = -1;
8827 	acrtc->enabled = false;
8828 }
8829 
8830 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8831 			       struct dc_cursor_position *position)
8832 {
8833 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8834 	int x, y;
8835 	int xorigin = 0, yorigin = 0;
8836 
8837 	if (!crtc || !plane->state->fb)
8838 		return 0;
8839 
8840 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8841 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8842 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8843 			  __func__,
8844 			  plane->state->crtc_w,
8845 			  plane->state->crtc_h);
8846 		return -EINVAL;
8847 	}
8848 
8849 	x = plane->state->crtc_x;
8850 	y = plane->state->crtc_y;
8851 
8852 	if (x <= -amdgpu_crtc->max_cursor_width ||
8853 	    y <= -amdgpu_crtc->max_cursor_height)
8854 		return 0;
8855 
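	/*
	 * A negative position means the cursor is partially off the top/left
	 * edge: clamp it to 0 and use the hotspot to offset the visible part.
	 */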
8856 	if (x < 0) {
8857 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8858 		x = 0;
8859 	}
8860 	if (y < 0) {
8861 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8862 		y = 0;
8863 	}
8864 	position->enable = true;
8865 	position->translate_by_source = true;
8866 	position->x = x;
8867 	position->y = y;
8868 	position->x_hotspot = xorigin;
8869 	position->y_hotspot = yorigin;
8870 
8871 	return 0;
8872 }
8873 
8874 static void handle_cursor_update(struct drm_plane *plane,
8875 				 struct drm_plane_state *old_plane_state)
8876 {
8877 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8878 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8879 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8880 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8881 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8882 	uint64_t address = afb ? afb->address : 0;
8883 	struct dc_cursor_position position = {0};
8884 	struct dc_cursor_attributes attributes;
8885 	int ret;
8886 
8887 	if (!plane->state->fb && !old_plane_state->fb)
8888 		return;
8889 
8890 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8891 		      __func__,
8892 		      amdgpu_crtc->crtc_id,
8893 		      plane->state->crtc_w,
8894 		      plane->state->crtc_h);
8895 
8896 	ret = get_cursor_position(plane, crtc, &position);
8897 	if (ret)
8898 		return;
8899 
8900 	if (!position.enable) {
8901 		/* turn off cursor */
8902 		if (crtc_state && crtc_state->stream) {
8903 			mutex_lock(&adev->dm.dc_lock);
8904 			dc_stream_set_cursor_position(crtc_state->stream,
8905 						      &position);
8906 			mutex_unlock(&adev->dm.dc_lock);
8907 		}
8908 		return;
8909 	}
8910 
8911 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8912 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8913 
8914 	memset(&attributes, 0, sizeof(attributes));
8915 	attributes.address.high_part = upper_32_bits(address);
8916 	attributes.address.low_part  = lower_32_bits(address);
8917 	attributes.width             = plane->state->crtc_w;
8918 	attributes.height            = plane->state->crtc_h;
8919 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8920 	attributes.rotation_angle    = 0;
8921 	attributes.attribute_flags.value = 0;
8922 
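	/* The FB pitch is in bytes; DC expects the cursor pitch in pixels. */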
8923 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8924 
8925 	if (crtc_state->stream) {
8926 		mutex_lock(&adev->dm.dc_lock);
8927 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8928 							 &attributes))
8929 			DRM_ERROR("DC failed to set cursor attributes\n");
8930 
8931 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8932 						   &position))
8933 			DRM_ERROR("DC failed to set cursor position\n");
8934 		mutex_unlock(&adev->dm.dc_lock);
8935 	}
8936 }
8937 
8938 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8939 {
8940 
8941 	assert_spin_locked(&acrtc->base.dev->event_lock);
8942 	WARN_ON(acrtc->event);
8943 
8944 	acrtc->event = acrtc->base.state->event;
8945 
8946 	/* Set the flip status */
8947 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8948 
8949 	/* Mark this event as consumed */
8950 	acrtc->base.state->event = NULL;
8951 
8952 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8953 		     acrtc->crtc_id);
8954 }
8955 
8956 static void update_freesync_state_on_stream(
8957 	struct amdgpu_display_manager *dm,
8958 	struct dm_crtc_state *new_crtc_state,
8959 	struct dc_stream_state *new_stream,
8960 	struct dc_plane_state *surface,
8961 	u32 flip_timestamp_in_us)
8962 {
8963 	struct mod_vrr_params vrr_params;
8964 	struct dc_info_packet vrr_infopacket = {0};
8965 	struct amdgpu_device *adev = dm->adev;
8966 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8967 	unsigned long flags;
8968 	bool pack_sdp_v1_3 = false;
8969 
8970 	if (!new_stream)
8971 		return;
8972 
8973 	/*
8974 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8975 	 * For now it's sufficient to just guard against these conditions.
8976 	 */
8977 
8978 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8979 		return;
8980 
8981 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8983 
8984 	if (surface) {
8985 		mod_freesync_handle_preflip(
8986 			dm->freesync_module,
8987 			surface,
8988 			new_stream,
8989 			flip_timestamp_in_us,
8990 			&vrr_params);
8991 
8992 		if (adev->family < AMDGPU_FAMILY_AI &&
8993 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8994 			mod_freesync_handle_v_update(dm->freesync_module,
8995 						     new_stream, &vrr_params);
8996 
8997 			/* Need to call this before the frame ends. */
8998 			dc_stream_adjust_vmin_vmax(dm->dc,
8999 						   new_crtc_state->stream,
9000 						   &vrr_params.adjust);
9001 		}
9002 	}
9003 
9004 	mod_freesync_build_vrr_infopacket(
9005 		dm->freesync_module,
9006 		new_stream,
9007 		&vrr_params,
9008 		PACKET_TYPE_VRR,
9009 		TRANSFER_FUNC_UNKNOWN,
9010 		&vrr_infopacket,
9011 		pack_sdp_v1_3);
9012 
9013 	new_crtc_state->freesync_timing_changed |=
9014 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9015 			&vrr_params.adjust,
9016 			sizeof(vrr_params.adjust)) != 0);
9017 
9018 	new_crtc_state->freesync_vrr_info_changed |=
9019 		(memcmp(&new_crtc_state->vrr_infopacket,
9020 			&vrr_infopacket,
9021 			sizeof(vrr_infopacket)) != 0);
9022 
9023 	acrtc->dm_irq_params.vrr_params = vrr_params;
9024 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9025 
9026 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9027 	new_stream->vrr_infopacket = vrr_infopacket;
9028 
9029 	if (new_crtc_state->freesync_vrr_info_changed)
9030 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9031 			      new_crtc_state->base.crtc->base.id,
9032 			      (int)new_crtc_state->base.vrr_enabled,
9033 			      (int)vrr_params.state);
9034 
9035 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9036 }
9037 
9038 static void update_stream_irq_parameters(
9039 	struct amdgpu_display_manager *dm,
9040 	struct dm_crtc_state *new_crtc_state)
9041 {
9042 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9043 	struct mod_vrr_params vrr_params;
9044 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9045 	struct amdgpu_device *adev = dm->adev;
9046 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9047 	unsigned long flags;
9048 
9049 	if (!new_stream)
9050 		return;
9051 
9052 	/*
9053 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9054 	 * For now it's sufficient to just guard against these conditions.
9055 	 */
9056 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9057 		return;
9058 
9059 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9060 	vrr_params = acrtc->dm_irq_params.vrr_params;
9061 
9062 	if (new_crtc_state->vrr_supported &&
9063 	    config.min_refresh_in_uhz &&
9064 	    config.max_refresh_in_uhz) {
9065 		/*
9066 		 * if freesync compatible mode was set, config.state will be set
9067 		 * in atomic check
9068 		 */
9069 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9070 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9071 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9072 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9073 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9074 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9075 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9076 		} else {
9077 			config.state = new_crtc_state->base.vrr_enabled ?
9078 						     VRR_STATE_ACTIVE_VARIABLE :
9079 						     VRR_STATE_INACTIVE;
9080 		}
9081 	} else {
9082 		config.state = VRR_STATE_UNSUPPORTED;
9083 	}
9084 
9085 	mod_freesync_build_vrr_params(dm->freesync_module,
9086 				      new_stream,
9087 				      &config, &vrr_params);
9088 
9089 	new_crtc_state->freesync_timing_changed |=
9090 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9091 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9092 
9093 	new_crtc_state->freesync_config = config;
9094 	/* Copy state for access from DM IRQ handler */
9095 	acrtc->dm_irq_params.freesync_config = config;
9096 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9097 	acrtc->dm_irq_params.vrr_params = vrr_params;
9098 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9099 }
9100 
9101 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9102 					    struct dm_crtc_state *new_state)
9103 {
9104 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9105 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9106 
9107 	if (!old_vrr_active && new_vrr_active) {
9108 		/* Transition VRR inactive -> active:
9109 		 * While VRR is active, we must not disable vblank irq, as a
9110 		 * reenable after disable would compute bogus vblank/pflip
9111 		 * timestamps if it likely happened inside display front-porch.
9112 		 *
9113 		 * We also need vupdate irq for the actual core vblank handling
9114 		 * at end of vblank.
9115 		 */
9116 		dm_set_vupdate_irq(new_state->base.crtc, true);
9117 		drm_crtc_vblank_get(new_state->base.crtc);
9118 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9119 				 __func__, new_state->base.crtc->base.id);
9120 	} else if (old_vrr_active && !new_vrr_active) {
9121 		/* Transition VRR active -> inactive:
9122 		 * Allow vblank irq disable again for fixed refresh rate.
9123 		 */
9124 		dm_set_vupdate_irq(new_state->base.crtc, false);
9125 		drm_crtc_vblank_put(new_state->base.crtc);
9126 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9127 				 __func__, new_state->base.crtc->base.id);
9128 	}
9129 }
9130 
9131 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9132 {
9133 	struct drm_plane *plane;
9134 	struct drm_plane_state *old_plane_state;
9135 	int i;
9136 
9137 	/*
9138 	 * TODO: Make this per-stream so we don't issue redundant updates for
9139 	 * commits with multiple streams.
9140 	 */
9141 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9142 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9143 			handle_cursor_update(plane, old_plane_state);
9144 }
9145 
9146 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9147 				    struct dc_state *dc_state,
9148 				    struct drm_device *dev,
9149 				    struct amdgpu_display_manager *dm,
9150 				    struct drm_crtc *pcrtc,
9151 				    bool wait_for_vblank)
9152 {
9153 	uint32_t i;
9154 	uint64_t timestamp_ns;
9155 	struct drm_plane *plane;
9156 	struct drm_plane_state *old_plane_state, *new_plane_state;
9157 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9158 	struct drm_crtc_state *new_pcrtc_state =
9159 			drm_atomic_get_new_crtc_state(state, pcrtc);
9160 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9161 	struct dm_crtc_state *dm_old_crtc_state =
9162 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9163 	int planes_count = 0, vpos, hpos;
9164 	unsigned long flags;
9165 	struct amdgpu_bo *abo;
9166 	uint32_t target_vblank, last_flip_vblank;
9167 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9168 	bool pflip_present = false;
9169 	struct {
9170 		struct dc_surface_update surface_updates[MAX_SURFACES];
9171 		struct dc_plane_info plane_infos[MAX_SURFACES];
9172 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9173 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9174 		struct dc_stream_update stream_update;
9175 	} *bundle;
9176 
9177 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9178 
9179 	if (!bundle) {
9180 		dm_error("Failed to allocate update bundle\n");
9181 		goto cleanup;
9182 	}
9183 
9184 	/*
9185 	 * Disable the cursor first if we're disabling all the planes.
9186 	 * It'll remain on the screen after the planes are re-enabled
9187 	 * if we don't.
9188 	 */
9189 	if (acrtc_state->active_planes == 0)
9190 		amdgpu_dm_commit_cursors(state);
9191 
9192 	/* update planes when needed */
9193 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9194 		struct drm_crtc *crtc = new_plane_state->crtc;
9195 		struct drm_crtc_state *new_crtc_state;
9196 		struct drm_framebuffer *fb = new_plane_state->fb;
9197 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9198 		bool plane_needs_flip;
9199 		struct dc_plane_state *dc_plane;
9200 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9201 
9202 		/* Cursor plane is handled after stream updates */
9203 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9204 			continue;
9205 
9206 		if (!fb || !crtc || pcrtc != crtc)
9207 			continue;
9208 
9209 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9210 		if (!new_crtc_state->active)
9211 			continue;
9212 
9213 		dc_plane = dm_new_plane_state->dc_state;
9214 
9215 		bundle->surface_updates[planes_count].surface = dc_plane;
9216 		if (new_pcrtc_state->color_mgmt_changed) {
9217 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9218 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9219 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9220 		}
9221 
9222 		fill_dc_scaling_info(dm->adev, new_plane_state,
9223 				     &bundle->scaling_infos[planes_count]);
9224 
9225 		bundle->surface_updates[planes_count].scaling_info =
9226 			&bundle->scaling_infos[planes_count];
9227 
9228 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9229 
9230 		pflip_present = pflip_present || plane_needs_flip;
9231 
9232 		if (!plane_needs_flip) {
9233 			planes_count += 1;
9234 			continue;
9235 		}
9236 
9237 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9238 		fill_dc_plane_info_and_addr(
9239 			dm->adev, new_plane_state,
9240 			afb->tiling_flags,
9241 			&bundle->plane_infos[planes_count],
9242 			&bundle->flip_addrs[planes_count].address,
9243 			afb->tmz_surface, false);
9244 
9245 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9246 				 new_plane_state->plane->index,
9247 				 bundle->plane_infos[planes_count].dcc.enable);
9248 
9249 		bundle->surface_updates[planes_count].plane_info =
9250 			&bundle->plane_infos[planes_count];
9251 
9252 		/*
9253 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9255 		 */
9256 		bundle->flip_addrs[planes_count].flip_immediate =
9257 			crtc->state->async_flip &&
9258 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9259 
9260 		timestamp_ns = ktime_get_ns();
9261 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9262 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9263 		bundle->surface_updates[planes_count].surface = dc_plane;
9264 
9265 		if (!bundle->surface_updates[planes_count].surface) {
9266 			DRM_ERROR("No surface for CRTC: id=%d\n",
9267 					acrtc_attach->crtc_id);
9268 			continue;
9269 		}
9270 
9271 		if (plane == pcrtc->primary)
9272 			update_freesync_state_on_stream(
9273 				dm,
9274 				acrtc_state,
9275 				acrtc_state->stream,
9276 				dc_plane,
9277 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9278 
9279 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9280 				 __func__,
9281 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9282 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9283 
9284 		planes_count += 1;
9285 
9286 	}
9287 
9288 	if (pflip_present) {
9289 		if (!vrr_active) {
9290 			/* Use old throttling in non-vrr fixed refresh rate mode
9291 			 * to keep flip scheduling based on target vblank counts
9292 			 * working in a backwards compatible way, e.g., for
9293 			 * clients using the GLX_OML_sync_control extension or
9294 			 * DRI3/Present extension with defined target_msc.
9295 			 */
9296 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
9299 			/* For variable refresh rate mode only:
9300 			 * Get vblank of last completed flip to avoid > 1 vrr
9301 			 * flips per video frame by use of throttling, but allow
9302 			 * flip programming anywhere in the possibly large
9303 			 * variable vrr vblank interval for fine-grained flip
9304 			 * timing control and more opportunity to avoid stutter
9305 			 * on late submission of flips.
9306 			 */
9307 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9308 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9309 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9310 		}
9311 
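		/* wait_for_vblank is 0 or 1, so target either this vblank or the next. */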
9312 		target_vblank = last_flip_vblank + wait_for_vblank;
9313 
9314 		/*
9315 		 * Wait until we're out of the vertical blank period before the one
9316 		 * targeted by the flip
9317 		 */
9318 		while ((acrtc_attach->enabled &&
9319 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9320 							    0, &vpos, &hpos, NULL,
9321 							    NULL, &pcrtc->hwmode)
9322 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9323 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9324 			(int)(target_vblank -
9325 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9326 			usleep_range(1000, 1100);
9327 		}
9328 
		/*
9330 		 * Prepare the flip event for the pageflip interrupt to handle.
9331 		 *
9332 		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
9334 		 * from 0 -> n planes we have to skip a hardware generated event
9335 		 * and rely on sending it from software.
9336 		 */
9337 		if (acrtc_attach->base.state->event &&
9338 		    acrtc_state->active_planes > 0 &&
9339 		    !acrtc_state->force_dpms_off) {
9340 			drm_crtc_vblank_get(pcrtc);
9341 
9342 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9343 
9344 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9345 			prepare_flip_isr(acrtc_attach);
9346 
9347 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9348 		}
9349 
9350 		if (acrtc_state->stream) {
9351 			if (acrtc_state->freesync_vrr_info_changed)
9352 				bundle->stream_update.vrr_infopacket =
9353 					&acrtc_state->stream->vrr_infopacket;
9354 		}
9355 	}
9356 
9357 	/* Update the planes if changed or disable if we don't have any. */
9358 	if ((planes_count || acrtc_state->active_planes == 0) &&
9359 		acrtc_state->stream) {
9360 #if defined(CONFIG_DRM_AMD_DC_DCN)
9361 		/*
9362 		 * If PSR or idle optimizations are enabled then flush out
9363 		 * any pending work before hardware programming.
9364 		 */
9365 		if (dm->vblank_control_workqueue)
9366 			flush_workqueue(dm->vblank_control_workqueue);
9367 #endif
9368 
9369 		bundle->stream_update.stream = acrtc_state->stream;
9370 		if (new_pcrtc_state->mode_changed) {
9371 			bundle->stream_update.src = acrtc_state->stream->src;
9372 			bundle->stream_update.dst = acrtc_state->stream->dst;
9373 		}
9374 
9375 		if (new_pcrtc_state->color_mgmt_changed) {
9376 			/*
9377 			 * TODO: This isn't fully correct since we've actually
9378 			 * already modified the stream in place.
9379 			 */
9380 			bundle->stream_update.gamut_remap =
9381 				&acrtc_state->stream->gamut_remap_matrix;
9382 			bundle->stream_update.output_csc_transform =
9383 				&acrtc_state->stream->csc_color_matrix;
9384 			bundle->stream_update.out_transfer_func =
9385 				acrtc_state->stream->out_transfer_func;
9386 		}
9387 
9388 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9389 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9390 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9391 
9392 		/*
9393 		 * If FreeSync state on the stream has changed then we need to
9394 		 * re-adjust the min/max bounds now that DC doesn't handle this
9395 		 * as part of commit.
9396 		 */
9397 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9398 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9399 			dc_stream_adjust_vmin_vmax(
9400 				dm->dc, acrtc_state->stream,
9401 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9402 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9403 		}
9404 		mutex_lock(&dm->dc_lock);
9405 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9406 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9407 			amdgpu_dm_psr_disable(acrtc_state->stream);
9408 
9409 		dc_commit_updates_for_stream(dm->dc,
9410 						     bundle->surface_updates,
9411 						     planes_count,
9412 						     acrtc_state->stream,
9413 						     &bundle->stream_update,
9414 						     dc_state);
9415 
		/*
9417 		 * Enable or disable the interrupts on the backend.
9418 		 *
9419 		 * Most pipes are put into power gating when unused.
9420 		 *
9421 		 * When power gating is enabled on a pipe we lose the
9422 		 * interrupt enablement state when power gating is disabled.
9423 		 *
9424 		 * So we need to update the IRQ control state in hardware
9425 		 * whenever the pipe turns on (since it could be previously
9426 		 * power gated) or off (since some pipes can't be power gated
9427 		 * on some ASICs).
9428 		 */
9429 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9430 			dm_update_pflip_irq_state(drm_to_adev(dev),
9431 						  acrtc_attach);
9432 
9433 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9434 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9435 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9436 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9437 
9438 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9439 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9440 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9441 			struct amdgpu_dm_connector *aconn =
9442 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9443 
9444 			if (aconn->psr_skip_count > 0)
9445 				aconn->psr_skip_count--;
9446 
9447 			/* Allow PSR when skip count is 0. */
9448 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9449 		} else {
9450 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9451 		}
9452 
9453 		mutex_unlock(&dm->dc_lock);
9454 	}
9455 
9456 	/*
9457 	 * Update cursor state *after* programming all the planes.
9458 	 * This avoids redundant programming in the case where we're going
9459 	 * to be disabling a single plane - those pipes are being disabled.
9460 	 */
9461 	if (acrtc_state->active_planes)
9462 		amdgpu_dm_commit_cursors(state);
9463 
9464 cleanup:
9465 	kfree(bundle);
9466 }
9467 
9468 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9469 				   struct drm_atomic_state *state)
9470 {
9471 	struct amdgpu_device *adev = drm_to_adev(dev);
9472 	struct amdgpu_dm_connector *aconnector;
9473 	struct drm_connector *connector;
9474 	struct drm_connector_state *old_con_state, *new_con_state;
9475 	struct drm_crtc_state *new_crtc_state;
9476 	struct dm_crtc_state *new_dm_crtc_state;
9477 	const struct dc_stream_status *status;
9478 	int i, inst;
9479 
9480 	/* Notify device removals. */
9481 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9482 		if (old_con_state->crtc != new_con_state->crtc) {
9483 			/* CRTC changes require notification. */
9484 			goto notify;
9485 		}
9486 
9487 		if (!new_con_state->crtc)
9488 			continue;
9489 
9490 		new_crtc_state = drm_atomic_get_new_crtc_state(
9491 			state, new_con_state->crtc);
9492 
9493 		if (!new_crtc_state)
9494 			continue;
9495 
9496 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9497 			continue;
9498 
9499 	notify:
9500 		aconnector = to_amdgpu_dm_connector(connector);
9501 
9502 		mutex_lock(&adev->dm.audio_lock);
9503 		inst = aconnector->audio_inst;
9504 		aconnector->audio_inst = -1;
9505 		mutex_unlock(&adev->dm.audio_lock);
9506 
9507 		amdgpu_dm_audio_eld_notify(adev, inst);
9508 	}
9509 
9510 	/* Notify audio device additions. */
9511 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9512 		if (!new_con_state->crtc)
9513 			continue;
9514 
9515 		new_crtc_state = drm_atomic_get_new_crtc_state(
9516 			state, new_con_state->crtc);
9517 
9518 		if (!new_crtc_state)
9519 			continue;
9520 
9521 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9522 			continue;
9523 
9524 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9525 		if (!new_dm_crtc_state->stream)
9526 			continue;
9527 
9528 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9529 		if (!status)
9530 			continue;
9531 
9532 		aconnector = to_amdgpu_dm_connector(connector);
9533 
9534 		mutex_lock(&adev->dm.audio_lock);
9535 		inst = status->audio_inst;
9536 		aconnector->audio_inst = inst;
9537 		mutex_unlock(&adev->dm.audio_lock);
9538 
9539 		amdgpu_dm_audio_eld_notify(adev, inst);
9540 	}
9541 }
9542 
/**
9544  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9545  * @crtc_state: the DRM CRTC state
9546  * @stream_state: the DC stream state.
9547  *
9548  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9549  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9550  */
9551 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9552 						struct dc_stream_state *stream_state)
9553 {
9554 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9555 }
9556 
9557 /**
9558  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9559  * @state: The atomic state to commit
9560  *
9561  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered out anything non-kosher.
9564  */
9565 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9566 {
9567 	struct drm_device *dev = state->dev;
9568 	struct amdgpu_device *adev = drm_to_adev(dev);
9569 	struct amdgpu_display_manager *dm = &adev->dm;
9570 	struct dm_atomic_state *dm_state;
9571 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9572 	uint32_t i, j;
9573 	struct drm_crtc *crtc;
9574 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9575 	unsigned long flags;
9576 	bool wait_for_vblank = true;
9577 	struct drm_connector *connector;
9578 	struct drm_connector_state *old_con_state, *new_con_state;
9579 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9580 	int crtc_disable_count = 0;
9581 	bool mode_set_reset_required = false;
9582 	int r;
9583 
9584 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9585 
9586 	r = drm_atomic_helper_wait_for_fences(dev, state, false);
9587 	if (unlikely(r))
9588 		DRM_ERROR("Waiting for fences timed out!");
9589 
9590 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9591 
9592 	dm_state = dm_atomic_get_new_state(state);
9593 	if (dm_state && dm_state->context) {
9594 		dc_state = dm_state->context;
9595 	} else {
9596 		/* No state changes, retain current state. */
9597 		dc_state_temp = dc_create_state(dm->dc);
9598 		ASSERT(dc_state_temp);
9599 		dc_state = dc_state_temp;
9600 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9601 	}
9602 
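	/*
	 * Disable interrupts and release the stream for CRTCs that are being
	 * turned off or going through a full modeset, before committing the
	 * new DC state.
	 */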
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9604 				       new_crtc_state, i) {
9605 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9606 
9607 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9608 
9609 		if (old_crtc_state->active &&
9610 		    (!new_crtc_state->active ||
9611 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9612 			manage_dm_interrupts(adev, acrtc, false);
9613 			dc_stream_release(dm_old_crtc_state->stream);
9614 		}
9615 	}
9616 
9617 	drm_atomic_helper_calc_timestamping_constants(state);
9618 
9619 	/* update changed items */
9620 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9621 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9622 
9623 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9624 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9625 
9626 		drm_dbg_state(state->dev,
9627 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9628 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9629 			"connectors_changed:%d\n",
9630 			acrtc->crtc_id,
9631 			new_crtc_state->enable,
9632 			new_crtc_state->active,
9633 			new_crtc_state->planes_changed,
9634 			new_crtc_state->mode_changed,
9635 			new_crtc_state->active_changed,
9636 			new_crtc_state->connectors_changed);
9637 
9638 		/* Disable cursor if disabling crtc */
9639 		if (old_crtc_state->active && !new_crtc_state->active) {
9640 			struct dc_cursor_position position;
9641 
9642 			memset(&position, 0, sizeof(position));
9643 			mutex_lock(&dm->dc_lock);
9644 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9645 			mutex_unlock(&dm->dc_lock);
9646 		}
9647 
9648 		/* Copy all transient state flags into dc state */
9649 		if (dm_new_crtc_state->stream) {
9650 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9651 							    dm_new_crtc_state->stream);
9652 		}
9653 
		/* Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
9657 
9658 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9659 
9660 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9661 
9662 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In that case userspace tries to set a mode on
				 * a display which is in fact disconnected.
				 * dc_sink is NULL on the aconnector here and we
				 * expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
9678 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9679 						__func__, acrtc->base.base.id);
9680 				continue;
9681 			}
9682 
9683 			if (dm_old_crtc_state->stream)
9684 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9685 
9686 			pm_runtime_get_noresume(dev->dev);
9687 
9688 			acrtc->enabled = true;
9689 			acrtc->hw_mode = new_crtc_state->mode;
9690 			crtc->hwmode = new_crtc_state->mode;
9691 			mode_set_reset_required = true;
9692 		} else if (modereset_required(new_crtc_state)) {
9693 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9694 			/* i.e. reset mode */
9695 			if (dm_old_crtc_state->stream)
9696 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9697 
9698 			mode_set_reset_required = true;
9699 		}
9700 	} /* for_each_crtc_in_state() */
9701 
9702 	if (dc_state) {
		/* If there is a mode set or reset, disable eDP PSR. */
9704 		if (mode_set_reset_required) {
9705 #if defined(CONFIG_DRM_AMD_DC_DCN)
9706 			if (dm->vblank_control_workqueue)
9707 				flush_workqueue(dm->vblank_control_workqueue);
9708 #endif
9709 			amdgpu_dm_psr_disable_all(dm);
9710 		}
9711 
9712 		dm_enable_per_frame_crtc_master_sync(dc_state);
9713 		mutex_lock(&dm->dc_lock);
9714 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9715 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9719 #endif
9720 		mutex_unlock(&dm->dc_lock);
9721 	}
9722 
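	/*
	 * Cache the OTG instance DC assigned to each active stream so that
	 * interrupt handling can map an OTG back to its CRTC.
	 */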
9723 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9724 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9725 
9726 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9727 
9728 		if (dm_new_crtc_state->stream != NULL) {
9729 			const struct dc_stream_status *status =
9730 					dc_stream_get_status(dm_new_crtc_state->stream);
9731 
9732 			if (!status)
9733 				status = dc_stream_get_status_from_state(dc_state,
9734 									 dm_new_crtc_state->stream);
9735 			if (!status)
9736 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9737 			else
9738 				acrtc->otg_inst = status->primary_otg_inst;
9739 		}
9740 	}
9741 #ifdef CONFIG_DRM_AMD_DC_HDCP
9742 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9743 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9744 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9745 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9746 
9747 		new_crtc_state = NULL;
9748 
9749 		if (acrtc)
9750 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9751 
9752 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9753 
9754 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9755 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9756 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9757 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9758 			dm_new_con_state->update_hdcp = true;
9759 			continue;
9760 		}
9761 
9762 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9763 			hdcp_update_display(
9764 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9765 				new_con_state->hdcp_content_type,
9766 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9767 	}
9768 #endif
9769 
9770 	/* Handle connector state changes */
9771 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9772 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9773 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9774 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9775 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9776 		struct dc_stream_update stream_update;
9777 		struct dc_info_packet hdr_packet;
9778 		struct dc_stream_status *status = NULL;
9779 		bool abm_changed, hdr_changed, scaling_changed;
9780 
9781 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9782 		memset(&stream_update, 0, sizeof(stream_update));
9783 
9784 		if (acrtc) {
9785 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9786 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9787 		}
9788 
9789 		/* Skip any modesets/resets */
9790 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9791 			continue;
9792 
9793 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9794 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9795 
9796 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9797 							     dm_old_con_state);
9798 
9799 		abm_changed = dm_new_crtc_state->abm_level !=
9800 			      dm_old_crtc_state->abm_level;
9801 
9802 		hdr_changed =
9803 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9804 
9805 		if (!scaling_changed && !abm_changed && !hdr_changed)
9806 			continue;
9807 
9808 		stream_update.stream = dm_new_crtc_state->stream;
9809 		if (scaling_changed) {
9810 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9811 					dm_new_con_state, dm_new_crtc_state->stream);
9812 
9813 			stream_update.src = dm_new_crtc_state->stream->src;
9814 			stream_update.dst = dm_new_crtc_state->stream->dst;
9815 		}
9816 
9817 		if (abm_changed) {
9818 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9819 
9820 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9821 		}
9822 
9823 		if (hdr_changed) {
9824 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9825 			stream_update.hdr_static_metadata = &hdr_packet;
9826 		}
9827 
9828 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9829 
9830 		if (WARN_ON(!status))
9831 			continue;
9832 
9833 		WARN_ON(!status->plane_count);
9834 
9835 		/*
9836 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9837 		 * Here we create an empty update on each plane.
9838 		 * To fix this, DC should permit updating only stream properties.
9839 		 */
9840 		for (j = 0; j < status->plane_count; j++)
9841 			dummy_updates[j].surface = status->plane_states[0];
9842 
9843 
9844 		mutex_lock(&dm->dc_lock);
9845 		dc_commit_updates_for_stream(dm->dc,
9846 						     dummy_updates,
9847 						     status->plane_count,
9848 						     dm_new_crtc_state->stream,
9849 						     &stream_update,
9850 						     dc_state);
9851 		mutex_unlock(&dm->dc_lock);
9852 	}
9853 
9854 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9855 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9856 				      new_crtc_state, i) {
9857 		if (old_crtc_state->active && !new_crtc_state->active)
9858 			crtc_disable_count++;
9859 
9860 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9861 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9862 
9863 		/* For freesync config update on crtc state and params for irq */
9864 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9865 
9866 		/* Handle vrr on->off / off->on transitions */
9867 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9868 						dm_new_crtc_state);
9869 	}
9870 
	/*
9872 	 * Enable interrupts for CRTCs that are newly enabled or went through
9873 	 * a modeset. It was intentionally deferred until after the front end
9874 	 * state was modified to wait until the OTG was on and so the IRQ
9875 	 * handlers didn't access stale or invalid state.
9876 	 */
9877 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9878 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9879 #ifdef CONFIG_DEBUG_FS
9880 		bool configure_crc = false;
9881 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9882 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9883 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9884 #endif
9885 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9886 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9887 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9888 #endif
9889 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9890 
9891 		if (new_crtc_state->active &&
9892 		    (!old_crtc_state->active ||
9893 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9894 			dc_stream_retain(dm_new_crtc_state->stream);
9895 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9896 			manage_dm_interrupts(adev, acrtc, true);
9897 
9898 #ifdef CONFIG_DEBUG_FS
			/*
9900 			 * Frontend may have changed so reapply the CRC capture
9901 			 * settings for the stream.
9902 			 */
9903 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9904 
9905 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9906 				configure_crc = true;
9907 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9908 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9909 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9910 					acrtc->dm_irq_params.crc_window.update_win = true;
9911 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9912 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9913 					crc_rd_wrk->crtc = crtc;
9914 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9915 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9916 				}
9917 #endif
9918 			}
9919 
9920 			if (configure_crc)
9921 				if (amdgpu_dm_crtc_configure_crc_source(
9922 					crtc, dm_new_crtc_state, cur_crc_src))
9923 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9924 #endif
9925 		}
9926 	}
9927 
9928 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9929 		if (new_crtc_state->async_flip)
9930 			wait_for_vblank = false;
9931 
9932 	/* update planes when needed per crtc*/
9933 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9934 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9935 
9936 		if (dm_new_crtc_state->stream)
9937 			amdgpu_dm_commit_planes(state, dc_state, dev,
9938 						dm, crtc, wait_for_vblank);
9939 	}
9940 
9941 	/* Update audio instances for each connector. */
9942 	amdgpu_dm_commit_audio(dev, state);
9943 
9944 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9945 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9946 	/* restore the backlight level */
9947 	for (i = 0; i < dm->num_of_edps; i++) {
9948 		if (dm->backlight_dev[i] &&
9949 		    (dm->actual_brightness[i] != dm->brightness[i]))
9950 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9951 	}
9952 #endif
9953 	/*
9954 	 * send vblank event on all events not handled in flip and
9955 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9956 	 */
9957 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9958 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9959 
9960 		if (new_crtc_state->event)
9961 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9962 
9963 		new_crtc_state->event = NULL;
9964 	}
9965 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9966 
9967 	/* Signal HW programming completion */
9968 	drm_atomic_helper_commit_hw_done(state);
9969 
9970 	if (wait_for_vblank)
9971 		drm_atomic_helper_wait_for_flip_done(dev, state);
9972 
9973 	drm_atomic_helper_cleanup_planes(dev, state);
9974 
9975 	/* return the stolen vga memory back to VRAM */
9976 	if (!adev->mman.keep_stolen_vga_memory)
9977 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9978 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9979 
9980 	/*
9981 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9982 	 * so we can put the GPU into runtime suspend if we're not driving any
9983 	 * displays anymore
9984 	 */
9985 	for (i = 0; i < crtc_disable_count; i++)
9986 		pm_runtime_put_autosuspend(dev->dev);
9987 	pm_runtime_mark_last_busy(dev->dev);
9988 
9989 	if (dc_state_temp)
9990 		dc_release_state(dc_state_temp);
9991 }
9992 
9993 
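/*
 * Build a minimal atomic state (the connector, its CRTC and the CRTC's
 * primary plane) with mode_changed forced, and commit it so the previously
 * programmed mode is restored without waiting for userspace.
 */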
9994 static int dm_force_atomic_commit(struct drm_connector *connector)
9995 {
9996 	int ret = 0;
9997 	struct drm_device *ddev = connector->dev;
9998 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9999 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10000 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10001 	struct drm_connector_state *conn_state;
10002 	struct drm_crtc_state *crtc_state;
10003 	struct drm_plane_state *plane_state;
10004 
10005 	if (!state)
10006 		return -ENOMEM;
10007 
10008 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10009 
10010 	/* Construct an atomic state to restore previous display setting */
10011 
10012 	/*
10013 	 * Attach connectors to drm_atomic_state
10014 	 */
10015 	conn_state = drm_atomic_get_connector_state(state, connector);
10016 
10017 	ret = PTR_ERR_OR_ZERO(conn_state);
10018 	if (ret)
10019 		goto out;
10020 
10021 	/* Attach crtc to drm_atomic_state*/
10022 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10023 
10024 	ret = PTR_ERR_OR_ZERO(crtc_state);
10025 	if (ret)
10026 		goto out;
10027 
10028 	/* force a restore */
10029 	crtc_state->mode_changed = true;
10030 
10031 	/* Attach plane to drm_atomic_state */
10032 	plane_state = drm_atomic_get_plane_state(state, plane);
10033 
10034 	ret = PTR_ERR_OR_ZERO(plane_state);
10035 	if (ret)
10036 		goto out;
10037 
10038 	/* Call commit internally with the state we just constructed */
10039 	ret = drm_atomic_commit(state);
10040 
10041 out:
10042 	drm_atomic_state_put(state);
10043 	if (ret)
10044 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10045 
10046 	return ret;
10047 }
10048 
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
 */
10054 void dm_restore_drm_connector_state(struct drm_device *dev,
10055 				    struct drm_connector *connector)
10056 {
10057 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10058 	struct amdgpu_crtc *disconnected_acrtc;
10059 	struct dm_crtc_state *acrtc_state;
10060 
10061 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10062 		return;
10063 
10064 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10065 	if (!disconnected_acrtc)
10066 		return;
10067 
10068 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10069 	if (!acrtc_state->stream)
10070 		return;
10071 
10072 	/*
10073 	 * If the previous sink is not released and different from the current,
10074 	 * we deduce we are in a state where we can not rely on usermode call
10075 	 * to turn on the display, so we do it here
10076 	 */
10077 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10078 		dm_force_atomic_commit(&aconnector->base);
10079 }
10080 
/*
 * Grabs all modesetting locks to serialize against any blocking commits and
 * waits for completion of all non-blocking commits.
 */
10085 static int do_aquire_global_lock(struct drm_device *dev,
10086 				 struct drm_atomic_state *state)
10087 {
10088 	struct drm_crtc *crtc;
10089 	struct drm_crtc_commit *commit;
10090 	long ret;
10091 
	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are locking here will get released too.
	 */
10097 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10098 	if (ret)
10099 		return ret;
10100 
10101 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10102 		spin_lock(&crtc->commit_lock);
10103 		commit = list_first_entry_or_null(&crtc->commit_list,
10104 				struct drm_crtc_commit, commit_entry);
10105 		if (commit)
10106 			drm_crtc_commit_get(commit);
10107 		spin_unlock(&crtc->commit_lock);
10108 
10109 		if (!commit)
10110 			continue;
10111 
10112 		/*
10113 		 * Make sure all pending HW programming completed and
10114 		 * page flips done
10115 		 */
10116 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10117 
10118 		if (ret > 0)
10119 			ret = wait_for_completion_interruptible_timeout(
10120 					&commit->flip_done, 10*HZ);
10121 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
10125 
10126 		drm_crtc_commit_put(commit);
10127 	}
10128 
10129 	return ret < 0 ? ret : 0;
10130 }
10131 
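/*
 * Derive the freesync (VRR) configuration for a CRTC from the connector's
 * capabilities and the requested mode: VRR is supported only when the
 * connector is freesync capable and the mode's refresh rate falls within the
 * monitor's [min_vfreq, max_vfreq] range.
 */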
10132 static void get_freesync_config_for_crtc(
10133 	struct dm_crtc_state *new_crtc_state,
10134 	struct dm_connector_state *new_con_state)
10135 {
10136 	struct mod_freesync_config config = {0};
10137 	struct amdgpu_dm_connector *aconnector =
10138 			to_amdgpu_dm_connector(new_con_state->base.connector);
10139 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10140 	int vrefresh = drm_mode_vrefresh(mode);
10141 	bool fs_vid_mode = false;
10142 
10143 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10144 					vrefresh >= aconnector->min_vfreq &&
10145 					vrefresh <= aconnector->max_vfreq;
10146 
10147 	if (new_crtc_state->vrr_supported) {
10148 		new_crtc_state->stream->ignore_msa_timing_param = true;
10149 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10150 
10151 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10152 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10153 		config.vsif_supported = true;
10154 		config.btr = true;
10155 
10156 		if (fs_vid_mode) {
10157 			config.state = VRR_STATE_ACTIVE_FIXED;
10158 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10159 			goto out;
10160 		} else if (new_crtc_state->base.vrr_enabled) {
10161 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10162 		} else {
10163 			config.state = VRR_STATE_INACTIVE;
10164 		}
10165 	}
10166 out:
10167 	new_crtc_state->freesync_config = config;
10168 }
10169 
10170 static void reset_freesync_config_for_crtc(
10171 	struct dm_crtc_state *new_crtc_state)
10172 {
10173 	new_crtc_state->vrr_supported = false;
10174 
10175 	memset(&new_crtc_state->vrr_infopacket, 0,
10176 	       sizeof(new_crtc_state->vrr_infopacket));
10177 }
10178 
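/*
 * Returns true when the old and new modes differ only in vertical blanking
 * (vtotal and vsync position) while the vsync pulse width and all horizontal
 * timing parameters stay identical, i.e. only the vertical front porch
 * changed. Such a change can be absorbed by freesync without a full modeset.
 */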
10179 static bool
10180 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10181 				 struct drm_crtc_state *new_crtc_state)
10182 {
10183 	const struct drm_display_mode *old_mode, *new_mode;
10184 
10185 	if (!old_crtc_state || !new_crtc_state)
10186 		return false;
10187 
10188 	old_mode = &old_crtc_state->mode;
10189 	new_mode = &new_crtc_state->mode;
10190 
10191 	if (old_mode->clock       == new_mode->clock &&
10192 	    old_mode->hdisplay    == new_mode->hdisplay &&
10193 	    old_mode->vdisplay    == new_mode->vdisplay &&
10194 	    old_mode->htotal      == new_mode->htotal &&
10195 	    old_mode->vtotal      != new_mode->vtotal &&
10196 	    old_mode->hsync_start == new_mode->hsync_start &&
10197 	    old_mode->vsync_start != new_mode->vsync_start &&
10198 	    old_mode->hsync_end   == new_mode->hsync_end &&
10199 	    old_mode->vsync_end   != new_mode->vsync_end &&
10200 	    old_mode->hskew       == new_mode->hskew &&
10201 	    old_mode->vscan       == new_mode->vscan &&
10202 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10203 	    (new_mode->vsync_end - new_mode->vsync_start))
10204 		return true;
10205 
10206 	return false;
10207 }
10208 
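/*
 * Put the CRTC into fixed-refresh freesync mode and compute the fixed rate
 * in uHz from the mode: pixel clock (kHz scaled to uHz) divided by the total
 * number of pixels per frame (htotal * vtotal).
 */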
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10210 	uint64_t num, den, res;
10211 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10212 
10213 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10214 
10215 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10216 	den = (unsigned long long)new_crtc_state->mode.htotal *
10217 	      (unsigned long long)new_crtc_state->mode.vtotal;
10218 
10219 	res = div_u64(num, den);
10220 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10221 }
10222 
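/*
 * Create, remove or update the DC stream backing a CRTC as part of atomic
 * check, and record whether full DC global validation (and therefore the
 * global lock) will be needed for this commit.
 */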
10223 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10224 			 struct drm_atomic_state *state,
10225 			 struct drm_crtc *crtc,
10226 			 struct drm_crtc_state *old_crtc_state,
10227 			 struct drm_crtc_state *new_crtc_state,
10228 			 bool enable,
10229 			 bool *lock_and_validation_needed)
10230 {
10231 	struct dm_atomic_state *dm_state = NULL;
10232 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10233 	struct dc_stream_state *new_stream;
10234 	int ret = 0;
10235 
10236 	/*
10237 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10238 	 * update changed items
10239 	 */
10240 	struct amdgpu_crtc *acrtc = NULL;
10241 	struct amdgpu_dm_connector *aconnector = NULL;
10242 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10243 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10244 
10245 	new_stream = NULL;
10246 
10247 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10248 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10249 	acrtc = to_amdgpu_crtc(crtc);
10250 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10251 
10252 	/* TODO This hack should go away */
10253 	if (aconnector && enable) {
10254 		/* Make sure fake sink is created in plug-in scenario */
10255 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10256 							    &aconnector->base);
10257 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10258 							    &aconnector->base);
10259 
10260 		if (IS_ERR(drm_new_conn_state)) {
10261 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10262 			goto fail;
10263 		}
10264 
10265 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10266 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10267 
10268 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10269 			goto skip_modeset;
10270 
10271 		new_stream = create_validate_stream_for_sink(aconnector,
10272 							     &new_crtc_state->mode,
10273 							     dm_new_conn_state,
10274 							     dm_old_crtc_state->stream);
10275 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
10282 
10283 		if (!new_stream) {
10284 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10285 					__func__, acrtc->base.base.id);
10286 			ret = -ENOMEM;
10287 			goto fail;
10288 		}
10289 
10290 		/*
10291 		 * TODO: Check VSDB bits to decide whether this should
10292 		 * be enabled or not.
10293 		 */
10294 		new_stream->triggered_crtc_reset.enabled =
10295 			dm->force_timing_sync;
10296 
10297 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10298 
10299 		ret = fill_hdr_info_packet(drm_new_conn_state,
10300 					   &new_stream->hdr_static_metadata);
10301 		if (ret)
10302 			goto fail;
10303 
10304 		/*
10305 		 * If we already removed the old stream from the context
10306 		 * (and set the new stream to NULL) then we can't reuse
10307 		 * the old stream even if the stream and scaling are unchanged.
10308 		 * We'll hit the BUG_ON and black screen.
10309 		 *
10310 		 * TODO: Refactor this function to allow this check to work
10311 		 * in all conditions.
10312 		 */
10313 		if (dm_new_crtc_state->stream &&
10314 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10315 			goto skip_modeset;
10316 
10317 		if (dm_new_crtc_state->stream &&
10318 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10319 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10320 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
10323 		}
10324 	}
10325 
10326 	/* mode_changed flag may get updated above, need to check again */
10327 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10328 		goto skip_modeset;
10329 
10330 	drm_dbg_state(state->dev,
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
10334 		acrtc->crtc_id,
10335 		new_crtc_state->enable,
10336 		new_crtc_state->active,
10337 		new_crtc_state->planes_changed,
10338 		new_crtc_state->mode_changed,
10339 		new_crtc_state->active_changed,
10340 		new_crtc_state->connectors_changed);
10341 
10342 	/* Remove stream for any changed/disabled CRTC */
10343 	if (!enable) {
10344 
10345 		if (!dm_old_crtc_state->stream)
10346 			goto skip_modeset;
10347 
10348 		if (dm_new_crtc_state->stream &&
10349 		    is_timing_unchanged_for_freesync(new_crtc_state,
10350 						     old_crtc_state)) {
10351 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
10356 
10357 			set_freesync_fixed_config(dm_new_crtc_state);
10358 
10359 			goto skip_modeset;
10360 		} else if (aconnector &&
10361 			   is_freesync_video_mode(&new_crtc_state->mode,
10362 						  aconnector)) {
10363 			struct drm_display_mode *high_mode;
10364 
10365 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
				set_freesync_fixed_config(dm_new_crtc_state);
10369 		}
10370 
10371 		ret = dm_atomic_get_state(state, &dm_state);
10372 		if (ret)
10373 			goto fail;
10374 
10375 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10376 				crtc->base.id);
10377 
10378 		/* i.e. reset mode */
10379 		if (dc_remove_stream_from_ctx(
10380 				dm->dc,
10381 				dm_state->context,
10382 				dm_old_crtc_state->stream) != DC_OK) {
10383 			ret = -EINVAL;
10384 			goto fail;
10385 		}
10386 
10387 		dc_stream_release(dm_old_crtc_state->stream);
10388 		dm_new_crtc_state->stream = NULL;
10389 
10390 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10391 
10392 		*lock_and_validation_needed = true;
10393 
10394 	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when an
		 * added MST connector is not found in the existing crtc_state
		 * in chained (daisy-chain) mode.
		 * TODO: need to dig out the root cause of that
		 */
10400 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10401 			goto skip_modeset;
10402 
10403 		if (modereset_required(new_crtc_state))
10404 			goto skip_modeset;
10405 
10406 		if (modeset_required(new_crtc_state, new_stream,
10407 				     dm_old_crtc_state->stream)) {
10408 
10409 			WARN_ON(dm_new_crtc_state->stream);
10410 
10411 			ret = dm_atomic_get_state(state, &dm_state);
10412 			if (ret)
10413 				goto fail;
10414 
10415 			dm_new_crtc_state->stream = new_stream;
10416 
10417 			dc_stream_retain(new_stream);
10418 
10419 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10420 					 crtc->base.id);
10421 
10422 			if (dc_add_stream_to_ctx(
10423 					dm->dc,
10424 					dm_state->context,
10425 					dm_new_crtc_state->stream) != DC_OK) {
10426 				ret = -EINVAL;
10427 				goto fail;
10428 			}
10429 
10430 			*lock_and_validation_needed = true;
10431 		}
10432 	}
10433 
10434 skip_modeset:
10435 	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);
10438 
10439 	/*
10440 	 * We want to do dc stream updates that do not require a
10441 	 * full modeset below.
10442 	 */
10443 	if (!(enable && aconnector && new_crtc_state->active))
10444 		return 0;
	/*
	 * Given the above conditions, the dc stream cannot be NULL because:
	 * 1. We're in the process of enabling the CRTC (the stream has just
	 *    been added to the dc context, or is already in it),
	 * 2. it has a valid connector attached, and
	 * 3. it is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
10453 	BUG_ON(dm_new_crtc_state->stream == NULL);
10454 
10455 	/* Scaling or underscan settings */
10456 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10457 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10458 		update_stream_scaling_settings(
10459 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10460 
10461 	/* ABM settings */
10462 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10463 
10464 	/*
10465 	 * Color management settings. We also update color properties
10466 	 * when a modeset is needed, to ensure it gets reprogrammed.
10467 	 */
10468 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10469 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10470 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10471 		if (ret)
10472 			goto fail;
10473 	}
10474 
10475 	/* Update Freesync settings. */
10476 	get_freesync_config_for_crtc(dm_new_crtc_state,
10477 				     dm_new_conn_state);
10478 
10479 	return ret;
10480 
10481 fail:
10482 	if (new_stream)
10483 		dc_stream_release(new_stream);
10484 	return ret;
10485 }
10486 
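/*
 * Decide whether the DC planes on a stream must be removed and recreated for
 * this commit. Any change that can affect pipe allocation, z-order or
 * bandwidth (modeset, color management, plane scaling, rotation, blending,
 * pixel format, tiling or DCC) forces a reset of all planes on the CRTC.
 */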
10487 static bool should_reset_plane(struct drm_atomic_state *state,
10488 			       struct drm_plane *plane,
10489 			       struct drm_plane_state *old_plane_state,
10490 			       struct drm_plane_state *new_plane_state)
10491 {
10492 	struct drm_plane *other;
10493 	struct drm_plane_state *old_other_state, *new_other_state;
10494 	struct drm_crtc_state *new_crtc_state;
10495 	int i;
10496 
	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
10502 	if (state->allow_modeset)
10503 		return true;
10504 
10505 	/* Exit early if we know that we're adding or removing the plane. */
10506 	if (old_plane_state->crtc != new_plane_state->crtc)
10507 		return true;
10508 
10509 	/* old crtc == new_crtc == NULL, plane not in context. */
10510 	if (!new_plane_state->crtc)
10511 		return false;
10512 
10513 	new_crtc_state =
10514 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10515 
10516 	if (!new_crtc_state)
10517 		return true;
10518 
10519 	/* CRTC Degamma changes currently require us to recreate planes. */
10520 	if (new_crtc_state->color_mgmt_changed)
10521 		return true;
10522 
10523 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10524 		return true;
10525 
10526 	/*
10527 	 * If there are any new primary or overlay planes being added or
10528 	 * removed then the z-order can potentially change. To ensure
10529 	 * correct z-order and pipe acquisition the current DC architecture
10530 	 * requires us to remove and recreate all existing planes.
10531 	 *
10532 	 * TODO: Come up with a more elegant solution for this.
10533 	 */
10534 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10537 			continue;
10538 
10539 		if (old_other_state->crtc != new_plane_state->crtc &&
10540 		    new_other_state->crtc != new_plane_state->crtc)
10541 			continue;
10542 
10543 		if (old_other_state->crtc != new_other_state->crtc)
10544 			return true;
10545 
10546 		/* Src/dst size and scaling updates. */
10547 		if (old_other_state->src_w != new_other_state->src_w ||
10548 		    old_other_state->src_h != new_other_state->src_h ||
10549 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10550 		    old_other_state->crtc_h != new_other_state->crtc_h)
10551 			return true;
10552 
10553 		/* Rotation / mirroring updates. */
10554 		if (old_other_state->rotation != new_other_state->rotation)
10555 			return true;
10556 
10557 		/* Blending updates. */
10558 		if (old_other_state->pixel_blend_mode !=
10559 		    new_other_state->pixel_blend_mode)
10560 			return true;
10561 
10562 		/* Alpha updates. */
10563 		if (old_other_state->alpha != new_other_state->alpha)
10564 			return true;
10565 
10566 		/* Colorspace changes. */
10567 		if (old_other_state->color_range != new_other_state->color_range ||
10568 		    old_other_state->color_encoding != new_other_state->color_encoding)
10569 			return true;
10570 
10571 		/* Framebuffer checks fall at the end. */
10572 		if (!old_other_state->fb || !new_other_state->fb)
10573 			continue;
10574 
10575 		/* Pixel format changes can require bandwidth updates. */
10576 		if (old_other_state->fb->format != new_other_state->fb->format)
10577 			return true;
10578 
10579 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10580 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10581 
10582 		/* Tiling and DCC changes also require bandwidth updates. */
10583 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10584 		    old_afb->base.modifier != new_afb->base.modifier)
10585 			return true;
10586 	}
10587 
10588 	return false;
10589 }
10590 
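/*
 * Validate a framebuffer attached to the cursor plane: it must fit within
 * the hardware cursor dimensions, be uncropped, use a pitch of 64, 128 or
 * 256 pixels and, when no modifier is supplied, be linear (untiled).
 */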
10591 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10592 			      struct drm_plane_state *new_plane_state,
10593 			      struct drm_framebuffer *fb)
10594 {
10595 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10596 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10597 	unsigned int pitch;
10598 	bool linear;
10599 
10600 	if (fb->width > new_acrtc->max_cursor_width ||
10601 	    fb->height > new_acrtc->max_cursor_height) {
10602 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10603 				 new_plane_state->fb->width,
10604 				 new_plane_state->fb->height);
10605 		return -EINVAL;
10606 	}
10607 	if (new_plane_state->src_w != fb->width << 16 ||
10608 	    new_plane_state->src_h != fb->height << 16) {
10609 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10610 		return -EINVAL;
10611 	}
10612 
10613 	/* Pitch in pixels */
10614 	pitch = fb->pitches[0] / fb->format->cpp[0];
10615 
10616 	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10618 				 fb->width, pitch);
10619 		return -EINVAL;
10620 	}
10621 
10622 	switch (pitch) {
10623 	case 64:
10624 	case 128:
10625 	case 256:
10626 		/* FB pitch is supported by cursor plane */
10627 		break;
10628 	default:
10629 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10630 		return -EINVAL;
10631 	}
10632 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10635 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10636 		if (adev->family < AMDGPU_FAMILY_AI) {
10637 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10638 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10639 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10640 		} else {
10641 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10642 		}
10643 		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10645 			return -EINVAL;
10646 		}
10647 	}
10648 
10649 	return 0;
10650 }
10651 
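/*
 * Add, remove or update the DC plane state backing a DRM plane during atomic
 * check. Cursor planes are only validated here since they have no dedicated
 * DC plane; any other plane change that needs a reset marks the commit as
 * requiring full DC validation.
 */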
10652 static int dm_update_plane_state(struct dc *dc,
10653 				 struct drm_atomic_state *state,
10654 				 struct drm_plane *plane,
10655 				 struct drm_plane_state *old_plane_state,
10656 				 struct drm_plane_state *new_plane_state,
10657 				 bool enable,
10658 				 bool *lock_and_validation_needed)
10659 {
10660 
10661 	struct dm_atomic_state *dm_state = NULL;
10662 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10663 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10664 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10665 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10666 	struct amdgpu_crtc *new_acrtc;
10667 	bool needs_reset;
10668 	int ret = 0;
10669 
10670 
10671 	new_plane_crtc = new_plane_state->crtc;
10672 	old_plane_crtc = old_plane_state->crtc;
10673 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10674 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10675 
10676 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10677 		if (!enable || !new_plane_crtc ||
10678 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10679 			return 0;
10680 
10681 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10682 
10683 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10684 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10685 			return -EINVAL;
10686 		}
10687 
10688 		if (new_plane_state->fb) {
10689 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10690 						 new_plane_state->fb);
10691 			if (ret)
10692 				return ret;
10693 		}
10694 
10695 		return 0;
10696 	}
10697 
10698 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10699 					 new_plane_state);
10700 
10701 	/* Remove any changed/removed planes */
10702 	if (!enable) {
10703 		if (!needs_reset)
10704 			return 0;
10705 
10706 		if (!old_plane_crtc)
10707 			return 0;
10708 
10709 		old_crtc_state = drm_atomic_get_old_crtc_state(
10710 				state, old_plane_crtc);
10711 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10712 
10713 		if (!dm_old_crtc_state->stream)
10714 			return 0;
10715 
10716 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10717 				plane->base.id, old_plane_crtc->base.id);
10718 
10719 		ret = dm_atomic_get_state(state, &dm_state);
10720 		if (ret)
10721 			return ret;
10722 
10723 		if (!dc_remove_plane_from_context(
10724 				dc,
10725 				dm_old_crtc_state->stream,
10726 				dm_old_plane_state->dc_state,
10727 				dm_state->context)) {
10728 
10729 			return -EINVAL;
10730 		}
10731 
10732 
10733 		dc_plane_state_release(dm_old_plane_state->dc_state);
10734 		dm_new_plane_state->dc_state = NULL;
10735 
10736 		*lock_and_validation_needed = true;
10737 
10738 	} else { /* Add new planes */
10739 		struct dc_plane_state *dc_new_plane_state;
10740 
10741 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10742 			return 0;
10743 
10744 		if (!new_plane_crtc)
10745 			return 0;
10746 
10747 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10748 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10749 
10750 		if (!dm_new_crtc_state->stream)
10751 			return 0;
10752 
10753 		if (!needs_reset)
10754 			return 0;
10755 
10756 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10757 		if (ret)
10758 			return ret;
10759 
10760 		WARN_ON(dm_new_plane_state->dc_state);
10761 
10762 		dc_new_plane_state = dc_create_plane_state(dc);
10763 		if (!dc_new_plane_state)
10764 			return -ENOMEM;
10765 
10766 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10767 				 plane->base.id, new_plane_crtc->base.id);
10768 
10769 		ret = fill_dc_plane_attributes(
10770 			drm_to_adev(new_plane_crtc->dev),
10771 			dc_new_plane_state,
10772 			new_plane_state,
10773 			new_crtc_state);
10774 		if (ret) {
10775 			dc_plane_state_release(dc_new_plane_state);
10776 			return ret;
10777 		}
10778 
10779 		ret = dm_atomic_get_state(state, &dm_state);
10780 		if (ret) {
10781 			dc_plane_state_release(dc_new_plane_state);
10782 			return ret;
10783 		}
10784 
10785 		/*
10786 		 * Any atomic check errors that occur after this will
10787 		 * not need a release. The plane state will be attached
10788 		 * to the stream, and therefore part of the atomic
10789 		 * state. It'll be released when the atomic state is
10790 		 * cleaned.
10791 		 */
10792 		if (!dc_add_plane_to_context(
10793 				dc,
10794 				dm_new_crtc_state->stream,
10795 				dc_new_plane_state,
10796 				dm_state->context)) {
10797 
10798 			dc_plane_state_release(dc_new_plane_state);
10799 			return -EINVAL;
10800 		}
10801 
10802 		dm_new_plane_state->dc_state = dc_new_plane_state;
10803 
10804 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10805 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
10809 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10810 
10811 		*lock_and_validation_needed = true;
10812 	}
10813 
10814 
10815 	return ret;
10816 }
10817 
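/*
 * Return the plane's source size in whole pixels (src_w/src_h are 16.16
 * fixed point), swapping width and height for 90/270 degree rotations.
 */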
10818 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10819 				       int *src_w, int *src_h)
10820 {
10821 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10822 	case DRM_MODE_ROTATE_90:
10823 	case DRM_MODE_ROTATE_270:
10824 		*src_w = plane_state->src_h >> 16;
10825 		*src_h = plane_state->src_w >> 16;
10826 		break;
10827 	case DRM_MODE_ROTATE_0:
10828 	case DRM_MODE_ROTATE_180:
10829 	default:
10830 		*src_w = plane_state->src_w >> 16;
10831 		*src_h = plane_state->src_h >> 16;
10832 		break;
10833 	}
10834 }
10835 
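/*
 * Reject commits where the cursor plane would be scaled differently from the
 * underlying planes on the same CRTC, since the cursor inherits the scaling
 * of the pipe it is drawn on.
 */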
10836 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10837 				struct drm_crtc *crtc,
10838 				struct drm_crtc_state *new_crtc_state)
10839 {
10840 	struct drm_plane *cursor = crtc->cursor, *underlying;
10841 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10842 	int i;
10843 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10844 	int cursor_src_w, cursor_src_h;
10845 	int underlying_src_w, underlying_src_h;
10846 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the underlying planes'.
	 */
10851 
10852 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10856 
10857 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10858 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10859 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10860 
10861 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10862 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10863 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10864 			continue;
10865 
10866 		/* Ignore disabled planes */
10867 		if (!new_underlying_state->fb)
10868 			continue;
10869 
10870 		dm_get_oriented_plane_size(new_underlying_state,
10871 					   &underlying_src_w, &underlying_src_h);
10872 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10873 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10874 
10875 		if (cursor_scale_w != underlying_scale_w ||
10876 		    cursor_scale_h != underlying_scale_h) {
10877 			drm_dbg_atomic(crtc->dev,
10878 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10879 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10880 			return -EINVAL;
10881 		}
10882 
10883 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10884 		if (new_underlying_state->crtc_x <= 0 &&
10885 		    new_underlying_state->crtc_y <= 0 &&
10886 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10887 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10888 			break;
10889 	}
10890 
10891 	return 0;
10892 }
10893 
10894 #if defined(CONFIG_DRM_AMD_DC_DCN)
10895 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10896 {
10897 	struct drm_connector *connector;
10898 	struct drm_connector_state *conn_state, *old_conn_state;
10899 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10902 		if (!conn_state->crtc)
10903 			conn_state = old_conn_state;
10904 
10905 		if (conn_state->crtc != crtc)
10906 			continue;
10907 
10908 		aconnector = to_amdgpu_dm_connector(connector);
10909 		if (!aconnector->port || !aconnector->mst_port)
10910 			aconnector = NULL;
10911 		else
10912 			break;
10913 	}
10914 
10915 	if (!aconnector)
10916 		return 0;
10917 
10918 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10919 }
10920 #endif
10921 
10922 /**
10923  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10924  * @dev: The DRM device
10925  * @state: The atomic state to commit
10926  *
10927  * Validate that the given atomic state is programmable by DC into hardware.
10928  * This involves constructing a &struct dc_state reflecting the new hardware
10929  * state we wish to commit, then querying DC to see if it is programmable. It's
10930  * important not to modify the existing DC state. Otherwise, atomic_check
10931  * may unexpectedly commit hardware changes.
10932  *
10933  * When validating the DC state, it's important that the right locks are
10934  * acquired. For full updates case which removes/adds/updates streams on one
10935  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10936  * that any such full update commit will wait for completion of any outstanding
10937  * flip using DRMs synchronization events.
10938  *
10939  * Note that DM adds the affected connectors for all CRTCs in state, when that
10940  * might not seem necessary. This is because DC stream creation requires the
10941  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10942  * be possible but non-trivial - a possible TODO item.
10943  *
10944  * Return: -Error code if validation failed.
10945  */
10946 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10947 				  struct drm_atomic_state *state)
10948 {
10949 	struct amdgpu_device *adev = drm_to_adev(dev);
10950 	struct dm_atomic_state *dm_state = NULL;
10951 	struct dc *dc = adev->dm.dc;
10952 	struct drm_connector *connector;
10953 	struct drm_connector_state *old_con_state, *new_con_state;
10954 	struct drm_crtc *crtc;
10955 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10956 	struct drm_plane *plane;
10957 	struct drm_plane_state *old_plane_state, *new_plane_state;
10958 	enum dc_status status;
10959 	int ret, i;
10960 	bool lock_and_validation_needed = false;
10961 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10962 #if defined(CONFIG_DRM_AMD_DC_DCN)
10963 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10964 	struct drm_dp_mst_topology_state *mst_state;
10965 	struct drm_dp_mst_topology_mgr *mgr;
10966 #endif
10967 
10968 	trace_amdgpu_dm_atomic_check_begin(state);
10969 
10970 	ret = drm_atomic_helper_check_modeset(dev, state);
10971 	if (ret) {
10972 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10973 		goto fail;
10974 	}
10975 
10976 	/* Check connector changes */
10977 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10978 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10979 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10980 
10981 		/* Skip connectors that are disabled or part of modeset already. */
10982 		if (!old_con_state->crtc && !new_con_state->crtc)
10983 			continue;
10984 
10985 		if (!new_con_state->crtc)
10986 			continue;
10987 
10988 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10989 		if (IS_ERR(new_crtc_state)) {
10990 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10991 			ret = PTR_ERR(new_crtc_state);
10992 			goto fail;
10993 		}
10994 
10995 		if (dm_old_con_state->abm_level !=
10996 		    dm_new_con_state->abm_level)
10997 			new_crtc_state->connectors_changed = true;
10998 	}
10999 
11000 #if defined(CONFIG_DRM_AMD_DC_DCN)
11001 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11002 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11003 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11004 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11005 				if (ret) {
11006 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11007 					goto fail;
11008 				}
11009 			}
11010 		}
11011 		pre_validate_dsc(state, &dm_state, vars);
11012 	}
11013 #endif
11014 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11015 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11016 
11017 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11018 		    !new_crtc_state->color_mgmt_changed &&
11019 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11020 			dm_old_crtc_state->dsc_force_changed == false)
11021 			continue;
11022 
11023 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11024 		if (ret) {
11025 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11026 			goto fail;
11027 		}
11028 
11029 		if (!new_crtc_state->enable)
11030 			continue;
11031 
11032 		ret = drm_atomic_add_affected_connectors(state, crtc);
11033 		if (ret) {
11034 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11035 			goto fail;
11036 		}
11037 
11038 		ret = drm_atomic_add_affected_planes(state, crtc);
11039 		if (ret) {
11040 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11041 			goto fail;
11042 		}
11043 
11044 		if (dm_old_crtc_state->dsc_force_changed)
11045 			new_crtc_state->mode_changed = true;
11046 	}
11047 
11048 	/*
11049 	 * Add all primary and overlay planes on the CRTC to the state
11050 	 * whenever a plane is enabled to maintain correct z-ordering
11051 	 * and to enable fast surface updates.
11052 	 */
11053 	drm_for_each_crtc(crtc, dev) {
11054 		bool modified = false;
11055 
11056 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11057 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11058 				continue;
11059 
11060 			if (new_plane_state->crtc == crtc ||
11061 			    old_plane_state->crtc == crtc) {
11062 				modified = true;
11063 				break;
11064 			}
11065 		}
11066 
11067 		if (!modified)
11068 			continue;
11069 
11070 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11071 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11072 				continue;
11073 
11074 			new_plane_state =
11075 				drm_atomic_get_plane_state(state, plane);
11076 
11077 			if (IS_ERR(new_plane_state)) {
11078 				ret = PTR_ERR(new_plane_state);
11079 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11080 				goto fail;
11081 			}
11082 		}
11083 	}
11084 
	/* Remove existing planes if they are modified */
11086 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11087 		ret = dm_update_plane_state(dc, state, plane,
11088 					    old_plane_state,
11089 					    new_plane_state,
11090 					    false,
11091 					    &lock_and_validation_needed);
11092 		if (ret) {
11093 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11094 			goto fail;
11095 		}
11096 	}
11097 
11098 	/* Disable all crtcs which require disable */
11099 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11100 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11101 					   old_crtc_state,
11102 					   new_crtc_state,
11103 					   false,
11104 					   &lock_and_validation_needed);
11105 		if (ret) {
11106 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11107 			goto fail;
11108 		}
11109 	}
11110 
11111 	/* Enable all crtcs which require enable */
11112 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11113 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11114 					   old_crtc_state,
11115 					   new_crtc_state,
11116 					   true,
11117 					   &lock_and_validation_needed);
11118 		if (ret) {
11119 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11120 			goto fail;
11121 		}
11122 	}
11123 
11124 	/* Add new/modified planes */
11125 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11126 		ret = dm_update_plane_state(dc, state, plane,
11127 					    old_plane_state,
11128 					    new_plane_state,
11129 					    true,
11130 					    &lock_and_validation_needed);
11131 		if (ret) {
11132 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11133 			goto fail;
11134 		}
11135 	}
11136 
11137 	/* Run this here since we want to validate the streams we created */
11138 	ret = drm_atomic_helper_check_planes(dev, state);
11139 	if (ret) {
11140 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11141 		goto fail;
11142 	}
11143 
11144 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11145 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11146 		if (dm_new_crtc_state->mpo_requested)
11147 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11148 	}
11149 
11150 	/* Check cursor planes scaling */
11151 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11152 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11153 		if (ret) {
11154 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11155 			goto fail;
11156 		}
11157 	}
11158 
11159 	if (state->legacy_cursor_update) {
11160 		/*
11161 		 * This is a fast cursor update coming from the plane update
11162 		 * helper, check if it can be done asynchronously for better
11163 		 * performance.
11164 		 */
11165 		state->async_update =
11166 			!drm_atomic_helper_async_check(dev, state);
11167 
11168 		/*
11169 		 * Skip the remaining global validation if this is an async
11170 		 * update. Cursor updates can be done without affecting
11171 		 * state or bandwidth calcs and this avoids the performance
11172 		 * penalty of locking the private state object and
11173 		 * allocating a new dc_state.
11174 		 */
11175 		if (state->async_update)
11176 			return 0;
11177 	}
11178 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
11184 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11185 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11186 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11187 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11188 
11189 		/* Skip any modesets/resets */
11190 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11191 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11192 			continue;
11193 
		/* Skip anything that is not a scaling or underscan change */
11195 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11196 			continue;
11197 
11198 		lock_and_validation_needed = true;
11199 	}
11200 
11201 #if defined(CONFIG_DRM_AMD_DC_DCN)
11202 	/* set the slot info for each mst_state based on the link encoding format */
11203 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11204 		struct amdgpu_dm_connector *aconnector;
11205 		struct drm_connector *connector;
11206 		struct drm_connector_list_iter iter;
11207 		u8 link_coding_cap;
11208 
		if (!mgr->mst_state)
11210 			continue;
11211 
11212 		drm_connector_list_iter_begin(dev, &iter);
11213 		drm_for_each_connector_iter(connector, &iter) {
11214 			int id = connector->index;
11215 
11216 			if (id == mst_state->mgr->conn_base_id) {
11217 				aconnector = to_amdgpu_dm_connector(connector);
11218 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11219 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11220 
11221 				break;
11222 			}
11223 		}
11224 		drm_connector_list_iter_end(&iter);
11225 
11226 	}
11227 #endif
	/*
11229 	 * Streams and planes are reset when there are changes that affect
11230 	 * bandwidth. Anything that affects bandwidth needs to go through
11231 	 * DC global validation to ensure that the configuration can be applied
11232 	 * to hardware.
11233 	 *
11234 	 * We have to currently stall out here in atomic_check for outstanding
11235 	 * commits to finish in this case because our IRQ handlers reference
11236 	 * DRM state directly - we can end up disabling interrupts too early
11237 	 * if we don't.
11238 	 *
11239 	 * TODO: Remove this stall and drop DM state private objects.
11240 	 */
11241 	if (lock_and_validation_needed) {
11242 		ret = dm_atomic_get_state(state, &dm_state);
11243 		if (ret) {
11244 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11245 			goto fail;
11246 		}
11247 
11248 		ret = do_aquire_global_lock(dev, state);
11249 		if (ret) {
11250 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11251 			goto fail;
11252 		}
11253 
11254 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}
11259 
11260 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11261 		if (ret) {
11262 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11263 			goto fail;
11264 		}
11265 #endif
11266 
11267 		/*
11268 		 * Perform validation of MST topology in the state:
11269 		 * We need to perform MST atomic check before calling
11270 		 * dc_validate_global_state(), or there is a chance
11271 		 * to get stuck in an infinite loop and hang eventually.
11272 		 */
11273 		ret = drm_dp_mst_atomic_check(state);
11274 		if (ret) {
11275 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11276 			goto fail;
11277 		}
11278 		status = dc_validate_global_state(dc, dm_state->context, true);
11279 		if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
				       dc_status_to_str(status), status);
11282 			ret = -EINVAL;
11283 			goto fail;
11284 		}
11285 	} else {
11286 		/*
11287 		 * The commit is a fast update. Fast updates shouldn't change
11288 		 * the DC context, affect global validation, and can have their
11289 		 * commit work done in parallel with other commits not touching
11290 		 * the same resource. If we have a new DC context as part of
11291 		 * the DM atomic state from validation we need to free it and
11292 		 * retain the existing one instead.
11293 		 *
11294 		 * Furthermore, since the DM atomic state only contains the DC
11295 		 * context and can safely be annulled, we can free the state
11296 		 * and clear the associated private object now to free
11297 		 * some memory and avoid a possible use-after-free later.
11298 		 */
11299 
11300 		for (i = 0; i < state->num_private_objs; i++) {
11301 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11302 
11303 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11304 				int j = state->num_private_objs-1;
11305 
11306 				dm_atomic_destroy_state(obj,
11307 						state->private_objs[i].state);
11308 
11309 				/* If i is not at the end of the array then the
11310 				 * last element needs to be moved to where i was
11311 				 * before the array can safely be truncated.
11312 				 */
11313 				if (i != j)
11314 					state->private_objs[i] =
11315 						state->private_objs[j];
11316 
11317 				state->private_objs[j].ptr = NULL;
11318 				state->private_objs[j].state = NULL;
11319 				state->private_objs[j].old_state = NULL;
11320 				state->private_objs[j].new_state = NULL;
11321 
11322 				state->num_private_objs = j;
11323 				break;
11324 			}
11325 		}
11326 	}
11327 
11328 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11330 		struct dm_crtc_state *dm_new_crtc_state =
11331 			to_dm_crtc_state(new_crtc_state);
11332 
11333 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11334 							 UPDATE_TYPE_FULL :
11335 							 UPDATE_TYPE_FAST;
11336 	}
11337 
11338 	/* Must be success */
11339 	WARN_ON(ret);
11340 
11341 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11342 
11343 	return ret;
11344 
11345 fail:
11346 	if (ret == -EDEADLK)
11347 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11348 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11349 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11350 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11352 
11353 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11354 
11355 	return ret;
11356 }
11357 
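/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether it
 * advertises DP_MSA_TIMING_PAR_IGNORED, i.e. the sink can ignore MSA timing
 * parameters; DM requires this before probing freesync ranges on DP/eDP.
 */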
11358 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11359 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11360 {
11361 	uint8_t dpcd_data;
11362 	bool capable = false;
11363 
11364 	if (amdgpu_dm_connector->dc_link &&
11365 		dm_helpers_dp_read_dpcd(
11366 				NULL,
11367 				amdgpu_dm_connector->dc_link,
11368 				DP_DOWN_STREAM_PORT_COUNT,
11369 				&dpcd_data,
11370 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11372 	}
11373 
11374 	return capable;
11375 }
11376 
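/*
 * Hand one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of the CEA
 * extension block to DMUB for parsing and collect the reply: either an ack
 * for the chunk or, once the block is complete, the decoded AMD VSDB with
 * the supported freesync refresh-rate range.
 */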
11377 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11378 		unsigned int offset,
11379 		unsigned int total_length,
11380 		uint8_t *data,
11381 		unsigned int length,
11382 		struct amdgpu_hdmi_vsdb_info *vsdb)
11383 {
11384 	bool res;
11385 	union dmub_rb_cmd cmd;
11386 	struct dmub_cmd_send_edid_cea *input;
11387 	struct dmub_cmd_edid_cea_output *output;
11388 
11389 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11390 		return false;
11391 
11392 	memset(&cmd, 0, sizeof(cmd));
11393 
11394 	input = &cmd.edid_cea.data.input;
11395 
11396 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11397 	cmd.edid_cea.header.sub_type = 0;
11398 	cmd.edid_cea.header.payload_bytes =
11399 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11400 	input->offset = offset;
11401 	input->length = length;
11402 	input->cea_total_length = total_length;
11403 	memcpy(input->payload, data, length);
11404 
11405 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11406 	if (!res) {
11407 		DRM_ERROR("EDID CEA parser failed\n");
11408 		return false;
11409 	}
11410 
11411 	output = &cmd.edid_cea.data.output;
11412 
11413 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11414 		if (!output->ack.success) {
11415 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11416 					output->ack.offset);
11417 		}
11418 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11419 		if (!output->amd_vsdb.vsdb_found)
11420 			return false;
11421 
11422 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11423 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11424 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11425 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11426 	} else {
11427 		DRM_WARN("Unknown EDID CEA parser results\n");
11428 		return false;
11429 	}
11430 
11431 	return true;
11432 }
11433 
11434 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11435 		uint8_t *edid_ext, int len,
11436 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11437 {
11438 	int i;
11439 
11440 	/* send extension block to DMCU for parsing */
11441 	for (i = 0; i < len; i += 8) {
11442 		bool res;
11443 		int offset;
11444 
11445 		/* send 8 bytes a time */
11446 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11447 			return false;
11448 
		if (i + 8 == len) {
11450 			/* EDID block sent completed, expect result */
11451 			int version, min_rate, max_rate;
11452 
11453 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11454 			if (res) {
11455 				/* amd vsdb found */
11456 				vsdb_info->freesync_supported = 1;
11457 				vsdb_info->amd_vsdb_version = version;
11458 				vsdb_info->min_refresh_rate_hz = min_rate;
11459 				vsdb_info->max_refresh_rate_hz = max_rate;
11460 				return true;
11461 			}
11462 			/* not amd vsdb */
11463 			return false;
11464 		}
11465 
11466 		/* check for ack*/
11467 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11468 		if (!res)
11469 			return false;
11470 	}
11471 
11472 	return false;
11473 }
11474 
11475 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11476 		uint8_t *edid_ext, int len,
11477 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11478 {
11479 	int i;
11480 
	/* send extension block to DMUB for parsing */
11482 	for (i = 0; i < len; i += 8) {
11483 		/* send 8 bytes a time */
11484 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11485 			return false;
11486 	}
11487 
11488 	return vsdb_info->freesync_supported;
11489 }
11490 
11491 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11492 		uint8_t *edid_ext, int len,
11493 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11494 {
11495 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11496 
11497 	if (adev->dm.dmub_srv)
11498 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11499 	else
11500 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11501 }
11502 
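/*
 * Locate the CEA extension block in the EDID and have DMUB/DMCU parse it for
 * an AMD VSDB. Returns the index of the extension block on success, -ENODEV
 * if there is no CEA extension or no valid VSDB was found.
 */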
11503 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11504 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11505 {
11506 	uint8_t *edid_ext = NULL;
11507 	int i;
11508 	bool valid_vsdb_found = false;
11509 
11510 	/*----- drm_find_cea_extension() -----*/
11511 	/* No EDID or EDID extensions */
11512 	if (edid == NULL || edid->extensions == 0)
11513 		return -ENODEV;
11514 
11515 	/* Find CEA extension */
11516 	for (i = 0; i < edid->extensions; i++) {
11517 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11518 		if (edid_ext[0] == CEA_EXT)
11519 			break;
11520 	}
11521 
11522 	if (i == edid->extensions)
11523 		return -ENODEV;
11524 
11525 	/*----- cea_db_offsets() -----*/
11526 	if (edid_ext[0] != CEA_EXT)
11527 		return -ENODEV;
11528 
11529 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11530 
11531 	return valid_vsdb_found ? i : -ENODEV;
11532 }
11533 
11534 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11535 					struct edid *edid)
11536 {
11537 	int i = 0;
11538 	struct detailed_timing *timing;
11539 	struct detailed_non_pixel *data;
11540 	struct detailed_data_monitor_range *range;
11541 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11542 			to_amdgpu_dm_connector(connector);
11543 	struct dm_connector_state *dm_con_state = NULL;
11544 	struct dc_sink *sink;
11545 
11546 	struct drm_device *dev = connector->dev;
11547 	struct amdgpu_device *adev = drm_to_adev(dev);
11548 	bool freesync_capable = false;
11549 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11550 
11551 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
11553 		goto update;
11554 	}
11555 
11556 	sink = amdgpu_dm_connector->dc_sink ?
11557 		amdgpu_dm_connector->dc_sink :
11558 		amdgpu_dm_connector->dc_em_sink;
11559 
11560 	if (!edid || !sink) {
11561 		dm_con_state = to_dm_connector_state(connector->state);
11562 
11563 		amdgpu_dm_connector->min_vfreq = 0;
11564 		amdgpu_dm_connector->max_vfreq = 0;
11565 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11566 		connector->display_info.monitor_range.min_vfreq = 0;
11567 		connector->display_info.monitor_range.max_vfreq = 0;
11568 		freesync_capable = false;
11569 
11570 		goto update;
11571 	}
11572 
11573 	dm_con_state = to_dm_connector_state(connector->state);
11574 
11575 	if (!adev->dm.freesync_module)
11576 		goto update;
11577 
11578 
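	/*
	 * For DP/eDP sinks the refresh rate range comes from the EDID
	 * monitor range descriptor; HDMI sinks are handled below via the
	 * AMD VSDB in the CEA extension block.
	 */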
11579 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11580 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11581 		bool edid_check_required = false;
11582 
11583 		if (edid) {
11584 			edid_check_required = is_dp_capable_without_timing_msa(
11585 						adev->dm.dc,
11586 						amdgpu_dm_connector);
11587 		}
11588 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
11591 			for (i = 0; i < 4; i++) {
11592 
11593 				timing	= &edid->detailed_timings[i];
11594 				data	= &timing->data.other_data;
11595 				range	= &data->data.range;
11596 				/*
11597 				 * Check if monitor has continuous frequency mode
11598 				 */
11599 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11600 					continue;
				/*
				 * Only the "Range Limits Only" descriptor (flags == 1)
				 * is handled; it carries no additional timing
				 * information. Default GTF, GTF secondary curve and
				 * CVT descriptors are not supported.
				 */
11607 				if (range->flags != 1)
11608 					continue;
11609 
11610 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11611 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11612 				amdgpu_dm_connector->pixel_clock_mhz =
11613 					range->pixel_clock_mhz * 10;
11614 
11615 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11616 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11617 
11618 				break;
11619 			}
11620 
11621 			if (amdgpu_dm_connector->max_vfreq -
11622 			    amdgpu_dm_connector->min_vfreq > 10) {
11623 
11624 				freesync_capable = true;
11625 			}
11626 		}
11627 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11628 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11629 		if (i >= 0 && vsdb_info.freesync_supported) {
11630 			timing  = &edid->detailed_timings[i];
11631 			data    = &timing->data.other_data;
11632 
11633 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11634 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11635 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11636 				freesync_capable = true;
11637 
11638 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11639 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11640 		}
11641 	}
11642 
11643 update:
11644 	if (dm_con_state)
11645 		dm_con_state->freesync_capable = freesync_capable;
11646 
11647 	if (connector->vrr_capable_property)
11648 		drm_connector_set_vrr_capable_property(connector,
11649 						       freesync_capable);
11650 }
11651 
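/*
 * Apply the current force_timing_sync setting to every stream in the active
 * DC state and retrigger CRTC synchronization.
 */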
11652 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11653 {
11654 	struct amdgpu_device *adev = drm_to_adev(dev);
11655 	struct dc *dc = adev->dm.dc;
11656 	int i;
11657 
11658 	mutex_lock(&adev->dm.dc_lock);
11659 	if (dc->current_state) {
11660 		for (i = 0; i < dc->current_state->stream_count; ++i)
11661 			dc->current_state->streams[i]
11662 				->triggered_crtc_reset.enabled =
11663 				adev->dm.force_timing_sync;
11664 
11665 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11666 		dc_trigger_sync(dc, dc->current_state);
11667 	}
11668 	mutex_unlock(&adev->dm.dc_lock);
11669 }
11670 
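/*
 * Register access helpers used by DC. Accesses go through CGS and are
 * recorded with tracepoints; when DM_CHECK_ADDR_0 is defined, accesses to
 * register address 0 are rejected.
 */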
11671 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11672 		       uint32_t value, const char *func_name)
11673 {
11674 #ifdef DM_CHECK_ADDR_0
11675 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
11677 		return;
11678 	}
11679 #endif
11680 	cgs_write_register(ctx->cgs_device, address, value);
11681 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11682 }
11683 
11684 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11685 			  const char *func_name)
11686 {
11687 	uint32_t value;
11688 #ifdef DM_CHECK_ADDR_0
11689 	if (address == 0) {
11690 		DC_ERR("invalid register read; address = 0\n");
11691 		return 0;
11692 	}
11693 #endif
11694 
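	/*
	 * Register reads are not expected while a DMUB register offload
	 * gather is in progress, unless it is a burst write sequence.
	 */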
11695 	if (ctx->dmub_srv &&
11696 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11697 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11698 		ASSERT(false);
11699 		return 0;
11700 	}
11701 
11702 	value = cgs_read_register(ctx->cgs_device, address);
11703 
11704 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11705 
11706 	return value;
11707 }
11708 
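/*
 * Convert the DMUB async-to-sync completion status into a return value and
 * an operation result for the caller: the AUX reply length (or 0 for
 * SET_CONFIG) on success, -1 otherwise.
 */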
11709 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11710 						struct dc_context *ctx,
11711 						uint8_t status_type,
11712 						uint32_t *operation_result)
11713 {
11714 	struct amdgpu_device *adev = ctx->driver_context;
11715 	int return_status = -1;
11716 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11717 
11718 	if (is_cmd_aux) {
11719 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11720 			return_status = p_notify->aux_reply.length;
11721 			*operation_result = p_notify->result;
11722 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11723 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11724 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11725 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11726 		} else {
11727 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11728 		}
11729 	} else {
11730 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11731 			return_status = 0;
11732 			*operation_result = p_notify->sc_status;
11733 		} else {
11734 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11735 		}
11736 	}
11737 
11738 	return return_status;
11739 }
11740 
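/*
 * Submit an AUX transfer or SET_CONFIG request to DMUB and translate the
 * asynchronous completion into a synchronous result. AUX requests wait up
 * to ten seconds for the DMUB notification, and any read reply data is
 * copied back into the caller's payload.
 */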
11741 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11742 	unsigned int link_index, void *cmd_payload, void *operation_result)
11743 {
11744 	struct amdgpu_device *adev = ctx->driver_context;
11745 	int ret = 0;
11746 
11747 	if (is_cmd_aux) {
11748 		dc_process_dmub_aux_transfer_async(ctx->dc,
11749 			link_index, (struct aux_payload *)cmd_payload);
11750 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11751 					(struct set_config_cmd_payload *)cmd_payload,
11752 					adev->dm.dmub_notify)) {
11753 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11754 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11755 					(uint32_t *)operation_result);
11756 	}
11757 
11758 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11759 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
11761 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11762 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11763 				(uint32_t *)operation_result);
11764 	}
11765 
11766 	if (is_cmd_aux) {
11767 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11768 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11769 
11770 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11771 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11772 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11773 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11774 				       adev->dm.dmub_notify->aux_reply.length);
11775 			}
11776 		}
11777 	}
11778 
11779 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11780 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11781 			(uint32_t *)operation_result);
11782 }
11783 
11784 /*
11785  * Check whether seamless boot is supported.
11786  *
11787  * So far we only support seamless boot on CHIP_VANGOGH.
11788  * If everything goes well, we may consider expanding
11789  * seamless boot to other ASICs.
11790  */
11791 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11792 {
11793 	switch (adev->asic_type) {
11794 	case CHIP_VANGOGH:
11795 		if (!adev->mman.keep_stolen_vga_memory)
11796 			return true;
11797 		break;
11798 	default:
11799 		break;
11800 	}
11801 
11802 	return false;
11803 }
11804