1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
119 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
121 
122 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
123 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
124 
125 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
126 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
127 
128 /* Number of bytes in PSP header for firmware. */
129 #define PSP_HEADER_BYTES 0x100
130 
131 /* Number of bytes in PSP footer for firmware. */
132 #define PSP_FOOTER_BYTES 0x100
133 
134 /**
135  * DOC: overview
136  *
137  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
138  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
139  * requests into DC requests, and DC responses into DRM responses.
140  *
141  * The root control structure is &struct amdgpu_display_manager.
142  */
143 
144 /* basic init/fini API */
145 static int amdgpu_dm_init(struct amdgpu_device *adev);
146 static void amdgpu_dm_fini(struct amdgpu_device *adev);
147 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
148 
149 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
150 {
151 	switch (link->dpcd_caps.dongle_type) {
152 	case DISPLAY_DONGLE_NONE:
153 		return DRM_MODE_SUBCONNECTOR_Native;
154 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
155 		return DRM_MODE_SUBCONNECTOR_VGA;
156 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
157 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
158 		return DRM_MODE_SUBCONNECTOR_DVID;
159 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
160 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
161 		return DRM_MODE_SUBCONNECTOR_HDMIA;
162 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
163 	default:
164 		return DRM_MODE_SUBCONNECTOR_Unknown;
165 	}
166 }
167 
168 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
169 {
170 	struct dc_link *link = aconnector->dc_link;
171 	struct drm_connector *connector = &aconnector->base;
172 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
173 
174 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
175 		return;
176 
177 	if (aconnector->dc_sink)
178 		subconnector = get_subconnector_type(link);
179 
180 	drm_object_property_set_value(&connector->base,
181 			connector->dev->mode_config.dp_subconnector_property,
182 			subconnector);
183 }
184 
185 /*
186  * Initializes drm_device display related structures, based on the information
187  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
188  * drm_encoder and drm_mode_config.
189  *
190  * Returns 0 on success
191  */
192 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
193 /* Removes and deallocates the drm structures created by the above function. */
194 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
195 
196 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
197 				struct drm_plane *plane,
198 				unsigned long possible_crtcs,
199 				const struct dc_plane_cap *plane_cap);
200 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
201 			       struct drm_plane *plane,
202 			       uint32_t link_index);
203 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
204 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
205 				    uint32_t link_index,
206 				    struct amdgpu_encoder *amdgpu_encoder);
207 static int amdgpu_dm_encoder_init(struct drm_device *dev,
208 				  struct amdgpu_encoder *aencoder,
209 				  uint32_t link_index);
210 
211 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
212 
213 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
214 
215 static int amdgpu_dm_atomic_check(struct drm_device *dev,
216 				  struct drm_atomic_state *state);
217 
218 static void handle_cursor_update(struct drm_plane *plane,
219 				 struct drm_plane_state *old_plane_state);
220 
221 static const struct drm_format_info *
222 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
223 
224 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
225 static void handle_hpd_rx_irq(void *param);
226 
227 static bool
228 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
229 				 struct drm_crtc_state *new_crtc_state);
230 /**
231  * dm_vblank_get_counter() - Get counter for number of vertical blanks
232  *
233  * @adev: [in] desired amdgpu device
234  * @crtc: [in] index of the CRTC to get the counter from
235  *
236  * Return:
237  * Counter for vertical blanks
238  */
243 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
244 {
245 	if (crtc >= adev->mode_info.num_crtc)
246 		return 0;
247 	else {
248 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
249 
250 		if (acrtc->dm_irq_params.stream == NULL) {
251 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
252 				  crtc);
253 			return 0;
254 		}
255 
256 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
257 	}
258 }
259 
260 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
261 				  u32 *vbl, u32 *position)
262 {
263 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
264 
265 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
266 		return -EINVAL;
267 	else {
268 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
269 
270 		if (acrtc->dm_irq_params.stream ==  NULL) {
271 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
272 				  crtc);
273 			return 0;
274 		}
275 
276 		/*
277 		 * TODO rework base driver to use values directly.
278 		 * for now parse it back into reg-format
279 		 */
280 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
281 					 &v_blank_start,
282 					 &v_blank_end,
283 					 &h_position,
284 					 &v_position);
285 
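		/*
		 * Pack the values back into the register-style layout callers
		 * expect: vertical position and vblank start in the low 16
		 * bits, horizontal position and vblank end in the high 16 bits.
		 */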
286 		*position = v_position | (h_position << 16);
287 		*vbl = v_blank_start | (v_blank_end << 16);
288 	}
289 
290 	return 0;
291 }
292 
293 static bool dm_is_idle(void *handle)
294 {
295 	/* XXX todo */
296 	return true;
297 }
298 
299 static int dm_wait_for_idle(void *handle)
300 {
301 	/* XXX todo */
302 	return 0;
303 }
304 
305 static bool dm_check_soft_reset(void *handle)
306 {
307 	return false;
308 }
309 
310 static int dm_soft_reset(void *handle)
311 {
312 	/* XXX todo */
313 	return 0;
314 }
315 
316 static struct amdgpu_crtc *
317 get_crtc_by_otg_inst(struct amdgpu_device *adev,
318 		     int otg_inst)
319 {
320 	struct drm_device *dev = adev_to_drm(adev);
321 	struct drm_crtc *crtc;
322 	struct amdgpu_crtc *amdgpu_crtc;
323 
324 	if (WARN_ON(otg_inst == -1))
325 		return adev->mode_info.crtcs[0];
326 
327 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
328 		amdgpu_crtc = to_amdgpu_crtc(crtc);
329 
330 		if (amdgpu_crtc->otg_inst == otg_inst)
331 			return amdgpu_crtc;
332 	}
333 
334 	return NULL;
335 }
336 
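/*
 * Variant of amdgpu_dm_vrr_active() below that reads the IRQ-side copy of the
 * freesync state (dm_irq_params), for use from the interrupt handlers.
 */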
337 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
338 {
339 	return acrtc->dm_irq_params.freesync_config.state ==
340 		       VRR_STATE_ACTIVE_VARIABLE ||
341 	       acrtc->dm_irq_params.freesync_config.state ==
342 		       VRR_STATE_ACTIVE_FIXED;
343 }
344 
345 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
346 {
347 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
348 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
349 }
350 
351 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
352 					      struct dm_crtc_state *new_state)
353 {
354 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
355 		return true;
356 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
357 		return true;
358 	else
359 		return false;
360 }
361 
362 /**
363  * dm_pflip_high_irq() - Handle pageflip interrupt
364  * @interrupt_params: interrupt parameters, used to look up the CRTC and device
365  *
366  * Handles the pageflip interrupt by notifying all interested parties
367  * that the pageflip has been completed.
368  */
369 static void dm_pflip_high_irq(void *interrupt_params)
370 {
371 	struct amdgpu_crtc *amdgpu_crtc;
372 	struct common_irq_params *irq_params = interrupt_params;
373 	struct amdgpu_device *adev = irq_params->adev;
374 	unsigned long flags;
375 	struct drm_pending_vblank_event *e;
376 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
377 	bool vrr_active;
378 
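	/* The pageflip IRQ sources are laid out per OTG, so the offset from
	 * IRQ_TYPE_PFLIP gives the OTG instance this interrupt fired for.
	 */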
379 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
380 
381 	/* IRQ could occur when in initial stage */
382 	/* TODO work and BO cleanup */
383 	if (amdgpu_crtc == NULL) {
384 		DC_LOG_PFLIP("CRTC is null, returning.\n");
385 		return;
386 	}
387 
388 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
389 
390 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
391 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
392 						 amdgpu_crtc->pflip_status,
393 						 AMDGPU_FLIP_SUBMITTED,
394 						 amdgpu_crtc->crtc_id,
395 						 amdgpu_crtc);
396 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
397 		return;
398 	}
399 
400 	/* page flip completed. */
401 	e = amdgpu_crtc->event;
402 	amdgpu_crtc->event = NULL;
403 
404 	WARN_ON(!e);
405 
406 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
407 
408 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
409 	if (!vrr_active ||
410 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
411 				      &v_blank_end, &hpos, &vpos) ||
412 	    (vpos < v_blank_start)) {
413 		/* Update to correct count and vblank timestamp if racing with
414 		 * vblank irq. This also updates to the correct vblank timestamp
415 		 * even in VRR mode, as scanout is past the front-porch atm.
416 		 */
417 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
418 
419 		/* Wake up userspace by sending the pageflip event with proper
420 		 * count and timestamp of vblank of flip completion.
421 		 */
422 		if (e) {
423 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
424 
425 			/* Event sent, so done with vblank for this flip */
426 			drm_crtc_vblank_put(&amdgpu_crtc->base);
427 		}
428 	} else if (e) {
429 		/* VRR active and inside front-porch: vblank count and
430 		 * timestamp for pageflip event will only be up to date after
431 		 * drm_crtc_handle_vblank() has been executed from late vblank
432 		 * irq handler after start of back-porch (vline 0). We queue the
433 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
434 		 * updated timestamp and count, once it runs after us.
435 		 *
436 		 * We need to open-code this instead of using the helper
437 		 * drm_crtc_arm_vblank_event(), as that helper would
438 		 * call drm_crtc_accurate_vblank_count(), which we must
439 		 * not call in VRR mode while we are in front-porch!
440 		 */
441 
442 		/* sequence will be replaced by real count during send-out. */
443 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
444 		e->pipe = amdgpu_crtc->crtc_id;
445 
446 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
447 		e = NULL;
448 	}
449 
450 	/* Keep track of vblank of this flip for flip throttling. We use the
451 	 * cooked hw counter, as that one is incremented at start of this vblank
452 	 * of pageflip completion, so last_flip_vblank is the forbidden count
453 	 * for queueing new pageflips if vsync + VRR is enabled.
454 	 */
455 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
456 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
457 
458 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
459 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
460 
461 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
462 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
463 		     vrr_active, (int) !e);
464 }
465 
466 static void dm_vupdate_high_irq(void *interrupt_params)
467 {
468 	struct common_irq_params *irq_params = interrupt_params;
469 	struct amdgpu_device *adev = irq_params->adev;
470 	struct amdgpu_crtc *acrtc;
471 	struct drm_device *drm_dev;
472 	struct drm_vblank_crtc *vblank;
473 	ktime_t frame_duration_ns, previous_timestamp;
474 	unsigned long flags;
475 	int vrr_active;
476 
477 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
478 
479 	if (acrtc) {
480 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
481 		drm_dev = acrtc->base.dev;
482 		vblank = &drm_dev->vblank[acrtc->base.index];
483 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
484 		frame_duration_ns = vblank->time - previous_timestamp;
485 
486 		if (frame_duration_ns > 0) {
487 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
488 						frame_duration_ns,
489 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
490 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
491 		}
492 
493 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
494 			      acrtc->crtc_id,
495 			      vrr_active);
496 
497 		/* Core vblank handling is done here after end of front-porch in
498 		 * vrr mode, as vblank timestamping will only give valid results
499 		 * now that scanout is past the front-porch. This will also deliver
500 		 * page-flip completion events that have been queued to us
501 		 * if a pageflip happened inside front-porch.
502 		 */
503 		if (vrr_active) {
504 			drm_crtc_handle_vblank(&acrtc->base);
505 
506 			/* BTR processing for pre-DCE12 ASICs */
507 			if (acrtc->dm_irq_params.stream &&
508 			    adev->family < AMDGPU_FAMILY_AI) {
509 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
510 				mod_freesync_handle_v_update(
511 				    adev->dm.freesync_module,
512 				    acrtc->dm_irq_params.stream,
513 				    &acrtc->dm_irq_params.vrr_params);
514 
515 				dc_stream_adjust_vmin_vmax(
516 				    adev->dm.dc,
517 				    acrtc->dm_irq_params.stream,
518 				    &acrtc->dm_irq_params.vrr_params.adjust);
519 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
520 			}
521 		}
522 	}
523 }
524 
525 /**
526  * dm_crtc_high_irq() - Handles CRTC interrupt
527  * @interrupt_params: used for determining the CRTC instance
528  *
529  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
530  * event handler.
531  */
532 static void dm_crtc_high_irq(void *interrupt_params)
533 {
534 	struct common_irq_params *irq_params = interrupt_params;
535 	struct amdgpu_device *adev = irq_params->adev;
536 	struct amdgpu_crtc *acrtc;
537 	unsigned long flags;
538 	int vrr_active;
539 
540 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
541 	if (!acrtc)
542 		return;
543 
544 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
545 
546 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
547 		      vrr_active, acrtc->dm_irq_params.active_planes);
548 
549 	/**
550 	 * Core vblank handling at start of front-porch is only possible
551 	 * in non-vrr mode, as only then will vblank timestamping give
552 	 * valid results while done in front-porch. Otherwise defer it
553 	 * to dm_vupdate_high_irq after end of front-porch.
554 	 */
555 	if (!vrr_active)
556 		drm_crtc_handle_vblank(&acrtc->base);
557 
558 	/**
559 	 * The following must happen at start of vblank, for crc
560 	 * computation and below-the-range btr support in vrr mode.
561 	 */
562 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
563 
564 	/* BTR updates need to happen before VUPDATE on Vega and above. */
565 	if (adev->family < AMDGPU_FAMILY_AI)
566 		return;
567 
568 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
569 
570 	if (acrtc->dm_irq_params.stream &&
571 	    acrtc->dm_irq_params.vrr_params.supported &&
572 	    acrtc->dm_irq_params.freesync_config.state ==
573 		    VRR_STATE_ACTIVE_VARIABLE) {
574 		mod_freesync_handle_v_update(adev->dm.freesync_module,
575 					     acrtc->dm_irq_params.stream,
576 					     &acrtc->dm_irq_params.vrr_params);
577 
578 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
579 					   &acrtc->dm_irq_params.vrr_params.adjust);
580 	}
581 
582 	/*
583 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
584 	 * In that case, pageflip completion interrupts won't fire and pageflip
585 	 * completion events won't get delivered. Prevent this by sending
586 	 * pending pageflip events from here if a flip is still pending.
587 	 *
588 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
589 	 * avoid race conditions between flip programming and completion,
590 	 * which could cause too early flip completion events.
591 	 */
592 	if (adev->family >= AMDGPU_FAMILY_RV &&
593 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
594 	    acrtc->dm_irq_params.active_planes == 0) {
595 		if (acrtc->event) {
596 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
597 			acrtc->event = NULL;
598 			drm_crtc_vblank_put(&acrtc->base);
599 		}
600 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
601 	}
602 
603 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
604 }
605 
606 #if defined(CONFIG_DRM_AMD_DC_DCN)
607 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
608 /**
609  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
610  * DCN generation ASICs
611  * @interrupt_params: interrupt parameters
612  *
613  * Used to set crc window/read out crc value at vertical line 0 position
614  */
615 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
616 {
617 	struct common_irq_params *irq_params = interrupt_params;
618 	struct amdgpu_device *adev = irq_params->adev;
619 	struct amdgpu_crtc *acrtc;
620 
621 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
622 
623 	if (!acrtc)
624 		return;
625 
626 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
627 }
628 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
629 
630 /**
631  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
632  * @adev: amdgpu_device pointer
633  * @notify: dmub notification structure
634  *
635  * DMUB AUX or SET_CONFIG command completion processing callback.
636  * Copies the dmub notification to DM, to be read by the AUX command
637  * issuing thread, and also signals the event to wake up that thread.
638  */
639 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
640 					struct dmub_notification *notify)
641 {
642 	if (adev->dm.dmub_notify)
643 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
644 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
645 		complete(&adev->dm.dmub_aux_transfer_done);
646 }
647 
648 /**
649  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
650  * @adev: amdgpu_device pointer
651  * @notify: dmub notification structure
652  *
653  * DMUB HPD interrupt processing callback. Gets the display index through
654  * the link index and calls the helper to do the processing.
655  */
656 static void dmub_hpd_callback(struct amdgpu_device *adev,
657 			      struct dmub_notification *notify)
658 {
659 	struct amdgpu_dm_connector *aconnector;
660 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
661 	struct drm_connector *connector;
662 	struct drm_connector_list_iter iter;
663 	struct dc_link *link;
664 	uint8_t link_index = 0;
665 	struct drm_device *dev;
666 
667 	if (adev == NULL)
668 		return;
669 
670 	if (notify == NULL) {
671 		DRM_ERROR("DMUB HPD callback notification was NULL");
672 		return;
673 	}
674 
675 	if (notify->link_index >= adev->dm.dc->link_count) {
676 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
677 		return;
678 	}
679 
680 	link_index = notify->link_index;
681 	link = adev->dm.dc->links[link_index];
682 	dev = adev->dm.ddev;
683 
684 	drm_connector_list_iter_begin(dev, &iter);
685 	drm_for_each_connector_iter(connector, &iter) {
686 		aconnector = to_amdgpu_dm_connector(connector);
687 		if (link && aconnector->dc_link == link) {
688 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
689 			hpd_aconnector = aconnector;
690 			break;
691 		}
692 	}
693 	drm_connector_list_iter_end(&iter);
694 
695 	if (hpd_aconnector) {
696 		if (notify->type == DMUB_NOTIFICATION_HPD)
697 			handle_hpd_irq_helper(hpd_aconnector);
698 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
699 			handle_hpd_rx_irq(hpd_aconnector);
700 	}
701 }
702 
703 /**
704  * register_dmub_notify_callback - Sets callback for DMUB notify
705  * @adev: amdgpu_device pointer
706  * @type: Type of dmub notification
707  * @callback: Dmub interrupt callback function
708  * @dmub_int_thread_offload: offload indicator
709  *
710  * API to register a dmub callback handler for a dmub notification.
711  * Also sets an indicator whether callback processing is to be offloaded
712  * to the dmub interrupt handling thread.
713  * Return: true if successfully registered, false otherwise
714  */
715 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
716 					  enum dmub_notification_type type,
717 					  dmub_notify_interrupt_callback_t callback,
718 					  bool dmub_int_thread_offload)
719 {
720 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
721 		adev->dm.dmub_callback[type] = callback;
722 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
723 	} else
724 		return false;
725 
726 	return true;
727 }
728 
729 static void dm_handle_hpd_work(struct work_struct *work)
730 {
731 	struct dmub_hpd_work *dmub_hpd_wrk;
732 
733 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
734 
735 	if (!dmub_hpd_wrk->dmub_notify) {
736 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
737 		return;
738 	}
739 
740 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
741 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
742 		dmub_hpd_wrk->dmub_notify);
743 	}
744 
745 	kfree(dmub_hpd_wrk->dmub_notify);
746 	kfree(dmub_hpd_wrk);
747 
748 }
749 
750 #define DMUB_TRACE_MAX_READ 64
751 /**
752  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
753  * @interrupt_params: used for determining the Outbox instance
754  *
755  * Handles the Outbox interrupt by processing DMUB notifications and
756  * draining the DMCUB trace buffer.
757  */
758 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
759 {
760 	struct dmub_notification notify;
761 	struct common_irq_params *irq_params = interrupt_params;
762 	struct amdgpu_device *adev = irq_params->adev;
763 	struct amdgpu_display_manager *dm = &adev->dm;
764 	struct dmcub_trace_buf_entry entry = { 0 };
765 	uint32_t count = 0;
766 	struct dmub_hpd_work *dmub_hpd_wrk;
767 	struct dc_link *plink = NULL;
768 
769 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
770 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
771 
772 		do {
773 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
774 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
775 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
776 				continue;
777 			}
778 			if (!dm->dmub_callback[notify.type]) {
779 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
780 				continue;
781 			}
782 			if (dm->dmub_thread_offload[notify.type]) {
783 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
784 				if (!dmub_hpd_wrk) {
785 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
786 					return;
787 				}
788 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
789 				if (!dmub_hpd_wrk->dmub_notify) {
790 					kfree(dmub_hpd_wrk);
791 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
792 					return;
793 				}
794 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
795 				if (dmub_hpd_wrk->dmub_notify)
796 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
797 				dmub_hpd_wrk->adev = adev;
798 				if (notify.type == DMUB_NOTIFICATION_HPD) {
799 					plink = adev->dm.dc->links[notify.link_index];
800 					if (plink) {
801 						plink->hpd_status =
802 							notify.hpd_status == DP_HPD_PLUG;
803 					}
804 				}
805 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
806 			} else {
807 				dm->dmub_callback[notify.type](adev, &notify);
808 			}
809 		} while (notify.pending_notification);
810 	}
811 
812 
813 	do {
814 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
815 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
816 							entry.param0, entry.param1);
817 
818 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
819 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
820 		} else
821 			break;
822 
823 		count++;
824 
825 	} while (count <= DMUB_TRACE_MAX_READ);
826 
827 	if (count > DMUB_TRACE_MAX_READ)
828 		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
829 }
830 #endif /* CONFIG_DRM_AMD_DC_DCN */
831 
832 static int dm_set_clockgating_state(void *handle,
833 		  enum amd_clockgating_state state)
834 {
835 	return 0;
836 }
837 
838 static int dm_set_powergating_state(void *handle,
839 		  enum amd_powergating_state state)
840 {
841 	return 0;
842 }
843 
844 /* Prototypes of private functions */
845 static int dm_early_init(void *handle);
846 
847 /* Allocate memory for FBC compressed data  */
848 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
849 {
850 	struct drm_device *dev = connector->dev;
851 	struct amdgpu_device *adev = drm_to_adev(dev);
852 	struct dm_compressor_info *compressor = &adev->dm.compressor;
853 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
854 	struct drm_display_mode *mode;
855 	unsigned long max_size = 0;
856 
857 	if (adev->dm.dc->fbc_compressor == NULL)
858 		return;
859 
860 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
861 		return;
862 
863 	if (compressor->bo_ptr)
864 		return;
865 
866 
867 	list_for_each_entry(mode, &connector->modes, head) {
868 		if (max_size < mode->htotal * mode->vtotal)
869 			max_size = mode->htotal * mode->vtotal;
870 	}
871 
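	/* Size the compressor buffer for the largest listed mode, assuming
	 * 4 bytes per pixel.
	 */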
872 	if (max_size) {
873 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
874 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
875 			    &compressor->gpu_addr, &compressor->cpu_addr);
876 
877 		if (r)
878 			DRM_ERROR("DM: Failed to initialize FBC\n");
879 		else {
880 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
881 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
882 		}
883 
884 	}
885 
886 }
887 
888 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
889 					  int pipe, bool *enabled,
890 					  unsigned char *buf, int max_bytes)
891 {
892 	struct drm_device *dev = dev_get_drvdata(kdev);
893 	struct amdgpu_device *adev = drm_to_adev(dev);
894 	struct drm_connector *connector;
895 	struct drm_connector_list_iter conn_iter;
896 	struct amdgpu_dm_connector *aconnector;
897 	int ret = 0;
898 
899 	*enabled = false;
900 
901 	mutex_lock(&adev->dm.audio_lock);
902 
903 	drm_connector_list_iter_begin(dev, &conn_iter);
904 	drm_for_each_connector_iter(connector, &conn_iter) {
905 		aconnector = to_amdgpu_dm_connector(connector);
906 		if (aconnector->audio_inst != port)
907 			continue;
908 
909 		*enabled = true;
910 		ret = drm_eld_size(connector->eld);
911 		memcpy(buf, connector->eld, min(max_bytes, ret));
912 
913 		break;
914 	}
915 	drm_connector_list_iter_end(&conn_iter);
916 
917 	mutex_unlock(&adev->dm.audio_lock);
918 
919 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
920 
921 	return ret;
922 }
923 
924 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
925 	.get_eld = amdgpu_dm_audio_component_get_eld,
926 };
927 
928 static int amdgpu_dm_audio_component_bind(struct device *kdev,
929 				       struct device *hda_kdev, void *data)
930 {
931 	struct drm_device *dev = dev_get_drvdata(kdev);
932 	struct amdgpu_device *adev = drm_to_adev(dev);
933 	struct drm_audio_component *acomp = data;
934 
935 	acomp->ops = &amdgpu_dm_audio_component_ops;
936 	acomp->dev = kdev;
937 	adev->dm.audio_component = acomp;
938 
939 	return 0;
940 }
941 
942 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
943 					  struct device *hda_kdev, void *data)
944 {
945 	struct drm_device *dev = dev_get_drvdata(kdev);
946 	struct amdgpu_device *adev = drm_to_adev(dev);
947 	struct drm_audio_component *acomp = data;
948 
949 	acomp->ops = NULL;
950 	acomp->dev = NULL;
951 	adev->dm.audio_component = NULL;
952 }
953 
954 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
955 	.bind	= amdgpu_dm_audio_component_bind,
956 	.unbind	= amdgpu_dm_audio_component_unbind,
957 };
958 
959 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
960 {
961 	int i, ret;
962 
963 	if (!amdgpu_audio)
964 		return 0;
965 
966 	adev->mode_info.audio.enabled = true;
967 
968 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
969 
970 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
971 		adev->mode_info.audio.pin[i].channels = -1;
972 		adev->mode_info.audio.pin[i].rate = -1;
973 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
974 		adev->mode_info.audio.pin[i].status_bits = 0;
975 		adev->mode_info.audio.pin[i].category_code = 0;
976 		adev->mode_info.audio.pin[i].connected = false;
977 		adev->mode_info.audio.pin[i].id =
978 			adev->dm.dc->res_pool->audios[i]->inst;
979 		adev->mode_info.audio.pin[i].offset = 0;
980 	}
981 
982 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
983 	if (ret < 0)
984 		return ret;
985 
986 	adev->dm.audio_registered = true;
987 
988 	return 0;
989 }
990 
991 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
992 {
993 	if (!amdgpu_audio)
994 		return;
995 
996 	if (!adev->mode_info.audio.enabled)
997 		return;
998 
999 	if (adev->dm.audio_registered) {
1000 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1001 		adev->dm.audio_registered = false;
1002 	}
1003 
1004 	/* TODO: Disable audio? */
1005 
1006 	adev->mode_info.audio.enabled = false;
1007 }
1008 
1009 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1010 {
1011 	struct drm_audio_component *acomp = adev->dm.audio_component;
1012 
1013 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1014 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1015 
1016 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1017 						 pin, -1);
1018 	}
1019 }
1020 
1021 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1022 {
1023 	const struct dmcub_firmware_header_v1_0 *hdr;
1024 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1025 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1026 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1027 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1028 	struct abm *abm = adev->dm.dc->res_pool->abm;
1029 	struct dmub_srv_hw_params hw_params;
1030 	enum dmub_status status;
1031 	const unsigned char *fw_inst_const, *fw_bss_data;
1032 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1033 	bool has_hw_support;
1034 
1035 	if (!dmub_srv)
1036 		/* DMUB isn't supported on the ASIC. */
1037 		return 0;
1038 
1039 	if (!fb_info) {
1040 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1041 		return -EINVAL;
1042 	}
1043 
1044 	if (!dmub_fw) {
1045 		/* Firmware required for DMUB support. */
1046 		DRM_ERROR("No firmware provided for DMUB.\n");
1047 		return -EINVAL;
1048 	}
1049 
1050 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1051 	if (status != DMUB_STATUS_OK) {
1052 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1053 		return -EINVAL;
1054 	}
1055 
1056 	if (!has_hw_support) {
1057 		DRM_INFO("DMUB unsupported on ASIC\n");
1058 		return 0;
1059 	}
1060 
1061 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1062 	status = dmub_srv_hw_reset(dmub_srv);
1063 	if (status != DMUB_STATUS_OK)
1064 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1065 
1066 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1067 
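	/*
	 * The instruction/constant region starts right after the PSP header in
	 * the ucode array; inst_const_bytes accounts for both the PSP header
	 * and footer, which are stripped from the backdoor-load size below.
	 */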
1068 	fw_inst_const = dmub_fw->data +
1069 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1070 			PSP_HEADER_BYTES;
1071 
1072 	fw_bss_data = dmub_fw->data +
1073 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1074 		      le32_to_cpu(hdr->inst_const_bytes);
1075 
1076 	/* Copy firmware and bios info into FB memory. */
1077 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1078 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1079 
1080 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1081 
1082 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1083 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1084 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1085 	 * will be done by dm_dmub_hw_init
1086 	 */
1087 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1088 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1089 				fw_inst_const_size);
1090 	}
1091 
1092 	if (fw_bss_data_size)
1093 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1094 		       fw_bss_data, fw_bss_data_size);
1095 
1096 	/* Copy firmware bios info into FB memory. */
1097 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1098 	       adev->bios_size);
1099 
1100 	/* Reset regions that need to be reset. */
1101 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1102 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1103 
1104 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1105 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1106 
1107 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1108 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1109 
1110 	/* Initialize hardware. */
1111 	memset(&hw_params, 0, sizeof(hw_params));
1112 	hw_params.fb_base = adev->gmc.fb_start;
1113 	hw_params.fb_offset = adev->gmc.aper_base;
1114 
1115 	/* backdoor load firmware and trigger dmub running */
1116 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1117 		hw_params.load_inst_const = true;
1118 
1119 	if (dmcu)
1120 		hw_params.psp_version = dmcu->psp_version;
1121 
1122 	for (i = 0; i < fb_info->num_fb; ++i)
1123 		hw_params.fb[i] = &fb_info->fb[i];
1124 
1125 	switch (adev->ip_versions[DCE_HWIP][0]) {
1126 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1127 		hw_params.dpia_supported = true;
1128 #if defined(CONFIG_DRM_AMD_DC_DCN)
1129 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1130 #endif
1131 		break;
1132 	default:
1133 		break;
1134 	}
1135 
1136 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1137 	if (status != DMUB_STATUS_OK) {
1138 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1139 		return -EINVAL;
1140 	}
1141 
1142 	/* Wait for firmware load to finish. */
1143 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1144 	if (status != DMUB_STATUS_OK)
1145 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1146 
1147 	/* Init DMCU and ABM if available. */
1148 	if (dmcu && abm) {
1149 		dmcu->funcs->dmcu_init(dmcu);
1150 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1151 	}
1152 
1153 	if (!adev->dm.dc->ctx->dmub_srv)
1154 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1155 	if (!adev->dm.dc->ctx->dmub_srv) {
1156 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1157 		return -ENOMEM;
1158 	}
1159 
1160 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1161 		 adev->dm.dmcub_fw_version);
1162 
1163 	return 0;
1164 }
1165 
1166 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1167 {
1168 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1169 	enum dmub_status status;
1170 	bool init;
1171 
1172 	if (!dmub_srv) {
1173 		/* DMUB isn't supported on the ASIC. */
1174 		return;
1175 	}
1176 
1177 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1178 	if (status != DMUB_STATUS_OK)
1179 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1180 
1181 	if (status == DMUB_STATUS_OK && init) {
1182 		/* Wait for firmware load to finish. */
1183 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1184 		if (status != DMUB_STATUS_OK)
1185 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1186 	} else {
1187 		/* Perform the full hardware initialization. */
1188 		dm_dmub_hw_init(adev);
1189 	}
1190 }
1191 
1192 #if defined(CONFIG_DRM_AMD_DC_DCN)
1193 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1194 {
1195 	uint64_t pt_base;
1196 	uint32_t logical_addr_low;
1197 	uint32_t logical_addr_high;
1198 	uint32_t agp_base, agp_bot, agp_top;
1199 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1200 
1201 	memset(pa_config, 0, sizeof(*pa_config));
1202 
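	/*
	 * The system aperture is tracked in 256 KiB units (>> 18), the AGP
	 * aperture in 16 MiB units (>> 24) and the GART page table in 4 KiB
	 * pages (>> 12); pa_config converts these back to byte addresses below.
	 */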
1203 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1204 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1205 
1206 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1207 		/*
1208 		 * Raven2 has a HW issue where it cannot use vram that lies above
1209 		 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
1210 		 * system aperture high address (add 1) to get rid of the VM fault
1211 		 * and hardware hang.
1212 		 */
1213 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1214 	else
1215 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1216 
1217 	agp_base = 0;
1218 	agp_bot = adev->gmc.agp_start >> 24;
1219 	agp_top = adev->gmc.agp_end >> 24;
1220 
1221 
1222 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1223 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1224 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1225 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1226 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1227 	page_table_base.low_part = lower_32_bits(pt_base);
1228 
1229 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1230 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1231 
1232 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1233 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1234 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1235 
1236 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1237 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1238 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1239 
1240 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1241 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1242 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1243 
1244 	pa_config->is_hvm_enabled = 0;
1245 
1246 }
1247 #endif
1248 #if defined(CONFIG_DRM_AMD_DC_DCN)
1249 static void vblank_control_worker(struct work_struct *work)
1250 {
1251 	struct vblank_control_work *vblank_work =
1252 		container_of(work, struct vblank_control_work, work);
1253 	struct amdgpu_display_manager *dm = vblank_work->dm;
1254 
1255 	mutex_lock(&dm->dc_lock);
1256 
1257 	if (vblank_work->enable)
1258 		dm->active_vblank_irq_count++;
1259 	else if (dm->active_vblank_irq_count)
1260 		dm->active_vblank_irq_count--;
1261 
1262 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1263 
1264 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1265 
1266 	/* Control PSR based on vblank requirements from OS */
1267 	if (vblank_work->stream && vblank_work->stream->link) {
1268 		if (vblank_work->enable) {
1269 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1270 				amdgpu_dm_psr_disable(vblank_work->stream);
1271 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1272 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1273 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1274 			amdgpu_dm_psr_enable(vblank_work->stream);
1275 		}
1276 	}
1277 
1278 	mutex_unlock(&dm->dc_lock);
1279 
1280 	dc_stream_release(vblank_work->stream);
1281 
1282 	kfree(vblank_work);
1283 }
1284 
1285 #endif
1286 
1287 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1288 {
1289 	struct hpd_rx_irq_offload_work *offload_work;
1290 	struct amdgpu_dm_connector *aconnector;
1291 	struct dc_link *dc_link;
1292 	struct amdgpu_device *adev;
1293 	enum dc_connection_type new_connection_type = dc_connection_none;
1294 	unsigned long flags;
1295 
1296 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1297 	aconnector = offload_work->offload_wq->aconnector;
1298 
1299 	if (!aconnector) {
1300 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1301 		goto skip;
1302 	}
1303 
1304 	adev = drm_to_adev(aconnector->base.dev);
1305 	dc_link = aconnector->dc_link;
1306 
1307 	mutex_lock(&aconnector->hpd_lock);
1308 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1309 		DRM_ERROR("KMS: Failed to detect connector\n");
1310 	mutex_unlock(&aconnector->hpd_lock);
1311 
1312 	if (new_connection_type == dc_connection_none)
1313 		goto skip;
1314 
1315 	if (amdgpu_in_reset(adev))
1316 		goto skip;
1317 
1318 	mutex_lock(&adev->dm.dc_lock);
1319 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1320 		dc_link_dp_handle_automated_test(dc_link);
1321 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1322 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1323 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1324 		dc_link_dp_handle_link_loss(dc_link);
1325 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1326 		offload_work->offload_wq->is_handling_link_loss = false;
1327 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1328 	}
1329 	mutex_unlock(&adev->dm.dc_lock);
1330 
1331 skip:
1332 	kfree(offload_work);
1333 
1334 }
1335 
1336 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1337 {
1338 	int max_caps = dc->caps.max_links;
1339 	int i = 0;
1340 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1341 
1342 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1343 
1344 	if (!hpd_rx_offload_wq)
1345 		return NULL;
1346 
1347 
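	/*
	 * One single-threaded workqueue per link, so HPD RX offload work for a
	 * given link is serialized.
	 */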
1348 	for (i = 0; i < max_caps; i++) {
1349 		hpd_rx_offload_wq[i].wq =
1350 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1351 
1352 		if (hpd_rx_offload_wq[i].wq == NULL) {
1353 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1354 			return NULL;
1355 		}
1356 
1357 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1358 	}
1359 
1360 	return hpd_rx_offload_wq;
1361 }
1362 
1363 struct amdgpu_stutter_quirk {
1364 	u16 chip_vendor;
1365 	u16 chip_device;
1366 	u16 subsys_vendor;
1367 	u16 subsys_device;
1368 	u8 revision;
1369 };
1370 
1371 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1372 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1373 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1374 	{ 0, 0, 0, 0, 0 },
1375 };
1376 
1377 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1378 {
1379 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1380 
1381 	while (p && p->chip_device != 0) {
1382 		if (pdev->vendor == p->chip_vendor &&
1383 		    pdev->device == p->chip_device &&
1384 		    pdev->subsystem_vendor == p->subsys_vendor &&
1385 		    pdev->subsystem_device == p->subsys_device &&
1386 		    pdev->revision == p->revision) {
1387 			return true;
1388 		}
1389 		++p;
1390 	}
1391 	return false;
1392 }
1393 
1394 static int amdgpu_dm_init(struct amdgpu_device *adev)
1395 {
1396 	struct dc_init_data init_data;
1397 #ifdef CONFIG_DRM_AMD_DC_HDCP
1398 	struct dc_callback_init init_params;
1399 #endif
1400 	int r;
1401 
1402 	adev->dm.ddev = adev_to_drm(adev);
1403 	adev->dm.adev = adev;
1404 
1405 	/* Zero all the fields */
1406 	memset(&init_data, 0, sizeof(init_data));
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408 	memset(&init_params, 0, sizeof(init_params));
1409 #endif
1410 
1411 	mutex_init(&adev->dm.dc_lock);
1412 	mutex_init(&adev->dm.audio_lock);
1413 #if defined(CONFIG_DRM_AMD_DC_DCN)
1414 	spin_lock_init(&adev->dm.vblank_lock);
1415 #endif
1416 
1417 	if (amdgpu_dm_irq_init(adev)) {
1418 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1419 		goto error;
1420 	}
1421 
1422 	init_data.asic_id.chip_family = adev->family;
1423 
1424 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1425 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1426 	init_data.asic_id.chip_id = adev->pdev->device;
1427 
1428 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1429 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1430 	init_data.asic_id.atombios_base_address =
1431 		adev->mode_info.atom_context->bios;
1432 
1433 	init_data.driver = adev;
1434 
1435 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1436 
1437 	if (!adev->dm.cgs_device) {
1438 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1439 		goto error;
1440 	}
1441 
1442 	init_data.cgs_device = adev->dm.cgs_device;
1443 
1444 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1445 
1446 	switch (adev->ip_versions[DCE_HWIP][0]) {
1447 	case IP_VERSION(2, 1, 0):
1448 		switch (adev->dm.dmcub_fw_version) {
1449 		case 0: /* development */
1450 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1451 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1452 			init_data.flags.disable_dmcu = false;
1453 			break;
1454 		default:
1455 			init_data.flags.disable_dmcu = true;
1456 		}
1457 		break;
1458 	case IP_VERSION(2, 0, 3):
1459 		init_data.flags.disable_dmcu = true;
1460 		break;
1461 	default:
1462 		break;
1463 	}
1464 
1465 	switch (adev->asic_type) {
1466 	case CHIP_CARRIZO:
1467 	case CHIP_STONEY:
1468 		init_data.flags.gpu_vm_support = true;
1469 		break;
1470 	default:
1471 		switch (adev->ip_versions[DCE_HWIP][0]) {
1472 		case IP_VERSION(1, 0, 0):
1473 		case IP_VERSION(1, 0, 1):
1474 			/* enable S/G on PCO and RV2 */
1475 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1476 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1477 				init_data.flags.gpu_vm_support = true;
1478 			break;
1479 		case IP_VERSION(2, 1, 0):
1480 		case IP_VERSION(3, 0, 1):
1481 		case IP_VERSION(3, 1, 2):
1482 		case IP_VERSION(3, 1, 3):
1483 		case IP_VERSION(3, 1, 5):
1484 		case IP_VERSION(3, 1, 6):
1485 			init_data.flags.gpu_vm_support = true;
1486 			break;
1487 		default:
1488 			break;
1489 		}
1490 		break;
1491 	}
1492 
1493 	if (init_data.flags.gpu_vm_support)
1494 		adev->mode_info.gpu_vm_support = true;
1495 
1496 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1497 		init_data.flags.fbc_support = true;
1498 
1499 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1500 		init_data.flags.multi_mon_pp_mclk_switch = true;
1501 
1502 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1503 		init_data.flags.disable_fractional_pwm = true;
1504 
1505 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1506 		init_data.flags.edp_no_power_sequencing = true;
1507 
1508 #ifdef CONFIG_DRM_AMD_DC_DCN
1509 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1510 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1511 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1512 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1513 #endif
1514 
1515 	init_data.flags.seamless_boot_edp_requested = false;
1516 
1517 	if (check_seamless_boot_capability(adev)) {
1518 		init_data.flags.seamless_boot_edp_requested = true;
1519 		init_data.flags.allow_seamless_boot_optimization = true;
1520 		DRM_INFO("Seamless boot condition check passed\n");
1521 	}
1522 
1523 	INIT_LIST_HEAD(&adev->dm.da_list);
1524 	/* Display Core create. */
1525 	adev->dm.dc = dc_create(&init_data);
1526 
1527 	if (adev->dm.dc) {
1528 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1529 	} else {
1530 		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1531 		goto error;
1532 	}
1533 
1534 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1535 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1536 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1537 	}
1538 
1539 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1540 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1541 	if (dm_should_disable_stutter(adev->pdev))
1542 		adev->dm.dc->debug.disable_stutter = true;
1543 
1544 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1545 		adev->dm.dc->debug.disable_stutter = true;
1546 
1547 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1548 		adev->dm.dc->debug.disable_dsc = true;
1549 		adev->dm.dc->debug.disable_dsc_edp = true;
1550 	}
1551 
1552 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1553 		adev->dm.dc->debug.disable_clock_gate = true;
1554 
1555 	r = dm_dmub_hw_init(adev);
1556 	if (r) {
1557 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1558 		goto error;
1559 	}
1560 
1561 	dc_hardware_init(adev->dm.dc);
1562 
1563 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1564 	if (!adev->dm.hpd_rx_offload_wq) {
1565 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1566 		goto error;
1567 	}
1568 
1569 #if defined(CONFIG_DRM_AMD_DC_DCN)
1570 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1571 		struct dc_phy_addr_space_config pa_config;
1572 
1573 		mmhub_read_system_context(adev, &pa_config);
1574 
1575 		// Call the DC init_memory func
1576 		dc_setup_system_context(adev->dm.dc, &pa_config);
1577 	}
1578 #endif
1579 
1580 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1581 	if (!adev->dm.freesync_module) {
1582 		DRM_ERROR(
1583 		"amdgpu: failed to initialize freesync_module.\n");
1584 	} else
1585 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1586 				adev->dm.freesync_module);
1587 
1588 	amdgpu_dm_init_color_mod();
1589 
1590 #if defined(CONFIG_DRM_AMD_DC_DCN)
1591 	if (adev->dm.dc->caps.max_links > 0) {
1592 		adev->dm.vblank_control_workqueue =
1593 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1594 		if (!adev->dm.vblank_control_workqueue)
1595 			DRM_ERROR("amdgpu: failed to initialize vblank_control_workqueue.\n");
1596 	}
1597 #endif
1598 
1599 #ifdef CONFIG_DRM_AMD_DC_HDCP
1600 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1601 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1602 
1603 		if (!adev->dm.hdcp_workqueue)
1604 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1605 		else
1606 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1607 
1608 		dc_init_callbacks(adev->dm.dc, &init_params);
1609 	}
1610 #endif
1611 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1612 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1613 #endif
1614 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1615 		init_completion(&adev->dm.dmub_aux_transfer_done);
1616 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1617 		if (!adev->dm.dmub_notify) {
1618 			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1619 			goto error;
1620 		}
1621 
1622 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1623 		if (!adev->dm.delayed_hpd_wq) {
1624 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1625 			goto error;
1626 		}
1627 
1628 		amdgpu_dm_outbox_init(adev);
1629 #if defined(CONFIG_DRM_AMD_DC_DCN)
1630 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1631 			dmub_aux_setconfig_callback, false)) {
1632 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1633 			goto error;
1634 		}
1635 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1636 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1637 			goto error;
1638 		}
1639 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1640 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1641 			goto error;
1642 		}
1643 #endif /* CONFIG_DRM_AMD_DC_DCN */
1644 	}
1645 
1646 	if (amdgpu_dm_initialize_drm_device(adev)) {
1647 		DRM_ERROR(
1648 		"amdgpu: failed to initialize sw for display support.\n");
1649 		goto error;
1650 	}
1651 
1652 	/* create fake encoders for MST */
1653 	dm_dp_create_fake_mst_encoders(adev);
1654 
1655 	/* TODO: Add_display_info? */
1656 
1657 	/* TODO use dynamic cursor width */
1658 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1659 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1660 
1661 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank support.\n");
1664 		goto error;
1665 	}
1666 
1667 
1668 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1669 
1670 	return 0;
1671 error:
1672 	amdgpu_dm_fini(adev);
1673 
1674 	return -EINVAL;
1675 }
1676 
1677 static int amdgpu_dm_early_fini(void *handle)
1678 {
1679 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1680 
1681 	amdgpu_dm_audio_fini(adev);
1682 
1683 	return 0;
1684 }
1685 
1686 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1687 {
1688 	int i;
1689 
1690 #if defined(CONFIG_DRM_AMD_DC_DCN)
1691 	if (adev->dm.vblank_control_workqueue) {
1692 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1693 		adev->dm.vblank_control_workqueue = NULL;
1694 	}
1695 #endif
1696 
1697 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1698 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1699 	}
1700 
1701 	amdgpu_dm_destroy_drm_device(&adev->dm);
1702 
1703 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1704 	if (adev->dm.crc_rd_wrk) {
1705 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1706 		kfree(adev->dm.crc_rd_wrk);
1707 		adev->dm.crc_rd_wrk = NULL;
1708 	}
1709 #endif
1710 #ifdef CONFIG_DRM_AMD_DC_HDCP
1711 	if (adev->dm.hdcp_workqueue) {
1712 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1713 		adev->dm.hdcp_workqueue = NULL;
1714 	}
1715 
1716 	if (adev->dm.dc)
1717 		dc_deinit_callbacks(adev->dm.dc);
1718 #endif
1719 
1720 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1721 
1722 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1723 		kfree(adev->dm.dmub_notify);
1724 		adev->dm.dmub_notify = NULL;
1725 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1726 		adev->dm.delayed_hpd_wq = NULL;
1727 	}
1728 
1729 	if (adev->dm.dmub_bo)
1730 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1731 				      &adev->dm.dmub_bo_gpu_addr,
1732 				      &adev->dm.dmub_bo_cpu_addr);
1733 
1734 	if (adev->dm.hpd_rx_offload_wq) {
1735 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1736 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1737 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1738 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1739 			}
1740 		}
1741 
1742 		kfree(adev->dm.hpd_rx_offload_wq);
1743 		adev->dm.hpd_rx_offload_wq = NULL;
1744 	}
1745 
1746 	/* DC Destroy TODO: Replace destroy DAL */
1747 	if (adev->dm.dc)
1748 		dc_destroy(&adev->dm.dc);
1749 	/*
	 * TODO: pageflip, vblank interrupt
1751 	 *
1752 	 * amdgpu_dm_irq_fini(adev);
1753 	 */
1754 
1755 	if (adev->dm.cgs_device) {
1756 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1757 		adev->dm.cgs_device = NULL;
1758 	}
1759 	if (adev->dm.freesync_module) {
1760 		mod_freesync_destroy(adev->dm.freesync_module);
1761 		adev->dm.freesync_module = NULL;
1762 	}
1763 
1764 	mutex_destroy(&adev->dm.audio_lock);
1765 	mutex_destroy(&adev->dm.dc_lock);
1766 
1767 	return;
1768 }
1769 
1770 static int load_dmcu_fw(struct amdgpu_device *adev)
1771 {
1772 	const char *fw_name_dmcu = NULL;
1773 	int r;
1774 	const struct dmcu_firmware_header_v1_0 *hdr;
1775 
	switch (adev->asic_type) {
1777 #if defined(CONFIG_DRM_AMD_DC_SI)
1778 	case CHIP_TAHITI:
1779 	case CHIP_PITCAIRN:
1780 	case CHIP_VERDE:
1781 	case CHIP_OLAND:
1782 #endif
1783 	case CHIP_BONAIRE:
1784 	case CHIP_HAWAII:
1785 	case CHIP_KAVERI:
1786 	case CHIP_KABINI:
1787 	case CHIP_MULLINS:
1788 	case CHIP_TONGA:
1789 	case CHIP_FIJI:
1790 	case CHIP_CARRIZO:
1791 	case CHIP_STONEY:
1792 	case CHIP_POLARIS11:
1793 	case CHIP_POLARIS10:
1794 	case CHIP_POLARIS12:
1795 	case CHIP_VEGAM:
1796 	case CHIP_VEGA10:
1797 	case CHIP_VEGA12:
1798 	case CHIP_VEGA20:
1799 		return 0;
1800 	case CHIP_NAVI12:
1801 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1802 		break;
1803 	case CHIP_RAVEN:
1804 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1805 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1806 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1807 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1808 		else
1809 			return 0;
1810 		break;
1811 	default:
1812 		switch (adev->ip_versions[DCE_HWIP][0]) {
1813 		case IP_VERSION(2, 0, 2):
1814 		case IP_VERSION(2, 0, 3):
1815 		case IP_VERSION(2, 0, 0):
1816 		case IP_VERSION(2, 1, 0):
1817 		case IP_VERSION(3, 0, 0):
1818 		case IP_VERSION(3, 0, 2):
1819 		case IP_VERSION(3, 0, 3):
1820 		case IP_VERSION(3, 0, 1):
1821 		case IP_VERSION(3, 1, 2):
1822 		case IP_VERSION(3, 1, 3):
1823 		case IP_VERSION(3, 1, 5):
1824 		case IP_VERSION(3, 1, 6):
1825 			return 0;
1826 		default:
1827 			break;
1828 		}
1829 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1830 		return -EINVAL;
1831 	}
1832 
1833 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1834 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1835 		return 0;
1836 	}
1837 
1838 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1839 	if (r == -ENOENT) {
1840 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1841 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1842 		adev->dm.fw_dmcu = NULL;
1843 		return 0;
1844 	}
1845 	if (r) {
1846 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1847 			fw_name_dmcu);
1848 		return r;
1849 	}
1850 
1851 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1852 	if (r) {
1853 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1854 			fw_name_dmcu);
1855 		release_firmware(adev->dm.fw_dmcu);
1856 		adev->dm.fw_dmcu = NULL;
1857 		return r;
1858 	}
1859 
1860 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
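	/* The DMCU image is registered as two ucode regions: the ERAM payload
	 * (total ucode size minus the interrupt vector size) and the interrupt
	 * vector (INTV), each padded to PAGE_SIZE below.
	 */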
1861 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1862 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1863 	adev->firmware.fw_size +=
1864 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1865 
1866 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1867 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1868 	adev->firmware.fw_size +=
1869 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1870 
1871 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1872 
1873 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1874 
1875 	return 0;
1876 }
1877 
1878 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1879 {
1880 	struct amdgpu_device *adev = ctx;
1881 
1882 	return dm_read_reg(adev->dm.dc->ctx, address);
1883 }
1884 
1885 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1886 				     uint32_t value)
1887 {
1888 	struct amdgpu_device *adev = ctx;
1889 
1890 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1891 }
1892 
1893 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1894 {
1895 	struct dmub_srv_create_params create_params;
1896 	struct dmub_srv_region_params region_params;
1897 	struct dmub_srv_region_info region_info;
1898 	struct dmub_srv_fb_params fb_params;
1899 	struct dmub_srv_fb_info *fb_info;
1900 	struct dmub_srv *dmub_srv;
1901 	const struct dmcub_firmware_header_v1_0 *hdr;
1902 	const char *fw_name_dmub;
1903 	enum dmub_asic dmub_asic;
1904 	enum dmub_status status;
1905 	int r;
1906 
1907 	switch (adev->ip_versions[DCE_HWIP][0]) {
1908 	case IP_VERSION(2, 1, 0):
1909 		dmub_asic = DMUB_ASIC_DCN21;
1910 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1911 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1912 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1913 		break;
1914 	case IP_VERSION(3, 0, 0):
1915 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1916 			dmub_asic = DMUB_ASIC_DCN30;
1917 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1918 		} else {
1919 			dmub_asic = DMUB_ASIC_DCN30;
1920 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1921 		}
1922 		break;
1923 	case IP_VERSION(3, 0, 1):
1924 		dmub_asic = DMUB_ASIC_DCN301;
1925 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1926 		break;
1927 	case IP_VERSION(3, 0, 2):
1928 		dmub_asic = DMUB_ASIC_DCN302;
1929 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1930 		break;
1931 	case IP_VERSION(3, 0, 3):
1932 		dmub_asic = DMUB_ASIC_DCN303;
1933 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1934 		break;
1935 	case IP_VERSION(3, 1, 2):
1936 	case IP_VERSION(3, 1, 3):
1937 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1938 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1939 		break;
1940 	case IP_VERSION(3, 1, 5):
1941 		dmub_asic = DMUB_ASIC_DCN315;
1942 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1943 		break;
1944 	case IP_VERSION(3, 1, 6):
1945 		dmub_asic = DMUB_ASIC_DCN316;
1946 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1947 		break;
1948 	default:
1949 		/* ASIC doesn't support DMUB. */
1950 		return 0;
1951 	}
1952 
1953 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1954 	if (r) {
1955 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1956 		return 0;
1957 	}
1958 
1959 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1960 	if (r) {
1961 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1962 		return 0;
1963 	}
1964 
1965 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1966 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1967 
1968 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1969 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1970 			AMDGPU_UCODE_ID_DMCUB;
1971 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1972 			adev->dm.dmub_fw;
1973 		adev->firmware.fw_size +=
1974 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1975 
1976 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1977 			 adev->dm.dmcub_fw_version);
1978 	}
1979 
1980 
1981 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1982 	dmub_srv = adev->dm.dmub_srv;
1983 
1984 	if (!dmub_srv) {
1985 		DRM_ERROR("Failed to allocate DMUB service!\n");
1986 		return -ENOMEM;
1987 	}
1988 
1989 	memset(&create_params, 0, sizeof(create_params));
1990 	create_params.user_ctx = adev;
1991 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1992 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1993 	create_params.asic = dmub_asic;
1994 
1995 	/* Create the DMUB service. */
1996 	status = dmub_srv_create(dmub_srv, &create_params);
1997 	if (status != DMUB_STATUS_OK) {
1998 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1999 		return -EINVAL;
2000 	}
2001 
2002 	/* Calculate the size of all the regions for the DMUB service. */
2003 	memset(&region_params, 0, sizeof(region_params));
2004 
	/* Exclude the PSP header and footer wrapped around the firmware image
	 * from the instruction/constant region size.
	 */
	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2007 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2008 	region_params.vbios_size = adev->bios_size;
2009 	region_params.fw_bss_data = region_params.bss_data_size ?
2010 		adev->dm.dmub_fw->data +
2011 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2012 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2013 	region_params.fw_inst_const =
2014 		adev->dm.dmub_fw->data +
2015 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2016 		PSP_HEADER_BYTES;
2017 
2018 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2019 					   &region_info);
2020 
2021 	if (status != DMUB_STATUS_OK) {
2022 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2023 		return -EINVAL;
2024 	}
2025 
2026 	/*
2027 	 * Allocate a framebuffer based on the total size of all the regions.
2028 	 * TODO: Move this into GART.
2029 	 */
2030 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2031 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2032 				    &adev->dm.dmub_bo_gpu_addr,
2033 				    &adev->dm.dmub_bo_cpu_addr);
2034 	if (r)
2035 		return r;
2036 
2037 	/* Rebase the regions on the framebuffer address. */
2038 	memset(&fb_params, 0, sizeof(fb_params));
2039 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2040 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2041 	fb_params.region_info = &region_info;
2042 
2043 	adev->dm.dmub_fb_info =
2044 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2045 	fb_info = adev->dm.dmub_fb_info;
2046 
2047 	if (!fb_info) {
2048 		DRM_ERROR(
2049 			"Failed to allocate framebuffer info for DMUB service!\n");
2050 		return -ENOMEM;
2051 	}
2052 
2053 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2054 	if (status != DMUB_STATUS_OK) {
2055 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2056 		return -EINVAL;
2057 	}
2058 
2059 	return 0;
2060 }
2061 
2062 static int dm_sw_init(void *handle)
2063 {
2064 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2065 	int r;
2066 
2067 	r = dm_dmub_sw_init(adev);
2068 	if (r)
2069 		return r;
2070 
2071 	return load_dmcu_fw(adev);
2072 }
2073 
2074 static int dm_sw_fini(void *handle)
2075 {
2076 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2077 
2078 	kfree(adev->dm.dmub_fb_info);
2079 	adev->dm.dmub_fb_info = NULL;
2080 
2081 	if (adev->dm.dmub_srv) {
2082 		dmub_srv_destroy(adev->dm.dmub_srv);
2083 		adev->dm.dmub_srv = NULL;
2084 	}
2085 
2086 	release_firmware(adev->dm.dmub_fw);
2087 	adev->dm.dmub_fw = NULL;
2088 
2089 	release_firmware(adev->dm.fw_dmcu);
2090 	adev->dm.fw_dmcu = NULL;
2091 
2092 	return 0;
2093 }
2094 
2095 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2096 {
2097 	struct amdgpu_dm_connector *aconnector;
2098 	struct drm_connector *connector;
2099 	struct drm_connector_list_iter iter;
2100 	int ret = 0;
2101 
2102 	drm_connector_list_iter_begin(dev, &iter);
2103 	drm_for_each_connector_iter(connector, &iter) {
2104 		aconnector = to_amdgpu_dm_connector(connector);
2105 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2106 		    aconnector->mst_mgr.aux) {
2107 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2108 					 aconnector,
2109 					 aconnector->base.base.id);
2110 
2111 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2112 			if (ret < 0) {
2113 				DRM_ERROR("DM_MST: Failed to start MST\n");
2114 				aconnector->dc_link->type =
2115 					dc_connection_single;
2116 				break;
2117 			}
2118 		}
2119 	}
2120 	drm_connector_list_iter_end(&iter);
2121 
2122 	return ret;
2123 }
2124 
2125 static int dm_late_init(void *handle)
2126 {
2127 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2128 
2129 	struct dmcu_iram_parameters params;
2130 	unsigned int linear_lut[16];
2131 	int i;
2132 	struct dmcu *dmcu = NULL;
2133 
2134 	dmcu = adev->dm.dc->res_pool->dmcu;
2135 
	/* Build a 16-point linear (identity) backlight LUT spanning 0..0xFFFF. */
	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;
2138 
2139 	params.set = 0;
2140 	params.backlight_ramping_override = false;
2141 	params.backlight_ramping_start = 0xCCCC;
2142 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2143 	params.backlight_lut_array_size = 16;
2144 	params.backlight_lut_array = linear_lut;
2145 
	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
2149 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on dmcub,
	 * the dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
2154 	if (dmcu) {
2155 		if (!dmcu_load_iram(dmcu, params))
2156 			return -EINVAL;
2157 	} else if (adev->dm.dc->ctx->dmub_srv) {
2158 		struct dc_link *edp_links[MAX_NUM_EDP];
2159 		int edp_num;
2160 
2161 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2162 		for (i = 0; i < edp_num; i++) {
2163 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2164 				return -EINVAL;
2165 		}
2166 	}
2167 
2168 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2169 }
2170 
2171 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2172 {
2173 	struct amdgpu_dm_connector *aconnector;
2174 	struct drm_connector *connector;
2175 	struct drm_connector_list_iter iter;
2176 	struct drm_dp_mst_topology_mgr *mgr;
2177 	int ret;
2178 	bool need_hotplug = false;
2179 
2180 	drm_connector_list_iter_begin(dev, &iter);
2181 	drm_for_each_connector_iter(connector, &iter) {
2182 		aconnector = to_amdgpu_dm_connector(connector);
2183 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2184 		    aconnector->mst_port)
2185 			continue;
2186 
2187 		mgr = &aconnector->mst_mgr;
2188 
2189 		if (suspend) {
2190 			drm_dp_mst_topology_mgr_suspend(mgr);
2191 		} else {
2192 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2193 			if (ret < 0) {
2194 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2195 				need_hotplug = true;
2196 			}
2197 		}
2198 	}
2199 	drm_connector_list_iter_end(&iter);
2200 
2201 	if (need_hotplug)
2202 		drm_kms_helper_hotplug_event(dev);
2203 }
2204 
2205 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2206 {
2207 	int ret = 0;
2208 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed; the
	 * settings should be passed to smu during boot up and on resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
	 */
2239 	switch (adev->ip_versions[DCE_HWIP][0]) {
2240 	case IP_VERSION(2, 0, 2):
2241 	case IP_VERSION(2, 0, 0):
2242 		break;
2243 	default:
2244 		return 0;
2245 	}
2246 
2247 	ret = amdgpu_dpm_write_watermarks_table(adev);
2248 	if (ret) {
2249 		DRM_ERROR("Failed to update WMTABLE!\n");
2250 		return ret;
2251 	}
2252 
2253 	return 0;
2254 }
2255 
2256 /**
2257  * dm_hw_init() - Initialize DC device
2258  * @handle: The base driver device containing the amdgpu_dm device.
2259  *
2260  * Initialize the &struct amdgpu_display_manager device. This involves calling
2261  * the initializers of each DM component, then populating the struct with them.
2262  *
2263  * Although the function implies hardware initialization, both hardware and
2264  * software are initialized here. Splitting them out to their relevant init
2265  * hooks is a future TODO item.
2266  *
2267  * Some notable things that are initialized here:
2268  *
2269  * - Display Core, both software and hardware
2270  * - DC modules that we need (freesync and color management)
2271  * - DRM software states
2272  * - Interrupt sources and handlers
2273  * - Vblank support
2274  * - Debug FS entries, if enabled
2275  */
2276 static int dm_hw_init(void *handle)
2277 {
2278 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2279 	/* Create DAL display manager */
2280 	amdgpu_dm_init(adev);
2281 	amdgpu_dm_hpd_init(adev);
2282 
2283 	return 0;
2284 }
2285 
2286 /**
2287  * dm_hw_fini() - Teardown DC device
2288  * @handle: The base driver device containing the amdgpu_dm device.
2289  *
2290  * Teardown components within &struct amdgpu_display_manager that require
2291  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2292  * were loaded. Also flush IRQ workqueues and disable them.
2293  */
2294 static int dm_hw_fini(void *handle)
2295 {
2296 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2297 
2298 	amdgpu_dm_hpd_fini(adev);
2299 
2300 	amdgpu_dm_irq_fini(adev);
2301 	amdgpu_dm_fini(adev);
2302 	return 0;
2303 }
2304 
2305 
2306 static int dm_enable_vblank(struct drm_crtc *crtc);
2307 static void dm_disable_vblank(struct drm_crtc *crtc);
2308 
2309 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2310 				 struct dc_state *state, bool enable)
2311 {
2312 	enum dc_irq_source irq_source;
2313 	struct amdgpu_crtc *acrtc;
2314 	int rc = -EBUSY;
2315 	int i = 0;
2316 
2317 	for (i = 0; i < state->stream_count; i++) {
2318 		acrtc = get_crtc_by_otg_inst(
2319 				adev, state->stream_status[i].primary_otg_inst);
2320 
2321 		if (acrtc && state->stream_status[i].plane_count != 0) {
2322 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2323 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2324 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2325 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2326 			if (rc)
2327 				DRM_WARN("Failed to %s pflip interrupts\n",
2328 					 enable ? "enable" : "disable");
2329 
2330 			if (enable) {
2331 				rc = dm_enable_vblank(&acrtc->base);
2332 				if (rc)
2333 					DRM_WARN("Failed to enable vblank interrupts\n");
2334 			} else {
2335 				dm_disable_vblank(&acrtc->base);
2336 			}
2337 
2338 		}
2339 	}
2340 
2341 }
2342 
2343 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2344 {
2345 	struct dc_state *context = NULL;
2346 	enum dc_status res = DC_ERROR_UNEXPECTED;
2347 	int i;
2348 	struct dc_stream_state *del_streams[MAX_PIPES];
2349 	int del_streams_count = 0;
2350 
2351 	memset(del_streams, 0, sizeof(del_streams));
2352 
2353 	context = dc_create_state(dc);
2354 	if (context == NULL)
2355 		goto context_alloc_fail;
2356 
2357 	dc_resource_state_copy_construct_current(dc, context);
2358 
2359 	/* First remove from context all streams */
2360 	for (i = 0; i < context->stream_count; i++) {
2361 		struct dc_stream_state *stream = context->streams[i];
2362 
2363 		del_streams[del_streams_count++] = stream;
2364 	}
2365 
2366 	/* Remove all planes for removed streams and then remove the streams */
2367 	for (i = 0; i < del_streams_count; i++) {
2368 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2369 			res = DC_FAIL_DETACH_SURFACES;
2370 			goto fail;
2371 		}
2372 
2373 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2374 		if (res != DC_OK)
2375 			goto fail;
2376 	}
2377 
2378 	res = dc_commit_state(dc, context);
2379 
2380 fail:
2381 	dc_release_state(context);
2382 
2383 context_alloc_fail:
2384 	return res;
2385 }
2386 
2387 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2388 {
2389 	int i;
2390 
2391 	if (dm->hpd_rx_offload_wq) {
2392 		for (i = 0; i < dm->dc->caps.max_links; i++)
2393 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2394 	}
2395 }
2396 
2397 static int dm_suspend(void *handle)
2398 {
2399 	struct amdgpu_device *adev = handle;
2400 	struct amdgpu_display_manager *dm = &adev->dm;
2401 	int ret = 0;
2402 
2403 	if (amdgpu_in_reset(adev)) {
2404 		mutex_lock(&dm->dc_lock);
2405 
2406 #if defined(CONFIG_DRM_AMD_DC_DCN)
2407 		dc_allow_idle_optimizations(adev->dm.dc, false);
2408 #endif
2409 
2410 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2411 
2412 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2413 
2414 		amdgpu_dm_commit_zero_streams(dm->dc);
2415 
2416 		amdgpu_dm_irq_suspend(adev);
2417 
2418 		hpd_rx_irq_work_suspend(dm);
2419 
2420 		return ret;
2421 	}
2422 
2423 	WARN_ON(adev->dm.cached_state);
2424 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2425 
2426 	s3_handle_mst(adev_to_drm(adev), true);
2427 
2428 	amdgpu_dm_irq_suspend(adev);
2429 
2430 	hpd_rx_irq_work_suspend(dm);
2431 
2432 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2433 
2434 	return 0;
2435 }
2436 
2437 struct amdgpu_dm_connector *
2438 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2439 					     struct drm_crtc *crtc)
2440 {
2441 	uint32_t i;
2442 	struct drm_connector_state *new_con_state;
2443 	struct drm_connector *connector;
2444 	struct drm_crtc *crtc_from_state;
2445 
2446 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2447 		crtc_from_state = new_con_state->crtc;
2448 
2449 		if (crtc_from_state == crtc)
2450 			return to_amdgpu_dm_connector(connector);
2451 	}
2452 
2453 	return NULL;
2454 }
2455 
2456 static void emulated_link_detect(struct dc_link *link)
2457 {
2458 	struct dc_sink_init_data sink_init_data = { 0 };
2459 	struct display_sink_capability sink_caps = { 0 };
2460 	enum dc_edid_status edid_status;
2461 	struct dc_context *dc_ctx = link->ctx;
2462 	struct dc_sink *sink = NULL;
2463 	struct dc_sink *prev_sink = NULL;
2464 
2465 	link->type = dc_connection_none;
2466 	prev_sink = link->local_sink;
2467 
2468 	if (prev_sink)
2469 		dc_sink_release(prev_sink);
2470 
2471 	switch (link->connector_signal) {
2472 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2473 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2474 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2475 		break;
2476 	}
2477 
2478 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2479 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2480 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2481 		break;
2482 	}
2483 
2484 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2485 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2486 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2487 		break;
2488 	}
2489 
2490 	case SIGNAL_TYPE_LVDS: {
2491 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2492 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2493 		break;
2494 	}
2495 
2496 	case SIGNAL_TYPE_EDP: {
2497 		sink_caps.transaction_type =
2498 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2499 		sink_caps.signal = SIGNAL_TYPE_EDP;
2500 		break;
2501 	}
2502 
2503 	case SIGNAL_TYPE_DISPLAY_PORT: {
2504 		sink_caps.transaction_type =
2505 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2506 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2507 		break;
2508 	}
2509 
2510 	default:
2511 		DC_ERROR("Invalid connector type! signal:%d\n",
2512 			link->connector_signal);
2513 		return;
2514 	}
2515 
2516 	sink_init_data.link = link;
2517 	sink_init_data.sink_signal = sink_caps.signal;
2518 
2519 	sink = dc_sink_create(&sink_init_data);
2520 	if (!sink) {
2521 		DC_ERROR("Failed to create sink!\n");
2522 		return;
2523 	}
2524 
2525 	/* dc_sink_create returns a new reference */
2526 	link->local_sink = sink;
2527 
2528 	edid_status = dm_helpers_read_local_edid(
2529 			link->ctx,
2530 			link,
2531 			sink);
2532 
2533 	if (edid_status != EDID_OK)
2534 		DC_ERROR("Failed to read EDID");
2535 
2536 }
2537 
2538 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2539 				     struct amdgpu_display_manager *dm)
2540 {
2541 	struct {
2542 		struct dc_surface_update surface_updates[MAX_SURFACES];
2543 		struct dc_plane_info plane_infos[MAX_SURFACES];
2544 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2545 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2546 		struct dc_stream_update stream_update;
	} *bundle;
2548 	int k, m;
2549 
2550 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2551 
2552 	if (!bundle) {
2553 		dm_error("Failed to allocate update bundle\n");
2554 		goto cleanup;
2555 	}
2556 
2557 	for (k = 0; k < dc_state->stream_count; k++) {
2558 		bundle->stream_update.stream = dc_state->streams[k];
2559 
2560 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2561 			bundle->surface_updates[m].surface =
2562 				dc_state->stream_status->plane_states[m];
2563 			bundle->surface_updates[m].surface->force_full_update =
2564 				true;
2565 		}
2566 		dc_commit_updates_for_stream(
2567 			dm->dc, bundle->surface_updates,
2568 			dc_state->stream_status->plane_count,
2569 			dc_state->streams[k], &bundle->stream_update, dc_state);
2570 	}
2571 
2572 cleanup:
2573 	kfree(bundle);
2574 
2575 	return;
2576 }
2577 
2578 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2579 {
2580 	struct dc_stream_state *stream_state;
2581 	struct amdgpu_dm_connector *aconnector = link->priv;
2582 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2583 	struct dc_stream_update stream_update;
2584 	bool dpms_off = true;
2585 
2586 	memset(&stream_update, 0, sizeof(stream_update));
2587 	stream_update.dpms_off = &dpms_off;
2588 
2589 	mutex_lock(&adev->dm.dc_lock);
2590 	stream_state = dc_stream_find_from_link(link);
2591 
2592 	if (stream_state == NULL) {
2593 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2594 		mutex_unlock(&adev->dm.dc_lock);
2595 		return;
2596 	}
2597 
2598 	stream_update.stream = stream_state;
2599 	acrtc_state->force_dpms_off = true;
2600 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2601 				     stream_state, &stream_update,
2602 				     stream_state->ctx->dc->current_state);
2603 	mutex_unlock(&adev->dm.dc_lock);
2604 }
2605 
2606 static int dm_resume(void *handle)
2607 {
2608 	struct amdgpu_device *adev = handle;
2609 	struct drm_device *ddev = adev_to_drm(adev);
2610 	struct amdgpu_display_manager *dm = &adev->dm;
2611 	struct amdgpu_dm_connector *aconnector;
2612 	struct drm_connector *connector;
2613 	struct drm_connector_list_iter iter;
2614 	struct drm_crtc *crtc;
2615 	struct drm_crtc_state *new_crtc_state;
2616 	struct dm_crtc_state *dm_new_crtc_state;
2617 	struct drm_plane *plane;
2618 	struct drm_plane_state *new_plane_state;
2619 	struct dm_plane_state *dm_new_plane_state;
2620 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2621 	enum dc_connection_type new_connection_type = dc_connection_none;
2622 	struct dc_state *dc_state;
2623 	int i, r, j;
2624 
2625 	if (amdgpu_in_reset(adev)) {
2626 		dc_state = dm->cached_dc_state;
2627 
2628 		/*
2629 		 * The dc->current_state is backed up into dm->cached_dc_state
2630 		 * before we commit 0 streams.
2631 		 *
2632 		 * DC will clear link encoder assignments on the real state
2633 		 * but the changes won't propagate over to the copy we made
2634 		 * before the 0 streams commit.
2635 		 *
2636 		 * DC expects that link encoder assignments are *not* valid
2637 		 * when committing a state, so as a workaround we can copy
2638 		 * off of the current state.
2639 		 *
2640 		 * We lose the previous assignments, but we had already
		 * committed 0 streams anyway.
2642 		 */
2643 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2644 
2645 		if (dc_enable_dmub_notifications(adev->dm.dc))
2646 			amdgpu_dm_outbox_init(adev);
2647 
2648 		r = dm_dmub_hw_init(adev);
2649 		if (r)
2650 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2651 
2652 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2653 		dc_resume(dm->dc);
2654 
2655 		amdgpu_dm_irq_resume_early(adev);
2656 
2657 		for (i = 0; i < dc_state->stream_count; i++) {
2658 			dc_state->streams[i]->mode_changed = true;
2659 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2660 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2661 					= 0xffffffff;
2662 			}
2663 		}
2664 
2665 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2666 
2667 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2668 
2669 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2670 
2671 		dc_release_state(dm->cached_dc_state);
2672 		dm->cached_dc_state = NULL;
2673 
2674 		amdgpu_dm_irq_resume_late(adev);
2675 
2676 		mutex_unlock(&dm->dc_lock);
2677 
2678 		return 0;
2679 	}
2680 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2681 	dc_release_state(dm_state->context);
2682 	dm_state->context = dc_create_state(dm->dc);
2683 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2684 	dc_resource_state_construct(dm->dc, dm_state->context);
2685 
2686 	/* Re-enable outbox interrupts for DPIA. */
2687 	if (dc_enable_dmub_notifications(adev->dm.dc))
2688 		amdgpu_dm_outbox_init(adev);
2689 
2690 	/* Before powering on DC we need to re-initialize DMUB. */
2691 	dm_dmub_hw_resume(adev);
2692 
2693 	/* power on hardware */
2694 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2695 
2696 	/* program HPD filter */
2697 	dc_resume(dm->dc);
2698 
2699 	/*
2700 	 * early enable HPD Rx IRQ, should be done before set mode as short
2701 	 * pulse interrupts are used for MST
2702 	 */
2703 	amdgpu_dm_irq_resume_early(adev);
2704 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2706 	s3_handle_mst(ddev, false);
2707 
	/* Do detection */
2709 	drm_connector_list_iter_begin(ddev, &iter);
2710 	drm_for_each_connector_iter(connector, &iter) {
2711 		aconnector = to_amdgpu_dm_connector(connector);
2712 
2713 		/*
2714 		 * this is the case when traversing through already created
2715 		 * MST connectors, should be skipped
2716 		 */
2717 		if (aconnector->dc_link &&
2718 		    aconnector->dc_link->type == dc_connection_mst_branch)
2719 			continue;
2720 
2721 		mutex_lock(&aconnector->hpd_lock);
2722 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2723 			DRM_ERROR("KMS: Failed to detect connector\n");
2724 
2725 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2726 			emulated_link_detect(aconnector->dc_link);
2727 		else
2728 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2729 
2730 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2731 			aconnector->fake_enable = false;
2732 
2733 		if (aconnector->dc_sink)
2734 			dc_sink_release(aconnector->dc_sink);
2735 		aconnector->dc_sink = NULL;
2736 		amdgpu_dm_update_connector_after_detect(aconnector);
2737 		mutex_unlock(&aconnector->hpd_lock);
2738 	}
2739 	drm_connector_list_iter_end(&iter);
2740 
2741 	/* Force mode set in atomic commit */
2742 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2743 		new_crtc_state->active_changed = true;
2744 
2745 	/*
2746 	 * atomic_check is expected to create the dc states. We need to release
2747 	 * them here, since they were duplicated as part of the suspend
2748 	 * procedure.
2749 	 */
2750 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2751 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2752 		if (dm_new_crtc_state->stream) {
2753 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2754 			dc_stream_release(dm_new_crtc_state->stream);
2755 			dm_new_crtc_state->stream = NULL;
2756 		}
2757 	}
2758 
2759 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2760 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2761 		if (dm_new_plane_state->dc_state) {
2762 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2763 			dc_plane_state_release(dm_new_plane_state->dc_state);
2764 			dm_new_plane_state->dc_state = NULL;
2765 		}
2766 	}
2767 
2768 	drm_atomic_helper_resume(ddev, dm->cached_state);
2769 
2770 	dm->cached_state = NULL;
2771 
2772 	amdgpu_dm_irq_resume_late(adev);
2773 
2774 	amdgpu_dm_smu_write_watermarks_table(adev);
2775 
2776 	return 0;
2777 }
2778 
2779 /**
2780  * DOC: DM Lifecycle
2781  *
2782  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2783  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2784  * the base driver's device list to be initialized and torn down accordingly.
2785  *
2786  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2787  */
2788 
2789 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2790 	.name = "dm",
2791 	.early_init = dm_early_init,
2792 	.late_init = dm_late_init,
2793 	.sw_init = dm_sw_init,
2794 	.sw_fini = dm_sw_fini,
2795 	.early_fini = amdgpu_dm_early_fini,
2796 	.hw_init = dm_hw_init,
2797 	.hw_fini = dm_hw_fini,
2798 	.suspend = dm_suspend,
2799 	.resume = dm_resume,
2800 	.is_idle = dm_is_idle,
2801 	.wait_for_idle = dm_wait_for_idle,
2802 	.check_soft_reset = dm_check_soft_reset,
2803 	.soft_reset = dm_soft_reset,
2804 	.set_clockgating_state = dm_set_clockgating_state,
2805 	.set_powergating_state = dm_set_powergating_state,
2806 };
2807 
2808 const struct amdgpu_ip_block_version dm_ip_block =
2809 {
2810 	.type = AMD_IP_BLOCK_TYPE_DCE,
2811 	.major = 1,
2812 	.minor = 0,
2813 	.rev = 0,
2814 	.funcs = &amdgpu_dm_funcs,
2815 };
2816 
2817 
2818 /**
2819  * DOC: atomic
2820  *
2821  * *WIP*
2822  */
2823 
2824 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2825 	.fb_create = amdgpu_display_user_framebuffer_create,
2826 	.get_format_info = amd_get_format_info,
2827 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2828 	.atomic_check = amdgpu_dm_atomic_check,
2829 	.atomic_commit = drm_atomic_helper_commit,
2830 };
2831 
2832 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2833 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2834 };
2835 
2836 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2837 {
2838 	u32 max_cll, min_cll, max, min, q, r;
2839 	struct amdgpu_dm_backlight_caps *caps;
2840 	struct amdgpu_display_manager *dm;
2841 	struct drm_connector *conn_base;
2842 	struct amdgpu_device *adev;
2843 	struct dc_link *link = NULL;
2844 	static const u8 pre_computed_values[] = {
2845 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2846 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2847 	int i;
2848 
2849 	if (!aconnector || !aconnector->dc_link)
2850 		return;
2851 
2852 	link = aconnector->dc_link;
2853 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2854 		return;
2855 
2856 	conn_base = &aconnector->base;
2857 	adev = drm_to_adev(conn_base->dev);
2858 	dm = &adev->dm;
2859 	for (i = 0; i < dm->num_of_edps; i++) {
2860 		if (link == dm->backlight_link[i])
2861 			break;
2862 	}
2863 	if (i >= dm->num_of_edps)
2864 		return;
2865 	caps = &dm->backlight_caps[i];
2866 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2867 	caps->aux_support = false;
2868 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2869 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2870 
2871 	if (caps->ext_caps->bits.oled == 1 /*||
2872 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2873 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2874 		caps->aux_support = true;
2875 
2876 	if (amdgpu_backlight == 0)
2877 		caps->aux_support = false;
2878 	else if (amdgpu_backlight == 1)
2879 		caps->aux_support = true;
2880 
	/* From the specification (CTA-861-G), the maximum luminance is
	 * calculated as:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Calculating this expression directly would need floating-point
	 * precision; to avoid that, we take advantage of the fact that CV is
	 * divided by a constant. By Euclid's division algorithm, CV can be
	 * written as CV = 32*q + r. Substituting CV in the luminance
	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
	 * pre-compute the values of 50*2**(r/32). They were generated with
	 * the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results can be verified against pre_computed_values.
	 */
2896 	q = max_cll >> 5;
2897 	r = max_cll % 32;
2898 	max = (1 << q) * pre_computed_values[r];
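	/* Illustrative check (hypothetical value): max_cll = 100 gives
	 * q = 3, r = 4, so max = (1 << 3) * pre_computed_values[4]
	 * = 8 * 55 = 440, close to the exact 50*2**(100/32) ~= 436.
	 */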
2899 
2900 	// min luminance: maxLum * (CV/255)^2 / 100
2901 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2902 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2903 
2904 	caps->aux_max_input_signal = max;
2905 	caps->aux_min_input_signal = min;
2906 }
2907 
2908 void amdgpu_dm_update_connector_after_detect(
2909 		struct amdgpu_dm_connector *aconnector)
2910 {
2911 	struct drm_connector *connector = &aconnector->base;
2912 	struct drm_device *dev = connector->dev;
2913 	struct dc_sink *sink;
2914 
2915 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2917 		return;
2918 
2919 	sink = aconnector->dc_link->local_sink;
2920 	if (sink)
2921 		dc_sink_retain(sink);
2922 
2923 	/*
	 * An EDID-managed connector gets its first update only in the mode_valid
	 * hook, and then the connector sink is set to either a fake or a physical
	 * sink depending on the link status.
2926 	 * Skip if already done during boot.
2927 	 */
2928 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2929 			&& aconnector->dc_em_sink) {
2930 
2931 		/*
		 * For S3 resume with headless use em_sink to fake the stream,
2933 		 * because on resume connector->sink is set to NULL
2934 		 */
2935 		mutex_lock(&dev->mode_config.mutex);
2936 
2937 		if (sink) {
2938 			if (aconnector->dc_sink) {
2939 				amdgpu_dm_update_freesync_caps(connector, NULL);
2940 				/*
				 * retain and release below are used to
				 * bump up the refcount for the sink because the link no
				 * longer points to it after disconnect, so on the next
				 * crtc-to-connector reshuffle by UMD we would otherwise
				 * get an unwanted dc_sink release
2945 				 */
2946 				dc_sink_release(aconnector->dc_sink);
2947 			}
2948 			aconnector->dc_sink = sink;
2949 			dc_sink_retain(aconnector->dc_sink);
2950 			amdgpu_dm_update_freesync_caps(connector,
2951 					aconnector->edid);
2952 		} else {
2953 			amdgpu_dm_update_freesync_caps(connector, NULL);
2954 			if (!aconnector->dc_sink) {
2955 				aconnector->dc_sink = aconnector->dc_em_sink;
2956 				dc_sink_retain(aconnector->dc_sink);
2957 			}
2958 		}
2959 
2960 		mutex_unlock(&dev->mode_config.mutex);
2961 
2962 		if (sink)
2963 			dc_sink_release(sink);
2964 		return;
2965 	}
2966 
2967 	/*
2968 	 * TODO: temporary guard to look for proper fix
2969 	 * if this sink is MST sink, we should not do anything
2970 	 */
2971 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2972 		dc_sink_release(sink);
2973 		return;
2974 	}
2975 
2976 	if (aconnector->dc_sink == sink) {
2977 		/*
2978 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2979 		 * Do nothing!!
2980 		 */
2981 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2982 				aconnector->connector_id);
2983 		if (sink)
2984 			dc_sink_release(sink);
2985 		return;
2986 	}
2987 
2988 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2989 		aconnector->connector_id, aconnector->dc_sink, sink);
2990 
2991 	mutex_lock(&dev->mode_config.mutex);
2992 
2993 	/*
2994 	 * 1. Update status of the drm connector
2995 	 * 2. Send an event and let userspace tell us what to do
2996 	 */
2997 	if (sink) {
2998 		/*
2999 		 * TODO: check if we still need the S3 mode update workaround.
3000 		 * If yes, put it here.
3001 		 */
3002 		if (aconnector->dc_sink) {
3003 			amdgpu_dm_update_freesync_caps(connector, NULL);
3004 			dc_sink_release(aconnector->dc_sink);
3005 		}
3006 
3007 		aconnector->dc_sink = sink;
3008 		dc_sink_retain(aconnector->dc_sink);
3009 		if (sink->dc_edid.length == 0) {
3010 			aconnector->edid = NULL;
3011 			if (aconnector->dc_link->aux_mode) {
3012 				drm_dp_cec_unset_edid(
3013 					&aconnector->dm_dp_aux.aux);
3014 			}
3015 		} else {
3016 			aconnector->edid =
3017 				(struct edid *)sink->dc_edid.raw_edid;
3018 
3019 			if (aconnector->dc_link->aux_mode)
3020 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3021 						    aconnector->edid);
3022 		}
3023 
3024 		drm_connector_update_edid_property(connector, aconnector->edid);
3025 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3026 		update_connector_ext_caps(aconnector);
3027 	} else {
3028 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3029 		amdgpu_dm_update_freesync_caps(connector, NULL);
3030 		drm_connector_update_edid_property(connector, NULL);
3031 		aconnector->num_modes = 0;
3032 		dc_sink_release(aconnector->dc_sink);
3033 		aconnector->dc_sink = NULL;
3034 		aconnector->edid = NULL;
3035 #ifdef CONFIG_DRM_AMD_DC_HDCP
3036 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3037 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3038 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3039 #endif
3040 	}
3041 
3042 	mutex_unlock(&dev->mode_config.mutex);
3043 
3044 	update_subconnector_property(aconnector);
3045 
3046 	if (sink)
3047 		dc_sink_release(sink);
3048 }
3049 
3050 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3051 {
3052 	struct drm_connector *connector = &aconnector->base;
3053 	struct drm_device *dev = connector->dev;
3054 	enum dc_connection_type new_connection_type = dc_connection_none;
3055 	struct amdgpu_device *adev = drm_to_adev(dev);
3056 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3057 	struct dm_crtc_state *dm_crtc_state = NULL;
3058 
3059 	if (adev->dm.disable_hpd_irq)
3060 		return;
3061 
3062 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3063 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3064 					dm_con_state->base.state,
3065 					dm_con_state->base.crtc));
3066 	/*
	 * In case of failure or MST there is no need to update the connector
	 * status or notify the OS since, for the MST case, MST does this in
	 * its own context.
3069 	 */
3070 	mutex_lock(&aconnector->hpd_lock);
3071 
3072 #ifdef CONFIG_DRM_AMD_DC_HDCP
3073 	if (adev->dm.hdcp_workqueue) {
3074 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3075 		dm_con_state->update_hdcp = true;
3076 	}
3077 #endif
3078 	if (aconnector->fake_enable)
3079 		aconnector->fake_enable = false;
3080 
3081 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3082 		DRM_ERROR("KMS: Failed to detect connector\n");
3083 
3084 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3085 		emulated_link_detect(aconnector->dc_link);
3086 
3087 		drm_modeset_lock_all(dev);
3088 		dm_restore_drm_connector_state(dev, connector);
3089 		drm_modeset_unlock_all(dev);
3090 
3091 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3092 			drm_kms_helper_connector_hotplug_event(connector);
3093 
3094 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3095 		if (new_connection_type == dc_connection_none &&
3096 		    aconnector->dc_link->type == dc_connection_none &&
3097 		    dm_crtc_state)
3098 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3099 
3100 		amdgpu_dm_update_connector_after_detect(aconnector);
3101 
3102 		drm_modeset_lock_all(dev);
3103 		dm_restore_drm_connector_state(dev, connector);
3104 		drm_modeset_unlock_all(dev);
3105 
3106 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3107 			drm_kms_helper_connector_hotplug_event(connector);
3108 	}
3109 	mutex_unlock(&aconnector->hpd_lock);
3110 
3111 }
3112 
3113 static void handle_hpd_irq(void *param)
3114 {
3115 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3116 
3117 	handle_hpd_irq_helper(aconnector);
3118 
3119 }
3120 
3121 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3122 {
3123 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3124 	uint8_t dret;
3125 	bool new_irq_handled = false;
3126 	int dpcd_addr;
3127 	int dpcd_bytes_to_read;
3128 
3129 	const int max_process_count = 30;
3130 	int process_count = 0;
3131 
3132 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3133 
3134 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3135 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3136 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3137 		dpcd_addr = DP_SINK_COUNT;
3138 	} else {
3139 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3140 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3141 		dpcd_addr = DP_SINK_COUNT_ESI;
3142 	}
3143 
3144 	dret = drm_dp_dpcd_read(
3145 		&aconnector->dm_dp_aux.aux,
3146 		dpcd_addr,
3147 		esi,
3148 		dpcd_bytes_to_read);
3149 
3150 	while (dret == dpcd_bytes_to_read &&
3151 		process_count < max_process_count) {
3152 		uint8_t retry;
3153 		dret = 0;
3154 
3155 		process_count++;
3156 
3157 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3158 		/* handle HPD short pulse irq */
3159 		if (aconnector->mst_mgr.mst_state)
3160 			drm_dp_mst_hpd_irq(
3161 				&aconnector->mst_mgr,
3162 				esi,
3163 				&new_irq_handled);
3164 
3165 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3167 			const int ack_dpcd_bytes_to_write =
3168 				dpcd_bytes_to_read - 1;
3169 
3170 			for (retry = 0; retry < 3; retry++) {
3171 				uint8_t wret;
3172 
3173 				wret = drm_dp_dpcd_write(
3174 					&aconnector->dm_dp_aux.aux,
3175 					dpcd_addr + 1,
3176 					&esi[1],
3177 					ack_dpcd_bytes_to_write);
3178 				if (wret == ack_dpcd_bytes_to_write)
3179 					break;
3180 			}
3181 
3182 			/* check if there is new irq to be handled */
3183 			dret = drm_dp_dpcd_read(
3184 				&aconnector->dm_dp_aux.aux,
3185 				dpcd_addr,
3186 				esi,
3187 				dpcd_bytes_to_read);
3188 
3189 			new_irq_handled = false;
3190 		} else {
3191 			break;
3192 		}
3193 	}
3194 
3195 	if (process_count == max_process_count)
3196 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3197 }
3198 
3199 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3200 							union hpd_irq_data hpd_irq_data)
3201 {
3202 	struct hpd_rx_irq_offload_work *offload_work =
3203 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3204 
3205 	if (!offload_work) {
3206 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3207 		return;
3208 	}
3209 
3210 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3211 	offload_work->data = hpd_irq_data;
3212 	offload_work->offload_wq = offload_wq;
3213 
3214 	queue_work(offload_wq->wq, &offload_work->work);
3215 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3216 }
3217 
3218 static void handle_hpd_rx_irq(void *param)
3219 {
3220 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3221 	struct drm_connector *connector = &aconnector->base;
3222 	struct drm_device *dev = connector->dev;
3223 	struct dc_link *dc_link = aconnector->dc_link;
3224 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3225 	bool result = false;
3226 	enum dc_connection_type new_connection_type = dc_connection_none;
3227 	struct amdgpu_device *adev = drm_to_adev(dev);
3228 	union hpd_irq_data hpd_irq_data;
3229 	bool link_loss = false;
3230 	bool has_left_work = false;
3231 	int idx = aconnector->base.index;
3232 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3233 
3234 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3235 
3236 	if (adev->dm.disable_hpd_irq)
3237 		return;
3238 
3239 	/*
	 * TODO: Temporarily add a mutex so that the hpd interrupt does not
	 * have a gpio conflict; once an i2c helper is implemented, this
	 * mutex should be retired.
3243 	 */
3244 	mutex_lock(&aconnector->hpd_lock);
3245 
3246 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3247 						&link_loss, true, &has_left_work);
3248 
3249 	if (!has_left_work)
3250 		goto out;
3251 
3252 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3253 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3254 		goto out;
3255 	}
3256 
3257 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3258 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3259 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3260 			dm_handle_mst_sideband_msg(aconnector);
3261 			goto out;
3262 		}
3263 
3264 		if (link_loss) {
3265 			bool skip = false;
3266 
3267 			spin_lock(&offload_wq->offload_lock);
3268 			skip = offload_wq->is_handling_link_loss;
3269 
3270 			if (!skip)
3271 				offload_wq->is_handling_link_loss = true;
3272 
3273 			spin_unlock(&offload_wq->offload_lock);
3274 
3275 			if (!skip)
3276 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3277 
3278 			goto out;
3279 		}
3280 	}
3281 
3282 out:
3283 	if (result && !is_mst_root_connector) {
3284 		/* Downstream Port status changed. */
3285 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3286 			DRM_ERROR("KMS: Failed to detect connector\n");
3287 
3288 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3289 			emulated_link_detect(dc_link);
3290 
3291 			if (aconnector->fake_enable)
3292 				aconnector->fake_enable = false;
3293 
3294 			amdgpu_dm_update_connector_after_detect(aconnector);
3295 
3296 
3297 			drm_modeset_lock_all(dev);
3298 			dm_restore_drm_connector_state(dev, connector);
3299 			drm_modeset_unlock_all(dev);
3300 
3301 			drm_kms_helper_connector_hotplug_event(connector);
3302 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3303 
3304 			if (aconnector->fake_enable)
3305 				aconnector->fake_enable = false;
3306 
3307 			amdgpu_dm_update_connector_after_detect(aconnector);
3308 
3309 
3310 			drm_modeset_lock_all(dev);
3311 			dm_restore_drm_connector_state(dev, connector);
3312 			drm_modeset_unlock_all(dev);
3313 
3314 			drm_kms_helper_connector_hotplug_event(connector);
3315 		}
3316 	}
3317 #ifdef CONFIG_DRM_AMD_DC_HDCP
3318 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3319 		if (adev->dm.hdcp_workqueue)
3320 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3321 	}
3322 #endif
3323 
3324 	if (dc_link->type != dc_connection_mst_branch)
3325 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3326 
3327 	mutex_unlock(&aconnector->hpd_lock);
3328 }
3329 
3330 static void register_hpd_handlers(struct amdgpu_device *adev)
3331 {
3332 	struct drm_device *dev = adev_to_drm(adev);
3333 	struct drm_connector *connector;
3334 	struct amdgpu_dm_connector *aconnector;
3335 	const struct dc_link *dc_link;
3336 	struct dc_interrupt_params int_params = {0};
3337 
3338 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3339 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3340 
3341 	list_for_each_entry(connector,
3342 			&dev->mode_config.connector_list, head)	{
3343 
3344 		aconnector = to_amdgpu_dm_connector(connector);
3345 		dc_link = aconnector->dc_link;
3346 
3347 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3348 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3349 			int_params.irq_source = dc_link->irq_source_hpd;
3350 
3351 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3352 					handle_hpd_irq,
3353 					(void *) aconnector);
3354 		}
3355 
3356 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3357 
3358 			/* Also register for DP short pulse (hpd_rx). */
3359 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3361 
3362 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3363 					handle_hpd_rx_irq,
3364 					(void *) aconnector);
3365 
3366 			if (adev->dm.hpd_rx_offload_wq)
3367 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3368 					aconnector;
3369 		}
3370 	}
3371 }
3372 
3373 #if defined(CONFIG_DRM_AMD_DC_SI)
3374 /* Register IRQ sources and initialize IRQ callbacks */
3375 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3376 {
3377 	struct dc *dc = adev->dm.dc;
3378 	struct common_irq_params *c_irq_params;
3379 	struct dc_interrupt_params int_params = {0};
3380 	int r;
3381 	int i;
3382 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3383 
3384 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3385 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3386 
	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3397 
3398 	/* Use VBLANK interrupt */
3399 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3401 		if (r) {
3402 			DRM_ERROR("Failed to add crtc irq id!\n");
3403 			return r;
3404 		}
3405 
3406 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3407 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3409 
3410 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3411 
3412 		c_irq_params->adev = adev;
3413 		c_irq_params->irq_src = int_params.irq_source;
3414 
3415 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3416 				dm_crtc_high_irq, c_irq_params);
3417 	}
3418 
3419 	/* Use GRPH_PFLIP interrupt */
3420 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3421 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3422 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3423 		if (r) {
3424 			DRM_ERROR("Failed to add page flip irq id!\n");
3425 			return r;
3426 		}
3427 
3428 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3429 		int_params.irq_source =
3430 			dc_interrupt_to_irq_source(dc, i, 0);
3431 
3432 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3433 
3434 		c_irq_params->adev = adev;
3435 		c_irq_params->irq_src = int_params.irq_source;
3436 
3437 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3438 				dm_pflip_high_irq, c_irq_params);
3439 
3440 	}
3441 
3442 	/* HPD */
3443 	r = amdgpu_irq_add_id(adev, client_id,
3444 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3445 	if (r) {
3446 		DRM_ERROR("Failed to add hpd irq id!\n");
3447 		return r;
3448 	}
3449 
3450 	register_hpd_handlers(adev);
3451 
3452 	return 0;
3453 }
3454 #endif
3455 
3456 /* Register IRQ sources and initialize IRQ callbacks */
3457 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3458 {
3459 	struct dc *dc = adev->dm.dc;
3460 	struct common_irq_params *c_irq_params;
3461 	struct dc_interrupt_params int_params = {0};
3462 	int r;
3463 	int i;
3464 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3465 
3466 	if (adev->family >= AMDGPU_FAMILY_AI)
3467 		client_id = SOC15_IH_CLIENTID_DCE;
3468 
3469 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3470 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3471 
	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3482 
3483 	/* Use VBLANK interrupt */
3484 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3485 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3486 		if (r) {
3487 			DRM_ERROR("Failed to add crtc irq id!\n");
3488 			return r;
3489 		}
3490 
3491 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3492 		int_params.irq_source =
3493 			dc_interrupt_to_irq_source(dc, i, 0);
3494 
3495 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3496 
3497 		c_irq_params->adev = adev;
3498 		c_irq_params->irq_src = int_params.irq_source;
3499 
3500 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3501 				dm_crtc_high_irq, c_irq_params);
3502 	}
3503 
3504 	/* Use VUPDATE interrupt */
3505 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3506 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3507 		if (r) {
3508 			DRM_ERROR("Failed to add vupdate irq id!\n");
3509 			return r;
3510 		}
3511 
3512 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3513 		int_params.irq_source =
3514 			dc_interrupt_to_irq_source(dc, i, 0);
3515 
3516 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3517 
3518 		c_irq_params->adev = adev;
3519 		c_irq_params->irq_src = int_params.irq_source;
3520 
3521 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3522 				dm_vupdate_high_irq, c_irq_params);
3523 	}
3524 
3525 	/* Use GRPH_PFLIP interrupt */
3526 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3527 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3528 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3529 		if (r) {
3530 			DRM_ERROR("Failed to add page flip irq id!\n");
3531 			return r;
3532 		}
3533 
3534 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3535 		int_params.irq_source =
3536 			dc_interrupt_to_irq_source(dc, i, 0);
3537 
3538 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3539 
3540 		c_irq_params->adev = adev;
3541 		c_irq_params->irq_src = int_params.irq_source;
3542 
3543 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3544 				dm_pflip_high_irq, c_irq_params);
3545 
3546 	}
3547 
3548 	/* HPD */
3549 	r = amdgpu_irq_add_id(adev, client_id,
3550 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3551 	if (r) {
3552 		DRM_ERROR("Failed to add hpd irq id!\n");
3553 		return r;
3554 	}
3555 
3556 	register_hpd_handlers(adev);
3557 
3558 	return 0;
3559 }
3560 
3561 #if defined(CONFIG_DRM_AMD_DC_DCN)
3562 /* Register IRQ sources and initialize IRQ callbacks */
3563 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3564 {
3565 	struct dc *dc = adev->dm.dc;
3566 	struct common_irq_params *c_irq_params;
3567 	struct dc_interrupt_params int_params = {0};
3568 	int r;
3569 	int i;
3570 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3571 	static const unsigned int vrtl_int_srcid[] = {
3572 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3573 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3574 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3575 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3576 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3577 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3578 	};
3579 #endif
3580 
3581 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3582 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3583 
3584 	/*
3585 	 * Actions of amdgpu_irq_add_id():
3586 	 * 1. Register a set() function with base driver.
3587 	 *    Base driver will call set() function to enable/disable an
3588 	 *    interrupt in DC hardware.
3589 	 * 2. Register amdgpu_dm_irq_handler().
3590 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3591 	 *    coming from DC hardware.
3592 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3593 	 *    for acknowledging and handling.
3594 	 */
3595 
3596 	/* Use VSTARTUP interrupt */
3597 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3598 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3599 			i++) {
3600 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3601 
3602 		if (r) {
3603 			DRM_ERROR("Failed to add crtc irq id!\n");
3604 			return r;
3605 		}
3606 
3607 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608 		int_params.irq_source =
3609 			dc_interrupt_to_irq_source(dc, i, 0);
3610 
3611 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3612 
3613 		c_irq_params->adev = adev;
3614 		c_irq_params->irq_src = int_params.irq_source;
3615 
3616 		amdgpu_dm_irq_register_interrupt(
3617 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3618 	}
3619 
3620 	/* Use otg vertical line interrupt */
3621 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3622 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3623 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3624 				vrtl_int_srcid[i], &adev->vline0_irq);
3625 
3626 		if (r) {
3627 			DRM_ERROR("Failed to add vline0 irq id!\n");
3628 			return r;
3629 		}
3630 
3631 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3632 		int_params.irq_source =
3633 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3634 
3635 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3636 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3637 			break;
3638 		}
3639 
3640 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3641 					- DC_IRQ_SOURCE_DC1_VLINE0];
3642 
3643 		c_irq_params->adev = adev;
3644 		c_irq_params->irq_src = int_params.irq_source;
3645 
3646 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3647 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3648 	}
3649 #endif
3650 
3651 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3652 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3653 	 * to trigger at end of each vblank, regardless of state of the lock,
3654 	 * matching DCE behaviour.
3655 	 */
3656 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3657 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3658 	     i++) {
3659 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3660 
3661 		if (r) {
3662 			DRM_ERROR("Failed to add vupdate irq id!\n");
3663 			return r;
3664 		}
3665 
3666 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3667 		int_params.irq_source =
3668 			dc_interrupt_to_irq_source(dc, i, 0);
3669 
3670 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3671 
3672 		c_irq_params->adev = adev;
3673 		c_irq_params->irq_src = int_params.irq_source;
3674 
3675 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3676 				dm_vupdate_high_irq, c_irq_params);
3677 	}
3678 
3679 	/* Use GRPH_PFLIP interrupt */
3680 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3681 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3682 			i++) {
3683 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3684 		if (r) {
3685 			DRM_ERROR("Failed to add page flip irq id!\n");
3686 			return r;
3687 		}
3688 
3689 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3690 		int_params.irq_source =
3691 			dc_interrupt_to_irq_source(dc, i, 0);
3692 
3693 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3694 
3695 		c_irq_params->adev = adev;
3696 		c_irq_params->irq_src = int_params.irq_source;
3697 
3698 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3699 				dm_pflip_high_irq, c_irq_params);
3700 
3701 	}
3702 
3703 	/* HPD */
3704 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3705 			&adev->hpd_irq);
3706 	if (r) {
3707 		DRM_ERROR("Failed to add hpd irq id!\n");
3708 		return r;
3709 	}
3710 
3711 	register_hpd_handlers(adev);
3712 
3713 	return 0;
3714 }
3715 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3716 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3717 {
3718 	struct dc *dc = adev->dm.dc;
3719 	struct common_irq_params *c_irq_params;
3720 	struct dc_interrupt_params int_params = {0};
3721 	int r, i;
3722 
3723 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3724 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3725 
3726 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3727 			&adev->dmub_outbox_irq);
3728 	if (r) {
3729 		DRM_ERROR("Failed to add outbox irq id!\n");
3730 		return r;
3731 	}
3732 
3733 	if (dc->ctx->dmub_srv) {
3734 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3735 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3736 		int_params.irq_source =
3737 		dc_interrupt_to_irq_source(dc, i, 0);
3738 
3739 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3740 
3741 		c_irq_params->adev = adev;
3742 		c_irq_params->irq_src = int_params.irq_source;
3743 
3744 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3745 				dm_dmub_outbox1_low_irq, c_irq_params);
3746 	}
3747 
3748 	return 0;
3749 }
3750 #endif
3751 
3752 /*
3753  * Acquires the lock for the atomic state object and returns
3754  * the new atomic state.
3755  *
3756  * This should only be called during atomic check.
3757  */
3758 int dm_atomic_get_state(struct drm_atomic_state *state,
3759 			struct dm_atomic_state **dm_state)
3760 {
3761 	struct drm_device *dev = state->dev;
3762 	struct amdgpu_device *adev = drm_to_adev(dev);
3763 	struct amdgpu_display_manager *dm = &adev->dm;
3764 	struct drm_private_state *priv_state;
3765 
3766 	if (*dm_state)
3767 		return 0;
3768 
3769 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3770 	if (IS_ERR(priv_state))
3771 		return PTR_ERR(priv_state);
3772 
3773 	*dm_state = to_dm_atomic_state(priv_state);
3774 
3775 	return 0;
3776 }
3777 
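/*
 * Find the DM private object in the atomic state and return its new state,
 * or NULL if it is not part of this commit.
 */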
3778 static struct dm_atomic_state *
3779 dm_atomic_get_new_state(struct drm_atomic_state *state)
3780 {
3781 	struct drm_device *dev = state->dev;
3782 	struct amdgpu_device *adev = drm_to_adev(dev);
3783 	struct amdgpu_display_manager *dm = &adev->dm;
3784 	struct drm_private_obj *obj;
3785 	struct drm_private_state *new_obj_state;
3786 	int i;
3787 
3788 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3789 		if (obj->funcs == dm->atomic_obj.funcs)
3790 			return to_dm_atomic_state(new_obj_state);
3791 	}
3792 
3793 	return NULL;
3794 }
3795 
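/*
 * drm_private_state_funcs hook: duplicate the DM private state, deep-copying
 * the DC state context via dc_copy_state() so the new state owns its own
 * copy. Returns NULL on allocation failure.
 */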
3796 static struct drm_private_state *
3797 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3798 {
3799 	struct dm_atomic_state *old_state, *new_state;
3800 
3801 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3802 	if (!new_state)
3803 		return NULL;
3804 
3805 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3806 
3807 	old_state = to_dm_atomic_state(obj->state);
3808 
3809 	if (old_state && old_state->context)
3810 		new_state->context = dc_copy_state(old_state->context);
3811 
3812 	if (!new_state->context) {
3813 		kfree(new_state);
3814 		return NULL;
3815 	}
3816 
3817 	return &new_state->base;
3818 }
3819 
3820 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3821 				    struct drm_private_state *state)
3822 {
3823 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3824 
3825 	if (dm_state && dm_state->context)
3826 		dc_release_state(dm_state->context);
3827 
3828 	kfree(dm_state);
3829 }
3830 
3831 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3832 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3833 	.atomic_destroy_state = dm_atomic_destroy_state,
3834 };
3835 
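/*
 * Set up the DRM mode_config parameters for the device, create the initial
 * DC state for the DM private atomic object, and initialize display
 * properties and audio support.
 */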
3836 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3837 {
3838 	struct dm_atomic_state *state;
3839 	int r;
3840 
3841 	adev->mode_info.mode_config_initialized = true;
3842 
3843 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3844 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3845 
3846 	adev_to_drm(adev)->mode_config.max_width = 16384;
3847 	adev_to_drm(adev)->mode_config.max_height = 16384;
3848 
3849 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3850 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3851 	/* indicates support for immediate flip */
3852 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3853 
3854 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3855 
3856 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3857 	if (!state)
3858 		return -ENOMEM;
3859 
3860 	state->context = dc_create_state(adev->dm.dc);
3861 	if (!state->context) {
3862 		kfree(state);
3863 		return -ENOMEM;
3864 	}
3865 
3866 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3867 
3868 	drm_atomic_private_obj_init(adev_to_drm(adev),
3869 				    &adev->dm.atomic_obj,
3870 				    &state->base,
3871 				    &dm_atomic_state_funcs);
3872 
3873 	r = amdgpu_display_modeset_create_props(adev);
3874 	if (r) {
3875 		dc_release_state(state->context);
3876 		kfree(state);
3877 		return r;
3878 	}
3879 
3880 	r = amdgpu_dm_audio_init(adev);
3881 	if (r) {
3882 		dc_release_state(state->context);
3883 		kfree(state);
3884 		return r;
3885 	}
3886 
3887 	return 0;
3888 }
3889 
3890 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3891 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3892 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3893 
3894 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3895 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3896 
3897 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3898 					    int bl_idx)
3899 {
3900 #if defined(CONFIG_ACPI)
3901 	struct amdgpu_dm_backlight_caps caps;
3902 
3903 	memset(&caps, 0, sizeof(caps));
3904 
3905 	if (dm->backlight_caps[bl_idx].caps_valid)
3906 		return;
3907 
3908 	amdgpu_acpi_get_backlight_caps(&caps);
3909 	if (caps.caps_valid) {
3910 		dm->backlight_caps[bl_idx].caps_valid = true;
3911 		if (caps.aux_support)
3912 			return;
3913 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3914 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3915 	} else {
3916 		dm->backlight_caps[bl_idx].min_input_signal =
3917 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3918 		dm->backlight_caps[bl_idx].max_input_signal =
3919 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3920 	}
3921 #else
3922 	if (dm->backlight_caps[bl_idx].aux_support)
3923 		return;
3924 
3925 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3926 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3927 #endif
3928 }
3929 
3930 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3931 				unsigned *min, unsigned *max)
3932 {
3933 	if (!caps)
3934 		return 0;
3935 
3936 	if (caps->aux_support) {
3937 		// Firmware limits are in nits, DC API wants millinits.
3938 		*max = 1000 * caps->aux_max_input_signal;
3939 		*min = 1000 * caps->aux_min_input_signal;
3940 	} else {
3941 		// Firmware limits are 8-bit, PWM control is 16-bit.
3942 		*max = 0x101 * caps->max_input_signal;
3943 		*min = 0x101 * caps->min_input_signal;
3944 	}
3945 	return 1;
3946 }
3947 
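/*
 * Linearly rescale a user brightness value in 0..AMDGPU_MAX_BL_LEVEL onto
 * the min..max range reported by the backlight caps (millinits for AUX
 * backlights, 16-bit PWM counts otherwise).
 */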
3948 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3949 					uint32_t brightness)
3950 {
3951 	unsigned min, max;
3952 
3953 	if (!get_brightness_range(caps, &min, &max))
3954 		return brightness;
3955 
3956 	// Rescale 0..255 to min..max
3957 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3958 				       AMDGPU_MAX_BL_LEVEL);
3959 }
3960 
3961 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3962 				      uint32_t brightness)
3963 {
3964 	unsigned min, max;
3965 
3966 	if (!get_brightness_range(caps, &min, &max))
3967 		return brightness;
3968 
3969 	if (brightness < min)
3970 		return 0;
3971 	// Rescale min..max to 0..255
3972 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3973 				 max - min);
3974 }
3975 
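/*
 * Apply a user-requested brightness level to the given eDP link: cache the
 * request, mirror it into the BIOS scratch register for the first panel,
 * convert it to the firmware range and program it either via
 * dc_link_set_backlight_level_nits() (AUX, in nits) or
 * dc_link_set_backlight_level(). On success the value is also recorded as
 * the actual brightness.
 */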
3976 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3977 					 int bl_idx,
3978 					 u32 user_brightness)
3979 {
3980 	struct amdgpu_dm_backlight_caps caps;
3981 	struct dc_link *link;
3982 	u32 brightness;
3983 	bool rc;
3984 
3985 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3986 	caps = dm->backlight_caps[bl_idx];
3987 
3988 	dm->brightness[bl_idx] = user_brightness;
3989 	/* update scratch register */
3990 	if (bl_idx == 0)
3991 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3992 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3993 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3994 
3995 	/* Change brightness based on AUX property */
3996 	if (caps.aux_support) {
3997 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3998 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3999 		if (!rc)
4000 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4001 	} else {
4002 		rc = dc_link_set_backlight_level(link, brightness, 0);
4003 		if (!rc)
4004 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4005 	}
4006 
4007 	if (rc)
4008 		dm->actual_brightness[bl_idx] = user_brightness;
4009 }
4010 
4011 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4012 {
4013 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4014 	int i;
4015 
4016 	for (i = 0; i < dm->num_of_edps; i++) {
4017 		if (bd == dm->backlight_dev[i])
4018 			break;
4019 	}
4020 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4021 		i = 0;
4022 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4023 
4024 	return 0;
4025 }
4026 
4027 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4028 					 int bl_idx)
4029 {
4030 	struct amdgpu_dm_backlight_caps caps;
4031 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4032 
4033 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4034 	caps = dm->backlight_caps[bl_idx];
4035 
4036 	if (caps.aux_support) {
4037 		u32 avg, peak;
4038 		bool rc;
4039 
4040 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4041 		if (!rc)
4042 			return dm->brightness[bl_idx];
4043 		return convert_brightness_to_user(&caps, avg);
4044 	} else {
4045 		int ret = dc_link_get_backlight_level(link);
4046 
4047 		if (ret == DC_ERROR_UNEXPECTED)
4048 			return dm->brightness[bl_idx];
4049 		return convert_brightness_to_user(&caps, ret);
4050 	}
4051 }
4052 
4053 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4054 {
4055 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4056 	int i;
4057 
4058 	for (i = 0; i < dm->num_of_edps; i++) {
4059 		if (bd == dm->backlight_dev[i])
4060 			break;
4061 	}
4062 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4063 		i = 0;
4064 	return amdgpu_dm_backlight_get_level(dm, i);
4065 }
4066 
4067 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4068 	.options = BL_CORE_SUSPENDRESUME,
4069 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4070 	.update_status	= amdgpu_dm_backlight_update_status,
4071 };
4072 
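/*
 * Register a backlight class device (amdgpu_blN) for the next eDP panel,
 * with its brightness initialized to AMDGPU_MAX_BL_LEVEL.
 */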
4073 static void
4074 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4075 {
4076 	char bl_name[16];
4077 	struct backlight_properties props = { 0 };
4078 
4079 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4080 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4081 
4082 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4083 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4084 	props.type = BACKLIGHT_RAW;
4085 
4086 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4087 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4088 
4089 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4090 								       adev_to_drm(dm->adev)->dev,
4091 								       dm,
4092 								       &amdgpu_dm_backlight_ops,
4093 								       &props);
4094 
4095 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4096 		DRM_ERROR("DM: Backlight registration failed!\n");
4097 	else
4098 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4099 }
4100 #endif
4101 
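/*
 * Allocate and initialize a single DRM plane of the given type and attach it
 * to mode_info when provided. Primary planes are tied to their matching CRTC
 * (see the IGT note below); other plane types may be used with any CRTC.
 */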
4102 static int initialize_plane(struct amdgpu_display_manager *dm,
4103 			    struct amdgpu_mode_info *mode_info, int plane_id,
4104 			    enum drm_plane_type plane_type,
4105 			    const struct dc_plane_cap *plane_cap)
4106 {
4107 	struct drm_plane *plane;
4108 	unsigned long possible_crtcs;
4109 	int ret = 0;
4110 
4111 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4112 	if (!plane) {
4113 		DRM_ERROR("KMS: Failed to allocate plane\n");
4114 		return -ENOMEM;
4115 	}
4116 	plane->type = plane_type;
4117 
4118 	/*
4119 	 * HACK: IGT tests expect that the primary plane for a CRTC
4120 	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if the plane is not going to be used as a primary
	 * plane for a CRTC - like overlay or underlay planes.
4123 	 */
4124 	possible_crtcs = 1 << plane_id;
4125 	if (plane_id >= dm->dc->caps.max_streams)
4126 		possible_crtcs = 0xff;
4127 
4128 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4129 
4130 	if (ret) {
4131 		DRM_ERROR("KMS: Failed to initialize plane\n");
4132 		kfree(plane);
4133 		return ret;
4134 	}
4135 
4136 	if (mode_info)
4137 		mode_info->planes[plane_id] = plane;
4138 
4139 	return ret;
4140 }
4141 
4142 
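/*
 * If the link drives an eDP/LVDS panel with a sink attached, make sure a
 * backlight device exists and record the link as one of the DM's backlight
 * providers.
 */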
4143 static void register_backlight_device(struct amdgpu_display_manager *dm,
4144 				      struct dc_link *link)
4145 {
4146 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4147 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4148 
4149 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4150 	    link->type != dc_connection_none) {
4151 		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
4155 		 */
4156 		if (!dm->backlight_dev[dm->num_of_edps])
4157 			amdgpu_dm_register_backlight_device(dm);
4158 
4159 		if (dm->backlight_dev[dm->num_of_edps]) {
4160 			dm->backlight_link[dm->num_of_edps] = link;
4161 			dm->num_of_edps++;
4162 		}
4163 	}
4164 #endif
4165 }
4166 
4167 
4168 /*
4169  * In this architecture, the association
4170  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component
4173  *
4174  * Returns 0 on success
4175  */
4176 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4177 {
4178 	struct amdgpu_display_manager *dm = &adev->dm;
4179 	int32_t i;
4180 	struct amdgpu_dm_connector *aconnector = NULL;
4181 	struct amdgpu_encoder *aencoder = NULL;
4182 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4183 	uint32_t link_cnt;
4184 	int32_t primary_planes;
4185 	enum dc_connection_type new_connection_type = dc_connection_none;
4186 	const struct dc_plane_cap *plane;
4187 	bool psr_feature_enabled = false;
4188 
4189 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
4191 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4192 
4193 	link_cnt = dm->dc->caps.max_links;
4194 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4195 		DRM_ERROR("DM: Failed to initialize mode config\n");
4196 		return -EINVAL;
4197 	}
4198 
4199 	/* There is one primary plane per CRTC */
4200 	primary_planes = dm->dc->caps.max_streams;
4201 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4202 
4203 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4205 	 * Order is reversed to match iteration order in atomic check.
4206 	 */
4207 	for (i = (primary_planes - 1); i >= 0; i--) {
4208 		plane = &dm->dc->caps.planes[i];
4209 
4210 		if (initialize_plane(dm, mode_info, i,
4211 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4212 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4213 			goto fail;
4214 		}
4215 	}
4216 
4217 	/*
4218 	 * Initialize overlay planes, index starting after primary planes.
4219 	 * These planes have a higher DRM index than the primary planes since
4220 	 * they should be considered as having a higher z-order.
4221 	 * Order is reversed to match iteration order in atomic check.
4222 	 *
4223 	 * Only support DCN for now, and only expose one so we don't encourage
4224 	 * userspace to use up all the pipes.
4225 	 */
4226 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4227 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4228 
4229 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4230 			continue;
4231 
4232 		if (!plane->blends_with_above || !plane->blends_with_below)
4233 			continue;
4234 
4235 		if (!plane->pixel_format_support.argb8888)
4236 			continue;
4237 
4238 		if (initialize_plane(dm, NULL, primary_planes + i,
4239 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4240 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4241 			goto fail;
4242 		}
4243 
4244 		/* Only create one overlay plane. */
4245 		break;
4246 	}
4247 
4248 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4249 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4250 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4251 			goto fail;
4252 		}
4253 
4254 #if defined(CONFIG_DRM_AMD_DC_DCN)
4255 	/* Use Outbox interrupt */
4256 	switch (adev->ip_versions[DCE_HWIP][0]) {
4257 	case IP_VERSION(3, 0, 0):
4258 	case IP_VERSION(3, 1, 2):
4259 	case IP_VERSION(3, 1, 3):
4260 	case IP_VERSION(3, 1, 5):
4261 	case IP_VERSION(3, 1, 6):
4262 	case IP_VERSION(2, 1, 0):
4263 		if (register_outbox_irq_handlers(dm->adev)) {
4264 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4265 			goto fail;
4266 		}
4267 		break;
4268 	default:
4269 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4270 			      adev->ip_versions[DCE_HWIP][0]);
4271 	}
4272 
4273 	/* Determine whether to enable PSR support by default. */
4274 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4275 		switch (adev->ip_versions[DCE_HWIP][0]) {
4276 		case IP_VERSION(3, 1, 2):
4277 		case IP_VERSION(3, 1, 3):
4278 		case IP_VERSION(3, 1, 5):
4279 		case IP_VERSION(3, 1, 6):
4280 			psr_feature_enabled = true;
4281 			break;
4282 		default:
4283 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4284 			break;
4285 		}
4286 	}
4287 #endif
4288 
4289 	/* Disable vblank IRQs aggressively for power-saving. */
4290 	adev_to_drm(adev)->vblank_disable_immediate = true;
4291 
4292 	/* loops over all connectors on the board */
4293 	for (i = 0; i < link_cnt; i++) {
4294 		struct dc_link *link = NULL;
4295 
4296 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4297 			DRM_ERROR(
4298 				"KMS: Cannot support more than %d display indexes\n",
4299 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4300 			continue;
4301 		}
4302 
4303 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4304 		if (!aconnector)
4305 			goto fail;
4306 
4307 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4308 		if (!aencoder)
4309 			goto fail;
4310 
4311 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4312 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4313 			goto fail;
4314 		}
4315 
4316 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4317 			DRM_ERROR("KMS: Failed to initialize connector\n");
4318 			goto fail;
4319 		}
4320 
4321 		link = dc_get_link_at_index(dm->dc, i);
4322 
4323 		if (!dc_link_detect_sink(link, &new_connection_type))
4324 			DRM_ERROR("KMS: Failed to detect connector\n");
4325 
4326 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4327 			emulated_link_detect(link);
4328 			amdgpu_dm_update_connector_after_detect(aconnector);
4329 
4330 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4331 			amdgpu_dm_update_connector_after_detect(aconnector);
4332 			register_backlight_device(dm, link);
4333 			if (dm->num_of_edps)
4334 				update_connector_ext_caps(aconnector);
4335 			if (psr_feature_enabled)
4336 				amdgpu_dm_set_psr_caps(link);
4337 
4338 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4339 			 * PSR is also supported.
4340 			 */
4341 			if (link->psr_settings.psr_feature_enabled)
4342 				adev_to_drm(adev)->vblank_disable_immediate = false;
4343 		}
4344 
4345 
4346 	}
4347 
4348 	/* Software is initialized. Now we can register interrupt handlers. */
4349 	switch (adev->asic_type) {
4350 #if defined(CONFIG_DRM_AMD_DC_SI)
4351 	case CHIP_TAHITI:
4352 	case CHIP_PITCAIRN:
4353 	case CHIP_VERDE:
4354 	case CHIP_OLAND:
4355 		if (dce60_register_irq_handlers(dm->adev)) {
4356 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4357 			goto fail;
4358 		}
4359 		break;
4360 #endif
4361 	case CHIP_BONAIRE:
4362 	case CHIP_HAWAII:
4363 	case CHIP_KAVERI:
4364 	case CHIP_KABINI:
4365 	case CHIP_MULLINS:
4366 	case CHIP_TONGA:
4367 	case CHIP_FIJI:
4368 	case CHIP_CARRIZO:
4369 	case CHIP_STONEY:
4370 	case CHIP_POLARIS11:
4371 	case CHIP_POLARIS10:
4372 	case CHIP_POLARIS12:
4373 	case CHIP_VEGAM:
4374 	case CHIP_VEGA10:
4375 	case CHIP_VEGA12:
4376 	case CHIP_VEGA20:
4377 		if (dce110_register_irq_handlers(dm->adev)) {
4378 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4379 			goto fail;
4380 		}
4381 		break;
4382 	default:
4383 #if defined(CONFIG_DRM_AMD_DC_DCN)
4384 		switch (adev->ip_versions[DCE_HWIP][0]) {
4385 		case IP_VERSION(1, 0, 0):
4386 		case IP_VERSION(1, 0, 1):
4387 		case IP_VERSION(2, 0, 2):
4388 		case IP_VERSION(2, 0, 3):
4389 		case IP_VERSION(2, 0, 0):
4390 		case IP_VERSION(2, 1, 0):
4391 		case IP_VERSION(3, 0, 0):
4392 		case IP_VERSION(3, 0, 2):
4393 		case IP_VERSION(3, 0, 3):
4394 		case IP_VERSION(3, 0, 1):
4395 		case IP_VERSION(3, 1, 2):
4396 		case IP_VERSION(3, 1, 3):
4397 		case IP_VERSION(3, 1, 5):
4398 		case IP_VERSION(3, 1, 6):
4399 			if (dcn10_register_irq_handlers(dm->adev)) {
4400 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4401 				goto fail;
4402 			}
4403 			break;
4404 		default:
			DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4406 					adev->ip_versions[DCE_HWIP][0]);
4407 			goto fail;
4408 		}
4409 #endif
4410 		break;
4411 	}
4412 
4413 	return 0;
4414 fail:
4415 	kfree(aencoder);
4416 	kfree(aconnector);
4417 
4418 	return -EINVAL;
4419 }
4420 
4421 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4422 {
	drm_atomic_private_obj_fini(&dm->atomic_obj);
4425 }
4426 
4427 /******************************************************************************
4428  * amdgpu_display_funcs functions
4429  *****************************************************************************/
4430 
4431 /*
4432  * dm_bandwidth_update - program display watermarks
4433  *
4434  * @adev: amdgpu_device pointer
4435  *
4436  * Calculate and program the display watermarks and line buffer allocation.
4437  */
4438 static void dm_bandwidth_update(struct amdgpu_device *adev)
4439 {
4440 	/* TODO: implement later */
4441 }
4442 
4443 static const struct amdgpu_display_funcs dm_display_funcs = {
4444 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4445 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4446 	.backlight_set_level = NULL, /* never called for DC */
4447 	.backlight_get_level = NULL, /* never called for DC */
4448 	.hpd_sense = NULL,/* called unconditionally */
4449 	.hpd_set_polarity = NULL, /* called unconditionally */
4450 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4451 	.page_flip_get_scanoutpos =
4452 		dm_crtc_get_scanoutpos,/* called unconditionally */
4453 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4454 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4455 };
4456 
4457 #if defined(CONFIG_DEBUG_KERNEL_DC)
4458 
4459 static ssize_t s3_debug_store(struct device *device,
4460 			      struct device_attribute *attr,
4461 			      const char *buf,
4462 			      size_t count)
4463 {
4464 	int ret;
4465 	int s3_state;
4466 	struct drm_device *drm_dev = dev_get_drvdata(device);
4467 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4468 
4469 	ret = kstrtoint(buf, 0, &s3_state);
4470 
4471 	if (ret == 0) {
4472 		if (s3_state) {
4473 			dm_resume(adev);
4474 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4475 		} else
4476 			dm_suspend(adev);
4477 	}
4478 
4479 	return ret == 0 ? count : 0;
4480 }
4481 
4482 DEVICE_ATTR_WO(s3_debug);
4483 
4484 #endif
4485 
4486 static int dm_early_init(void *handle)
4487 {
4488 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4489 
4490 	switch (adev->asic_type) {
4491 #if defined(CONFIG_DRM_AMD_DC_SI)
4492 	case CHIP_TAHITI:
4493 	case CHIP_PITCAIRN:
4494 	case CHIP_VERDE:
4495 		adev->mode_info.num_crtc = 6;
4496 		adev->mode_info.num_hpd = 6;
4497 		adev->mode_info.num_dig = 6;
4498 		break;
4499 	case CHIP_OLAND:
4500 		adev->mode_info.num_crtc = 2;
4501 		adev->mode_info.num_hpd = 2;
4502 		adev->mode_info.num_dig = 2;
4503 		break;
4504 #endif
4505 	case CHIP_BONAIRE:
4506 	case CHIP_HAWAII:
4507 		adev->mode_info.num_crtc = 6;
4508 		adev->mode_info.num_hpd = 6;
4509 		adev->mode_info.num_dig = 6;
4510 		break;
4511 	case CHIP_KAVERI:
4512 		adev->mode_info.num_crtc = 4;
4513 		adev->mode_info.num_hpd = 6;
4514 		adev->mode_info.num_dig = 7;
4515 		break;
4516 	case CHIP_KABINI:
4517 	case CHIP_MULLINS:
4518 		adev->mode_info.num_crtc = 2;
4519 		adev->mode_info.num_hpd = 6;
4520 		adev->mode_info.num_dig = 6;
4521 		break;
4522 	case CHIP_FIJI:
4523 	case CHIP_TONGA:
4524 		adev->mode_info.num_crtc = 6;
4525 		adev->mode_info.num_hpd = 6;
4526 		adev->mode_info.num_dig = 7;
4527 		break;
4528 	case CHIP_CARRIZO:
4529 		adev->mode_info.num_crtc = 3;
4530 		adev->mode_info.num_hpd = 6;
4531 		adev->mode_info.num_dig = 9;
4532 		break;
4533 	case CHIP_STONEY:
4534 		adev->mode_info.num_crtc = 2;
4535 		adev->mode_info.num_hpd = 6;
4536 		adev->mode_info.num_dig = 9;
4537 		break;
4538 	case CHIP_POLARIS11:
4539 	case CHIP_POLARIS12:
4540 		adev->mode_info.num_crtc = 5;
4541 		adev->mode_info.num_hpd = 5;
4542 		adev->mode_info.num_dig = 5;
4543 		break;
4544 	case CHIP_POLARIS10:
4545 	case CHIP_VEGAM:
4546 		adev->mode_info.num_crtc = 6;
4547 		adev->mode_info.num_hpd = 6;
4548 		adev->mode_info.num_dig = 6;
4549 		break;
4550 	case CHIP_VEGA10:
4551 	case CHIP_VEGA12:
4552 	case CHIP_VEGA20:
4553 		adev->mode_info.num_crtc = 6;
4554 		adev->mode_info.num_hpd = 6;
4555 		adev->mode_info.num_dig = 6;
4556 		break;
4557 	default:
4558 #if defined(CONFIG_DRM_AMD_DC_DCN)
4559 		switch (adev->ip_versions[DCE_HWIP][0]) {
4560 		case IP_VERSION(2, 0, 2):
4561 		case IP_VERSION(3, 0, 0):
4562 			adev->mode_info.num_crtc = 6;
4563 			adev->mode_info.num_hpd = 6;
4564 			adev->mode_info.num_dig = 6;
4565 			break;
4566 		case IP_VERSION(2, 0, 0):
4567 		case IP_VERSION(3, 0, 2):
4568 			adev->mode_info.num_crtc = 5;
4569 			adev->mode_info.num_hpd = 5;
4570 			adev->mode_info.num_dig = 5;
4571 			break;
4572 		case IP_VERSION(2, 0, 3):
4573 		case IP_VERSION(3, 0, 3):
4574 			adev->mode_info.num_crtc = 2;
4575 			adev->mode_info.num_hpd = 2;
4576 			adev->mode_info.num_dig = 2;
4577 			break;
4578 		case IP_VERSION(1, 0, 0):
4579 		case IP_VERSION(1, 0, 1):
4580 		case IP_VERSION(3, 0, 1):
4581 		case IP_VERSION(2, 1, 0):
4582 		case IP_VERSION(3, 1, 2):
4583 		case IP_VERSION(3, 1, 3):
4584 		case IP_VERSION(3, 1, 5):
4585 		case IP_VERSION(3, 1, 6):
4586 			adev->mode_info.num_crtc = 4;
4587 			adev->mode_info.num_hpd = 4;
4588 			adev->mode_info.num_dig = 4;
4589 			break;
4590 		default:
			DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4592 					adev->ip_versions[DCE_HWIP][0]);
4593 			return -EINVAL;
4594 		}
4595 #endif
4596 		break;
4597 	}
4598 
4599 	amdgpu_dm_set_irq_funcs(adev);
4600 
4601 	if (adev->mode_info.funcs == NULL)
4602 		adev->mode_info.funcs = &dm_display_funcs;
4603 
4604 	/*
4605 	 * Note: Do NOT change adev->audio_endpt_rreg and
4606 	 * adev->audio_endpt_wreg because they are initialised in
4607 	 * amdgpu_device_init()
4608 	 */
4609 #if defined(CONFIG_DEBUG_KERNEL_DC)
4610 	device_create_file(
4611 		adev_to_drm(adev)->dev,
4612 		&dev_attr_s3_debug);
4613 #endif
4614 
4615 	return 0;
4616 }
4617 
4618 static bool modeset_required(struct drm_crtc_state *crtc_state,
4619 			     struct dc_stream_state *new_stream,
4620 			     struct dc_stream_state *old_stream)
4621 {
4622 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4623 }
4624 
4625 static bool modereset_required(struct drm_crtc_state *crtc_state)
4626 {
4627 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4628 }
4629 
4630 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4631 {
4632 	drm_encoder_cleanup(encoder);
4633 	kfree(encoder);
4634 }
4635 
4636 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4637 	.destroy = amdgpu_dm_encoder_destroy,
4638 };
4639 
4640 
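/*
 * Look up the per-format scaling limits from the DC plane caps. Factors are
 * in units of 1/1000; a cap value of 1 means "no scaling" and is normalized
 * to 1000 below.
 */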
4641 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4642 					 struct drm_framebuffer *fb,
4643 					 int *min_downscale, int *max_upscale)
4644 {
4645 	struct amdgpu_device *adev = drm_to_adev(dev);
4646 	struct dc *dc = adev->dm.dc;
4647 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4648 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4649 
4650 	switch (fb->format->format) {
4651 	case DRM_FORMAT_P010:
4652 	case DRM_FORMAT_NV12:
4653 	case DRM_FORMAT_NV21:
4654 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4655 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4656 		break;
4657 
4658 	case DRM_FORMAT_XRGB16161616F:
4659 	case DRM_FORMAT_ARGB16161616F:
4660 	case DRM_FORMAT_XBGR16161616F:
4661 	case DRM_FORMAT_ABGR16161616F:
4662 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4663 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4664 		break;
4665 
4666 	default:
4667 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4668 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4669 		break;
4670 	}
4671 
4672 	/*
	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4674 	 * scaling factor of 1.0 == 1000 units.
4675 	 */
4676 	if (*max_upscale == 1)
4677 		*max_upscale = 1000;
4678 
4679 	if (*min_downscale == 1)
4680 		*min_downscale = 1000;
4681 }
4682 
4683 
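/*
 * Translate a DRM plane state into DC src/dst/clip rectangles and validate
 * the resulting scaling factors against the per-format plane caps.
 */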
4684 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4685 				const struct drm_plane_state *state,
4686 				struct dc_scaling_info *scaling_info)
4687 {
4688 	int scale_w, scale_h, min_downscale, max_upscale;
4689 
4690 	memset(scaling_info, 0, sizeof(*scaling_info));
4691 
4692 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4693 	scaling_info->src_rect.x = state->src_x >> 16;
4694 	scaling_info->src_rect.y = state->src_y >> 16;
4695 
4696 	/*
	 * For reasons we don't (yet) fully understand, a non-zero
4698 	 * src_y coordinate into an NV12 buffer can cause a
4699 	 * system hang on DCN1x.
4700 	 * To avoid hangs (and maybe be overly cautious)
4701 	 * let's reject both non-zero src_x and src_y.
4702 	 *
4703 	 * We currently know of only one use-case to reproduce a
4704 	 * scenario with non-zero src_x and src_y for NV12, which
4705 	 * is to gesture the YouTube Android app into full screen
4706 	 * on ChromeOS.
4707 	 */
4708 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4709 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4710 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4711 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4712 		return -EINVAL;
4713 
4714 	scaling_info->src_rect.width = state->src_w >> 16;
4715 	if (scaling_info->src_rect.width == 0)
4716 		return -EINVAL;
4717 
4718 	scaling_info->src_rect.height = state->src_h >> 16;
4719 	if (scaling_info->src_rect.height == 0)
4720 		return -EINVAL;
4721 
4722 	scaling_info->dst_rect.x = state->crtc_x;
4723 	scaling_info->dst_rect.y = state->crtc_y;
4724 
4725 	if (state->crtc_w == 0)
4726 		return -EINVAL;
4727 
4728 	scaling_info->dst_rect.width = state->crtc_w;
4729 
4730 	if (state->crtc_h == 0)
4731 		return -EINVAL;
4732 
4733 	scaling_info->dst_rect.height = state->crtc_h;
4734 
4735 	/* DRM doesn't specify clipping on destination output. */
4736 	scaling_info->clip_rect = scaling_info->dst_rect;
4737 
4738 	/* Validate scaling per-format with DC plane caps */
4739 	if (state->plane && state->plane->dev && state->fb) {
4740 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4741 					     &min_downscale, &max_upscale);
4742 	} else {
4743 		min_downscale = 250;
4744 		max_upscale = 16000;
4745 	}
4746 
4747 	scale_w = scaling_info->dst_rect.width * 1000 /
4748 		  scaling_info->src_rect.width;
4749 
4750 	if (scale_w < min_downscale || scale_w > max_upscale)
4751 		return -EINVAL;
4752 
4753 	scale_h = scaling_info->dst_rect.height * 1000 /
4754 		  scaling_info->src_rect.height;
4755 
4756 	if (scale_h < min_downscale || scale_h > max_upscale)
4757 		return -EINVAL;
4758 
4759 	/*
4760 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4761 	 * assume reasonable defaults based on the format.
4762 	 */
4763 
4764 	return 0;
4765 }
4766 
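/*
 * Decode legacy GFX8 tiling flags (array mode, bank geometry, tile split,
 * pipe config) into the DC tiling info structure.
 */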
4767 static void
4768 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4769 				 uint64_t tiling_flags)
4770 {
4771 	/* Fill GFX8 params */
4772 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4773 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4774 
4775 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4776 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4777 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4778 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4779 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4780 
4781 		/* XXX fix me for VI */
4782 		tiling_info->gfx8.num_banks = num_banks;
4783 		tiling_info->gfx8.array_mode =
4784 				DC_ARRAY_2D_TILED_THIN1;
4785 		tiling_info->gfx8.tile_split = tile_split;
4786 		tiling_info->gfx8.bank_width = bankw;
4787 		tiling_info->gfx8.bank_height = bankh;
4788 		tiling_info->gfx8.tile_aspect = mtaspect;
4789 		tiling_info->gfx8.tile_mode =
4790 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4791 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4792 			== DC_ARRAY_1D_TILED_THIN1) {
4793 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4794 	}
4795 
4796 	tiling_info->gfx8.pipe_config =
4797 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4798 }
4799 
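/*
 * Fill GFX9+ tiling info from the device-wide gb_addr_config parameters
 * (pipes, banks, shader engines, RBs and, on GFX10.3+, packers).
 */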
4800 static void
4801 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4802 				  union dc_tiling_info *tiling_info)
4803 {
4804 	tiling_info->gfx9.num_pipes =
4805 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4806 	tiling_info->gfx9.num_banks =
4807 		adev->gfx.config.gb_addr_config_fields.num_banks;
4808 	tiling_info->gfx9.pipe_interleave =
4809 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4810 	tiling_info->gfx9.num_shader_engines =
4811 		adev->gfx.config.gb_addr_config_fields.num_se;
4812 	tiling_info->gfx9.max_compressed_frags =
4813 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4814 	tiling_info->gfx9.num_rb_per_se =
4815 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4816 	tiling_info->gfx9.shaderEnable = 1;
4817 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4818 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4819 }
4820 
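/*
 * Ask DC whether the surface can be displayed with the requested DCC
 * parameters. Video formats and configurations DC reports as incapable (or
 * with mismatched independent-64B settings) are rejected.
 */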
4821 static int
4822 validate_dcc(struct amdgpu_device *adev,
4823 	     const enum surface_pixel_format format,
4824 	     const enum dc_rotation_angle rotation,
4825 	     const union dc_tiling_info *tiling_info,
4826 	     const struct dc_plane_dcc_param *dcc,
4827 	     const struct dc_plane_address *address,
4828 	     const struct plane_size *plane_size)
4829 {
4830 	struct dc *dc = adev->dm.dc;
4831 	struct dc_dcc_surface_param input;
4832 	struct dc_surface_dcc_cap output;
4833 
4834 	memset(&input, 0, sizeof(input));
4835 	memset(&output, 0, sizeof(output));
4836 
4837 	if (!dcc->enable)
4838 		return 0;
4839 
4840 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4841 	    !dc->cap_funcs.get_dcc_compression_cap)
4842 		return -EINVAL;
4843 
4844 	input.format = format;
4845 	input.surface_size.width = plane_size->surface_size.width;
4846 	input.surface_size.height = plane_size->surface_size.height;
4847 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4848 
4849 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4850 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4851 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4852 		input.scan = SCAN_DIRECTION_VERTICAL;
4853 
4854 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4855 		return -EINVAL;
4856 
4857 	if (!output.capable)
4858 		return -EINVAL;
4859 
4860 	if (dcc->independent_64b_blks == 0 &&
4861 	    output.grph.rgb.independent_64b_blks != 0)
4862 		return -EINVAL;
4863 
4864 	return 0;
4865 }
4866 
4867 static bool
4868 modifier_has_dcc(uint64_t modifier)
4869 {
4870 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4871 }
4872 
4873 static unsigned
4874 modifier_gfx9_swizzle_mode(uint64_t modifier)
4875 {
4876 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4877 		return 0;
4878 
4879 	return AMD_FMT_MOD_GET(TILE, modifier);
4880 }
4881 
4882 static const struct drm_format_info *
4883 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4884 {
4885 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4886 }
4887 
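/*
 * Derive GFX9+ tiling info from an AMD format modifier, overriding the
 * device defaults with the pipe/bank/packer layout encoded in the modifier.
 */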
4888 static void
4889 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4890 				    union dc_tiling_info *tiling_info,
4891 				    uint64_t modifier)
4892 {
4893 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4894 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4895 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4896 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4897 
4898 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4899 
4900 	if (!IS_AMD_FMT_MOD(modifier))
4901 		return;
4902 
4903 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4904 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4905 
4906 	if (adev->family >= AMDGPU_FAMILY_NV) {
4907 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4908 	} else {
4909 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4910 
4911 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4912 	}
4913 }
4914 
4915 enum dm_micro_swizzle {
4916 	MICRO_SWIZZLE_Z = 0,
4917 	MICRO_SWIZZLE_S = 1,
4918 	MICRO_SWIZZLE_D = 2,
4919 	MICRO_SWIZZLE_R = 3
4920 };
4921 
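/*
 * Check whether a format/modifier pair can be scanned out by this plane:
 * LINEAR and INVALID are always accepted, the modifier must be in the
 * plane's advertised list, and DCC and D-swizzle layouts are further
 * restricted by bytes per pixel.
 */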
4922 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4923 					  uint32_t format,
4924 					  uint64_t modifier)
4925 {
4926 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4927 	const struct drm_format_info *info = drm_format_info(format);
4928 	int i;
4929 
4930 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4931 
4932 	if (!info)
4933 		return false;
4934 
4935 	/*
4936 	 * We always have to allow these modifiers:
4937 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4938 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4939 	 */
4940 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4941 	    modifier == DRM_FORMAT_MOD_INVALID) {
4942 		return true;
4943 	}
4944 
4945 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4946 	for (i = 0; i < plane->modifier_count; i++) {
4947 		if (modifier == plane->modifiers[i])
4948 			break;
4949 	}
4950 	if (i == plane->modifier_count)
4951 		return false;
4952 
4953 	/*
4954 	 * For D swizzle the canonical modifier depends on the bpp, so check
4955 	 * it here.
4956 	 */
4957 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4958 	    adev->family >= AMDGPU_FAMILY_NV) {
4959 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4960 			return false;
4961 	}
4962 
4963 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4964 	    info->cpp[0] < 8)
4965 		return false;
4966 
4967 	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4969 		if (info->cpp[0] != 4)
4970 			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
4973 		if (info->num_planes > 1)
4974 			return false;
4975 	}
4976 
4977 	return true;
4978 }
4979 
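/*
 * Append a modifier to a dynamically grown array, doubling the capacity when
 * needed. On allocation failure the array is freed and *mods is set to NULL
 * so callers can detect the error.
 */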
4980 static void
4981 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4982 {
4983 	if (!*mods)
4984 		return;
4985 
4986 	if (*cap - *size < 1) {
4987 		uint64_t new_cap = *cap * 2;
4988 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4989 
4990 		if (!new_mods) {
4991 			kfree(*mods);
4992 			*mods = NULL;
4993 			return;
4994 		}
4995 
4996 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4997 		kfree(*mods);
4998 		*mods = new_mods;
4999 		*cap = new_cap;
5000 	}
5001 
5002 	(*mods)[*size] = mod;
5003 	*size += 1;
5004 }
5005 
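/*
 * Advertise the GFX9 format modifiers. DCC-capable 64K_S_X layouts are only
 * exposed on Raven-family parts (constant-encode variants require Raven2 or
 * later); the remaining entries cover plain X-swizzled and standard 64K S/D
 * tiling.
 */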
5006 static void
5007 add_gfx9_modifiers(const struct amdgpu_device *adev,
5008 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5009 {
5010 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5011 	int pipe_xor_bits = min(8, pipes +
5012 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5013 	int bank_xor_bits = min(8 - pipe_xor_bits,
5014 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5015 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5016 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5017 
5018 
5019 	if (adev->family == AMDGPU_FAMILY_RV) {
5020 		/* Raven2 and later */
5021 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5022 
5023 		/*
5024 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5025 		 * doesn't support _D on DCN
5026 		 */
5027 
5028 		if (has_constant_encode) {
5029 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5030 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5031 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5032 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5033 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5034 				    AMD_FMT_MOD_SET(DCC, 1) |
5035 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5036 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5037 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5038 		}
5039 
5040 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5041 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5042 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5043 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5044 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5045 			    AMD_FMT_MOD_SET(DCC, 1) |
5046 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5047 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5048 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5049 
5050 		if (has_constant_encode) {
5051 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5053 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5054 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5055 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5056 				    AMD_FMT_MOD_SET(DCC, 1) |
5057 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5058 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5059 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5060 
5061 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5062 				    AMD_FMT_MOD_SET(RB, rb) |
5063 				    AMD_FMT_MOD_SET(PIPE, pipes));
5064 		}
5065 
5066 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5067 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5068 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5069 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5070 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5071 			    AMD_FMT_MOD_SET(DCC, 1) |
5072 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5073 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5074 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5075 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5076 			    AMD_FMT_MOD_SET(RB, rb) |
5077 			    AMD_FMT_MOD_SET(PIPE, pipes));
5078 	}
5079 
5080 	/*
5081 	 * Only supported for 64bpp on Raven, will be filtered on format in
5082 	 * dm_plane_format_mod_supported.
5083 	 */
5084 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5085 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5086 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5087 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5088 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5089 
5090 	if (adev->family == AMDGPU_FAMILY_RV) {
5091 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5092 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5093 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5094 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5095 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5096 	}
5097 
5098 	/*
5099 	 * Only supported for 64bpp on Raven, will be filtered on format in
5100 	 * dm_plane_format_mod_supported.
5101 	 */
5102 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5103 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5104 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5105 
5106 	if (adev->family == AMDGPU_FAMILY_RV) {
5107 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5108 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5109 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5110 	}
5111 }
5112 
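/*
 * Build the format modifier list for first-generation GFX10 ASICs (no RB+):
 * 64K_R_X swizzles with DCC (64B independent blocks, optionally retiled),
 * the plain 64K_R_X and 64K_S_X swizzles, and the GFX9 64K_D/64K_S fallbacks
 * that dm_plane_format_mod_supported() restricts by format.
 */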
5113 static void
5114 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5115 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5116 {
5117 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5118 
5119 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5120 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5121 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5122 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5123 		    AMD_FMT_MOD_SET(DCC, 1) |
5124 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5125 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5126 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5127 
5128 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5129 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5130 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5131 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5132 		    AMD_FMT_MOD_SET(DCC, 1) |
5133 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5134 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5135 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5136 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5137 
5138 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5139 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5140 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5141 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5142 
5143 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5144 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5145 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5146 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5149 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5150 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5151 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5152 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5153 
5154 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5155 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5156 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5157 }
5158 
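/*
 * Build the format modifier list for GFX10.3 (RB+) ASICs. These add the
 * PACKERS field and allow both 64B+128B and 128B-only independent DCC block
 * configurations, each with an optional displayable retile copy, followed by
 * the non-DCC 64K_R_X/64K_S_X swizzles and the GFX9 64K_D/64K_S fallbacks.
 */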
5159 static void
5160 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5161 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5162 {
5163 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5164 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5165 
5166 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5167 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5168 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5169 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5170 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5171 		    AMD_FMT_MOD_SET(DCC, 1) |
5172 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5173 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5174 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5176 
5177 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5178 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5179 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5180 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5181 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5182 		    AMD_FMT_MOD_SET(DCC, 1) |
5183 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5184 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5185 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5186 
5187 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5188 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5189 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5190 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5191 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5192 		    AMD_FMT_MOD_SET(DCC, 1) |
5193 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5194 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5195 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5196 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5197 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5198 
5199 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5200 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5201 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5202 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5203 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5204 		    AMD_FMT_MOD_SET(DCC, 1) |
5205 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5206 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5207 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5208 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5209 
5210 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5211 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5212 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5213 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5214 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5215 
5216 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5217 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5218 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5219 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5220 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5221 
5222 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5223 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5224 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5225 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5226 
5227 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5228 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5229 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5230 }
5231 
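/*
 * Allocate and fill the modifier list advertised for a plane. Cursor planes
 * only get LINEAR; other planes get the per-family lists above followed by
 * LINEAR, and the list is terminated with DRM_FORMAT_MOD_INVALID. Pre-GFX9
 * families expose no modifiers. Returns 0 on success or -ENOMEM.
 */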
5232 static int
5233 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5234 {
5235 	uint64_t size = 0, capacity = 128;
5236 	*mods = NULL;
5237 
5238 	/* We have not hooked up any pre-GFX9 modifiers. */
5239 	if (adev->family < AMDGPU_FAMILY_AI)
5240 		return 0;
5241 
5242 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5243 
5244 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5245 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5246 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5247 		return *mods ? 0 : -ENOMEM;
5248 	}
5249 
5250 	switch (adev->family) {
5251 	case AMDGPU_FAMILY_AI:
5252 	case AMDGPU_FAMILY_RV:
5253 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5254 		break;
5255 	case AMDGPU_FAMILY_NV:
5256 	case AMDGPU_FAMILY_VGH:
5257 	case AMDGPU_FAMILY_YC:
5258 	case AMDGPU_FAMILY_GC_10_3_6:
5259 	case AMDGPU_FAMILY_GC_10_3_7:
5260 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5261 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5262 		else
5263 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5264 		break;
5265 	}
5266 
5267 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5268 
5269 	/* INVALID marks the end of the list. */
5270 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5271 
5272 	if (!*mods)
5273 		return -ENOMEM;
5274 
5275 	return 0;
5276 }
5277 
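/*
 * Derive DC tiling and DCC parameters from the framebuffer's format modifier
 * (GFX9 and newer). The DCC metadata surface is taken from fb plane 1, and
 * the resulting configuration is checked with validate_dcc().
 */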
5278 static int
5279 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5280 					  const struct amdgpu_framebuffer *afb,
5281 					  const enum surface_pixel_format format,
5282 					  const enum dc_rotation_angle rotation,
5283 					  const struct plane_size *plane_size,
5284 					  union dc_tiling_info *tiling_info,
5285 					  struct dc_plane_dcc_param *dcc,
5286 					  struct dc_plane_address *address,
5287 					  const bool force_disable_dcc)
5288 {
5289 	const uint64_t modifier = afb->base.modifier;
5290 	int ret = 0;
5291 
5292 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5293 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5294 
5295 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5296 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5297 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5298 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5299 
5300 		dcc->enable = 1;
5301 		dcc->meta_pitch = afb->base.pitches[1];
5302 		dcc->independent_64b_blks = independent_64b_blks;
5303 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5304 			if (independent_64b_blks && independent_128b_blks)
5305 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5306 			else if (independent_128b_blks)
5307 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5308 			else if (independent_64b_blks && !independent_128b_blks)
5309 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5310 			else
5311 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5312 		} else {
5313 			if (independent_64b_blks)
5314 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5315 			else
5316 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5317 		}
5318 
5319 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5320 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5321 	}
5322 
5323 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5324 	if (ret)
5325 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5326 
5327 	return ret;
5328 }
5329 
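/*
 * Fill surface size, pitch and addresses for a plane. RGB formats use a
 * single GRAPHICS address; planar video formats additionally program the
 * chroma plane (currently assumed 4:2:0 subsampled, see the TODO below).
 * GFX9+ derives tiling from the modifier, older ASICs from tiling flags.
 */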
5330 static int
5331 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5332 			     const struct amdgpu_framebuffer *afb,
5333 			     const enum surface_pixel_format format,
5334 			     const enum dc_rotation_angle rotation,
5335 			     const uint64_t tiling_flags,
5336 			     union dc_tiling_info *tiling_info,
5337 			     struct plane_size *plane_size,
5338 			     struct dc_plane_dcc_param *dcc,
5339 			     struct dc_plane_address *address,
5340 			     bool tmz_surface,
5341 			     bool force_disable_dcc)
5342 {
5343 	const struct drm_framebuffer *fb = &afb->base;
5344 	int ret;
5345 
5346 	memset(tiling_info, 0, sizeof(*tiling_info));
5347 	memset(plane_size, 0, sizeof(*plane_size));
5348 	memset(dcc, 0, sizeof(*dcc));
5349 	memset(address, 0, sizeof(*address));
5350 
5351 	address->tmz_surface = tmz_surface;
5352 
5353 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5354 		uint64_t addr = afb->address + fb->offsets[0];
5355 
5356 		plane_size->surface_size.x = 0;
5357 		plane_size->surface_size.y = 0;
5358 		plane_size->surface_size.width = fb->width;
5359 		plane_size->surface_size.height = fb->height;
5360 		plane_size->surface_pitch =
5361 			fb->pitches[0] / fb->format->cpp[0];
5362 
5363 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5364 		address->grph.addr.low_part = lower_32_bits(addr);
5365 		address->grph.addr.high_part = upper_32_bits(addr);
5366 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5367 		uint64_t luma_addr = afb->address + fb->offsets[0];
5368 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5369 
5370 		plane_size->surface_size.x = 0;
5371 		plane_size->surface_size.y = 0;
5372 		plane_size->surface_size.width = fb->width;
5373 		plane_size->surface_size.height = fb->height;
5374 		plane_size->surface_pitch =
5375 			fb->pitches[0] / fb->format->cpp[0];
5376 
5377 		plane_size->chroma_size.x = 0;
5378 		plane_size->chroma_size.y = 0;
5379 		/* TODO: set these based on surface format */
5380 		plane_size->chroma_size.width = fb->width / 2;
5381 		plane_size->chroma_size.height = fb->height / 2;
5382 
5383 		plane_size->chroma_pitch =
5384 			fb->pitches[1] / fb->format->cpp[1];
5385 
5386 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5387 		address->video_progressive.luma_addr.low_part =
5388 			lower_32_bits(luma_addr);
5389 		address->video_progressive.luma_addr.high_part =
5390 			upper_32_bits(luma_addr);
5391 		address->video_progressive.chroma_addr.low_part =
5392 			lower_32_bits(chroma_addr);
5393 		address->video_progressive.chroma_addr.high_part =
5394 			upper_32_bits(chroma_addr);
5395 	}
5396 
5397 	if (adev->family >= AMDGPU_FAMILY_AI) {
5398 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5399 								rotation, plane_size,
5400 								tiling_info, dcc,
5401 								address,
5402 								force_disable_dcc);
5403 		if (ret)
5404 			return ret;
5405 	} else {
5406 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5407 	}
5408 
5409 	return 0;
5410 }
5411 
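/*
 * Derive blending attributes from the DRM plane state. Only overlay planes
 * are considered: per-pixel alpha is enabled for the premultiplied blend
 * mode with an alpha-capable format, and the 16-bit plane alpha property is
 * reduced to the 8-bit global alpha value used by DC (as an illustrative
 * example, 0x8000 >> 8 = 0x80, roughly 50% opacity).
 */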
5412 static void
5413 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5414 			       bool *per_pixel_alpha, bool *global_alpha,
5415 			       int *global_alpha_value)
5416 {
5417 	*per_pixel_alpha = false;
5418 	*global_alpha = false;
5419 	*global_alpha_value = 0xff;
5420 
5421 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5422 		return;
5423 
5424 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5425 		static const uint32_t alpha_formats[] = {
5426 			DRM_FORMAT_ARGB8888,
5427 			DRM_FORMAT_RGBA8888,
5428 			DRM_FORMAT_ABGR8888,
5429 		};
5430 		uint32_t format = plane_state->fb->format->format;
5431 		unsigned int i;
5432 
5433 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5434 			if (format == alpha_formats[i]) {
5435 				*per_pixel_alpha = true;
5436 				break;
5437 			}
5438 		}
5439 	}
5440 
5441 	if (plane_state->alpha < 0xffff) {
5442 		*global_alpha = true;
5443 		*global_alpha_value = plane_state->alpha >> 8;
5444 	}
5445 }
5446 
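/*
 * Pick the DC color space for a plane. RGB surfaces are always treated as
 * sRGB; YCbCr surfaces map the DRM color encoding/range properties to the
 * corresponding BT.601/BT.709/BT.2020 full- or limited-range space.
 * Limited-range BT.2020 is not supported and returns -EINVAL.
 */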
5447 static int
5448 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5449 			    const enum surface_pixel_format format,
5450 			    enum dc_color_space *color_space)
5451 {
5452 	bool full_range;
5453 
5454 	*color_space = COLOR_SPACE_SRGB;
5455 
5456 	/* DRM color properties only affect non-RGB formats. */
5457 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5458 		return 0;
5459 
5460 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5461 
5462 	switch (plane_state->color_encoding) {
5463 	case DRM_COLOR_YCBCR_BT601:
5464 		if (full_range)
5465 			*color_space = COLOR_SPACE_YCBCR601;
5466 		else
5467 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5468 		break;
5469 
5470 	case DRM_COLOR_YCBCR_BT709:
5471 		if (full_range)
5472 			*color_space = COLOR_SPACE_YCBCR709;
5473 		else
5474 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5475 		break;
5476 
5477 	case DRM_COLOR_YCBCR_BT2020:
5478 		if (full_range)
5479 			*color_space = COLOR_SPACE_2020_YCBCR;
5480 		else
5481 			return -EINVAL;
5482 		break;
5483 
5484 	default:
5485 		return -EINVAL;
5486 	}
5487 
5488 	return 0;
5489 }
5490 
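/*
 * Translate a DRM plane state into dc_plane_info and the surface address:
 * map the fourcc to a DC surface format, convert the rotation, then fill
 * the color space, buffer/tiling/DCC attributes and blending parameters.
 */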
5491 static int
5492 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5493 			    const struct drm_plane_state *plane_state,
5494 			    const uint64_t tiling_flags,
5495 			    struct dc_plane_info *plane_info,
5496 			    struct dc_plane_address *address,
5497 			    bool tmz_surface,
5498 			    bool force_disable_dcc)
5499 {
5500 	const struct drm_framebuffer *fb = plane_state->fb;
5501 	const struct amdgpu_framebuffer *afb =
5502 		to_amdgpu_framebuffer(plane_state->fb);
5503 	int ret;
5504 
5505 	memset(plane_info, 0, sizeof(*plane_info));
5506 
5507 	switch (fb->format->format) {
5508 	case DRM_FORMAT_C8:
5509 		plane_info->format =
5510 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5511 		break;
5512 	case DRM_FORMAT_RGB565:
5513 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5514 		break;
5515 	case DRM_FORMAT_XRGB8888:
5516 	case DRM_FORMAT_ARGB8888:
5517 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5518 		break;
5519 	case DRM_FORMAT_XRGB2101010:
5520 	case DRM_FORMAT_ARGB2101010:
5521 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5522 		break;
5523 	case DRM_FORMAT_XBGR2101010:
5524 	case DRM_FORMAT_ABGR2101010:
5525 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5526 		break;
5527 	case DRM_FORMAT_XBGR8888:
5528 	case DRM_FORMAT_ABGR8888:
5529 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5530 		break;
5531 	case DRM_FORMAT_NV21:
5532 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5533 		break;
5534 	case DRM_FORMAT_NV12:
5535 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5536 		break;
5537 	case DRM_FORMAT_P010:
5538 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5539 		break;
5540 	case DRM_FORMAT_XRGB16161616F:
5541 	case DRM_FORMAT_ARGB16161616F:
5542 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5543 		break;
5544 	case DRM_FORMAT_XBGR16161616F:
5545 	case DRM_FORMAT_ABGR16161616F:
5546 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5547 		break;
5548 	case DRM_FORMAT_XRGB16161616:
5549 	case DRM_FORMAT_ARGB16161616:
5550 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5551 		break;
5552 	case DRM_FORMAT_XBGR16161616:
5553 	case DRM_FORMAT_ABGR16161616:
5554 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5555 		break;
5556 	default:
5557 		DRM_ERROR(
5558 			"Unsupported screen format %p4cc\n",
5559 			&fb->format->format);
5560 		return -EINVAL;
5561 	}
5562 
5563 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5564 	case DRM_MODE_ROTATE_0:
5565 		plane_info->rotation = ROTATION_ANGLE_0;
5566 		break;
5567 	case DRM_MODE_ROTATE_90:
5568 		plane_info->rotation = ROTATION_ANGLE_90;
5569 		break;
5570 	case DRM_MODE_ROTATE_180:
5571 		plane_info->rotation = ROTATION_ANGLE_180;
5572 		break;
5573 	case DRM_MODE_ROTATE_270:
5574 		plane_info->rotation = ROTATION_ANGLE_270;
5575 		break;
5576 	default:
5577 		plane_info->rotation = ROTATION_ANGLE_0;
5578 		break;
5579 	}
5580 
5581 	plane_info->visible = true;
5582 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5583 
5584 	plane_info->layer_index = 0;
5585 
5586 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5587 					  &plane_info->color_space);
5588 	if (ret)
5589 		return ret;
5590 
5591 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5592 					   plane_info->rotation, tiling_flags,
5593 					   &plane_info->tiling_info,
5594 					   &plane_info->plane_size,
5595 					   &plane_info->dcc, address, tmz_surface,
5596 					   force_disable_dcc);
5597 	if (ret)
5598 		return ret;
5599 
5600 	fill_blending_from_plane_state(
5601 		plane_state, &plane_info->per_pixel_alpha,
5602 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5603 
5604 	return 0;
5605 }
5606 
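/*
 * Populate a dc_plane_state from the DRM plane and CRTC state: scaling
 * rects, plane info and address (with DCC force-disabled for Raven during
 * suspend) and the input transfer function for color management.
 */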
5607 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5608 				    struct dc_plane_state *dc_plane_state,
5609 				    struct drm_plane_state *plane_state,
5610 				    struct drm_crtc_state *crtc_state)
5611 {
5612 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5613 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5614 	struct dc_scaling_info scaling_info;
5615 	struct dc_plane_info plane_info;
5616 	int ret;
5617 	bool force_disable_dcc = false;
5618 
5619 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5620 	if (ret)
5621 		return ret;
5622 
5623 	dc_plane_state->src_rect = scaling_info.src_rect;
5624 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5625 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5626 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5627 
5628 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5629 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5630 					  afb->tiling_flags,
5631 					  &plane_info,
5632 					  &dc_plane_state->address,
5633 					  afb->tmz_surface,
5634 					  force_disable_dcc);
5635 	if (ret)
5636 		return ret;
5637 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
5641 	dc_plane_state->plane_size = plane_info.plane_size;
5642 	dc_plane_state->rotation = plane_info.rotation;
5643 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5644 	dc_plane_state->stereo_format = plane_info.stereo_format;
5645 	dc_plane_state->tiling_info = plane_info.tiling_info;
5646 	dc_plane_state->visible = plane_info.visible;
5647 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5648 	dc_plane_state->global_alpha = plane_info.global_alpha;
5649 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5650 	dc_plane_state->dcc = plane_info.dcc;
5651 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5652 	dc_plane_state->flip_int_enabled = true;
5653 
5654 	/*
5655 	 * Always set input transfer function, since plane state is refreshed
5656 	 * every time.
5657 	 */
5658 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5659 	if (ret)
5660 		return ret;
5661 
5662 	return 0;
5663 }
5664 
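/*
 * Compute the stream source (viewport) and destination rectangles for the
 * requested scaling mode (RMX_*) plus optional underscan borders. As an
 * illustrative example, scaling a 1280x720 mode onto a 1920x1200 panel with
 * RMX_ASPECT yields dst = 1920x1080 (720 * 1920 / 1280), centered at y = 60.
 */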
5665 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5666 					   const struct dm_connector_state *dm_state,
5667 					   struct dc_stream_state *stream)
5668 {
5669 	enum amdgpu_rmx_type rmx_type;
5670 
	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */
5673 
5674 	/* no mode. nothing to be done */
5675 	if (!mode)
5676 		return;
5677 
5678 	/* Full screen scaling by default */
5679 	src.width = mode->hdisplay;
5680 	src.height = mode->vdisplay;
5681 	dst.width = stream->timing.h_addressable;
5682 	dst.height = stream->timing.v_addressable;
5683 
5684 	if (dm_state) {
5685 		rmx_type = dm_state->scaling;
5686 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5687 			if (src.width * dst.height <
5688 					src.height * dst.width) {
5689 				/* height needs less upscaling/more downscaling */
5690 				dst.width = src.width *
5691 						dst.height / src.height;
5692 			} else {
5693 				/* width needs less upscaling/more downscaling */
5694 				dst.height = src.height *
5695 						dst.width / src.width;
5696 			}
5697 		} else if (rmx_type == RMX_CENTER) {
5698 			dst = src;
5699 		}
5700 
5701 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5702 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5703 
5704 		if (dm_state->underscan_enable) {
5705 			dst.x += dm_state->underscan_hborder / 2;
5706 			dst.y += dm_state->underscan_vborder / 2;
5707 			dst.width -= dm_state->underscan_hborder;
5708 			dst.height -= dm_state->underscan_vborder;
5709 		}
5710 	}
5711 
5712 	stream->src = src;
5713 	stream->dst = dst;
5714 
5715 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5716 		      dst.x, dst.y, dst.width, dst.height);
5717 
5718 }
5719 
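/*
 * Determine the stream color depth. For YCbCr 4:2:0 the depth is capped by
 * the HDMI HF-VSDB deep-color bits; otherwise the EDID-reported bpc is used
 * (defaulting to 8 when unknown). A userspace "max bpc" request further caps
 * the value, rounded down to an even number of bits per component.
 */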
5720 static enum dc_color_depth
5721 convert_color_depth_from_display_info(const struct drm_connector *connector,
5722 				      bool is_y420, int requested_bpc)
5723 {
5724 	uint8_t bpc;
5725 
5726 	if (is_y420) {
5727 		bpc = 8;
5728 
5729 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5730 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5731 			bpc = 16;
5732 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5733 			bpc = 12;
5734 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5735 			bpc = 10;
5736 	} else {
5737 		bpc = (uint8_t)connector->display_info.bpc;
5738 		/* Assume 8 bpc by default if no bpc is specified. */
5739 		bpc = bpc ? bpc : 8;
5740 	}
5741 
5742 	if (requested_bpc > 0) {
5743 		/*
5744 		 * Cap display bpc based on the user requested value.
5745 		 *
		 * The value for state->max_bpc may not be correctly updated
5747 		 * depending on when the connector gets added to the state
5748 		 * or if this was called outside of atomic check, so it
5749 		 * can't be used directly.
5750 		 */
5751 		bpc = min_t(u8, bpc, requested_bpc);
5752 
5753 		/* Round down to the nearest even number. */
5754 		bpc = bpc - (bpc & 1);
5755 	}
5756 
5757 	switch (bpc) {
5758 	case 0:
5759 		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing
5763 		 */
5764 		return COLOR_DEPTH_888;
5765 	case 6:
5766 		return COLOR_DEPTH_666;
5767 	case 8:
5768 		return COLOR_DEPTH_888;
5769 	case 10:
5770 		return COLOR_DEPTH_101010;
5771 	case 12:
5772 		return COLOR_DEPTH_121212;
5773 	case 14:
5774 		return COLOR_DEPTH_141414;
5775 	case 16:
5776 		return COLOR_DEPTH_161616;
5777 	default:
5778 		return COLOR_DEPTH_UNDEFINED;
5779 	}
5780 }
5781 
5782 static enum dc_aspect_ratio
5783 get_aspect_ratio(const struct drm_display_mode *mode_in)
5784 {
5785 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5786 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5787 }
5788 
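/*
 * Select the output color space from the stream timing: RGB is sRGB, while
 * YCbCr encodings use BT.709 above the 27.03 MHz HDTV/SDTV pixel-clock
 * threshold and BT.601 below it (limited-range variants for Y-only).
 */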
5789 static enum dc_color_space
5790 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5791 {
5792 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5793 
5794 	switch (dc_crtc_timing->pixel_encoding)	{
5795 	case PIXEL_ENCODING_YCBCR422:
5796 	case PIXEL_ENCODING_YCBCR444:
5797 	case PIXEL_ENCODING_YCBCR420:
5798 	{
5799 		/*
		 * 27.03 MHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
5803 		 */
5804 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5805 			if (dc_crtc_timing->flags.Y_ONLY)
5806 				color_space =
5807 					COLOR_SPACE_YCBCR709_LIMITED;
5808 			else
5809 				color_space = COLOR_SPACE_YCBCR709;
5810 		} else {
5811 			if (dc_crtc_timing->flags.Y_ONLY)
5812 				color_space =
5813 					COLOR_SPACE_YCBCR601_LIMITED;
5814 			else
5815 				color_space = COLOR_SPACE_YCBCR601;
5816 		}
5817 
5818 	}
5819 	break;
5820 	case PIXEL_ENCODING_RGB:
5821 		color_space = COLOR_SPACE_SRGB;
5822 		break;
5823 
5824 	default:
5825 		WARN_ON(1);
5826 		break;
5827 	}
5828 
5829 	return color_space;
5830 }
5831 
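/*
 * Reduce the display color depth until the required TMDS character rate
 * fits within the sink's max_tmds_clock (in kHz). For example (illustrative
 * numbers), 3840x2160@60 RGB has a 594000 kHz clock; at 10 bpc that scales
 * to 594000 * 30 / 24 = 742500 kHz, which exceeds a 600000 kHz limit, so
 * the depth falls back to 8 bpc. Returns false if no valid depth fits.
 */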
5832 static bool adjust_colour_depth_from_display_info(
5833 	struct dc_crtc_timing *timing_out,
5834 	const struct drm_display_info *info)
5835 {
5836 	enum dc_color_depth depth = timing_out->display_color_depth;
5837 	int normalized_clk;
5838 	do {
5839 		normalized_clk = timing_out->pix_clk_100hz / 10;
5840 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5841 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5842 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec based on colour depth */
5844 		switch (depth) {
5845 		case COLOR_DEPTH_888:
5846 			break;
5847 		case COLOR_DEPTH_101010:
5848 			normalized_clk = (normalized_clk * 30) / 24;
5849 			break;
5850 		case COLOR_DEPTH_121212:
5851 			normalized_clk = (normalized_clk * 36) / 24;
5852 			break;
5853 		case COLOR_DEPTH_161616:
5854 			normalized_clk = (normalized_clk * 48) / 24;
5855 			break;
5856 		default:
5857 			/* The above depths are the only ones valid for HDMI. */
5858 			return false;
5859 		}
5860 		if (normalized_clk <= info->max_tmds_clock) {
5861 			timing_out->display_color_depth = depth;
5862 			return true;
5863 		}
5864 	} while (--depth > COLOR_DEPTH_666);
5865 	return false;
5866 }
5867 
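/*
 * Fill dc_crtc_timing from a DRM display mode: choose the pixel encoding
 * (forcing YCbCr 4:2:0 where required), derive the color depth, VIC and
 * sync polarities (reused from old_stream when provided so only the refresh
 * rate changes), and copy either the base timings for FreeSync video modes
 * or the crtc_* timings otherwise. HDMI sinks may be dropped to 4:2:0 when
 * the TMDS clock limit cannot be met at the chosen depth.
 */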
5868 static void fill_stream_properties_from_drm_display_mode(
5869 	struct dc_stream_state *stream,
5870 	const struct drm_display_mode *mode_in,
5871 	const struct drm_connector *connector,
5872 	const struct drm_connector_state *connector_state,
5873 	const struct dc_stream_state *old_stream,
5874 	int requested_bpc)
5875 {
5876 	struct dc_crtc_timing *timing_out = &stream->timing;
5877 	const struct drm_display_info *info = &connector->display_info;
5878 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5879 	struct hdmi_vendor_infoframe hv_frame;
5880 	struct hdmi_avi_infoframe avi_frame;
5881 
5882 	memset(&hv_frame, 0, sizeof(hv_frame));
5883 	memset(&avi_frame, 0, sizeof(avi_frame));
5884 
5885 	timing_out->h_border_left = 0;
5886 	timing_out->h_border_right = 0;
5887 	timing_out->v_border_top = 0;
5888 	timing_out->v_border_bottom = 0;
5889 	/* TODO: un-hardcode */
5890 	if (drm_mode_is_420_only(info, mode_in)
5891 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5892 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5893 	else if (drm_mode_is_420_also(info, mode_in)
5894 			&& aconnector->force_yuv420_output)
5895 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5896 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5897 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5898 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5899 	else
5900 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5901 
5902 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5903 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5904 		connector,
5905 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5906 		requested_bpc);
5907 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5908 	timing_out->hdmi_vic = 0;
5909 
	if (old_stream) {
5911 		timing_out->vic = old_stream->timing.vic;
5912 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5913 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5914 	} else {
5915 		timing_out->vic = drm_match_cea_mode(mode_in);
5916 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5917 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5918 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5919 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5920 	}
5921 
5922 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5923 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5924 		timing_out->vic = avi_frame.video_code;
5925 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5926 		timing_out->hdmi_vic = hv_frame.vic;
5927 	}
5928 
5929 	if (is_freesync_video_mode(mode_in, aconnector)) {
5930 		timing_out->h_addressable = mode_in->hdisplay;
5931 		timing_out->h_total = mode_in->htotal;
5932 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5933 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5934 		timing_out->v_total = mode_in->vtotal;
5935 		timing_out->v_addressable = mode_in->vdisplay;
5936 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5937 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5938 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5939 	} else {
5940 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5941 		timing_out->h_total = mode_in->crtc_htotal;
5942 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5943 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5944 		timing_out->v_total = mode_in->crtc_vtotal;
5945 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5946 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5947 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5948 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5949 	}
5950 
5951 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5952 
5953 	stream->output_color_space = get_output_color_space(timing_out);
5954 
5955 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5956 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5957 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5958 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5959 		    drm_mode_is_420_also(info, mode_in) &&
5960 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5961 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5962 			adjust_colour_depth_from_display_info(timing_out, info);
5963 		}
5964 	}
5965 }
5966 
5967 static void fill_audio_info(struct audio_info *audio_info,
5968 			    const struct drm_connector *drm_connector,
5969 			    const struct dc_sink *dc_sink)
5970 {
5971 	int i = 0;
5972 	int cea_revision = 0;
5973 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5974 
5975 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5976 	audio_info->product_id = edid_caps->product_id;
5977 
5978 	cea_revision = drm_connector->display_info.cea_rev;
5979 
5980 	strscpy(audio_info->display_name,
5981 		edid_caps->display_name,
5982 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5983 
5984 	if (cea_revision >= 3) {
5985 		audio_info->mode_count = edid_caps->audio_mode_count;
5986 
5987 		for (i = 0; i < audio_info->mode_count; ++i) {
5988 			audio_info->modes[i].format_code =
5989 					(enum audio_format_code)
5990 					(edid_caps->audio_modes[i].format_code);
5991 			audio_info->modes[i].channel_count =
5992 					edid_caps->audio_modes[i].channel_count;
5993 			audio_info->modes[i].sample_rates.all =
5994 					edid_caps->audio_modes[i].sample_rate;
5995 			audio_info->modes[i].sample_size =
5996 					edid_caps->audio_modes[i].sample_size;
5997 		}
5998 	}
5999 
6000 	audio_info->flags.all = edid_caps->speaker_flags;
6001 
6002 	/* TODO: We only check for the progressive mode, check for interlace mode too */
6003 	if (drm_connector->latency_present[0]) {
6004 		audio_info->video_latency = drm_connector->video_latency[0];
6005 		audio_info->audio_latency = drm_connector->audio_latency[0];
6006 	}
6007 
6008 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6009 
6010 }
6011 
6012 static void
6013 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6014 				      struct drm_display_mode *dst_mode)
6015 {
6016 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6017 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6018 	dst_mode->crtc_clock = src_mode->crtc_clock;
6019 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6020 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
6022 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6023 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6024 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6025 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6026 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6027 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6028 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6029 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6030 }
6031 
6032 static void
6033 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6034 					const struct drm_display_mode *native_mode,
6035 					bool scale_enabled)
6036 {
6037 	if (scale_enabled) {
6038 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6039 	} else if (native_mode->clock == drm_mode->clock &&
6040 			native_mode->htotal == drm_mode->htotal &&
6041 			native_mode->vtotal == drm_mode->vtotal) {
6042 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6043 	} else {
		/* no scaling and no amdgpu-inserted mode, no need to patch */
6045 	}
6046 }
6047 
6048 static struct dc_sink *
6049 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6050 {
6051 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
6054 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6055 
6056 	sink = dc_sink_create(&sink_init_data);
6057 	if (!sink) {
6058 		DRM_ERROR("Failed to create sink!\n");
6059 		return NULL;
6060 	}
6061 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6062 
6063 	return sink;
6064 }
6065 
6066 static void set_multisync_trigger_params(
6067 		struct dc_stream_state *stream)
6068 {
6069 	struct dc_stream_state *master = NULL;
6070 
6071 	if (stream->triggered_crtc_reset.enabled) {
6072 		master = stream->triggered_crtc_reset.event_source;
6073 		stream->triggered_crtc_reset.event =
6074 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6075 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6076 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6077 	}
6078 }
6079 
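/*
 * Pick the multisync master: the enabled stream with the highest refresh
 * rate, computed as pix_clk_100hz * 100 / (h_total * v_total). As an
 * illustrative example, 1920x1080@60 with a 148.5 MHz pixel clock gives
 * 1485000 * 100 / (2200 * 1125) = 60 Hz. All streams then use the master
 * as their CRTC-reset event source.
 */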
6080 static void set_master_stream(struct dc_stream_state *stream_set[],
6081 			      int stream_count)
6082 {
6083 	int j, highest_rfr = 0, master_stream = 0;
6084 
6085 	for (j = 0;  j < stream_count; j++) {
6086 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6087 			int refresh_rate = 0;
6088 
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6091 			if (refresh_rate > highest_rfr) {
6092 				highest_rfr = refresh_rate;
6093 				master_stream = j;
6094 			}
6095 		}
6096 	}
6097 	for (j = 0;  j < stream_count; j++) {
6098 		if (stream_set[j])
6099 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6100 	}
6101 }
6102 
6103 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6104 {
6105 	int i = 0;
6106 	struct dc_stream_state *stream;
6107 
6108 	if (context->stream_count < 2)
6109 		return;
6110 	for (i = 0; i < context->stream_count ; i++) {
6111 		if (!context->streams[i])
6112 			continue;
6113 		/*
6114 		 * TODO: add a function to read AMD VSDB bits and set
6115 		 * crtc_sync_master.multi_sync_enabled flag
6116 		 * For now it's set to false
6117 		 */
6118 	}
6119 
6120 	set_master_stream(context->streams, context->stream_count);
6121 
6122 	for (i = 0; i < context->stream_count ; i++) {
6123 		stream = context->streams[i];
6124 
6125 		if (!stream)
6126 			continue;
6127 
6128 		set_multisync_trigger_params(stream);
6129 	}
6130 }
6131 
6132 #if defined(CONFIG_DRM_AMD_DC_DCN)
6133 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6134 							struct dc_sink *sink, struct dc_stream_state *stream,
6135 							struct dsc_dec_dpcd_caps *dsc_caps)
6136 {
6137 	stream->timing.flags.DSC = 0;
6138 	dsc_caps->is_dsc_supported = false;
6139 
6140 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6141 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6142 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6143 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6144 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6145 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6146 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6147 				dsc_caps);
6148 	}
6149 }
6150 
6151 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6152 				    struct dc_sink *sink, struct dc_stream_state *stream,
6153 				    struct dsc_dec_dpcd_caps *dsc_caps,
6154 				    uint32_t max_dsc_target_bpp_limit_override)
6155 {
6156 	const struct dc_link_settings *verified_link_cap = NULL;
6157 	uint32_t link_bw_in_kbps;
6158 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6159 	struct dc *dc = sink->ctx->dc;
6160 	struct dc_dsc_bw_range bw_range = {0};
6161 	struct dc_dsc_config dsc_cfg = {0};
6162 
6163 	verified_link_cap = dc_link_get_link_cap(stream->link);
6164 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6165 	edp_min_bpp_x16 = 8 * 16;
6166 	edp_max_bpp_x16 = 8 * 16;
6167 
6168 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6169 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6170 
6171 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6172 		edp_min_bpp_x16 = edp_max_bpp_x16;
6173 
6174 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6175 				dc->debug.dsc_min_slice_height_override,
6176 				edp_min_bpp_x16, edp_max_bpp_x16,
6177 				dsc_caps,
6178 				&stream->timing,
6179 				&bw_range)) {
6180 
6181 		if (bw_range.max_kbps < link_bw_in_kbps) {
6182 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6183 					dsc_caps,
6184 					dc->debug.dsc_min_slice_height_override,
6185 					max_dsc_target_bpp_limit_override,
6186 					0,
6187 					&stream->timing,
6188 					&dsc_cfg)) {
6189 				stream->timing.dsc_cfg = dsc_cfg;
6190 				stream->timing.flags.DSC = 1;
6191 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6192 			}
6193 			return;
6194 		}
6195 	}
6196 
6197 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6198 				dsc_caps,
6199 				dc->debug.dsc_min_slice_height_override,
6200 				max_dsc_target_bpp_limit_override,
6201 				link_bw_in_kbps,
6202 				&stream->timing,
6203 				&dsc_cfg)) {
6204 		stream->timing.dsc_cfg = dsc_cfg;
6205 		stream->timing.flags.DSC = 1;
6206 	}
6207 }
6208 
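/*
 * Decide whether DSC is enabled for an SST stream and compute its config.
 * eDP sinks use the dedicated policy above; native DP sinks enable DSC when
 * a config fits the verified link bandwidth; DP-to-HDMI PCONs only enable
 * DSC when the timing exceeds what the link can carry uncompressed. The
 * debugfs overrides for force-enable, slice counts and bpp are applied last.
 */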
6209 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6210 										struct dc_sink *sink, struct dc_stream_state *stream,
6211 										struct dsc_dec_dpcd_caps *dsc_caps)
6212 {
6213 	struct drm_connector *drm_connector = &aconnector->base;
6214 	uint32_t link_bandwidth_kbps;
6215 	uint32_t max_dsc_target_bpp_limit_override = 0;
6216 	struct dc *dc = sink->ctx->dc;
6217 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6218 	uint32_t dsc_max_supported_bw_in_kbps;
6219 
6220 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6221 							dc_link_get_link_cap(aconnector->dc_link));
6222 
6223 	if (stream->link && stream->link->local_sink)
6224 		max_dsc_target_bpp_limit_override =
6225 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6226 
6227 	/* Set DSC policy according to dsc_clock_en */
6228 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6229 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6230 
6231 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6232 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6233 
6234 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6235 
6236 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6237 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6238 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6239 						dsc_caps,
6240 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6241 						max_dsc_target_bpp_limit_override,
6242 						link_bandwidth_kbps,
6243 						&stream->timing,
6244 						&stream->timing.dsc_cfg)) {
6245 				stream->timing.flags.DSC = 1;
6246 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6247 								 __func__, drm_connector->name);
6248 			}
6249 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6250 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6251 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6252 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6253 
6254 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6255 					max_supported_bw_in_kbps > 0 &&
6256 					dsc_max_supported_bw_in_kbps > 0)
6257 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6258 						dsc_caps,
6259 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6260 						max_dsc_target_bpp_limit_override,
6261 						dsc_max_supported_bw_in_kbps,
6262 						&stream->timing,
6263 						&stream->timing.dsc_cfg)) {
6264 					stream->timing.flags.DSC = 1;
6265 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6266 									 __func__, drm_connector->name);
6267 				}
6268 		}
6269 	}
6270 
6271 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6272 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6273 		stream->timing.flags.DSC = 1;
6274 
6275 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6276 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6277 
6278 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6279 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6280 
6281 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6282 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6283 }
6284 #endif /* CONFIG_DRM_AMD_DC_DCN */
6285 
6286 /**
6287  * DOC: FreeSync Video
6288  *
6289  * When a userspace application wants to play a video, the content follows a
6290  * standard format definition that usually specifies the FPS for that format.
6291  * The below list illustrates some video format and the expected FPS,
6292  * respectively:
6293  *
6294  * - TV/NTSC (23.976 FPS)
6295  * - Cinema (24 FPS)
6296  * - TV/PAL (25 FPS)
6297  * - TV/NTSC (29.97 FPS)
6298  * - TV/NTSC (30 FPS)
6299  * - Cinema HFR (48 FPS)
6300  * - TV/PAL (50 FPS)
6301  * - Commonly used (60 FPS)
6302  * - Multiples of 24 (48,72,96,120 FPS)
6303  *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
6307  * rate. Such a switch will happen seamlessly, without screen blanking or
6308  * reprogramming of the output in any other way. If the userspace requests a
6309  * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid any blanking during
 * the transition. For example, a video player can switch from 60Hz to 30Hz
 * for TV/NTSC content when it goes full screen without causing any display
 * blanking. The same concept applies to any other mode setting change.
6315  */
6316 static struct drm_display_mode *
6317 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6318 			  bool use_probed_modes)
6319 {
6320 	struct drm_display_mode *m, *m_pref = NULL;
6321 	u16 current_refresh, highest_refresh;
6322 	struct list_head *list_head = use_probed_modes ?
6323 						    &aconnector->base.probed_modes :
6324 						    &aconnector->base.modes;
6325 
6326 	if (aconnector->freesync_vid_base.clock != 0)
6327 		return &aconnector->freesync_vid_base;
6328 
6329 	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
6331 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6332 			m_pref = m;
6333 			break;
6334 		}
6335 	}
6336 
6337 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6339 		m_pref = list_first_entry_or_null(
6340 			&aconnector->base.modes, struct drm_display_mode, head);
6341 		if (!m_pref) {
6342 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6343 			return NULL;
6344 		}
6345 	}
6346 
6347 	highest_refresh = drm_mode_vrefresh(m_pref);
6348 
6349 	/*
6350 	 * Find the mode with highest refresh rate with same resolution.
6351 	 * For some monitors, preferred mode is not the mode with highest
6352 	 * supported refresh rate.
6353 	 */
	list_for_each_entry(m, list_head, head) {
6355 		current_refresh  = drm_mode_vrefresh(m);
6356 
6357 		if (m->hdisplay == m_pref->hdisplay &&
6358 		    m->vdisplay == m_pref->vdisplay &&
6359 		    highest_refresh < current_refresh) {
6360 			highest_refresh = current_refresh;
6361 			m_pref = m;
6362 		}
6363 	}
6364 
6365 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6366 	return m_pref;
6367 }
6368 
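/*
 * A mode is treated as a FreeSync video mode if it matches the highest-
 * refresh base mode in every respect except for a uniformly stretched
 * vertical blank, i.e. vtotal, vsync_start and vsync_end all differ from
 * the base mode by the same amount.
 */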
6369 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6370 				   struct amdgpu_dm_connector *aconnector)
6371 {
6372 	struct drm_display_mode *high_mode;
6373 	int timing_diff;
6374 
6375 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6376 	if (!high_mode || !mode)
6377 		return false;
6378 
6379 	timing_diff = high_mode->vtotal - mode->vtotal;
6380 
6381 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6382 	    high_mode->hdisplay != mode->hdisplay ||
6383 	    high_mode->vdisplay != mode->vdisplay ||
6384 	    high_mode->hsync_start != mode->hsync_start ||
6385 	    high_mode->hsync_end != mode->hsync_end ||
6386 	    high_mode->htotal != mode->htotal ||
6387 	    high_mode->hskew != mode->hskew ||
6388 	    high_mode->vscan != mode->vscan ||
6389 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6390 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6391 		return false;
6392 	else
6393 		return true;
6394 }
6395 
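/*
 * Create a dc_stream for a connector/mode pair. A virtual sink is used when
 * the connector has no dc_sink attached. For FreeSync video modes the timing
 * is recalculated from the base mode; the SST DSC policy is applied, scaling
 * and audio info are filled, and VSC SDP/PSR settings are configured when
 * the link has PSR enabled.
 */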
6396 static struct dc_stream_state *
6397 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6398 		       const struct drm_display_mode *drm_mode,
6399 		       const struct dm_connector_state *dm_state,
6400 		       const struct dc_stream_state *old_stream,
6401 		       int requested_bpc)
6402 {
6403 	struct drm_display_mode *preferred_mode = NULL;
6404 	struct drm_connector *drm_connector;
6405 	const struct drm_connector_state *con_state =
6406 		dm_state ? &dm_state->base : NULL;
6407 	struct dc_stream_state *stream = NULL;
6408 	struct drm_display_mode mode = *drm_mode;
6409 	struct drm_display_mode saved_mode;
6410 	struct drm_display_mode *freesync_mode = NULL;
6411 	bool native_mode_found = false;
6412 	bool recalculate_timing = false;
6413 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6414 	int mode_refresh;
6415 	int preferred_refresh = 0;
6416 #if defined(CONFIG_DRM_AMD_DC_DCN)
6417 	struct dsc_dec_dpcd_caps dsc_caps;
6418 #endif
6419 	struct dc_sink *sink = NULL;
6420 
6421 	memset(&saved_mode, 0, sizeof(saved_mode));
6422 
6423 	if (aconnector == NULL) {
6424 		DRM_ERROR("aconnector is NULL!\n");
6425 		return stream;
6426 	}
6427 
6428 	drm_connector = &aconnector->base;
6429 
6430 	if (!aconnector->dc_sink) {
6431 		sink = create_fake_sink(aconnector);
6432 		if (!sink)
6433 			return stream;
6434 	} else {
6435 		sink = aconnector->dc_sink;
6436 		dc_sink_retain(sink);
6437 	}
6438 
6439 	stream = dc_create_stream_for_sink(sink);
6440 
6441 	if (stream == NULL) {
6442 		DRM_ERROR("Failed to create stream for sink!\n");
6443 		goto finish;
6444 	}
6445 
6446 	stream->dm_stream_context = aconnector;
6447 
6448 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6449 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6450 
6451 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6452 		/* Search for preferred mode */
6453 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6454 			native_mode_found = true;
6455 			break;
6456 		}
6457 	}
6458 	if (!native_mode_found)
6459 		preferred_mode = list_first_entry_or_null(
6460 				&aconnector->base.modes,
6461 				struct drm_display_mode,
6462 				head);
6463 
6464 	mode_refresh = drm_mode_vrefresh(&mode);
6465 
6466 	if (preferred_mode == NULL) {
6467 		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in yet.
6472 		 */
6473 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6474 	} else {
6475 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6476 		if (recalculate_timing) {
6477 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6478 			drm_mode_copy(&saved_mode, &mode);
6479 			drm_mode_copy(&mode, freesync_mode);
6480 		} else {
6481 			decide_crtc_timing_for_drm_display_mode(
6482 				&mode, preferred_mode, scale);
6483 
6484 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6485 		}
6486 	}
6487 
6488 	if (recalculate_timing)
6489 		drm_mode_set_crtcinfo(&saved_mode, 0);
6490 	else if (!dm_state)
6491 		drm_mode_set_crtcinfo(&mode, 0);
6492 
	/*
	 * If scaling is enabled and refresh rate didn't change
	 * we copy the vic and polarities of the old timings
	 */
6497 	if (!scale || mode_refresh != preferred_refresh)
6498 		fill_stream_properties_from_drm_display_mode(
6499 			stream, &mode, &aconnector->base, con_state, NULL,
6500 			requested_bpc);
6501 	else
6502 		fill_stream_properties_from_drm_display_mode(
6503 			stream, &mode, &aconnector->base, con_state, old_stream,
6504 			requested_bpc);
6505 
6506 #if defined(CONFIG_DRM_AMD_DC_DCN)
6507 	/* SST DSC determination policy */
6508 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6509 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6510 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6511 #endif
6512 
6513 	update_stream_scaling_settings(&mode, dm_state, stream);
6514 
6515 	fill_audio_info(
6516 		&stream->audio_info,
6517 		drm_connector,
6518 		sink);
6519 
6520 	update_stream_signal(stream, sink);
6521 
6522 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6523 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6524 
6525 	if (stream->link->psr_settings.psr_feature_enabled) {
		//
		// Decide whether the stream supports VSC SDP colorimetry
		// before building the VSC info packet.
		//
6530 		stream->use_vsc_sdp_for_colorimetry = false;
6531 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6532 			stream->use_vsc_sdp_for_colorimetry =
6533 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6534 		} else {
6535 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6536 				stream->use_vsc_sdp_for_colorimetry = true;
6537 		}
6538 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6539 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6540 
6541 	}
6542 finish:
6543 	dc_sink_release(sink);
6544 
6545 	return stream;
6546 }
6547 
6548 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6549 {
6550 	drm_crtc_cleanup(crtc);
6551 	kfree(crtc);
6552 }
6553 
6554 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6555 				  struct drm_crtc_state *state)
6556 {
6557 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6558 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6560 	if (cur->stream)
6561 		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

6567 	kfree(state);
6568 }
6569 
6570 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6571 {
6572 	struct dm_crtc_state *state;
6573 
6574 	if (crtc->state)
6575 		dm_crtc_destroy_state(crtc, crtc->state);
6576 
6577 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6578 	if (WARN_ON(!state))
6579 		return;
6580 
6581 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6582 }
6583 
6584 static struct drm_crtc_state *
6585 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6586 {
6587 	struct dm_crtc_state *state, *cur;
6588 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6593 
6594 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6595 	if (!state)
6596 		return NULL;
6597 
6598 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6599 
6600 	if (cur->stream) {
6601 		state->stream = cur->stream;
6602 		dc_stream_retain(state->stream);
6603 	}
6604 
6605 	state->active_planes = cur->active_planes;
6606 	state->vrr_infopacket = cur->vrr_infopacket;
6607 	state->abm_level = cur->abm_level;
6608 	state->vrr_supported = cur->vrr_supported;
6609 	state->freesync_config = cur->freesync_config;
6610 	state->cm_has_degamma = cur->cm_has_degamma;
6611 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6612 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6614 
6615 	return &state->base;
6616 }
6617 
6618 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6619 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6620 {
6621 	crtc_debugfs_init(crtc);
6622 
6623 	return 0;
6624 }
6625 #endif
6626 
6627 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6628 {
6629 	enum dc_irq_source irq_source;
6630 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6631 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6632 	int rc;
6633 
6634 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6635 
6636 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6637 
6638 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6639 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6640 	return rc;
6641 }
6642 
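/*
 * Enable or disable the VBLANK interrupt for a CRTC. The VUPDATE interrupt
 * is only kept enabled while VRR is active. On DCN the remaining work is
 * handed off to the vblank control workqueue.
 */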
6643 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6644 {
6645 	enum dc_irq_source irq_source;
6646 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6647 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6648 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6649 #if defined(CONFIG_DRM_AMD_DC_DCN)
6650 	struct amdgpu_display_manager *dm = &adev->dm;
6651 	struct vblank_control_work *work;
6652 #endif
6653 	int rc = 0;
6654 
6655 	if (enable) {
6656 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6657 		if (amdgpu_dm_vrr_active(acrtc_state))
6658 			rc = dm_set_vupdate_irq(crtc, true);
6659 	} else {
6660 		/* vblank irq off -> vupdate irq off */
6661 		rc = dm_set_vupdate_irq(crtc, false);
6662 	}
6663 
6664 	if (rc)
6665 		return rc;
6666 
6667 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6668 
6669 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6670 		return -EBUSY;
6671 
6672 	if (amdgpu_in_reset(adev))
6673 		return 0;
6674 
6675 #if defined(CONFIG_DRM_AMD_DC_DCN)
6676 	if (dm->vblank_control_workqueue) {
6677 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6678 		if (!work)
6679 			return -ENOMEM;
6680 
6681 		INIT_WORK(&work->work, vblank_control_worker);
6682 		work->dm = dm;
6683 		work->acrtc = acrtc;
6684 		work->enable = enable;
6685 
6686 		if (acrtc_state->stream) {
6687 			dc_stream_retain(acrtc_state->stream);
6688 			work->stream = acrtc_state->stream;
6689 		}
6690 
6691 		queue_work(dm->vblank_control_workqueue, &work->work);
6692 	}
6693 #endif
6694 
6695 	return 0;
6696 }
6697 
6698 static int dm_enable_vblank(struct drm_crtc *crtc)
6699 {
6700 	return dm_set_vblank(crtc, true);
6701 }
6702 
6703 static void dm_disable_vblank(struct drm_crtc *crtc)
6704 {
6705 	dm_set_vblank(crtc, false);
6706 }
6707 
/* Implement only the options currently available for the driver */
6709 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6710 	.reset = dm_crtc_reset_state,
6711 	.destroy = amdgpu_dm_crtc_destroy,
6712 	.set_config = drm_atomic_helper_set_config,
6713 	.page_flip = drm_atomic_helper_page_flip,
6714 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6715 	.atomic_destroy_state = dm_crtc_destroy_state,
6716 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6717 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6718 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6719 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6720 	.enable_vblank = dm_enable_vblank,
6721 	.disable_vblank = dm_disable_vblank,
6722 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6723 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6724 	.late_register = amdgpu_dm_crtc_late_register,
6725 #endif
6726 };
6727 
6728 static enum drm_connector_status
6729 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6730 {
6731 	bool connected;
6732 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6733 
	/*
	 * Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is* called in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */
6740 
6741 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6742 	    !aconnector->fake_enable)
6743 		connected = (aconnector->dc_sink != NULL);
6744 	else
6745 		connected = (aconnector->base.force == DRM_FORCE_ON);
6746 
6747 	update_subconnector_property(aconnector);
6748 
6749 	return (connected ? connector_status_connected :
6750 			connector_status_disconnected);
6751 }
6752 
6753 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6754 					    struct drm_connector_state *connector_state,
6755 					    struct drm_property *property,
6756 					    uint64_t val)
6757 {
6758 	struct drm_device *dev = connector->dev;
6759 	struct amdgpu_device *adev = drm_to_adev(dev);
6760 	struct dm_connector_state *dm_old_state =
6761 		to_dm_connector_state(connector->state);
6762 	struct dm_connector_state *dm_new_state =
6763 		to_dm_connector_state(connector_state);
6764 
6765 	int ret = -EINVAL;
6766 
6767 	if (property == dev->mode_config.scaling_mode_property) {
6768 		enum amdgpu_rmx_type rmx_type;
6769 
6770 		switch (val) {
6771 		case DRM_MODE_SCALE_CENTER:
6772 			rmx_type = RMX_CENTER;
6773 			break;
6774 		case DRM_MODE_SCALE_ASPECT:
6775 			rmx_type = RMX_ASPECT;
6776 			break;
6777 		case DRM_MODE_SCALE_FULLSCREEN:
6778 			rmx_type = RMX_FULL;
6779 			break;
6780 		case DRM_MODE_SCALE_NONE:
6781 		default:
6782 			rmx_type = RMX_OFF;
6783 			break;
6784 		}
6785 
6786 		if (dm_old_state->scaling == rmx_type)
6787 			return 0;
6788 
6789 		dm_new_state->scaling = rmx_type;
6790 		ret = 0;
6791 	} else if (property == adev->mode_info.underscan_hborder_property) {
6792 		dm_new_state->underscan_hborder = val;
6793 		ret = 0;
6794 	} else if (property == adev->mode_info.underscan_vborder_property) {
6795 		dm_new_state->underscan_vborder = val;
6796 		ret = 0;
6797 	} else if (property == adev->mode_info.underscan_property) {
6798 		dm_new_state->underscan_enable = val;
6799 		ret = 0;
6800 	} else if (property == adev->mode_info.abm_level_property) {
6801 		dm_new_state->abm_level = val;
6802 		ret = 0;
6803 	}
6804 
6805 	return ret;
6806 }
6807 
6808 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6809 					    const struct drm_connector_state *state,
6810 					    struct drm_property *property,
6811 					    uint64_t *val)
6812 {
6813 	struct drm_device *dev = connector->dev;
6814 	struct amdgpu_device *adev = drm_to_adev(dev);
6815 	struct dm_connector_state *dm_state =
6816 		to_dm_connector_state(state);
6817 	int ret = -EINVAL;
6818 
6819 	if (property == dev->mode_config.scaling_mode_property) {
6820 		switch (dm_state->scaling) {
6821 		case RMX_CENTER:
6822 			*val = DRM_MODE_SCALE_CENTER;
6823 			break;
6824 		case RMX_ASPECT:
6825 			*val = DRM_MODE_SCALE_ASPECT;
6826 			break;
6827 		case RMX_FULL:
6828 			*val = DRM_MODE_SCALE_FULLSCREEN;
6829 			break;
6830 		case RMX_OFF:
6831 		default:
6832 			*val = DRM_MODE_SCALE_NONE;
6833 			break;
6834 		}
6835 		ret = 0;
6836 	} else if (property == adev->mode_info.underscan_hborder_property) {
6837 		*val = dm_state->underscan_hborder;
6838 		ret = 0;
6839 	} else if (property == adev->mode_info.underscan_vborder_property) {
6840 		*val = dm_state->underscan_vborder;
6841 		ret = 0;
6842 	} else if (property == adev->mode_info.underscan_property) {
6843 		*val = dm_state->underscan_enable;
6844 		ret = 0;
6845 	} else if (property == adev->mode_info.abm_level_property) {
6846 		*val = dm_state->abm_level;
6847 		ret = 0;
6848 	}
6849 
6850 	return ret;
6851 }
6852 
6853 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6854 {
6855 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6856 
6857 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6858 }
6859 
6860 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6861 {
6862 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6863 	const struct dc_link *link = aconnector->dc_link;
6864 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6865 	struct amdgpu_display_manager *dm = &adev->dm;
6866 	int i;
6867 
	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
	 */
6872 	if (aconnector->mst_mgr.dev)
6873 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6874 
6875 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6876 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6877 	for (i = 0; i < dm->num_of_edps; i++) {
6878 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6879 			backlight_device_unregister(dm->backlight_dev[i]);
6880 			dm->backlight_dev[i] = NULL;
6881 		}
6882 	}
6883 #endif
6884 
6885 	if (aconnector->dc_em_sink)
6886 		dc_sink_release(aconnector->dc_em_sink);
6887 	aconnector->dc_em_sink = NULL;
6888 	if (aconnector->dc_sink)
6889 		dc_sink_release(aconnector->dc_sink);
6890 	aconnector->dc_sink = NULL;
6891 
6892 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6893 	drm_connector_unregister(connector);
6894 	drm_connector_cleanup(connector);
6895 	if (aconnector->i2c) {
6896 		i2c_del_adapter(&aconnector->i2c->base);
6897 		kfree(aconnector->i2c);
6898 	}
6899 	kfree(aconnector->dm_dp_aux.aux.name);
6900 
6901 	kfree(connector);
6902 }
6903 
6904 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6905 {
6906 	struct dm_connector_state *state =
6907 		to_dm_connector_state(connector->state);
6908 
6909 	if (connector->state)
6910 		__drm_atomic_helper_connector_destroy_state(connector->state);
6911 
6912 	kfree(state);
6913 
6914 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6915 
6916 	if (state) {
6917 		state->scaling = RMX_OFF;
6918 		state->underscan_enable = false;
6919 		state->underscan_hborder = 0;
6920 		state->underscan_vborder = 0;
6921 		state->base.max_requested_bpc = 8;
6922 		state->vcpi_slots = 0;
6923 		state->pbn = 0;
6924 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6925 			state->abm_level = amdgpu_dm_abm_level;
6926 
6927 		__drm_atomic_helper_connector_reset(connector, &state->base);
6928 	}
6929 }
6930 
6931 struct drm_connector_state *
6932 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6933 {
6934 	struct dm_connector_state *state =
6935 		to_dm_connector_state(connector->state);
6936 
6937 	struct dm_connector_state *new_state =
6938 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6939 
6940 	if (!new_state)
6941 		return NULL;
6942 
6943 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6944 
6945 	new_state->freesync_capable = state->freesync_capable;
6946 	new_state->abm_level = state->abm_level;
6947 	new_state->scaling = state->scaling;
6948 	new_state->underscan_enable = state->underscan_enable;
6949 	new_state->underscan_hborder = state->underscan_hborder;
6950 	new_state->underscan_vborder = state->underscan_vborder;
6951 	new_state->vcpi_slots = state->vcpi_slots;
6952 	new_state->pbn = state->pbn;
6953 	return &new_state->base;
6954 }
6955 
6956 static int
6957 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6958 {
6959 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6960 		to_amdgpu_dm_connector(connector);
6961 	int r;
6962 
6963 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6964 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6965 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6966 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6967 		if (r)
6968 			return r;
6969 	}
6970 
6971 #if defined(CONFIG_DEBUG_FS)
6972 	connector_debugfs_init(amdgpu_dm_connector);
6973 #endif
6974 
6975 	return 0;
6976 }
6977 
6978 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6979 	.reset = amdgpu_dm_connector_funcs_reset,
6980 	.detect = amdgpu_dm_connector_detect,
6981 	.fill_modes = drm_helper_probe_single_connector_modes,
6982 	.destroy = amdgpu_dm_connector_destroy,
6983 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6984 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6985 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6986 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6987 	.late_register = amdgpu_dm_connector_late_register,
6988 	.early_unregister = amdgpu_dm_connector_unregister
6989 };
6990 
6991 static int get_modes(struct drm_connector *connector)
6992 {
6993 	return amdgpu_dm_connector_get_modes(connector);
6994 }
6995 
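/*
 * Create an emulated (virtual) sink from the EDID attached to the connector.
 * Used when the connector is forced on without a physical sink.
 */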
6996 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6997 {
6998 	struct dc_sink_init_data init_params = {
6999 			.link = aconnector->dc_link,
7000 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7001 	};
7002 	struct edid *edid;
7003 
7004 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7006 				aconnector->base.name);
7007 
7008 		aconnector->base.force = DRM_FORCE_OFF;
7009 		aconnector->base.override_edid = false;
7010 		return;
7011 	}
7012 
7013 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7014 
7015 	aconnector->edid = edid;
7016 
7017 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7018 		aconnector->dc_link,
7019 		(uint8_t *)edid,
7020 		(edid->extensions + 1) * EDID_LENGTH,
7021 		&init_params);
7022 
7023 	if (aconnector->base.force == DRM_FORCE_ON) {
7024 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7025 		aconnector->dc_link->local_sink :
7026 		aconnector->dc_em_sink;
7027 		dc_sink_retain(aconnector->dc_sink);
7028 	}
7029 }
7030 
7031 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7032 {
7033 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7034 
	/*
	 * In case of a headless boot with force-on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
	 */
7039 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7040 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7041 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7042 	}
7045 	aconnector->base.override_edid = true;
7046 	create_eml_sink(aconnector);
7047 }
7048 
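/*
 * Create a dc_stream_state for the sink and validate it with DC, retrying
 * with progressively lower bpc (down to 6) and, as a last resort, forcing
 * YCbCr420 output if encoder validation fails.
 */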
7049 struct dc_stream_state *
7050 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7051 				const struct drm_display_mode *drm_mode,
7052 				const struct dm_connector_state *dm_state,
7053 				const struct dc_stream_state *old_stream)
7054 {
7055 	struct drm_connector *connector = &aconnector->base;
7056 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7057 	struct dc_stream_state *stream;
7058 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7059 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7060 	enum dc_status dc_result = DC_OK;
7061 
7062 	do {
7063 		stream = create_stream_for_sink(aconnector, drm_mode,
7064 						dm_state, old_stream,
7065 						requested_bpc);
7066 		if (stream == NULL) {
7067 			DRM_ERROR("Failed to create stream for sink!\n");
7068 			break;
7069 		}
7070 
7071 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7072 
7073 		if (dc_result != DC_OK) {
7074 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7075 				      drm_mode->hdisplay,
7076 				      drm_mode->vdisplay,
7077 				      drm_mode->clock,
7078 				      dc_result,
7079 				      dc_status_to_str(dc_result));
7080 
7081 			dc_stream_release(stream);
7082 			stream = NULL;
7083 			requested_bpc -= 2; /* lower bpc to retry validation */
7084 		}
7085 
7086 	} while (stream == NULL && requested_bpc >= 6);
7087 
7088 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7089 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7090 
7091 		aconnector->force_yuv420_output = true;
7092 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7093 						dm_state, old_stream);
7094 		aconnector->force_yuv420_output = false;
7095 	}
7096 
7097 	return stream;
7098 }
7099 
7100 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7101 				   struct drm_display_mode *mode)
7102 {
7103 	int result = MODE_ERROR;
7104 	struct dc_sink *dc_sink;
7105 	/* TODO: Unhardcode stream count */
7106 	struct dc_stream_state *stream;
7107 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7108 
7109 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7110 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7111 		return result;
7112 
	/*
	 * Only run this the first time mode_valid is called, to initialize
	 * EDID management.
	 */
7117 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7118 		!aconnector->dc_em_sink)
7119 		handle_edid_mgmt(aconnector);
7120 
7121 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7122 
7123 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7124 				aconnector->base.force != DRM_FORCE_ON) {
7125 		DRM_ERROR("dc_sink is NULL!\n");
7126 		goto fail;
7127 	}
7128 
7129 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7130 	if (stream) {
7131 		dc_stream_release(stream);
7132 		result = MODE_OK;
7133 	}
7134 
7135 fail:
	/* TODO: error handling */
7137 	return result;
7138 }
7139 
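/*
 * Pack the connector's HDR output metadata into a DC info packet, either as
 * an HDMI Dynamic Range and Mastering infoframe or as a DP/eDP SDP.
 */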
7140 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7141 				struct dc_info_packet *out)
7142 {
7143 	struct hdmi_drm_infoframe frame;
7144 	unsigned char buf[30]; /* 26 + 4 */
7145 	ssize_t len;
7146 	int ret, i;
7147 
7148 	memset(out, 0, sizeof(*out));
7149 
7150 	if (!state->hdr_output_metadata)
7151 		return 0;
7152 
7153 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7154 	if (ret)
7155 		return ret;
7156 
7157 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7158 	if (len < 0)
7159 		return (int)len;
7160 
7161 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7162 	if (len != 30)
7163 		return -EINVAL;
7164 
7165 	/* Prepare the infopacket for DC. */
7166 	switch (state->connector->connector_type) {
7167 	case DRM_MODE_CONNECTOR_HDMIA:
7168 		out->hb0 = 0x87; /* type */
7169 		out->hb1 = 0x01; /* version */
7170 		out->hb2 = 0x1A; /* length */
7171 		out->sb[0] = buf[3]; /* checksum */
7172 		i = 1;
7173 		break;
7174 
7175 	case DRM_MODE_CONNECTOR_DisplayPort:
7176 	case DRM_MODE_CONNECTOR_eDP:
7177 		out->hb0 = 0x00; /* sdp id, zero */
7178 		out->hb1 = 0x87; /* type */
7179 		out->hb2 = 0x1D; /* payload len - 1 */
7180 		out->hb3 = (0x13 << 2); /* sdp version */
7181 		out->sb[0] = 0x01; /* version */
7182 		out->sb[1] = 0x1A; /* length */
7183 		i = 2;
7184 		break;
7185 
7186 	default:
7187 		return -EINVAL;
7188 	}
7189 
7190 	memcpy(&out->sb[i], &buf[4], 26);
7191 	out->valid = true;
7192 
7193 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7194 		       sizeof(out->sb), false);
7195 
7196 	return 0;
7197 }
7198 
7199 static int
7200 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7201 				 struct drm_atomic_state *state)
7202 {
7203 	struct drm_connector_state *new_con_state =
7204 		drm_atomic_get_new_connector_state(state, conn);
7205 	struct drm_connector_state *old_con_state =
7206 		drm_atomic_get_old_connector_state(state, conn);
7207 	struct drm_crtc *crtc = new_con_state->crtc;
7208 	struct drm_crtc_state *new_crtc_state;
7209 	int ret;
7210 
7211 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7212 
7213 	if (!crtc)
7214 		return 0;
7215 
7216 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7217 		struct dc_info_packet hdr_infopacket;
7218 
7219 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7220 		if (ret)
7221 			return ret;
7222 
7223 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7224 		if (IS_ERR(new_crtc_state))
7225 			return PTR_ERR(new_crtc_state);
7226 
7227 		/*
7228 		 * DC considers the stream backends changed if the
7229 		 * static metadata changes. Forcing the modeset also
7230 		 * gives a simple way for userspace to switch from
7231 		 * 8bpc to 10bpc when setting the metadata to enter
7232 		 * or exit HDR.
7233 		 *
7234 		 * Changing the static metadata after it's been
7235 		 * set is permissible, however. So only force a
7236 		 * modeset if we're entering or exiting HDR.
7237 		 */
7238 		new_crtc_state->mode_changed =
7239 			!old_con_state->hdr_output_metadata ||
7240 			!new_con_state->hdr_output_metadata;
7241 	}
7242 
7243 	return 0;
7244 }
7245 
7246 static const struct drm_connector_helper_funcs
7247 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB console mode, bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and
	 * those modes will be missing after the user starts lightdm. So we need
	 * to renew the modes list in the get_modes callback, not just return
	 * the modes count.
	 */
7254 	.get_modes = get_modes,
7255 	.mode_valid = amdgpu_dm_connector_mode_valid,
7256 	.atomic_check = amdgpu_dm_connector_atomic_check,
7257 };
7258 
7259 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7260 {
7261 }
7262 
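/*
 * Count the number of non-cursor planes that will be enabled (i.e. have a
 * framebuffer) on the CRTC in the new atomic state.
 */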
7263 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7264 {
7265 	struct drm_atomic_state *state = new_crtc_state->state;
7266 	struct drm_plane *plane;
7267 	int num_active = 0;
7268 
7269 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7270 		struct drm_plane_state *new_plane_state;
7271 
7272 		/* Cursor planes are "fake". */
7273 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7274 			continue;
7275 
7276 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7277 
7278 		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
7284 			num_active += 1;
7285 			continue;
7286 		}
7287 
7288 		/* We need a framebuffer to be considered enabled. */
7289 		num_active += (new_plane_state->fb != NULL);
7290 	}
7291 
7292 	return num_active;
7293 }
7294 
7295 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7296 					 struct drm_crtc_state *new_crtc_state)
7297 {
7298 	struct dm_crtc_state *dm_new_crtc_state =
7299 		to_dm_crtc_state(new_crtc_state);
7300 
7301 	dm_new_crtc_state->active_planes = 0;
7302 
7303 	if (!dm_new_crtc_state->stream)
7304 		return;
7305 
7306 	dm_new_crtc_state->active_planes =
7307 		count_crtc_active_planes(new_crtc_state);
7308 }
7309 
7310 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7311 				       struct drm_atomic_state *state)
7312 {
7313 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7314 									  crtc);
7315 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7316 	struct dc *dc = adev->dm.dc;
7317 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7318 	int ret = -EINVAL;
7319 
7320 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7321 
7322 	dm_update_crtc_active_planes(crtc, crtc_state);
7323 
7324 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7325 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7326 		return ret;
7327 	}
7328 
7329 	/*
7330 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7331 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7332 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7333 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7334 	 */
7335 	if (crtc_state->enable &&
7336 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7337 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7338 		return -EINVAL;
7339 	}
7340 
7341 	/* In some use cases, like reset, no stream is attached */
7342 	if (!dm_crtc_state->stream)
7343 		return 0;
7344 
7345 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7346 		return 0;
7347 
7348 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7349 	return ret;
7350 }
7351 
7352 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7353 				      const struct drm_display_mode *mode,
7354 				      struct drm_display_mode *adjusted_mode)
7355 {
7356 	return true;
7357 }
7358 
7359 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7360 	.disable = dm_crtc_helper_disable,
7361 	.atomic_check = dm_crtc_helper_atomic_check,
7362 	.mode_fixup = dm_crtc_helper_mode_fixup,
7363 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7364 };
7365 
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
7370 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}

	return 0;
}
7391 
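/*
 * For MST connectors, compute the PBN for the adjusted mode and reserve the
 * corresponding number of VCPI slots through the MST topology manager.
 */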
7392 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7393 					  struct drm_crtc_state *crtc_state,
7394 					  struct drm_connector_state *conn_state)
7395 {
7396 	struct drm_atomic_state *state = crtc_state->state;
7397 	struct drm_connector *connector = conn_state->connector;
7398 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7399 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7400 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7401 	struct drm_dp_mst_topology_mgr *mst_mgr;
7402 	struct drm_dp_mst_port *mst_port;
7403 	enum dc_color_depth color_depth;
7404 	int clock, bpp = 0;
7405 	bool is_y420 = false;
7406 
7407 	if (!aconnector->port || !aconnector->dc_sink)
7408 		return 0;
7409 
7410 	mst_port = aconnector->port;
7411 	mst_mgr = &aconnector->mst_port->mst_mgr;
7412 
7413 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7414 		return 0;
7415 
7416 	if (!state->duplicated) {
7417 		int max_bpc = conn_state->max_requested_bpc;
7418 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7419 				aconnector->force_yuv420_output;
7420 		color_depth = convert_color_depth_from_display_info(connector,
7421 								    is_y420,
7422 								    max_bpc);
7423 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7424 		clock = adjusted_mode->clock;
7425 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7426 	}
7427 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7428 									   mst_mgr,
7429 									   mst_port,
7430 									   dm_new_connector_state->pbn,
7431 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7432 	if (dm_new_connector_state->vcpi_slots < 0) {
7433 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7434 		return dm_new_connector_state->vcpi_slots;
7435 	}
7436 	return 0;
7437 }
7438 
7439 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7440 	.disable = dm_encoder_helper_disable,
7441 	.atomic_check = dm_encoder_helper_atomic_check
7442 };
7443 
7444 #if defined(CONFIG_DRM_AMD_DC_DCN)
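/*
 * Propagate the PBN computed by compute_mst_dsc_configs_for_state() to the
 * connector states and enable or disable DSC on each MST port accordingly.
 */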
7445 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7446 					    struct dc_state *dc_state,
7447 					    struct dsc_mst_fairness_vars *vars)
7448 {
7449 	struct dc_stream_state *stream = NULL;
7450 	struct drm_connector *connector;
7451 	struct drm_connector_state *new_con_state;
7452 	struct amdgpu_dm_connector *aconnector;
7453 	struct dm_connector_state *dm_conn_state;
7454 	int i, j;
7455 	int vcpi, pbn_div, pbn, slot_num = 0;
7456 
7457 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7458 
7459 		aconnector = to_amdgpu_dm_connector(connector);
7460 
7461 		if (!aconnector->port)
7462 			continue;
7463 
7464 		if (!new_con_state || !new_con_state->crtc)
7465 			continue;
7466 
7467 		dm_conn_state = to_dm_connector_state(new_con_state);
7468 
7469 		for (j = 0; j < dc_state->stream_count; j++) {
7470 			stream = dc_state->streams[j];
7471 			if (!stream)
7472 				continue;
7473 
7474 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7475 				break;
7476 
7477 			stream = NULL;
7478 		}
7479 
7480 		if (!stream)
7481 			continue;
7482 
7483 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7485 		for (j = 0; j < dc_state->stream_count; j++) {
7486 			if (vars[j].aconnector == aconnector) {
7487 				pbn = vars[j].pbn;
7488 				break;
7489 			}
7490 		}
7491 
7492 		if (j == dc_state->stream_count)
7493 			continue;
7494 
7495 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7496 
7497 		if (stream->timing.flags.DSC != 1) {
7498 			dm_conn_state->pbn = pbn;
7499 			dm_conn_state->vcpi_slots = slot_num;
7500 
7501 			drm_dp_mst_atomic_enable_dsc(state,
7502 						     aconnector->port,
7503 						     dm_conn_state->pbn,
7504 						     0,
7505 						     false);
7506 			continue;
7507 		}
7508 
7509 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7510 						    aconnector->port,
7511 						    pbn, pbn_div,
7512 						    true);
7513 		if (vcpi < 0)
7514 			return vcpi;
7515 
7516 		dm_conn_state->pbn = pbn;
7517 		dm_conn_state->vcpi_slots = vcpi;
7518 	}
7519 	return 0;
7520 }
7521 #endif
7522 
7523 static void dm_drm_plane_reset(struct drm_plane *plane)
7524 {
7525 	struct dm_plane_state *amdgpu_state = NULL;
7526 
7527 	if (plane->state)
7528 		plane->funcs->atomic_destroy_state(plane, plane->state);
7529 
7530 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7531 	WARN_ON(amdgpu_state == NULL);
7532 
7533 	if (amdgpu_state)
7534 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7535 }
7536 
7537 static struct drm_plane_state *
7538 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7539 {
7540 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7541 
7542 	old_dm_plane_state = to_dm_plane_state(plane->state);
7543 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7544 	if (!dm_plane_state)
7545 		return NULL;
7546 
7547 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7548 
7549 	if (old_dm_plane_state->dc_state) {
7550 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7551 		dc_plane_state_retain(dm_plane_state->dc_state);
7552 	}
7553 
7554 	return &dm_plane_state->base;
7555 }
7556 
7557 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7558 				struct drm_plane_state *state)
7559 {
7560 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7561 
7562 	if (dm_plane_state->dc_state)
7563 		dc_plane_state_release(dm_plane_state->dc_state);
7564 
7565 	drm_atomic_helper_plane_destroy_state(plane, state);
7566 }
7567 
7568 static const struct drm_plane_funcs dm_plane_funcs = {
7569 	.update_plane	= drm_atomic_helper_update_plane,
7570 	.disable_plane	= drm_atomic_helper_disable_plane,
7571 	.destroy	= drm_primary_helper_destroy,
7572 	.reset = dm_drm_plane_reset,
7573 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7574 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7575 	.format_mod_supported = dm_plane_format_mod_supported,
7576 };
7577 
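/*
 * Pin the framebuffer BO into a displayable domain, map it into GART and
 * record the resulting GPU address. For newly created planes also fill in
 * the DC plane buffer attributes, since those depend on the address.
 */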
7578 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7579 				      struct drm_plane_state *new_state)
7580 {
7581 	struct amdgpu_framebuffer *afb;
7582 	struct drm_gem_object *obj;
7583 	struct amdgpu_device *adev;
7584 	struct amdgpu_bo *rbo;
7585 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7586 	struct list_head list;
7587 	struct ttm_validate_buffer tv;
7588 	struct ww_acquire_ctx ticket;
7589 	uint32_t domain;
7590 	int r;
7591 
7592 	if (!new_state->fb) {
7593 		DRM_DEBUG_KMS("No FB bound\n");
7594 		return 0;
7595 	}
7596 
7597 	afb = to_amdgpu_framebuffer(new_state->fb);
7598 	obj = new_state->fb->obj[0];
7599 	rbo = gem_to_amdgpu_bo(obj);
7600 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7601 	INIT_LIST_HEAD(&list);
7602 
7603 	tv.bo = &rbo->tbo;
7604 	tv.num_shared = 1;
7605 	list_add(&tv.head, &list);
7606 
7607 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7608 	if (r) {
7609 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7610 		return r;
7611 	}
7612 
7613 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7614 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7615 	else
7616 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7617 
7618 	r = amdgpu_bo_pin(rbo, domain);
7619 	if (unlikely(r != 0)) {
7620 		if (r != -ERESTARTSYS)
7621 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7622 		ttm_eu_backoff_reservation(&ticket, &list);
7623 		return r;
7624 	}
7625 
7626 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7627 	if (unlikely(r != 0)) {
7628 		amdgpu_bo_unpin(rbo);
7629 		ttm_eu_backoff_reservation(&ticket, &list);
7630 		DRM_ERROR("%p bind failed\n", rbo);
7631 		return r;
7632 	}
7633 
7634 	ttm_eu_backoff_reservation(&ticket, &list);
7635 
7636 	afb->address = amdgpu_bo_gpu_offset(rbo);
7637 
7638 	amdgpu_bo_ref(rbo);
7639 
	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
7648 	dm_plane_state_old = to_dm_plane_state(plane->state);
7649 	dm_plane_state_new = to_dm_plane_state(new_state);
7650 
7651 	if (dm_plane_state_new->dc_state &&
7652 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7653 		struct dc_plane_state *plane_state =
7654 			dm_plane_state_new->dc_state;
7655 		bool force_disable_dcc = !plane_state->dcc.enable;
7656 
7657 		fill_plane_buffer_attributes(
7658 			adev, afb, plane_state->format, plane_state->rotation,
7659 			afb->tiling_flags,
7660 			&plane_state->tiling_info, &plane_state->plane_size,
7661 			&plane_state->dcc, &plane_state->address,
7662 			afb->tmz_surface, force_disable_dcc);
7663 	}
7664 
7665 	return 0;
7666 }
7667 
7668 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7669 				       struct drm_plane_state *old_state)
7670 {
7671 	struct amdgpu_bo *rbo;
7672 	int r;
7673 
7674 	if (!old_state->fb)
7675 		return;
7676 
7677 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7678 	r = amdgpu_bo_reserve(rbo, false);
7679 	if (unlikely(r)) {
7680 		DRM_ERROR("failed to reserve rbo before unpin\n");
7681 		return;
7682 	}
7683 
7684 	amdgpu_bo_unpin(rbo);
7685 	amdgpu_bo_unreserve(rbo);
7686 	amdgpu_bo_unref(&rbo);
7687 }
7688 
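/*
 * Validate the plane's viewport against the CRTC mode and translate the DC
 * plane scaling caps into the 16.16 fixed-point limits expected by
 * drm_atomic_helper_check_plane_state().
 */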
7689 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7690 				       struct drm_crtc_state *new_crtc_state)
7691 {
7692 	struct drm_framebuffer *fb = state->fb;
7693 	int min_downscale, max_upscale;
7694 	int min_scale = 0;
7695 	int max_scale = INT_MAX;
7696 
7697 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7698 	if (fb && state->crtc) {
7699 		/* Validate viewport to cover the case when only the position changes */
7700 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7701 			int viewport_width = state->crtc_w;
7702 			int viewport_height = state->crtc_h;
7703 
7704 			if (state->crtc_x < 0)
7705 				viewport_width += state->crtc_x;
7706 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7707 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7708 
7709 			if (state->crtc_y < 0)
7710 				viewport_height += state->crtc_y;
7711 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7712 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7713 
7714 			if (viewport_width < 0 || viewport_height < 0) {
7715 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7716 				return -EINVAL;
7717 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7718 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7719 				return -EINVAL;
7720 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7721 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7722 				return -EINVAL;
7723 			}
7724 
7725 		}
7726 
7727 		/* Get min/max allowed scaling factors from plane caps. */
7728 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7729 					     &min_downscale, &max_upscale);
7730 		/*
7731 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7732 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7733 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7734 		 */
7735 		min_scale = (1000 << 16) / max_upscale;
7736 		max_scale = (1000 << 16) / min_downscale;
7737 	}
7738 
7739 	return drm_atomic_helper_check_plane_state(
7740 		state, new_crtc_state, min_scale, max_scale, true, true);
7741 }
7742 
7743 static int dm_plane_atomic_check(struct drm_plane *plane,
7744 				 struct drm_atomic_state *state)
7745 {
7746 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7747 										 plane);
7748 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7749 	struct dc *dc = adev->dm.dc;
7750 	struct dm_plane_state *dm_plane_state;
7751 	struct dc_scaling_info scaling_info;
7752 	struct drm_crtc_state *new_crtc_state;
7753 	int ret;
7754 
7755 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7756 
7757 	dm_plane_state = to_dm_plane_state(new_plane_state);
7758 
7759 	if (!dm_plane_state->dc_state)
7760 		return 0;
7761 
7762 	new_crtc_state =
7763 		drm_atomic_get_new_crtc_state(state,
7764 					      new_plane_state->crtc);
7765 	if (!new_crtc_state)
7766 		return -EINVAL;
7767 
7768 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7769 	if (ret)
7770 		return ret;
7771 
7772 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7773 	if (ret)
7774 		return ret;
7775 
7776 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7777 		return 0;
7778 
7779 	return -EINVAL;
7780 }
7781 
7782 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7783 				       struct drm_atomic_state *state)
7784 {
7785 	/* Only support async updates on cursor planes. */
7786 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7787 		return -EINVAL;
7788 
7789 	return 0;
7790 }
7791 
7792 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7793 					 struct drm_atomic_state *state)
7794 {
7795 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7796 									   plane);
7797 	struct drm_plane_state *old_state =
7798 		drm_atomic_get_old_plane_state(state, plane);
7799 
7800 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7801 
7802 	swap(plane->state->fb, new_state->fb);
7803 
7804 	plane->state->src_x = new_state->src_x;
7805 	plane->state->src_y = new_state->src_y;
7806 	plane->state->src_w = new_state->src_w;
7807 	plane->state->src_h = new_state->src_h;
7808 	plane->state->crtc_x = new_state->crtc_x;
7809 	plane->state->crtc_y = new_state->crtc_y;
7810 	plane->state->crtc_w = new_state->crtc_w;
7811 	plane->state->crtc_h = new_state->crtc_h;
7812 
7813 	handle_cursor_update(plane, old_state);
7814 }
7815 
7816 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7817 	.prepare_fb = dm_plane_helper_prepare_fb,
7818 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7819 	.atomic_check = dm_plane_atomic_check,
7820 	.atomic_async_check = dm_plane_atomic_async_check,
7821 	.atomic_async_update = dm_plane_atomic_async_update
7822 };
7823 
7824 /*
7825  * TODO: these are currently initialized to rgb formats only.
7826  * For future use cases we should either initialize them dynamically based on
7827  * plane capabilities, or initialize this array to all formats, so internal drm
7828  * check will succeed, and let DC implement proper check
7829  */
7830 static const uint32_t rgb_formats[] = {
7831 	DRM_FORMAT_XRGB8888,
7832 	DRM_FORMAT_ARGB8888,
7833 	DRM_FORMAT_RGBA8888,
7834 	DRM_FORMAT_XRGB2101010,
7835 	DRM_FORMAT_XBGR2101010,
7836 	DRM_FORMAT_ARGB2101010,
7837 	DRM_FORMAT_ABGR2101010,
7838 	DRM_FORMAT_XRGB16161616,
7839 	DRM_FORMAT_XBGR16161616,
7840 	DRM_FORMAT_ARGB16161616,
7841 	DRM_FORMAT_ABGR16161616,
7842 	DRM_FORMAT_XBGR8888,
7843 	DRM_FORMAT_ABGR8888,
7844 	DRM_FORMAT_RGB565,
7845 };
7846 
7847 static const uint32_t overlay_formats[] = {
7848 	DRM_FORMAT_XRGB8888,
7849 	DRM_FORMAT_ARGB8888,
7850 	DRM_FORMAT_RGBA8888,
7851 	DRM_FORMAT_XBGR8888,
7852 	DRM_FORMAT_ABGR8888,
7853 	DRM_FORMAT_RGB565
7854 };
7855 
7856 static const u32 cursor_formats[] = {
7857 	DRM_FORMAT_ARGB8888
7858 };
7859 
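/*
 * Build the list of pixel formats exposed for a plane, based on the plane
 * type and the DC plane capabilities (NV12/P010/FP16 support).
 */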
7860 static int get_plane_formats(const struct drm_plane *plane,
7861 			     const struct dc_plane_cap *plane_cap,
7862 			     uint32_t *formats, int max_formats)
7863 {
7864 	int i, num_formats = 0;
7865 
7866 	/*
7867 	 * TODO: Query support for each group of formats directly from
7868 	 * DC plane caps. This will require adding more formats to the
7869 	 * caps list.
7870 	 */
7871 
7872 	switch (plane->type) {
7873 	case DRM_PLANE_TYPE_PRIMARY:
7874 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7875 			if (num_formats >= max_formats)
7876 				break;
7877 
7878 			formats[num_formats++] = rgb_formats[i];
7879 		}
7880 
7881 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7882 			formats[num_formats++] = DRM_FORMAT_NV12;
7883 		if (plane_cap && plane_cap->pixel_format_support.p010)
7884 			formats[num_formats++] = DRM_FORMAT_P010;
7885 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7886 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7887 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7888 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7889 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7890 		}
7891 		break;
7892 
7893 	case DRM_PLANE_TYPE_OVERLAY:
7894 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7895 			if (num_formats >= max_formats)
7896 				break;
7897 
7898 			formats[num_formats++] = overlay_formats[i];
7899 		}
7900 		break;
7901 
7902 	case DRM_PLANE_TYPE_CURSOR:
7903 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7904 			if (num_formats >= max_formats)
7905 				break;
7906 
7907 			formats[num_formats++] = cursor_formats[i];
7908 		}
7909 		break;
7910 	}
7911 
7912 	return num_formats;
7913 }
7914 
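/*
 * Initialize a DRM plane for DM: query the supported formats and modifiers,
 * register the plane with DRM and attach blending, color encoding and
 * rotation properties based on the DC plane caps.
 */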
7915 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7916 				struct drm_plane *plane,
7917 				unsigned long possible_crtcs,
7918 				const struct dc_plane_cap *plane_cap)
7919 {
7920 	uint32_t formats[32];
7921 	int num_formats;
7922 	int res = -EPERM;
7923 	unsigned int supported_rotations;
7924 	uint64_t *modifiers = NULL;
7925 
7926 	num_formats = get_plane_formats(plane, plane_cap, formats,
7927 					ARRAY_SIZE(formats));
7928 
7929 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7930 	if (res)
7931 		return res;
7932 
7933 	if (modifiers == NULL)
7934 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7935 
7936 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7937 				       &dm_plane_funcs, formats, num_formats,
7938 				       modifiers, plane->type, NULL);
7939 	kfree(modifiers);
7940 	if (res)
7941 		return res;
7942 
7943 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7944 	    plane_cap && plane_cap->per_pixel_alpha) {
7945 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7946 					  BIT(DRM_MODE_BLEND_PREMULTI);
7947 
7948 		drm_plane_create_alpha_property(plane);
7949 		drm_plane_create_blend_mode_property(plane, blend_caps);
7950 	}
7951 
7952 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7953 	    plane_cap &&
7954 	    (plane_cap->pixel_format_support.nv12 ||
7955 	     plane_cap->pixel_format_support.p010)) {
7956 		/* This only affects YUV formats. */
7957 		drm_plane_create_color_properties(
7958 			plane,
7959 			BIT(DRM_COLOR_YCBCR_BT601) |
7960 			BIT(DRM_COLOR_YCBCR_BT709) |
7961 			BIT(DRM_COLOR_YCBCR_BT2020),
7962 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7963 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7964 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7965 	}
7966 
7967 	supported_rotations =
7968 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7969 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7970 
7971 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7972 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7973 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7974 						   supported_rotations);
7975 
7976 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7977 
7978 	/* Create (reset) the plane state */
7979 	if (plane->funcs->reset)
7980 		plane->funcs->reset(plane);
7981 
7982 	return 0;
7983 }
7984 
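/*
 * Allocate a cursor plane and an amdgpu_crtc, initialize the CRTC with the
 * given primary plane and the cursor plane, and attach color management
 * properties.
 */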
7985 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7986 			       struct drm_plane *plane,
7987 			       uint32_t crtc_index)
7988 {
7989 	struct amdgpu_crtc *acrtc = NULL;
7990 	struct drm_plane *cursor_plane;
7991 
7992 	int res = -ENOMEM;
7993 
7994 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7995 	if (!cursor_plane)
7996 		goto fail;
7997 
	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
8000 
8001 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8002 	if (!acrtc)
8003 		goto fail;
8004 
8005 	res = drm_crtc_init_with_planes(
8006 			dm->ddev,
8007 			&acrtc->base,
8008 			plane,
8009 			cursor_plane,
8010 			&amdgpu_dm_crtc_funcs, NULL);
8011 
8012 	if (res)
8013 		goto fail;
8014 
8015 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8016 
	/* Create (reset) the CRTC state */
8018 	if (acrtc->base.funcs->reset)
8019 		acrtc->base.funcs->reset(&acrtc->base);
8020 
8021 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8022 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8023 
8024 	acrtc->crtc_id = crtc_index;
8025 	acrtc->base.enabled = false;
8026 	acrtc->otg_inst = -1;
8027 
8028 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8029 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8030 				   true, MAX_COLOR_LUT_ENTRIES);
8031 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8032 
8033 	return 0;
8034 
8035 fail:
8036 	kfree(acrtc);
8037 	kfree(cursor_plane);
8038 	return res;
8039 }
8040 
8041 
8042 static int to_drm_connector_type(enum signal_type st)
8043 {
8044 	switch (st) {
8045 	case SIGNAL_TYPE_HDMI_TYPE_A:
8046 		return DRM_MODE_CONNECTOR_HDMIA;
8047 	case SIGNAL_TYPE_EDP:
8048 		return DRM_MODE_CONNECTOR_eDP;
8049 	case SIGNAL_TYPE_LVDS:
8050 		return DRM_MODE_CONNECTOR_LVDS;
8051 	case SIGNAL_TYPE_RGB:
8052 		return DRM_MODE_CONNECTOR_VGA;
8053 	case SIGNAL_TYPE_DISPLAY_PORT:
8054 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8055 		return DRM_MODE_CONNECTOR_DisplayPort;
8056 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8057 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8058 		return DRM_MODE_CONNECTOR_DVID;
8059 	case SIGNAL_TYPE_VIRTUAL:
8060 		return DRM_MODE_CONNECTOR_VIRTUAL;
8061 
8062 	default:
8063 		return DRM_MODE_CONNECTOR_Unknown;
8064 	}
8065 }
8066 
8067 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8068 {
8069 	struct drm_encoder *encoder;
8070 
8071 	/* There is only one encoder per connector */
8072 	drm_connector_for_each_possible_encoder(connector, encoder)
8073 		return encoder;
8074 
8075 	return NULL;
8076 }
8077 
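/*
 * Cache the first preferred mode from the probed list as the encoder's
 * native mode; it is used when generating the common modes.
 */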
8078 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8079 {
8080 	struct drm_encoder *encoder;
8081 	struct amdgpu_encoder *amdgpu_encoder;
8082 
8083 	encoder = amdgpu_dm_connector_to_encoder(connector);
8084 
8085 	if (encoder == NULL)
8086 		return;
8087 
8088 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8089 
8090 	amdgpu_encoder->native_mode.clock = 0;
8091 
8092 	if (!list_empty(&connector->probed_modes)) {
8093 		struct drm_display_mode *preferred_mode = NULL;
8094 
8095 		list_for_each_entry(preferred_mode,
8096 				    &connector->probed_modes,
8097 				    head) {
8098 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8099 				amdgpu_encoder->native_mode = *preferred_mode;
8100 
8101 			break;
8102 		}
8103 
8104 	}
8105 }
8106 
8107 static struct drm_display_mode *
8108 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8109 			     char *name,
8110 			     int hdisplay, int vdisplay)
8111 {
8112 	struct drm_device *dev = encoder->dev;
8113 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8114 	struct drm_display_mode *mode = NULL;
8115 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8116 
8117 	mode = drm_mode_duplicate(dev, native_mode);
8118 
8119 	if (mode == NULL)
8120 		return NULL;
8121 
8122 	mode->hdisplay = hdisplay;
8123 	mode->vdisplay = vdisplay;
8124 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8125 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8126 
8127 	return mode;
8128 
8129 }
8130 
8131 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8132 						 struct drm_connector *connector)
8133 {
8134 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8135 	struct drm_display_mode *mode = NULL;
8136 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8137 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8138 				to_amdgpu_dm_connector(connector);
8139 	int i;
8140 	int n;
8141 	struct mode_size {
8142 		char name[DRM_DISPLAY_MODE_LEN];
8143 		int w;
8144 		int h;
8145 	} common_modes[] = {
8146 		{  "640x480",  640,  480},
8147 		{  "800x600",  800,  600},
8148 		{ "1024x768", 1024,  768},
8149 		{ "1280x720", 1280,  720},
8150 		{ "1280x800", 1280,  800},
8151 		{"1280x1024", 1280, 1024},
8152 		{ "1440x900", 1440,  900},
8153 		{"1680x1050", 1680, 1050},
8154 		{"1600x1200", 1600, 1200},
8155 		{"1920x1080", 1920, 1080},
8156 		{"1920x1200", 1920, 1200}
8157 	};
8158 
8159 	n = ARRAY_SIZE(common_modes);
8160 
8161 	for (i = 0; i < n; i++) {
8162 		struct drm_display_mode *curmode = NULL;
8163 		bool mode_existed = false;
8164 
8165 		if (common_modes[i].w > native_mode->hdisplay ||
8166 		    common_modes[i].h > native_mode->vdisplay ||
8167 		   (common_modes[i].w == native_mode->hdisplay &&
8168 		    common_modes[i].h == native_mode->vdisplay))
8169 			continue;
8170 
8171 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8172 			if (common_modes[i].w == curmode->hdisplay &&
8173 			    common_modes[i].h == curmode->vdisplay) {
8174 				mode_existed = true;
8175 				break;
8176 			}
8177 		}
8178 
8179 		if (mode_existed)
8180 			continue;
8181 
8182 		mode = amdgpu_dm_create_common_mode(encoder,
8183 				common_modes[i].name, common_modes[i].w,
8184 				common_modes[i].h);
8185 		if (!mode)
8186 			continue;
8187 
8188 		drm_mode_probed_add(connector, mode);
8189 		amdgpu_dm_connector->num_modes++;
8190 	}
8191 }
8192 
8193 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8194 {
8195 	struct drm_encoder *encoder;
8196 	struct amdgpu_encoder *amdgpu_encoder;
8197 	const struct drm_display_mode *native_mode;
8198 
8199 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8200 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8201 		return;
8202 
8203 	encoder = amdgpu_dm_connector_to_encoder(connector);
8204 	if (!encoder)
8205 		return;
8206 
8207 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8208 
8209 	native_mode = &amdgpu_encoder->native_mode;
8210 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8211 		return;
8212 
8213 	drm_connector_set_panel_orientation_with_quirk(connector,
8214 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8215 						       native_mode->hdisplay,
8216 						       native_mode->vdisplay);
8217 }
8218 
8219 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8220 					      struct edid *edid)
8221 {
8222 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8223 			to_amdgpu_dm_connector(connector);
8224 
8225 	if (edid) {
8226 		/* empty probed_modes */
8227 		INIT_LIST_HEAD(&connector->probed_modes);
8228 		amdgpu_dm_connector->num_modes =
8229 				drm_add_edid_modes(connector, edid);
8230 
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode() since the EDID can have more
		 * than one preferred mode. Modes that are later in the probed
		 * mode list could be of higher and preferred resolution. For
		 * example, 3840x2160 in the base EDID preferred timing and
		 * 4096x2160 in a DID extension block later.
		 */
8239 		drm_mode_sort(&connector->probed_modes);
8240 		amdgpu_dm_get_native_mode(connector);
8241 
8242 		/* Freesync capabilities are reset by calling
8243 		 * drm_add_edid_modes() and need to be
8244 		 * restored here.
8245 		 */
8246 		amdgpu_dm_update_freesync_caps(connector, edid);
8247 
8248 		amdgpu_set_panel_orientation(connector);
8249 	} else {
8250 		amdgpu_dm_connector->num_modes = 0;
8251 	}
8252 }
8253 
8254 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8255 			      struct drm_display_mode *mode)
8256 {
8257 	struct drm_display_mode *m;
8258 
8259 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8260 		if (drm_mode_equal(m, mode))
8261 			return true;
8262 	}
8263 
8264 	return false;
8265 }
8266 
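/*
 * Synthesize additional fixed-rate modes inside the panel's FreeSync range by
 * stretching the vertical blanking of the highest-refresh probed mode down to
 * a set of common rates.
 */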
8267 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8268 {
8269 	const struct drm_display_mode *m;
8270 	struct drm_display_mode *new_mode;
8271 	uint i;
8272 	uint32_t new_modes_count = 0;
8273 
8274 	/* Standard FPS values
8275 	 *
8276 	 * 23.976       - TV/NTSC
8277 	 * 24 	        - Cinema
8278 	 * 25 	        - TV/PAL
8279 	 * 29.97        - TV/NTSC
8280 	 * 30 	        - TV/NTSC
8281 	 * 48 	        - Cinema HFR
8282 	 * 50 	        - TV/PAL
8283 	 * 60 	        - Commonly used
8284 	 * 48,72,96,120 - Multiples of 24
8285 	 */
8286 	static const uint32_t common_rates[] = {
8287 		23976, 24000, 25000, 29970, 30000,
8288 		48000, 50000, 60000, 72000, 96000, 120000
8289 	};
8290 
8291 	/*
8292 	 * Find mode with highest refresh rate with the same resolution
8293 	 * as the preferred mode. Some monitors report a preferred mode
8294 	 * with lower resolution than the highest refresh rate supported.
8295 	 */
8296 
8297 	m = get_highest_refresh_rate_mode(aconnector, true);
8298 	if (!m)
8299 		return 0;
8300 
8301 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8302 		uint64_t target_vtotal, target_vtotal_diff;
8303 		uint64_t num, den;
8304 
8305 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8306 			continue;
8307 
8308 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8309 		    common_rates[i] > aconnector->max_vfreq * 1000)
8310 			continue;
8311 
8312 		num = (unsigned long long)m->clock * 1000 * 1000;
8313 		den = common_rates[i] * (unsigned long long)m->htotal;
8314 		target_vtotal = div_u64(num, den);
8315 		target_vtotal_diff = target_vtotal - m->vtotal;
8316 
8317 		/* Check for illegal modes */
8318 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8319 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8320 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8321 			continue;
8322 
8323 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8324 		if (!new_mode)
8325 			goto out;
8326 
8327 		new_mode->vtotal += (u16)target_vtotal_diff;
8328 		new_mode->vsync_start += (u16)target_vtotal_diff;
8329 		new_mode->vsync_end += (u16)target_vtotal_diff;
8330 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8331 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8332 
8333 		if (!is_duplicate_mode(aconnector, new_mode)) {
8334 			drm_mode_probed_add(&aconnector->base, new_mode);
8335 			new_modes_count += 1;
8336 		} else
8337 			drm_mode_destroy(aconnector->base.dev, new_mode);
8338 	}
8339  out:
8340 	return new_modes_count;
8341 }
8342 
8343 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8344 						   struct edid *edid)
8345 {
8346 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8347 		to_amdgpu_dm_connector(connector);
8348 
8349 	if (!edid)
8350 		return;
8351 
8352 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8353 		amdgpu_dm_connector->num_modes +=
8354 			add_fs_modes(amdgpu_dm_connector);
8355 }
8356 
8357 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8358 {
8359 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8360 			to_amdgpu_dm_connector(connector);
8361 	struct drm_encoder *encoder;
8362 	struct edid *edid = amdgpu_dm_connector->edid;
8363 
8364 	encoder = amdgpu_dm_connector_to_encoder(connector);
8365 
8366 	if (!drm_edid_is_valid(edid)) {
8367 		amdgpu_dm_connector->num_modes =
8368 				drm_add_modes_noedid(connector, 640, 480);
8369 	} else {
8370 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8371 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8372 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8373 	}
8374 	amdgpu_dm_fbc_init(connector);
8375 
8376 	return amdgpu_dm_connector->num_modes;
8377 }
8378 
8379 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8380 				     struct amdgpu_dm_connector *aconnector,
8381 				     int connector_type,
8382 				     struct dc_link *link,
8383 				     int link_index)
8384 {
8385 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8386 
8387 	/*
8388 	 * Some of the properties below require access to state, like bpc.
8389 	 * Allocate some default initial connector state with our reset helper.
8390 	 */
8391 	if (aconnector->base.funcs->reset)
8392 		aconnector->base.funcs->reset(&aconnector->base);
8393 
8394 	aconnector->connector_id = link_index;
8395 	aconnector->dc_link = link;
8396 	aconnector->base.interlace_allowed = false;
8397 	aconnector->base.doublescan_allowed = false;
8398 	aconnector->base.stereo_allowed = false;
8399 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8400 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8401 	aconnector->audio_inst = -1;
8402 	mutex_init(&aconnector->hpd_lock);
8403 
8404 	/*
8405 	 * configure support HPD hot plug connector_>polled default value is 0
8406 	 * which means HPD hot plug not supported
8407 	 */
8408 	switch (connector_type) {
8409 	case DRM_MODE_CONNECTOR_HDMIA:
8410 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8411 		aconnector->base.ycbcr_420_allowed =
8412 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8413 		break;
8414 	case DRM_MODE_CONNECTOR_DisplayPort:
8415 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8416 		link->link_enc = link_enc_cfg_get_link_enc(link);
8417 		ASSERT(link->link_enc);
8418 		if (link->link_enc)
8419 			aconnector->base.ycbcr_420_allowed =
8420 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8421 		break;
8422 	case DRM_MODE_CONNECTOR_DVID:
8423 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8424 		break;
8425 	default:
8426 		break;
8427 	}
8428 
8429 	drm_object_attach_property(&aconnector->base.base,
8430 				dm->ddev->mode_config.scaling_mode_property,
8431 				DRM_MODE_SCALE_NONE);
8432 
8433 	drm_object_attach_property(&aconnector->base.base,
8434 				adev->mode_info.underscan_property,
8435 				UNDERSCAN_OFF);
8436 	drm_object_attach_property(&aconnector->base.base,
8437 				adev->mode_info.underscan_hborder_property,
8438 				0);
8439 	drm_object_attach_property(&aconnector->base.base,
8440 				adev->mode_info.underscan_vborder_property,
8441 				0);
8442 
8443 	if (!aconnector->mst_port)
8444 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8445 
8446 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8447 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8448 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8449 
8450 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8451 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8452 		drm_object_attach_property(&aconnector->base.base,
8453 				adev->mode_info.abm_level_property, 0);
8454 	}
8455 
8456 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8457 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8458 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8459 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8460 
8461 		if (!aconnector->mst_port)
8462 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8463 
8464 #ifdef CONFIG_DRM_AMD_DC_HDCP
8465 		if (adev->dm.hdcp_workqueue)
8466 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8467 #endif
8468 	}
8469 }
8470 
8471 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8472 			      struct i2c_msg *msgs, int num)
8473 {
8474 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8475 	struct ddc_service *ddc_service = i2c->ddc_service;
8476 	struct i2c_command cmd;
8477 	int i;
8478 	int result = -EIO;
8479 
8480 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8481 
8482 	if (!cmd.payloads)
8483 		return result;
8484 
8485 	cmd.number_of_payloads = num;
8486 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8487 	cmd.speed = 100;
8488 
8489 	for (i = 0; i < num; i++) {
8490 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8491 		cmd.payloads[i].address = msgs[i].addr;
8492 		cmd.payloads[i].length = msgs[i].len;
8493 		cmd.payloads[i].data = msgs[i].buf;
8494 	}
8495 
8496 	if (dc_submit_i2c(
8497 			ddc_service->ctx->dc,
8498 			ddc_service->ddc_pin->hw_info.ddc_channel,
8499 			&cmd))
8500 		result = num;
8501 
8502 	kfree(cmd.payloads);
8503 	return result;
8504 }
8505 
8506 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8507 {
8508 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8509 }
8510 
8511 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8512 	.master_xfer = amdgpu_dm_i2c_xfer,
8513 	.functionality = amdgpu_dm_i2c_func,
8514 };
8515 
8516 static struct amdgpu_i2c_adapter *
8517 create_i2c(struct ddc_service *ddc_service,
8518 	   int link_index,
8519 	   int *res)
8520 {
8521 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8522 	struct amdgpu_i2c_adapter *i2c;
8523 
8524 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8525 	if (!i2c)
8526 		return NULL;
8527 	i2c->base.owner = THIS_MODULE;
8528 	i2c->base.class = I2C_CLASS_DDC;
8529 	i2c->base.dev.parent = &adev->pdev->dev;
8530 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8531 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8532 	i2c_set_adapdata(&i2c->base, i2c);
8533 	i2c->ddc_service = ddc_service;
8534 	if (i2c->ddc_service->ddc_pin)
8535 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8536 
8537 	return i2c;
8538 }
8539 
8540 
8541 /*
8542  * Note: this function assumes that dc_link_detect() was called for the
8543  * dc_link which will be represented by this aconnector.
8544  */
8545 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8546 				    struct amdgpu_dm_connector *aconnector,
8547 				    uint32_t link_index,
8548 				    struct amdgpu_encoder *aencoder)
8549 {
8550 	int res = 0;
8551 	int connector_type;
8552 	struct dc *dc = dm->dc;
8553 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8554 	struct amdgpu_i2c_adapter *i2c;
8555 
8556 	link->priv = aconnector;
8557 
8558 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8559 
8560 	i2c = create_i2c(link->ddc, link->link_index, &res);
8561 	if (!i2c) {
8562 		DRM_ERROR("Failed to create i2c adapter data\n");
8563 		return -ENOMEM;
8564 	}
8565 
8566 	aconnector->i2c = i2c;
8567 	res = i2c_add_adapter(&i2c->base);
8568 
8569 	if (res) {
8570 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8571 		goto out_free;
8572 	}
8573 
8574 	connector_type = to_drm_connector_type(link->connector_signal);
8575 
8576 	res = drm_connector_init_with_ddc(
8577 			dm->ddev,
8578 			&aconnector->base,
8579 			&amdgpu_dm_connector_funcs,
8580 			connector_type,
8581 			&i2c->base);
8582 
8583 	if (res) {
8584 		DRM_ERROR("connector_init failed\n");
8585 		aconnector->connector_id = -1;
8586 		goto out_free;
8587 	}
8588 
8589 	drm_connector_helper_add(
8590 			&aconnector->base,
8591 			&amdgpu_dm_connector_helper_funcs);
8592 
8593 	amdgpu_dm_connector_init_helper(
8594 		dm,
8595 		aconnector,
8596 		connector_type,
8597 		link,
8598 		link_index);
8599 
8600 	drm_connector_attach_encoder(
8601 		&aconnector->base, &aencoder->base);
8602 
8603 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8604 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8605 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8606 
8607 out_free:
8608 	if (res) {
8609 		kfree(i2c);
8610 		aconnector->i2c = NULL;
8611 	}
8612 	return res;
8613 }
8614 
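/* Return a bitmask with one bit set per CRTC that encoders may drive (at most six). */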
8615 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8616 {
8617 	switch (adev->mode_info.num_crtc) {
8618 	case 1:
8619 		return 0x1;
8620 	case 2:
8621 		return 0x3;
8622 	case 3:
8623 		return 0x7;
8624 	case 4:
8625 		return 0xf;
8626 	case 5:
8627 		return 0x1f;
8628 	case 6:
8629 	default:
8630 		return 0x3f;
8631 	}
8632 }
8633 
8634 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8635 				  struct amdgpu_encoder *aencoder,
8636 				  uint32_t link_index)
8637 {
8638 	struct amdgpu_device *adev = drm_to_adev(dev);
8639 
8640 	int res = drm_encoder_init(dev,
8641 				   &aencoder->base,
8642 				   &amdgpu_dm_encoder_funcs,
8643 				   DRM_MODE_ENCODER_TMDS,
8644 				   NULL);
8645 
8646 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8647 
8648 	if (!res)
8649 		aencoder->encoder_id = link_index;
8650 	else
8651 		aencoder->encoder_id = -1;
8652 
8653 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8654 
8655 	return res;
8656 }
8657 
8658 static void manage_dm_interrupts(struct amdgpu_device *adev,
8659 				 struct amdgpu_crtc *acrtc,
8660 				 bool enable)
8661 {
8662 	/*
8663 	 * We have no guarantee that the frontend index maps to the same
8664 	 * backend index - some even map to more than one.
8665 	 *
8666 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8667 	 */
8668 	int irq_type =
8669 		amdgpu_display_crtc_idx_to_irq_type(
8670 			adev,
8671 			acrtc->crtc_id);
8672 
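	/*
	 * Turn vblank handling on before enabling the page-flip (and vline0)
	 * interrupts, and tear everything down in the reverse order.
	 */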
8673 	if (enable) {
8674 		drm_crtc_vblank_on(&acrtc->base);
8675 		amdgpu_irq_get(
8676 			adev,
8677 			&adev->pageflip_irq,
8678 			irq_type);
8679 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8680 		amdgpu_irq_get(
8681 			adev,
8682 			&adev->vline0_irq,
8683 			irq_type);
8684 #endif
8685 	} else {
8686 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8687 		amdgpu_irq_put(
8688 			adev,
8689 			&adev->vline0_irq,
8690 			irq_type);
8691 #endif
8692 		amdgpu_irq_put(
8693 			adev,
8694 			&adev->pageflip_irq,
8695 			irq_type);
8696 		drm_crtc_vblank_off(&acrtc->base);
8697 	}
8698 }
8699 
8700 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8701 				      struct amdgpu_crtc *acrtc)
8702 {
8703 	int irq_type =
8704 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8705 
8706 	/**
8707 	 * This reads the current state for the IRQ and force reapplies
8708 	 * the setting to hardware.
8709 	 */
8710 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8711 }
8712 
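/*
 * A scaling change is reported when the scaling mode differs, when underscan
 * is toggled while non-zero borders are in use, or when the border sizes
 * themselves change.
 */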
8713 static bool
8714 is_scaling_state_different(const struct dm_connector_state *dm_state,
8715 			   const struct dm_connector_state *old_dm_state)
8716 {
8717 	if (dm_state->scaling != old_dm_state->scaling)
8718 		return true;
8719 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8720 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8721 			return true;
8722 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8723 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8724 			return true;
8725 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8726 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8727 		return true;
8728 	return false;
8729 }
8730 
8731 #ifdef CONFIG_DRM_AMD_DC_HDCP
8732 static bool is_content_protection_different(struct drm_connector_state *state,
8733 					    const struct drm_connector_state *old_state,
8734 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8735 {
8736 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8737 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8738 
8739 	/* Handle: Type0/1 change */
8740 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8741 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8742 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8743 		return true;
8744 	}
8745 
	/* CP is being re-enabled, ignore this.
8747 	 *
8748 	 * Handles:	ENABLED -> DESIRED
8749 	 */
8750 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8751 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8752 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8753 		return false;
8754 	}
8755 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the
	 * restored state will be ENABLED.
8757 	 *
8758 	 * Handles:	UNDESIRED -> ENABLED
8759 	 */
8760 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8761 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8762 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8763 
8764 	/* Stream removed and re-enabled
8765 	 *
8766 	 * Can sometimes overlap with the HPD case,
8767 	 * thus set update_hdcp to false to avoid
8768 	 * setting HDCP multiple times.
8769 	 *
8770 	 * Handles:	DESIRED -> DESIRED (Special case)
8771 	 */
8772 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8773 		state->crtc && state->crtc->enabled &&
8774 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8775 		dm_con_state->update_hdcp = false;
8776 		return true;
8777 	}
8778 
8779 	/* Hot-plug, headless s3, dpms
8780 	 *
8781 	 * Only start HDCP if the display is connected/enabled.
8782 	 * update_hdcp flag will be set to false until the next
8783 	 * HPD comes in.
8784 	 *
8785 	 * Handles:	DESIRED -> DESIRED (Special case)
8786 	 */
8787 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8788 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8789 		dm_con_state->update_hdcp = false;
8790 		return true;
8791 	}
8792 
8793 	/*
8794 	 * Handles:	UNDESIRED -> UNDESIRED
8795 	 *		DESIRED -> DESIRED
8796 	 *		ENABLED -> ENABLED
8797 	 */
8798 	if (old_state->content_protection == state->content_protection)
8799 		return false;
8800 
8801 	/*
8802 	 * Handles:	UNDESIRED -> DESIRED
8803 	 *		DESIRED -> UNDESIRED
8804 	 *		ENABLED -> UNDESIRED
8805 	 */
8806 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8807 		return true;
8808 
8809 	/*
8810 	 * Handles:	DESIRED -> ENABLED
8811 	 */
8812 	return false;
8813 }
8814 
8815 #endif
8816 static void remove_stream(struct amdgpu_device *adev,
8817 			  struct amdgpu_crtc *acrtc,
8818 			  struct dc_stream_state *stream)
8819 {
8820 	/* this is the update mode case */
8821 
8822 	acrtc->otg_inst = -1;
8823 	acrtc->enabled = false;
8824 }
8825 
8826 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8827 			       struct dc_cursor_position *position)
8828 {
8829 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8830 	int x, y;
8831 	int xorigin = 0, yorigin = 0;
8832 
8833 	if (!crtc || !plane->state->fb)
8834 		return 0;
8835 
8836 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8837 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8838 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8839 			  __func__,
8840 			  plane->state->crtc_w,
8841 			  plane->state->crtc_h);
8842 		return -EINVAL;
8843 	}
8844 
8845 	x = plane->state->crtc_x;
8846 	y = plane->state->crtc_y;
8847 
8848 	if (x <= -amdgpu_crtc->max_cursor_width ||
8849 	    y <= -amdgpu_crtc->max_cursor_height)
8850 		return 0;
8851 
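	/*
	 * Cursor partially off the top/left edge: clamp the position to 0 and
	 * move the hotspot so the visible part of the cursor stays in place.
	 */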
8852 	if (x < 0) {
8853 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8854 		x = 0;
8855 	}
8856 	if (y < 0) {
8857 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8858 		y = 0;
8859 	}
8860 	position->enable = true;
8861 	position->translate_by_source = true;
8862 	position->x = x;
8863 	position->y = y;
8864 	position->x_hotspot = xorigin;
8865 	position->y_hotspot = yorigin;
8866 
8867 	return 0;
8868 }
8869 
8870 static void handle_cursor_update(struct drm_plane *plane,
8871 				 struct drm_plane_state *old_plane_state)
8872 {
8873 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8874 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8875 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8876 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8877 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8878 	uint64_t address = afb ? afb->address : 0;
8879 	struct dc_cursor_position position = {0};
8880 	struct dc_cursor_attributes attributes;
8881 	int ret;
8882 
8883 	if (!plane->state->fb && !old_plane_state->fb)
8884 		return;
8885 
8886 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8887 		      __func__,
8888 		      amdgpu_crtc->crtc_id,
8889 		      plane->state->crtc_w,
8890 		      plane->state->crtc_h);
8891 
8892 	ret = get_cursor_position(plane, crtc, &position);
8893 	if (ret)
8894 		return;
8895 
8896 	if (!position.enable) {
8897 		/* turn off cursor */
8898 		if (crtc_state && crtc_state->stream) {
8899 			mutex_lock(&adev->dm.dc_lock);
8900 			dc_stream_set_cursor_position(crtc_state->stream,
8901 						      &position);
8902 			mutex_unlock(&adev->dm.dc_lock);
8903 		}
8904 		return;
8905 	}
8906 
8907 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8908 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8909 
8910 	memset(&attributes, 0, sizeof(attributes));
8911 	attributes.address.high_part = upper_32_bits(address);
8912 	attributes.address.low_part  = lower_32_bits(address);
8913 	attributes.width             = plane->state->crtc_w;
8914 	attributes.height            = plane->state->crtc_h;
8915 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8916 	attributes.rotation_angle    = 0;
8917 	attributes.attribute_flags.value = 0;
8918 
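	/* pitches[0] is in bytes; DC takes the cursor pitch in pixels. */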
8919 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8920 
8921 	if (crtc_state->stream) {
8922 		mutex_lock(&adev->dm.dc_lock);
8923 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8924 							 &attributes))
8925 			DRM_ERROR("DC failed to set cursor attributes\n");
8926 
8927 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8928 						   &position))
8929 			DRM_ERROR("DC failed to set cursor position\n");
8930 		mutex_unlock(&adev->dm.dc_lock);
8931 	}
8932 }
8933 
8934 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8935 {
8936 
8937 	assert_spin_locked(&acrtc->base.dev->event_lock);
8938 	WARN_ON(acrtc->event);
8939 
8940 	acrtc->event = acrtc->base.state->event;
8941 
8942 	/* Set the flip status */
8943 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8944 
8945 	/* Mark this event as consumed */
8946 	acrtc->base.state->event = NULL;
8947 
8948 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8949 		     acrtc->crtc_id);
8950 }
8951 
8952 static void update_freesync_state_on_stream(
8953 	struct amdgpu_display_manager *dm,
8954 	struct dm_crtc_state *new_crtc_state,
8955 	struct dc_stream_state *new_stream,
8956 	struct dc_plane_state *surface,
8957 	u32 flip_timestamp_in_us)
8958 {
8959 	struct mod_vrr_params vrr_params;
8960 	struct dc_info_packet vrr_infopacket = {0};
8961 	struct amdgpu_device *adev = dm->adev;
8962 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8963 	unsigned long flags;
8964 	bool pack_sdp_v1_3 = false;
8965 
8966 	if (!new_stream)
8967 		return;
8968 
8969 	/*
8970 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8971 	 * For now it's sufficient to just guard against these conditions.
8972 	 */
8973 
8974 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8975 		return;
8976 
8977 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8979 
8980 	if (surface) {
8981 		mod_freesync_handle_preflip(
8982 			dm->freesync_module,
8983 			surface,
8984 			new_stream,
8985 			flip_timestamp_in_us,
8986 			&vrr_params);
8987 
8988 		if (adev->family < AMDGPU_FAMILY_AI &&
8989 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8990 			mod_freesync_handle_v_update(dm->freesync_module,
8991 						     new_stream, &vrr_params);
8992 
8993 			/* Need to call this before the frame ends. */
8994 			dc_stream_adjust_vmin_vmax(dm->dc,
8995 						   new_crtc_state->stream,
8996 						   &vrr_params.adjust);
8997 		}
8998 	}
8999 
9000 	mod_freesync_build_vrr_infopacket(
9001 		dm->freesync_module,
9002 		new_stream,
9003 		&vrr_params,
9004 		PACKET_TYPE_VRR,
9005 		TRANSFER_FUNC_UNKNOWN,
9006 		&vrr_infopacket,
9007 		pack_sdp_v1_3);
9008 
9009 	new_crtc_state->freesync_timing_changed |=
9010 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9011 			&vrr_params.adjust,
9012 			sizeof(vrr_params.adjust)) != 0);
9013 
9014 	new_crtc_state->freesync_vrr_info_changed |=
9015 		(memcmp(&new_crtc_state->vrr_infopacket,
9016 			&vrr_infopacket,
9017 			sizeof(vrr_infopacket)) != 0);
9018 
9019 	acrtc->dm_irq_params.vrr_params = vrr_params;
9020 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9021 
9022 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9023 	new_stream->vrr_infopacket = vrr_infopacket;
9024 
9025 	if (new_crtc_state->freesync_vrr_info_changed)
9026 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9027 			      new_crtc_state->base.crtc->base.id,
9028 			      (int)new_crtc_state->base.vrr_enabled,
9029 			      (int)vrr_params.state);
9030 
9031 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9032 }
9033 
9034 static void update_stream_irq_parameters(
9035 	struct amdgpu_display_manager *dm,
9036 	struct dm_crtc_state *new_crtc_state)
9037 {
9038 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9039 	struct mod_vrr_params vrr_params;
9040 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9041 	struct amdgpu_device *adev = dm->adev;
9042 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9043 	unsigned long flags;
9044 
9045 	if (!new_stream)
9046 		return;
9047 
9048 	/*
9049 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9050 	 * For now it's sufficient to just guard against these conditions.
9051 	 */
9052 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9053 		return;
9054 
9055 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9056 	vrr_params = acrtc->dm_irq_params.vrr_params;
9057 
9058 	if (new_crtc_state->vrr_supported &&
9059 	    config.min_refresh_in_uhz &&
9060 	    config.max_refresh_in_uhz) {
9061 		/*
9062 		 * if freesync compatible mode was set, config.state will be set
9063 		 * in atomic check
9064 		 */
9065 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9066 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9067 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9068 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9069 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9070 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9071 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9072 		} else {
9073 			config.state = new_crtc_state->base.vrr_enabled ?
9074 						     VRR_STATE_ACTIVE_VARIABLE :
9075 						     VRR_STATE_INACTIVE;
9076 		}
9077 	} else {
9078 		config.state = VRR_STATE_UNSUPPORTED;
9079 	}
9080 
9081 	mod_freesync_build_vrr_params(dm->freesync_module,
9082 				      new_stream,
9083 				      &config, &vrr_params);
9084 
9085 	new_crtc_state->freesync_timing_changed |=
9086 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9087 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9088 
9089 	new_crtc_state->freesync_config = config;
9090 	/* Copy state for access from DM IRQ handler */
9091 	acrtc->dm_irq_params.freesync_config = config;
9092 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9093 	acrtc->dm_irq_params.vrr_params = vrr_params;
9094 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9095 }
9096 
9097 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9098 					    struct dm_crtc_state *new_state)
9099 {
9100 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9101 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9102 
9103 	if (!old_vrr_active && new_vrr_active) {
9104 		/* Transition VRR inactive -> active:
9105 		 * While VRR is active, we must not disable vblank irq, as a
9106 		 * reenable after disable would compute bogus vblank/pflip
9107 		 * timestamps if it likely happened inside display front-porch.
9108 		 *
9109 		 * We also need vupdate irq for the actual core vblank handling
9110 		 * at end of vblank.
9111 		 */
9112 		dm_set_vupdate_irq(new_state->base.crtc, true);
9113 		drm_crtc_vblank_get(new_state->base.crtc);
9114 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9115 				 __func__, new_state->base.crtc->base.id);
9116 	} else if (old_vrr_active && !new_vrr_active) {
9117 		/* Transition VRR active -> inactive:
9118 		 * Allow vblank irq disable again for fixed refresh rate.
9119 		 */
9120 		dm_set_vupdate_irq(new_state->base.crtc, false);
9121 		drm_crtc_vblank_put(new_state->base.crtc);
9122 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9123 				 __func__, new_state->base.crtc->base.id);
9124 	}
9125 }
9126 
9127 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9128 {
9129 	struct drm_plane *plane;
9130 	struct drm_plane_state *old_plane_state;
9131 	int i;
9132 
9133 	/*
9134 	 * TODO: Make this per-stream so we don't issue redundant updates for
9135 	 * commits with multiple streams.
9136 	 */
9137 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9138 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9139 			handle_cursor_update(plane, old_plane_state);
9140 }
9141 
9142 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9143 				    struct dc_state *dc_state,
9144 				    struct drm_device *dev,
9145 				    struct amdgpu_display_manager *dm,
9146 				    struct drm_crtc *pcrtc,
9147 				    bool wait_for_vblank)
9148 {
9149 	uint32_t i;
9150 	uint64_t timestamp_ns;
9151 	struct drm_plane *plane;
9152 	struct drm_plane_state *old_plane_state, *new_plane_state;
9153 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9154 	struct drm_crtc_state *new_pcrtc_state =
9155 			drm_atomic_get_new_crtc_state(state, pcrtc);
9156 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9157 	struct dm_crtc_state *dm_old_crtc_state =
9158 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9159 	int planes_count = 0, vpos, hpos;
9160 	long r;
9161 	unsigned long flags;
9162 	struct amdgpu_bo *abo;
9163 	uint32_t target_vblank, last_flip_vblank;
9164 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9165 	bool pflip_present = false;
9166 	struct {
9167 		struct dc_surface_update surface_updates[MAX_SURFACES];
9168 		struct dc_plane_info plane_infos[MAX_SURFACES];
9169 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9170 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9171 		struct dc_stream_update stream_update;
9172 	} *bundle;
9173 
9174 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9175 
9176 	if (!bundle) {
9177 		dm_error("Failed to allocate update bundle\n");
9178 		goto cleanup;
9179 	}
9180 
9181 	/*
9182 	 * Disable the cursor first if we're disabling all the planes.
9183 	 * It'll remain on the screen after the planes are re-enabled
9184 	 * if we don't.
9185 	 */
9186 	if (acrtc_state->active_planes == 0)
9187 		amdgpu_dm_commit_cursors(state);
9188 
9189 	/* update planes when needed */
9190 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9191 		struct drm_crtc *crtc = new_plane_state->crtc;
9192 		struct drm_crtc_state *new_crtc_state;
9193 		struct drm_framebuffer *fb = new_plane_state->fb;
9194 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9195 		bool plane_needs_flip;
9196 		struct dc_plane_state *dc_plane;
9197 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9198 
9199 		/* Cursor plane is handled after stream updates */
9200 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9201 			continue;
9202 
9203 		if (!fb || !crtc || pcrtc != crtc)
9204 			continue;
9205 
9206 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9207 		if (!new_crtc_state->active)
9208 			continue;
9209 
9210 		dc_plane = dm_new_plane_state->dc_state;
9211 
9212 		bundle->surface_updates[planes_count].surface = dc_plane;
9213 		if (new_pcrtc_state->color_mgmt_changed) {
9214 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9215 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9216 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9217 		}
9218 
9219 		fill_dc_scaling_info(dm->adev, new_plane_state,
9220 				     &bundle->scaling_infos[planes_count]);
9221 
9222 		bundle->surface_updates[planes_count].scaling_info =
9223 			&bundle->scaling_infos[planes_count];
9224 
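		/*
		 * A flip address update is only needed when both the old and
		 * new plane state carry a framebuffer; otherwise the plane is
		 * simply being enabled or disabled.
		 */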
9225 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9226 
9227 		pflip_present = pflip_present || plane_needs_flip;
9228 
9229 		if (!plane_needs_flip) {
9230 			planes_count += 1;
9231 			continue;
9232 		}
9233 
9234 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9235 
9236 		/*
9237 		 * Wait for all fences on this FB. Do limited wait to avoid
9238 		 * deadlock during GPU reset when this fence will not signal
9239 		 * but we hold reservation lock for the BO.
9240 		 */
9241 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9242 					  DMA_RESV_USAGE_WRITE, false,
9243 					  msecs_to_jiffies(5000));
9244 		if (unlikely(r <= 0))
9245 			DRM_ERROR("Waiting for fences timed out!");
9246 
9247 		fill_dc_plane_info_and_addr(
9248 			dm->adev, new_plane_state,
9249 			afb->tiling_flags,
9250 			&bundle->plane_infos[planes_count],
9251 			&bundle->flip_addrs[planes_count].address,
9252 			afb->tmz_surface, false);
9253 
9254 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9255 				 new_plane_state->plane->index,
9256 				 bundle->plane_infos[planes_count].dcc.enable);
9257 
9258 		bundle->surface_updates[planes_count].plane_info =
9259 			&bundle->plane_infos[planes_count];
9260 
9261 		/*
9262 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9264 		 */
9265 		bundle->flip_addrs[planes_count].flip_immediate =
9266 			crtc->state->async_flip &&
9267 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9268 
9269 		timestamp_ns = ktime_get_ns();
9270 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9271 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9272 		bundle->surface_updates[planes_count].surface = dc_plane;
9273 
9274 		if (!bundle->surface_updates[planes_count].surface) {
9275 			DRM_ERROR("No surface for CRTC: id=%d\n",
9276 					acrtc_attach->crtc_id);
9277 			continue;
9278 		}
9279 
9280 		if (plane == pcrtc->primary)
9281 			update_freesync_state_on_stream(
9282 				dm,
9283 				acrtc_state,
9284 				acrtc_state->stream,
9285 				dc_plane,
9286 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9287 
9288 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9289 				 __func__,
9290 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9291 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9292 
9293 		planes_count += 1;
9294 
9295 	}
9296 
9297 	if (pflip_present) {
9298 		if (!vrr_active) {
9299 			/* Use old throttling in non-vrr fixed refresh rate mode
9300 			 * to keep flip scheduling based on target vblank counts
9301 			 * working in a backwards compatible way, e.g., for
9302 			 * clients using the GLX_OML_sync_control extension or
9303 			 * DRI3/Present extension with defined target_msc.
9304 			 */
9305 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9306 		}
9307 		else {
9308 			/* For variable refresh rate mode only:
9309 			 * Get vblank of last completed flip to avoid > 1 vrr
9310 			 * flips per video frame by use of throttling, but allow
9311 			 * flip programming anywhere in the possibly large
9312 			 * variable vrr vblank interval for fine-grained flip
9313 			 * timing control and more opportunity to avoid stutter
9314 			 * on late submission of flips.
9315 			 */
9316 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9317 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9318 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9319 		}
9320 
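		/*
		 * wait_for_vblank is a bool, so target either the vblank of
		 * the last completed flip or the one right after it.
		 */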
9321 		target_vblank = last_flip_vblank + wait_for_vblank;
9322 
9323 		/*
9324 		 * Wait until we're out of the vertical blank period before the one
9325 		 * targeted by the flip
9326 		 */
9327 		while ((acrtc_attach->enabled &&
9328 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9329 							    0, &vpos, &hpos, NULL,
9330 							    NULL, &pcrtc->hwmode)
9331 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9332 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9333 			(int)(target_vblank -
9334 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9335 			usleep_range(1000, 1100);
9336 		}
9337 
9338 		/**
9339 		 * Prepare the flip event for the pageflip interrupt to handle.
9340 		 *
9341 		 * This only works in the case where we've already turned on the
9342 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9343 		 * from 0 -> n planes we have to skip a hardware generated event
9344 		 * and rely on sending it from software.
9345 		 */
9346 		if (acrtc_attach->base.state->event &&
9347 		    acrtc_state->active_planes > 0 &&
9348 		    !acrtc_state->force_dpms_off) {
9349 			drm_crtc_vblank_get(pcrtc);
9350 
9351 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9352 
9353 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9354 			prepare_flip_isr(acrtc_attach);
9355 
9356 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9357 		}
9358 
9359 		if (acrtc_state->stream) {
9360 			if (acrtc_state->freesync_vrr_info_changed)
9361 				bundle->stream_update.vrr_infopacket =
9362 					&acrtc_state->stream->vrr_infopacket;
9363 		}
9364 	}
9365 
9366 	/* Update the planes if changed or disable if we don't have any. */
9367 	if ((planes_count || acrtc_state->active_planes == 0) &&
9368 		acrtc_state->stream) {
9369 #if defined(CONFIG_DRM_AMD_DC_DCN)
9370 		/*
9371 		 * If PSR or idle optimizations are enabled then flush out
9372 		 * any pending work before hardware programming.
9373 		 */
9374 		if (dm->vblank_control_workqueue)
9375 			flush_workqueue(dm->vblank_control_workqueue);
9376 #endif
9377 
9378 		bundle->stream_update.stream = acrtc_state->stream;
9379 		if (new_pcrtc_state->mode_changed) {
9380 			bundle->stream_update.src = acrtc_state->stream->src;
9381 			bundle->stream_update.dst = acrtc_state->stream->dst;
9382 		}
9383 
9384 		if (new_pcrtc_state->color_mgmt_changed) {
9385 			/*
9386 			 * TODO: This isn't fully correct since we've actually
9387 			 * already modified the stream in place.
9388 			 */
9389 			bundle->stream_update.gamut_remap =
9390 				&acrtc_state->stream->gamut_remap_matrix;
9391 			bundle->stream_update.output_csc_transform =
9392 				&acrtc_state->stream->csc_color_matrix;
9393 			bundle->stream_update.out_transfer_func =
9394 				acrtc_state->stream->out_transfer_func;
9395 		}
9396 
9397 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9398 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9399 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9400 
9401 		/*
9402 		 * If FreeSync state on the stream has changed then we need to
9403 		 * re-adjust the min/max bounds now that DC doesn't handle this
9404 		 * as part of commit.
9405 		 */
9406 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9407 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9408 			dc_stream_adjust_vmin_vmax(
9409 				dm->dc, acrtc_state->stream,
9410 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9411 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9412 		}
9413 		mutex_lock(&dm->dc_lock);
9414 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9415 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9416 			amdgpu_dm_psr_disable(acrtc_state->stream);
9417 
9418 		dc_commit_updates_for_stream(dm->dc,
9419 						     bundle->surface_updates,
9420 						     planes_count,
9421 						     acrtc_state->stream,
9422 						     &bundle->stream_update,
9423 						     dc_state);
9424 
9425 		/**
9426 		 * Enable or disable the interrupts on the backend.
9427 		 *
9428 		 * Most pipes are put into power gating when unused.
9429 		 *
9430 		 * When power gating is enabled on a pipe we lose the
9431 		 * interrupt enablement state when power gating is disabled.
9432 		 *
9433 		 * So we need to update the IRQ control state in hardware
9434 		 * whenever the pipe turns on (since it could be previously
9435 		 * power gated) or off (since some pipes can't be power gated
9436 		 * on some ASICs).
9437 		 */
9438 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9439 			dm_update_pflip_irq_state(drm_to_adev(dev),
9440 						  acrtc_attach);
9441 
9442 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9443 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9444 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9445 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9446 
9447 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9448 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9449 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9450 			struct amdgpu_dm_connector *aconn =
9451 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9452 
9453 			if (aconn->psr_skip_count > 0)
9454 				aconn->psr_skip_count--;
9455 
9456 			/* Allow PSR when skip count is 0. */
9457 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9458 		} else {
9459 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9460 		}
9461 
9462 		mutex_unlock(&dm->dc_lock);
9463 	}
9464 
9465 	/*
9466 	 * Update cursor state *after* programming all the planes.
9467 	 * This avoids redundant programming in the case where we're going
9468 	 * to be disabling a single plane - those pipes are being disabled.
9469 	 */
9470 	if (acrtc_state->active_planes)
9471 		amdgpu_dm_commit_cursors(state);
9472 
9473 cleanup:
9474 	kfree(bundle);
9475 }
9476 
9477 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9478 				   struct drm_atomic_state *state)
9479 {
9480 	struct amdgpu_device *adev = drm_to_adev(dev);
9481 	struct amdgpu_dm_connector *aconnector;
9482 	struct drm_connector *connector;
9483 	struct drm_connector_state *old_con_state, *new_con_state;
9484 	struct drm_crtc_state *new_crtc_state;
9485 	struct dm_crtc_state *new_dm_crtc_state;
9486 	const struct dc_stream_status *status;
9487 	int i, inst;
9488 
	/* Notify audio device removals. */
9490 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9491 		if (old_con_state->crtc != new_con_state->crtc) {
9492 			/* CRTC changes require notification. */
9493 			goto notify;
9494 		}
9495 
9496 		if (!new_con_state->crtc)
9497 			continue;
9498 
9499 		new_crtc_state = drm_atomic_get_new_crtc_state(
9500 			state, new_con_state->crtc);
9501 
9502 		if (!new_crtc_state)
9503 			continue;
9504 
9505 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9506 			continue;
9507 
9508 	notify:
9509 		aconnector = to_amdgpu_dm_connector(connector);
9510 
9511 		mutex_lock(&adev->dm.audio_lock);
9512 		inst = aconnector->audio_inst;
9513 		aconnector->audio_inst = -1;
9514 		mutex_unlock(&adev->dm.audio_lock);
9515 
9516 		amdgpu_dm_audio_eld_notify(adev, inst);
9517 	}
9518 
9519 	/* Notify audio device additions. */
9520 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9521 		if (!new_con_state->crtc)
9522 			continue;
9523 
9524 		new_crtc_state = drm_atomic_get_new_crtc_state(
9525 			state, new_con_state->crtc);
9526 
9527 		if (!new_crtc_state)
9528 			continue;
9529 
9530 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9531 			continue;
9532 
9533 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9534 		if (!new_dm_crtc_state->stream)
9535 			continue;
9536 
9537 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9538 		if (!status)
9539 			continue;
9540 
9541 		aconnector = to_amdgpu_dm_connector(connector);
9542 
9543 		mutex_lock(&adev->dm.audio_lock);
9544 		inst = status->audio_inst;
9545 		aconnector->audio_inst = inst;
9546 		mutex_unlock(&adev->dm.audio_lock);
9547 
9548 		amdgpu_dm_audio_eld_notify(adev, inst);
9549 	}
9550 }
9551 
9552 /*
9553  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9554  * @crtc_state: the DRM CRTC state
9555  * @stream_state: the DC stream state.
9556  *
9557  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9558  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9559  */
9560 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9561 						struct dc_stream_state *stream_state)
9562 {
9563 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9564 }
9565 
9566 /**
9567  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9568  * @state: The atomic state to commit
9569  *
9570  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
9572  * atomic check should have filtered anything non-kosher.
9573  */
9574 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9575 {
9576 	struct drm_device *dev = state->dev;
9577 	struct amdgpu_device *adev = drm_to_adev(dev);
9578 	struct amdgpu_display_manager *dm = &adev->dm;
9579 	struct dm_atomic_state *dm_state;
9580 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9581 	uint32_t i, j;
9582 	struct drm_crtc *crtc;
9583 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9584 	unsigned long flags;
9585 	bool wait_for_vblank = true;
9586 	struct drm_connector *connector;
9587 	struct drm_connector_state *old_con_state, *new_con_state;
9588 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9589 	int crtc_disable_count = 0;
9590 	bool mode_set_reset_required = false;
9591 
9592 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9593 
9594 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9595 
9596 	dm_state = dm_atomic_get_new_state(state);
9597 	if (dm_state && dm_state->context) {
9598 		dc_state = dm_state->context;
9599 	} else {
9600 		/* No state changes, retain current state. */
9601 		dc_state_temp = dc_create_state(dm->dc);
9602 		ASSERT(dc_state_temp);
9603 		dc_state = dc_state_temp;
9604 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9605 	}
9606 
9607 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9608 				       new_crtc_state, i) {
9609 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9610 
9611 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9612 
9613 		if (old_crtc_state->active &&
9614 		    (!new_crtc_state->active ||
9615 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9616 			manage_dm_interrupts(adev, acrtc, false);
9617 			dc_stream_release(dm_old_crtc_state->stream);
9618 		}
9619 	}
9620 
9621 	drm_atomic_helper_calc_timestamping_constants(state);
9622 
9623 	/* update changed items */
9624 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9625 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9626 
9627 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9628 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9629 
9630 		DRM_DEBUG_ATOMIC(
9631 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9632 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9633 			"connectors_changed:%d\n",
9634 			acrtc->crtc_id,
9635 			new_crtc_state->enable,
9636 			new_crtc_state->active,
9637 			new_crtc_state->planes_changed,
9638 			new_crtc_state->mode_changed,
9639 			new_crtc_state->active_changed,
9640 			new_crtc_state->connectors_changed);
9641 
9642 		/* Disable cursor if disabling crtc */
9643 		if (old_crtc_state->active && !new_crtc_state->active) {
9644 			struct dc_cursor_position position;
9645 
9646 			memset(&position, 0, sizeof(position));
9647 			mutex_lock(&dm->dc_lock);
9648 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9649 			mutex_unlock(&dm->dc_lock);
9650 		}
9651 
9652 		/* Copy all transient state flags into dc state */
9653 		if (dm_new_crtc_state->stream) {
9654 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9655 							    dm_new_crtc_state->stream);
9656 		}
9657 
9658 		/* handles headless hotplug case, updating new_state and
9659 		 * aconnector as needed
9660 		 */
9661 
9662 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9663 
9664 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9665 
9666 			if (!dm_new_crtc_state->stream) {
9667 				/*
9668 				 * this could happen because of issues with
9669 				 * userspace notifications delivery.
9670 				 * In this case userspace tries to set mode on
9671 				 * display which is disconnected in fact.
9672 				 * dc_sink is NULL in this case on aconnector.
9673 				 * We expect reset mode will come soon.
9674 				 *
9675 				 * This can also happen when unplug is done
9676 				 * during resume sequence ended
9677 				 *
9678 				 * In this case, we want to pretend we still
9679 				 * have a sink to keep the pipe running so that
9680 				 * hw state is consistent with the sw state
9681 				 */
9682 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9683 						__func__, acrtc->base.base.id);
9684 				continue;
9685 			}
9686 
9687 			if (dm_old_crtc_state->stream)
9688 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9689 
9690 			pm_runtime_get_noresume(dev->dev);
9691 
9692 			acrtc->enabled = true;
9693 			acrtc->hw_mode = new_crtc_state->mode;
9694 			crtc->hwmode = new_crtc_state->mode;
9695 			mode_set_reset_required = true;
9696 		} else if (modereset_required(new_crtc_state)) {
9697 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9698 			/* i.e. reset mode */
9699 			if (dm_old_crtc_state->stream)
9700 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9701 
9702 			mode_set_reset_required = true;
9703 		}
9704 	} /* for_each_crtc_in_state() */
9705 
9706 	if (dc_state) {
		/* if there is a mode set or reset, disable eDP PSR */
9708 		if (mode_set_reset_required) {
9709 #if defined(CONFIG_DRM_AMD_DC_DCN)
9710 			if (dm->vblank_control_workqueue)
9711 				flush_workqueue(dm->vblank_control_workqueue);
9712 #endif
9713 			amdgpu_dm_psr_disable_all(dm);
9714 		}
9715 
9716 		dm_enable_per_frame_crtc_master_sync(dc_state);
9717 		mutex_lock(&dm->dc_lock);
9718 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9719 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimization when vblank count is 0 for display off */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9723 #endif
9724 		mutex_unlock(&dm->dc_lock);
9725 	}
9726 
9727 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9728 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9729 
9730 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9731 
9732 		if (dm_new_crtc_state->stream != NULL) {
9733 			const struct dc_stream_status *status =
9734 					dc_stream_get_status(dm_new_crtc_state->stream);
9735 
9736 			if (!status)
9737 				status = dc_stream_get_status_from_state(dc_state,
9738 									 dm_new_crtc_state->stream);
9739 			if (!status)
9740 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9741 			else
9742 				acrtc->otg_inst = status->primary_otg_inst;
9743 		}
9744 	}
9745 #ifdef CONFIG_DRM_AMD_DC_HDCP
9746 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9747 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9748 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9749 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9750 
9751 		new_crtc_state = NULL;
9752 
9753 		if (acrtc)
9754 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9755 
9756 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9757 
9758 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9759 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9760 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9761 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9762 			dm_new_con_state->update_hdcp = true;
9763 			continue;
9764 		}
9765 
9766 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9767 			hdcp_update_display(
9768 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9769 				new_con_state->hdcp_content_type,
9770 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9771 	}
9772 #endif
9773 
9774 	/* Handle connector state changes */
9775 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9776 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9777 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9778 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9779 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9780 		struct dc_stream_update stream_update;
9781 		struct dc_info_packet hdr_packet;
9782 		struct dc_stream_status *status = NULL;
9783 		bool abm_changed, hdr_changed, scaling_changed;
9784 
9785 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9786 		memset(&stream_update, 0, sizeof(stream_update));
9787 
9788 		if (acrtc) {
9789 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9790 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9791 		}
9792 
9793 		/* Skip any modesets/resets */
9794 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9795 			continue;
9796 
9797 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9798 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9799 
9800 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9801 							     dm_old_con_state);
9802 
9803 		abm_changed = dm_new_crtc_state->abm_level !=
9804 			      dm_old_crtc_state->abm_level;
9805 
9806 		hdr_changed =
9807 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9808 
9809 		if (!scaling_changed && !abm_changed && !hdr_changed)
9810 			continue;
9811 
9812 		stream_update.stream = dm_new_crtc_state->stream;
9813 		if (scaling_changed) {
9814 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9815 					dm_new_con_state, dm_new_crtc_state->stream);
9816 
9817 			stream_update.src = dm_new_crtc_state->stream->src;
9818 			stream_update.dst = dm_new_crtc_state->stream->dst;
9819 		}
9820 
9821 		if (abm_changed) {
9822 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9823 
9824 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9825 		}
9826 
9827 		if (hdr_changed) {
9828 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9829 			stream_update.hdr_static_metadata = &hdr_packet;
9830 		}
9831 
9832 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9833 
9834 		if (WARN_ON(!status))
9835 			continue;
9836 
9837 		WARN_ON(!status->plane_count);
9838 
9839 		/*
9840 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9841 		 * Here we create an empty update on each plane.
9842 		 * To fix this, DC should permit updating only stream properties.
9843 		 */
9844 		for (j = 0; j < status->plane_count; j++)
9845 			dummy_updates[j].surface = status->plane_states[0];
9846 
9847 
9848 		mutex_lock(&dm->dc_lock);
9849 		dc_commit_updates_for_stream(dm->dc,
9850 						     dummy_updates,
9851 						     status->plane_count,
9852 						     dm_new_crtc_state->stream,
9853 						     &stream_update,
9854 						     dc_state);
9855 		mutex_unlock(&dm->dc_lock);
9856 	}
9857 
9858 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9859 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9860 				      new_crtc_state, i) {
9861 		if (old_crtc_state->active && !new_crtc_state->active)
9862 			crtc_disable_count++;
9863 
9864 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9865 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9866 
9867 		/* For freesync config update on crtc state and params for irq */
9868 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9869 
9870 		/* Handle vrr on->off / off->on transitions */
9871 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9872 						dm_new_crtc_state);
9873 	}
9874 
9875 	/**
9876 	 * Enable interrupts for CRTCs that are newly enabled or went through
9877 	 * a modeset. It was intentionally deferred until after the front end
9878 	 * state was modified to wait until the OTG was on and so the IRQ
9879 	 * handlers didn't access stale or invalid state.
9880 	 */
9881 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9882 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9883 #ifdef CONFIG_DEBUG_FS
9884 		bool configure_crc = false;
9885 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9886 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9887 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9888 #endif
9889 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9890 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9891 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9892 #endif
9893 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9894 
9895 		if (new_crtc_state->active &&
9896 		    (!old_crtc_state->active ||
9897 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9898 			dc_stream_retain(dm_new_crtc_state->stream);
9899 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9900 			manage_dm_interrupts(adev, acrtc, true);
9901 
9902 #ifdef CONFIG_DEBUG_FS
9903 			/**
9904 			 * Frontend may have changed so reapply the CRC capture
9905 			 * settings for the stream.
9906 			 */
9907 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9908 
9909 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9910 				configure_crc = true;
9911 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9912 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9913 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9914 					acrtc->dm_irq_params.crc_window.update_win = true;
9915 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9916 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9917 					crc_rd_wrk->crtc = crtc;
9918 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9919 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9920 				}
9921 #endif
9922 			}
9923 
9924 			if (configure_crc)
9925 				if (amdgpu_dm_crtc_configure_crc_source(
9926 					crtc, dm_new_crtc_state, cur_crc_src))
9927 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9928 #endif
9929 		}
9930 	}
9931 
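	/* Skip the flip-done wait when any CRTC requested an async flip. */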
9932 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9933 		if (new_crtc_state->async_flip)
9934 			wait_for_vblank = false;
9935 
	/* update planes when needed per crtc */
9937 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9938 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9939 
9940 		if (dm_new_crtc_state->stream)
9941 			amdgpu_dm_commit_planes(state, dc_state, dev,
9942 						dm, crtc, wait_for_vblank);
9943 	}
9944 
9945 	/* Update audio instances for each connector. */
9946 	amdgpu_dm_commit_audio(dev, state);
9947 
9948 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9949 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9950 	/* restore the backlight level */
9951 	for (i = 0; i < dm->num_of_edps; i++) {
9952 		if (dm->backlight_dev[i] &&
9953 		    (dm->actual_brightness[i] != dm->brightness[i]))
9954 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9955 	}
9956 #endif
9957 	/*
9958 	 * send vblank event on all events not handled in flip and
9959 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9960 	 */
9961 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9962 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9963 
9964 		if (new_crtc_state->event)
9965 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9966 
9967 		new_crtc_state->event = NULL;
9968 	}
9969 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9970 
9971 	/* Signal HW programming completion */
9972 	drm_atomic_helper_commit_hw_done(state);
9973 
9974 	if (wait_for_vblank)
9975 		drm_atomic_helper_wait_for_flip_done(dev, state);
9976 
9977 	drm_atomic_helper_cleanup_planes(dev, state);
9978 
9979 	/* return the stolen vga memory back to VRAM */
9980 	if (!adev->mman.keep_stolen_vga_memory)
9981 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9982 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9983 
9984 	/*
9985 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9986 	 * so we can put the GPU into runtime suspend if we're not driving any
9987 	 * displays anymore
9988 	 */
9989 	for (i = 0; i < crtc_disable_count; i++)
9990 		pm_runtime_put_autosuspend(dev->dev);
9991 	pm_runtime_mark_last_busy(dev->dev);
9992 
9993 	if (dc_state_temp)
9994 		dc_release_state(dc_state_temp);
9995 }
9996 
9997 
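/*
 * Build a minimal atomic state containing the connector, its CRTC and the
 * CRTC's primary plane, mark the mode as changed, and commit it to force a
 * full modeset that restores the previous display configuration.
 */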
9998 static int dm_force_atomic_commit(struct drm_connector *connector)
9999 {
10000 	int ret = 0;
10001 	struct drm_device *ddev = connector->dev;
10002 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10003 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10004 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10005 	struct drm_connector_state *conn_state;
10006 	struct drm_crtc_state *crtc_state;
10007 	struct drm_plane_state *plane_state;
10008 
10009 	if (!state)
10010 		return -ENOMEM;
10011 
10012 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10013 
10014 	/* Construct an atomic state to restore previous display setting */
10015 
10016 	/*
10017 	 * Attach connectors to drm_atomic_state
10018 	 */
10019 	conn_state = drm_atomic_get_connector_state(state, connector);
10020 
10021 	ret = PTR_ERR_OR_ZERO(conn_state);
10022 	if (ret)
10023 		goto out;
10024 
	/* Attach CRTC to drm_atomic_state */
10026 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10027 
10028 	ret = PTR_ERR_OR_ZERO(crtc_state);
10029 	if (ret)
10030 		goto out;
10031 
10032 	/* force a restore */
10033 	crtc_state->mode_changed = true;
10034 
10035 	/* Attach plane to drm_atomic_state */
10036 	plane_state = drm_atomic_get_plane_state(state, plane);
10037 
10038 	ret = PTR_ERR_OR_ZERO(plane_state);
10039 	if (ret)
10040 		goto out;
10041 
10042 	/* Call commit internally with the state we just constructed */
10043 	ret = drm_atomic_commit(state);
10044 
10045 out:
10046 	drm_atomic_state_put(state);
10047 	if (ret)
10048 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10049 
10050 	return ret;
10051 }
10052 
10053 /*
10054  * This function handles all cases when set mode does not come upon hotplug.
10055  * This includes when a display is unplugged then plugged back into the
10056  * same port and when running without usermode desktop manager supprot
10057  */
10058 void dm_restore_drm_connector_state(struct drm_device *dev,
10059 				    struct drm_connector *connector)
10060 {
10061 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10062 	struct amdgpu_crtc *disconnected_acrtc;
10063 	struct dm_crtc_state *acrtc_state;
10064 
10065 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10066 		return;
10067 
10068 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10069 	if (!disconnected_acrtc)
10070 		return;
10071 
10072 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10073 	if (!acrtc_state->stream)
10074 		return;
10075 
10076 	/*
10077 	 * If the previous sink is not released and different from the current,
10078 	 * we deduce we are in a state where we can not rely on usermode call
10079 	 * to turn on the display, so we do it here
10080 	 */
10081 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10082 		dm_force_atomic_commit(&aconnector->base);
10083 }
10084 
10085 /*
10086  * Grabs all modesetting locks to serialize against any blocking commits,
10087  * Waits for completion of all non blocking commits.
10088  */
10089 static int do_aquire_global_lock(struct drm_device *dev,
10090 				 struct drm_atomic_state *state)
10091 {
10092 	struct drm_crtc *crtc;
10093 	struct drm_crtc_commit *commit;
10094 	long ret;
10095 
10096 	/*
10097 	 * Adding all modeset locks to aquire_ctx will
10098 	 * ensure that when the framework release it the
10099 	 * extra locks we are locking here will get released to
10100 	 */
10101 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10102 	if (ret)
10103 		return ret;
10104 
10105 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10106 		spin_lock(&crtc->commit_lock);
10107 		commit = list_first_entry_or_null(&crtc->commit_list,
10108 				struct drm_crtc_commit, commit_entry);
10109 		if (commit)
10110 			drm_crtc_commit_get(commit);
10111 		spin_unlock(&crtc->commit_lock);
10112 
10113 		if (!commit)
10114 			continue;
10115 
10116 		/*
10117 		 * Make sure all pending HW programming completed and
10118 		 * page flips done
10119 		 */
10120 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10121 
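		/*
		 * wait_for_completion_interruptible_timeout() returns 0 on
		 * timeout, a negative error if interrupted by a signal, and
		 * the remaining jiffies otherwise.
		 */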
10122 		if (ret > 0)
10123 			ret = wait_for_completion_interruptible_timeout(
10124 					&commit->flip_done, 10*HZ);
10125 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
10129 
10130 		drm_crtc_commit_put(commit);
10131 	}
10132 
10133 	return ret < 0 ? ret : 0;
10134 }
10135 
10136 static void get_freesync_config_for_crtc(
10137 	struct dm_crtc_state *new_crtc_state,
10138 	struct dm_connector_state *new_con_state)
10139 {
10140 	struct mod_freesync_config config = {0};
10141 	struct amdgpu_dm_connector *aconnector =
10142 			to_amdgpu_dm_connector(new_con_state->base.connector);
10143 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10144 	int vrefresh = drm_mode_vrefresh(mode);
10145 	bool fs_vid_mode = false;
10146 
10147 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10148 					vrefresh >= aconnector->min_vfreq &&
10149 					vrefresh <= aconnector->max_vfreq;
10150 
10151 	if (new_crtc_state->vrr_supported) {
10152 		new_crtc_state->stream->ignore_msa_timing_param = true;
10153 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10154 
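		/* Connector limits are in Hz; DC's freesync config expects uHz. */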
10155 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10156 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10157 		config.vsif_supported = true;
10158 		config.btr = true;
10159 
10160 		if (fs_vid_mode) {
10161 			config.state = VRR_STATE_ACTIVE_FIXED;
10162 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10163 			goto out;
10164 		} else if (new_crtc_state->base.vrr_enabled) {
10165 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10166 		} else {
10167 			config.state = VRR_STATE_INACTIVE;
10168 		}
10169 	}
10170 out:
10171 	new_crtc_state->freesync_config = config;
10172 }
10173 
10174 static void reset_freesync_config_for_crtc(
10175 	struct dm_crtc_state *new_crtc_state)
10176 {
10177 	new_crtc_state->vrr_supported = false;
10178 
10179 	memset(&new_crtc_state->vrr_infopacket, 0,
10180 	       sizeof(new_crtc_state->vrr_infopacket));
10181 }
10182 
10183 static bool
10184 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10185 				 struct drm_crtc_state *new_crtc_state)
10186 {
10187 	const struct drm_display_mode *old_mode, *new_mode;
10188 
10189 	if (!old_crtc_state || !new_crtc_state)
10190 		return false;
10191 
10192 	old_mode = &old_crtc_state->mode;
10193 	new_mode = &new_crtc_state->mode;
10194 
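	/*
	 * Everything except the vertical blanking must match, and the vsync
	 * pulse width must stay the same; in practice this means that only
	 * the vertical front porch has changed.
	 */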
10195 	if (old_mode->clock       == new_mode->clock &&
10196 	    old_mode->hdisplay    == new_mode->hdisplay &&
10197 	    old_mode->vdisplay    == new_mode->vdisplay &&
10198 	    old_mode->htotal      == new_mode->htotal &&
10199 	    old_mode->vtotal      != new_mode->vtotal &&
10200 	    old_mode->hsync_start == new_mode->hsync_start &&
10201 	    old_mode->vsync_start != new_mode->vsync_start &&
10202 	    old_mode->hsync_end   == new_mode->hsync_end &&
10203 	    old_mode->vsync_end   != new_mode->vsync_end &&
10204 	    old_mode->hskew       == new_mode->hskew &&
10205 	    old_mode->vscan       == new_mode->vscan &&
10206 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10207 	    (new_mode->vsync_end - new_mode->vsync_start))
10208 		return true;
10209 
10210 	return false;
10211 }
10212 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10214 	uint64_t num, den, res;
10215 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10216 
10217 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10218 
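	/*
	 * mode.clock is in kHz: multiplying by 1000 gives the pixel clock in
	 * Hz, and the extra factor of 1000000 turns the refresh rate into uHz
	 * once it is divided by the pixels per frame (htotal * vtotal).
	 */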
10219 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10220 	den = (unsigned long long)new_crtc_state->mode.htotal *
10221 	      (unsigned long long)new_crtc_state->mode.vtotal;
10222 
10223 	res = div_u64(num, den);
10224 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10225 }
10226 
10227 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10228 			 struct drm_atomic_state *state,
10229 			 struct drm_crtc *crtc,
10230 			 struct drm_crtc_state *old_crtc_state,
10231 			 struct drm_crtc_state *new_crtc_state,
10232 			 bool enable,
10233 			 bool *lock_and_validation_needed)
10234 {
10235 	struct dm_atomic_state *dm_state = NULL;
10236 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10237 	struct dc_stream_state *new_stream;
10238 	int ret = 0;
10239 
10240 	/*
10241 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10242 	 * update changed items
10243 	 */
10244 	struct amdgpu_crtc *acrtc = NULL;
10245 	struct amdgpu_dm_connector *aconnector = NULL;
10246 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10247 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10248 
10249 	new_stream = NULL;
10250 
10251 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10252 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10253 	acrtc = to_amdgpu_crtc(crtc);
10254 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10255 
10256 	/* TODO This hack should go away */
10257 	if (aconnector && enable) {
10258 		/* Make sure fake sink is created in plug-in scenario */
10259 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10260 							    &aconnector->base);
10261 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10262 							    &aconnector->base);
10263 
10264 		if (IS_ERR(drm_new_conn_state)) {
10265 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10266 			goto fail;
10267 		}
10268 
10269 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10270 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10271 
10272 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10273 			goto skip_modeset;
10274 
10275 		new_stream = create_validate_stream_for_sink(aconnector,
10276 							     &new_crtc_state->mode,
10277 							     dm_new_conn_state,
10278 							     dm_old_crtc_state->stream);
10279 
10280 		/*
10281 		 * we can have no stream on ACTION_SET if a display
10282 		 * was disconnected during S3, in this case it is not an
10283 		 * error, the OS will be updated after detection, and
10284 		 * will do the right thing on next atomic commit
10285 		 */
10286 
10287 		if (!new_stream) {
10288 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10289 					__func__, acrtc->base.base.id);
10290 			ret = -ENOMEM;
10291 			goto fail;
10292 		}
10293 
10294 		/*
10295 		 * TODO: Check VSDB bits to decide whether this should
10296 		 * be enabled or not.
10297 		 */
10298 		new_stream->triggered_crtc_reset.enabled =
10299 			dm->force_timing_sync;
10300 
10301 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10302 
10303 		ret = fill_hdr_info_packet(drm_new_conn_state,
10304 					   &new_stream->hdr_static_metadata);
10305 		if (ret)
10306 			goto fail;
10307 
10308 		/*
10309 		 * If we already removed the old stream from the context
10310 		 * (and set the new stream to NULL) then we can't reuse
10311 		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON() below and get a black screen.
10313 		 *
10314 		 * TODO: Refactor this function to allow this check to work
10315 		 * in all conditions.
10316 		 */
10317 		if (dm_new_crtc_state->stream &&
10318 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10319 			goto skip_modeset;
10320 
10321 		if (dm_new_crtc_state->stream &&
10322 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10323 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10324 			new_crtc_state->mode_changed = false;
10325 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10326 					 new_crtc_state->mode_changed);
10327 		}
10328 	}
10329 
10330 	/* mode_changed flag may get updated above, need to check again */
10331 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10332 		goto skip_modeset;
10333 
	DRM_DEBUG_ATOMIC(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
		"connectors_changed:%d\n",
10338 		acrtc->crtc_id,
10339 		new_crtc_state->enable,
10340 		new_crtc_state->active,
10341 		new_crtc_state->planes_changed,
10342 		new_crtc_state->mode_changed,
10343 		new_crtc_state->active_changed,
10344 		new_crtc_state->connectors_changed);
10345 
10346 	/* Remove stream for any changed/disabled CRTC */
10347 	if (!enable) {
10348 
10349 		if (!dm_old_crtc_state->stream)
10350 			goto skip_modeset;
10351 
10352 		if (dm_new_crtc_state->stream &&
10353 		    is_timing_unchanged_for_freesync(new_crtc_state,
10354 						     old_crtc_state)) {
10355 			new_crtc_state->mode_changed = false;
10356 			DRM_DEBUG_DRIVER(
10357 				"Mode change not required for front porch change, "
10358 				"setting mode_changed to %d",
10359 				new_crtc_state->mode_changed);
10360 
10361 			set_freesync_fixed_config(dm_new_crtc_state);
10362 
10363 			goto skip_modeset;
10364 		} else if (aconnector &&
10365 			   is_freesync_video_mode(&new_crtc_state->mode,
10366 						  aconnector)) {
10367 			struct drm_display_mode *high_mode;
10368 
10369 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10370 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10371 				set_freesync_fixed_config(dm_new_crtc_state);
10372 			}
10373 		}
10374 
10375 		ret = dm_atomic_get_state(state, &dm_state);
10376 		if (ret)
10377 			goto fail;
10378 
10379 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10380 				crtc->base.id);
10381 
10382 		/* i.e. reset mode */
10383 		if (dc_remove_stream_from_ctx(
10384 				dm->dc,
10385 				dm_state->context,
10386 				dm_old_crtc_state->stream) != DC_OK) {
10387 			ret = -EINVAL;
10388 			goto fail;
10389 		}
10390 
10391 		dc_stream_release(dm_old_crtc_state->stream);
10392 		dm_new_crtc_state->stream = NULL;
10393 
10394 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10395 
10396 		*lock_and_validation_needed = true;
10397 
10398 	} else {/* Add stream for any updated/enabled CRTC */
10399 		/*
10400 		 * Quick fix to prevent NULL pointer on new_stream when
10401 		 * added MST connectors not found in existing crtc_state in the chained mode
10402 		 * TODO: need to dig out the root cause of that
10403 		 */
10404 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10405 			goto skip_modeset;
10406 
10407 		if (modereset_required(new_crtc_state))
10408 			goto skip_modeset;
10409 
10410 		if (modeset_required(new_crtc_state, new_stream,
10411 				     dm_old_crtc_state->stream)) {
10412 
10413 			WARN_ON(dm_new_crtc_state->stream);
10414 
10415 			ret = dm_atomic_get_state(state, &dm_state);
10416 			if (ret)
10417 				goto fail;
10418 
10419 			dm_new_crtc_state->stream = new_stream;
10420 
10421 			dc_stream_retain(new_stream);
10422 
10423 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10424 					 crtc->base.id);
10425 
10426 			if (dc_add_stream_to_ctx(
10427 					dm->dc,
10428 					dm_state->context,
10429 					dm_new_crtc_state->stream) != DC_OK) {
10430 				ret = -EINVAL;
10431 				goto fail;
10432 			}
10433 
10434 			*lock_and_validation_needed = true;
10435 		}
10436 	}
10437 
10438 skip_modeset:
10439 	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);
10442 
10443 	/*
10444 	 * We want to do dc stream updates that do not require a
10445 	 * full modeset below.
10446 	 */
10447 	if (!(enable && aconnector && new_crtc_state->active))
10448 		return 0;
10449 	/*
10450 	 * Given above conditions, the dc state cannot be NULL because:
10451 	 * 1. We're in the process of enabling CRTCs (just been added
10452 	 *    to the dc context, or already is on the context)
10453 	 * 2. Has a valid connector attached, and
10454 	 * 3. Is currently active and enabled.
10455 	 * => The dc stream state currently exists.
10456 	 */
10457 	BUG_ON(dm_new_crtc_state->stream == NULL);
10458 
10459 	/* Scaling or underscan settings */
10460 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10461 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10462 		update_stream_scaling_settings(
10463 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10464 
10465 	/* ABM settings */
10466 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10467 
10468 	/*
10469 	 * Color management settings. We also update color properties
10470 	 * when a modeset is needed, to ensure it gets reprogrammed.
10471 	 */
10472 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10473 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10474 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10475 		if (ret)
10476 			goto fail;
10477 	}
10478 
10479 	/* Update Freesync settings. */
10480 	get_freesync_config_for_crtc(dm_new_crtc_state,
10481 				     dm_new_conn_state);
10482 
10483 	return ret;
10484 
10485 fail:
10486 	if (new_stream)
10487 		dc_stream_release(new_stream);
10488 	return ret;
10489 }
10490 
10491 static bool should_reset_plane(struct drm_atomic_state *state,
10492 			       struct drm_plane *plane,
10493 			       struct drm_plane_state *old_plane_state,
10494 			       struct drm_plane_state *new_plane_state)
10495 {
10496 	struct drm_plane *other;
10497 	struct drm_plane_state *old_other_state, *new_other_state;
10498 	struct drm_crtc_state *new_crtc_state;
10499 	int i;
10500 
10501 	/*
10502 	 * TODO: Remove this hack once the checks below are sufficient
10503 	 * enough to determine when we need to reset all the planes on
10504 	 * the stream.
10505 	 */
10506 	if (state->allow_modeset)
10507 		return true;
10508 
10509 	/* Exit early if we know that we're adding or removing the plane. */
10510 	if (old_plane_state->crtc != new_plane_state->crtc)
10511 		return true;
10512 
10513 	/* old crtc == new_crtc == NULL, plane not in context. */
10514 	if (!new_plane_state->crtc)
10515 		return false;
10516 
10517 	new_crtc_state =
10518 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10519 
10520 	if (!new_crtc_state)
10521 		return true;
10522 
10523 	/* CRTC Degamma changes currently require us to recreate planes. */
10524 	if (new_crtc_state->color_mgmt_changed)
10525 		return true;
10526 
10527 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10528 		return true;
10529 
10530 	/*
10531 	 * If there are any new primary or overlay planes being added or
10532 	 * removed then the z-order can potentially change. To ensure
10533 	 * correct z-order and pipe acquisition the current DC architecture
10534 	 * requires us to remove and recreate all existing planes.
10535 	 *
10536 	 * TODO: Come up with a more elegant solution for this.
10537 	 */
10538 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10541 			continue;
10542 
10543 		if (old_other_state->crtc != new_plane_state->crtc &&
10544 		    new_other_state->crtc != new_plane_state->crtc)
10545 			continue;
10546 
10547 		if (old_other_state->crtc != new_other_state->crtc)
10548 			return true;
10549 
10550 		/* Src/dst size and scaling updates. */
10551 		if (old_other_state->src_w != new_other_state->src_w ||
10552 		    old_other_state->src_h != new_other_state->src_h ||
10553 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10554 		    old_other_state->crtc_h != new_other_state->crtc_h)
10555 			return true;
10556 
10557 		/* Rotation / mirroring updates. */
10558 		if (old_other_state->rotation != new_other_state->rotation)
10559 			return true;
10560 
10561 		/* Blending updates. */
10562 		if (old_other_state->pixel_blend_mode !=
10563 		    new_other_state->pixel_blend_mode)
10564 			return true;
10565 
10566 		/* Alpha updates. */
10567 		if (old_other_state->alpha != new_other_state->alpha)
10568 			return true;
10569 
10570 		/* Colorspace changes. */
10571 		if (old_other_state->color_range != new_other_state->color_range ||
10572 		    old_other_state->color_encoding != new_other_state->color_encoding)
10573 			return true;
10574 
10575 		/* Framebuffer checks fall at the end. */
10576 		if (!old_other_state->fb || !new_other_state->fb)
10577 			continue;
10578 
10579 		/* Pixel format changes can require bandwidth updates. */
10580 		if (old_other_state->fb->format != new_other_state->fb->format)
10581 			return true;
10582 
10583 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10584 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10585 
10586 		/* Tiling and DCC changes also require bandwidth updates. */
10587 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10588 		    old_afb->base.modifier != new_afb->base.modifier)
10589 			return true;
10590 	}
10591 
10592 	return false;
10593 }
10594 
10595 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10596 			      struct drm_plane_state *new_plane_state,
10597 			      struct drm_framebuffer *fb)
10598 {
10599 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10600 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10601 	unsigned int pitch;
10602 	bool linear;
10603 
10604 	if (fb->width > new_acrtc->max_cursor_width ||
10605 	    fb->height > new_acrtc->max_cursor_height) {
10606 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10607 				 new_plane_state->fb->width,
10608 				 new_plane_state->fb->height);
10609 		return -EINVAL;
10610 	}
10611 	if (new_plane_state->src_w != fb->width << 16 ||
10612 	    new_plane_state->src_h != fb->height << 16) {
10613 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10614 		return -EINVAL;
10615 	}
10616 
10617 	/* Pitch in pixels */
10618 	pitch = fb->pitches[0] / fb->format->cpp[0];
10619 
10620 	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
				 fb->width, pitch);
10623 		return -EINVAL;
10624 	}
10625 
10626 	switch (pitch) {
10627 	case 64:
10628 	case 128:
10629 	case 256:
10630 		/* FB pitch is supported by cursor plane */
10631 		break;
10632 	default:
10633 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10634 		return -EINVAL;
10635 	}
10636 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10639 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10640 		if (adev->family < AMDGPU_FAMILY_AI) {
10641 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10642 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10643 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10644 		} else {
10645 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10646 		}
10647 		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10649 			return -EINVAL;
10650 		}
10651 	}
10652 
10653 	return 0;
10654 }
10655 
10656 static int dm_update_plane_state(struct dc *dc,
10657 				 struct drm_atomic_state *state,
10658 				 struct drm_plane *plane,
10659 				 struct drm_plane_state *old_plane_state,
10660 				 struct drm_plane_state *new_plane_state,
10661 				 bool enable,
10662 				 bool *lock_and_validation_needed)
10663 {
10664 
10665 	struct dm_atomic_state *dm_state = NULL;
10666 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10667 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10668 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10669 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10670 	struct amdgpu_crtc *new_acrtc;
10671 	bool needs_reset;
10672 	int ret = 0;
10673 
10674 
10675 	new_plane_crtc = new_plane_state->crtc;
10676 	old_plane_crtc = old_plane_state->crtc;
10677 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10678 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10679 
10680 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10681 		if (!enable || !new_plane_crtc ||
10682 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10683 			return 0;
10684 
10685 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10686 
10687 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10688 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10689 			return -EINVAL;
10690 		}
10691 
10692 		if (new_plane_state->fb) {
10693 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10694 						 new_plane_state->fb);
10695 			if (ret)
10696 				return ret;
10697 		}
10698 
10699 		return 0;
10700 	}
10701 
10702 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10703 					 new_plane_state);
10704 
10705 	/* Remove any changed/removed planes */
10706 	if (!enable) {
10707 		if (!needs_reset)
10708 			return 0;
10709 
10710 		if (!old_plane_crtc)
10711 			return 0;
10712 
10713 		old_crtc_state = drm_atomic_get_old_crtc_state(
10714 				state, old_plane_crtc);
10715 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10716 
10717 		if (!dm_old_crtc_state->stream)
10718 			return 0;
10719 
10720 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10721 				plane->base.id, old_plane_crtc->base.id);
10722 
10723 		ret = dm_atomic_get_state(state, &dm_state);
10724 		if (ret)
10725 			return ret;
10726 
10727 		if (!dc_remove_plane_from_context(
10728 				dc,
10729 				dm_old_crtc_state->stream,
10730 				dm_old_plane_state->dc_state,
10731 				dm_state->context)) {
10732 
10733 			return -EINVAL;
10734 		}
10735 
10736 
10737 		dc_plane_state_release(dm_old_plane_state->dc_state);
10738 		dm_new_plane_state->dc_state = NULL;
10739 
10740 		*lock_and_validation_needed = true;
10741 
10742 	} else { /* Add new planes */
10743 		struct dc_plane_state *dc_new_plane_state;
10744 
10745 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10746 			return 0;
10747 
10748 		if (!new_plane_crtc)
10749 			return 0;
10750 
10751 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10752 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10753 
10754 		if (!dm_new_crtc_state->stream)
10755 			return 0;
10756 
10757 		if (!needs_reset)
10758 			return 0;
10759 
10760 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10761 		if (ret)
10762 			return ret;
10763 
10764 		WARN_ON(dm_new_plane_state->dc_state);
10765 
10766 		dc_new_plane_state = dc_create_plane_state(dc);
10767 		if (!dc_new_plane_state)
10768 			return -ENOMEM;
10769 
10770 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10771 				 plane->base.id, new_plane_crtc->base.id);
10772 
10773 		ret = fill_dc_plane_attributes(
10774 			drm_to_adev(new_plane_crtc->dev),
10775 			dc_new_plane_state,
10776 			new_plane_state,
10777 			new_crtc_state);
10778 		if (ret) {
10779 			dc_plane_state_release(dc_new_plane_state);
10780 			return ret;
10781 		}
10782 
10783 		ret = dm_atomic_get_state(state, &dm_state);
10784 		if (ret) {
10785 			dc_plane_state_release(dc_new_plane_state);
10786 			return ret;
10787 		}
10788 
10789 		/*
10790 		 * Any atomic check errors that occur after this will
10791 		 * not need a release. The plane state will be attached
10792 		 * to the stream, and therefore part of the atomic
10793 		 * state. It'll be released when the atomic state is
10794 		 * cleaned.
10795 		 */
10796 		if (!dc_add_plane_to_context(
10797 				dc,
10798 				dm_new_crtc_state->stream,
10799 				dc_new_plane_state,
10800 				dm_state->context)) {
10801 
10802 			dc_plane_state_release(dc_new_plane_state);
10803 			return -EINVAL;
10804 		}
10805 
10806 		dm_new_plane_state->dc_state = dc_new_plane_state;
10807 
10808 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10809 
10810 		/* Tell DC to do a full surface update every time there
10811 		 * is a plane change. Inefficient, but works for now.
10812 		 */
10813 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10814 
10815 		*lock_and_validation_needed = true;
10816 	}
10817 
10818 
10819 	return ret;
10820 }
10821 
10822 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10823 				       int *src_w, int *src_h)
10824 {
10825 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10826 	case DRM_MODE_ROTATE_90:
10827 	case DRM_MODE_ROTATE_270:
10828 		*src_w = plane_state->src_h >> 16;
10829 		*src_h = plane_state->src_w >> 16;
10830 		break;
10831 	case DRM_MODE_ROTATE_0:
10832 	case DRM_MODE_ROTATE_180:
10833 	default:
10834 		*src_w = plane_state->src_w >> 16;
10835 		*src_h = plane_state->src_h >> 16;
10836 		break;
10837 	}
10838 }
10839 
10840 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10841 				struct drm_crtc *crtc,
10842 				struct drm_crtc_state *new_crtc_state)
10843 {
10844 	struct drm_plane *cursor = crtc->cursor, *underlying;
10845 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10846 	int i;
10847 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10848 	int cursor_src_w, cursor_src_h;
10849 	int underlying_src_w, underlying_src_h;
10850 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * blending properties match the underlying planes'.
	 */
10855 
10856 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10860 
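	/* Scale ratios are compared as integers scaled by 1000 (no FP in the kernel). */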
10861 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10862 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10863 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10864 
10865 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10866 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10867 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10868 			continue;
10869 
10870 		/* Ignore disabled planes */
10871 		if (!new_underlying_state->fb)
10872 			continue;
10873 
10874 		dm_get_oriented_plane_size(new_underlying_state,
10875 					   &underlying_src_w, &underlying_src_h);
10876 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10877 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10878 
10879 		if (cursor_scale_w != underlying_scale_w ||
10880 		    cursor_scale_h != underlying_scale_h) {
10881 			drm_dbg_atomic(crtc->dev,
10882 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10883 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10884 			return -EINVAL;
10885 		}
10886 
10887 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10888 		if (new_underlying_state->crtc_x <= 0 &&
10889 		    new_underlying_state->crtc_y <= 0 &&
10890 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10891 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10892 			break;
10893 	}
10894 
10895 	return 0;
10896 }
10897 
10898 #if defined(CONFIG_DRM_AMD_DC_DCN)
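/*
 * Find the MST connector currently assigned to @crtc (if any) and add every
 * CRTC that shares its MST topology to the atomic state, since recomputing
 * DSC for one stream can change the configuration of the others.
 */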
10899 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10900 {
10901 	struct drm_connector *connector;
10902 	struct drm_connector_state *conn_state, *old_conn_state;
10903 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10906 		if (!conn_state->crtc)
10907 			conn_state = old_conn_state;
10908 
10909 		if (conn_state->crtc != crtc)
10910 			continue;
10911 
10912 		aconnector = to_amdgpu_dm_connector(connector);
10913 		if (!aconnector->port || !aconnector->mst_port)
10914 			aconnector = NULL;
10915 		else
10916 			break;
10917 	}
10918 
10919 	if (!aconnector)
10920 		return 0;
10921 
10922 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10923 }
10924 #endif
10925 
10926 /**
10927  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10928  * @dev: The DRM device
10929  * @state: The atomic state to commit
10930  *
10931  * Validate that the given atomic state is programmable by DC into hardware.
10932  * This involves constructing a &struct dc_state reflecting the new hardware
10933  * state we wish to commit, then querying DC to see if it is programmable. It's
10934  * important not to modify the existing DC state. Otherwise, atomic_check
10935  * may unexpectedly commit hardware changes.
10936  *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flips using DRM's synchronization events.
10942  *
10943  * Note that DM adds the affected connectors for all CRTCs in state, when that
10944  * might not seem necessary. This is because DC stream creation requires the
10945  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10946  * be possible but non-trivial - a possible TODO item.
10947  *
 * Return: 0 on success, or a negative error code if validation failed.
10949  */
10950 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10951 				  struct drm_atomic_state *state)
10952 {
10953 	struct amdgpu_device *adev = drm_to_adev(dev);
10954 	struct dm_atomic_state *dm_state = NULL;
10955 	struct dc *dc = adev->dm.dc;
10956 	struct drm_connector *connector;
10957 	struct drm_connector_state *old_con_state, *new_con_state;
10958 	struct drm_crtc *crtc;
10959 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10960 	struct drm_plane *plane;
10961 	struct drm_plane_state *old_plane_state, *new_plane_state;
10962 	enum dc_status status;
10963 	int ret, i;
10964 	bool lock_and_validation_needed = false;
10965 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10966 #if defined(CONFIG_DRM_AMD_DC_DCN)
10967 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10968 	struct drm_dp_mst_topology_state *mst_state;
10969 	struct drm_dp_mst_topology_mgr *mgr;
10970 #endif
10971 
10972 	trace_amdgpu_dm_atomic_check_begin(state);
10973 
10974 	ret = drm_atomic_helper_check_modeset(dev, state);
10975 	if (ret) {
10976 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10977 		goto fail;
10978 	}
10979 
10980 	/* Check connector changes */
10981 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10982 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10983 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10984 
10985 		/* Skip connectors that are disabled or part of modeset already. */
10986 		if (!old_con_state->crtc && !new_con_state->crtc)
10987 			continue;
10988 
10989 		if (!new_con_state->crtc)
10990 			continue;
10991 
10992 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10993 		if (IS_ERR(new_crtc_state)) {
10994 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10995 			ret = PTR_ERR(new_crtc_state);
10996 			goto fail;
10997 		}
10998 
10999 		if (dm_old_con_state->abm_level !=
11000 		    dm_new_con_state->abm_level)
11001 			new_crtc_state->connectors_changed = true;
11002 	}
11003 
11004 #if defined(CONFIG_DRM_AMD_DC_DCN)
11005 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11006 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11007 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11008 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11009 				if (ret) {
11010 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11011 					goto fail;
11012 				}
11013 			}
11014 		}
11015 		pre_validate_dsc(state, &dm_state, vars);
11016 	}
11017 #endif
11018 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11019 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11020 
11021 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11022 		    !new_crtc_state->color_mgmt_changed &&
11023 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11024 			dm_old_crtc_state->dsc_force_changed == false)
11025 			continue;
11026 
11027 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11028 		if (ret) {
11029 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11030 			goto fail;
11031 		}
11032 
11033 		if (!new_crtc_state->enable)
11034 			continue;
11035 
11036 		ret = drm_atomic_add_affected_connectors(state, crtc);
11037 		if (ret) {
11038 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11039 			goto fail;
11040 		}
11041 
11042 		ret = drm_atomic_add_affected_planes(state, crtc);
11043 		if (ret) {
11044 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11045 			goto fail;
11046 		}
11047 
11048 		if (dm_old_crtc_state->dsc_force_changed)
11049 			new_crtc_state->mode_changed = true;
11050 	}
11051 
11052 	/*
11053 	 * Add all primary and overlay planes on the CRTC to the state
11054 	 * whenever a plane is enabled to maintain correct z-ordering
11055 	 * and to enable fast surface updates.
11056 	 */
11057 	drm_for_each_crtc(crtc, dev) {
11058 		bool modified = false;
11059 
11060 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11061 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11062 				continue;
11063 
11064 			if (new_plane_state->crtc == crtc ||
11065 			    old_plane_state->crtc == crtc) {
11066 				modified = true;
11067 				break;
11068 			}
11069 		}
11070 
11071 		if (!modified)
11072 			continue;
11073 
11074 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11075 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11076 				continue;
11077 
11078 			new_plane_state =
11079 				drm_atomic_get_plane_state(state, plane);
11080 
11081 			if (IS_ERR(new_plane_state)) {
11082 				ret = PTR_ERR(new_plane_state);
11083 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11084 				goto fail;
11085 			}
11086 		}
11087 	}
11088 
	/* Remove existing planes if they are modified */
11090 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11091 		ret = dm_update_plane_state(dc, state, plane,
11092 					    old_plane_state,
11093 					    new_plane_state,
11094 					    false,
11095 					    &lock_and_validation_needed);
11096 		if (ret) {
11097 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11098 			goto fail;
11099 		}
11100 	}
11101 
11102 	/* Disable all crtcs which require disable */
11103 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11104 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11105 					   old_crtc_state,
11106 					   new_crtc_state,
11107 					   false,
11108 					   &lock_and_validation_needed);
11109 		if (ret) {
11110 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11111 			goto fail;
11112 		}
11113 	}
11114 
11115 	/* Enable all crtcs which require enable */
11116 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11117 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11118 					   old_crtc_state,
11119 					   new_crtc_state,
11120 					   true,
11121 					   &lock_and_validation_needed);
11122 		if (ret) {
11123 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11124 			goto fail;
11125 		}
11126 	}
11127 
11128 	/* Add new/modified planes */
11129 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11130 		ret = dm_update_plane_state(dc, state, plane,
11131 					    old_plane_state,
11132 					    new_plane_state,
11133 					    true,
11134 					    &lock_and_validation_needed);
11135 		if (ret) {
11136 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11137 			goto fail;
11138 		}
11139 	}
11140 
11141 	/* Run this here since we want to validate the streams we created */
11142 	ret = drm_atomic_helper_check_planes(dev, state);
11143 	if (ret) {
11144 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11145 		goto fail;
11146 	}
11147 
11148 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11149 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11150 		if (dm_new_crtc_state->mpo_requested)
11151 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11152 	}
11153 
11154 	/* Check cursor planes scaling */
11155 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11156 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11157 		if (ret) {
11158 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11159 			goto fail;
11160 		}
11161 	}
11162 
11163 	if (state->legacy_cursor_update) {
11164 		/*
11165 		 * This is a fast cursor update coming from the plane update
11166 		 * helper, check if it can be done asynchronously for better
11167 		 * performance.
11168 		 */
11169 		state->async_update =
11170 			!drm_atomic_helper_async_check(dev, state);
11171 
11172 		/*
11173 		 * Skip the remaining global validation if this is an async
11174 		 * update. Cursor updates can be done without affecting
11175 		 * state or bandwidth calcs and this avoids the performance
11176 		 * penalty of locking the private state object and
11177 		 * allocating a new dc_state.
11178 		 */
11179 		if (state->async_update)
11180 			return 0;
11181 	}
11182 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into context w/o causing a full reset. Need to decide
	 * how to handle.
	 */
11188 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11189 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11190 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11191 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11192 
11193 		/* Skip any modesets/resets */
11194 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11195 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11196 			continue;
11197 
		/* Skip anything that is not a scaling or underscan change */
11199 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11200 			continue;
11201 
11202 		lock_and_validation_needed = true;
11203 	}
11204 
11205 #if defined(CONFIG_DRM_AMD_DC_DCN)
11206 	/* set the slot info for each mst_state based on the link encoding format */
11207 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11208 		struct amdgpu_dm_connector *aconnector;
11209 		struct drm_connector *connector;
11210 		struct drm_connector_list_iter iter;
11211 		u8 link_coding_cap;
11212 
		if (!mgr->mst_state)
11214 			continue;
11215 
11216 		drm_connector_list_iter_begin(dev, &iter);
11217 		drm_for_each_connector_iter(connector, &iter) {
11218 			int id = connector->index;
11219 
11220 			if (id == mst_state->mgr->conn_base_id) {
11221 				aconnector = to_amdgpu_dm_connector(connector);
11222 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11223 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11224 
11225 				break;
11226 			}
11227 		}
		drm_connector_list_iter_end(&iter);
	}
11231 #endif
11232 	/**
11233 	 * Streams and planes are reset when there are changes that affect
11234 	 * bandwidth. Anything that affects bandwidth needs to go through
11235 	 * DC global validation to ensure that the configuration can be applied
11236 	 * to hardware.
11237 	 *
11238 	 * We have to currently stall out here in atomic_check for outstanding
11239 	 * commits to finish in this case because our IRQ handlers reference
11240 	 * DRM state directly - we can end up disabling interrupts too early
11241 	 * if we don't.
11242 	 *
11243 	 * TODO: Remove this stall and drop DM state private objects.
11244 	 */
11245 	if (lock_and_validation_needed) {
11246 		ret = dm_atomic_get_state(state, &dm_state);
11247 		if (ret) {
11248 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11249 			goto fail;
11250 		}
11251 
11252 		ret = do_aquire_global_lock(dev, state);
11253 		if (ret) {
11254 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11255 			goto fail;
11256 		}
11257 
11258 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}
11263 
11264 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11265 		if (ret) {
11266 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11267 			goto fail;
11268 		}
11269 #endif
11270 
11271 		/*
11272 		 * Perform validation of MST topology in the state:
11273 		 * We need to perform MST atomic check before calling
11274 		 * dc_validate_global_state(), or there is a chance
11275 		 * to get stuck in an infinite loop and hang eventually.
11276 		 */
11277 		ret = drm_dp_mst_atomic_check(state);
11278 		if (ret) {
11279 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11280 			goto fail;
11281 		}
11282 		status = dc_validate_global_state(dc, dm_state->context, true);
11283 		if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
					 dc_status_to_str(status), status);
11286 			ret = -EINVAL;
11287 			goto fail;
11288 		}
11289 	} else {
11290 		/*
11291 		 * The commit is a fast update. Fast updates shouldn't change
11292 		 * the DC context, affect global validation, and can have their
11293 		 * commit work done in parallel with other commits not touching
11294 		 * the same resource. If we have a new DC context as part of
11295 		 * the DM atomic state from validation we need to free it and
11296 		 * retain the existing one instead.
11297 		 *
11298 		 * Furthermore, since the DM atomic state only contains the DC
11299 		 * context and can safely be annulled, we can free the state
11300 		 * and clear the associated private object now to free
11301 		 * some memory and avoid a possible use-after-free later.
11302 		 */
11303 
11304 		for (i = 0; i < state->num_private_objs; i++) {
11305 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11306 
11307 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11308 				int j = state->num_private_objs-1;
11309 
11310 				dm_atomic_destroy_state(obj,
11311 						state->private_objs[i].state);
11312 
11313 				/* If i is not at the end of the array then the
11314 				 * last element needs to be moved to where i was
11315 				 * before the array can safely be truncated.
11316 				 */
11317 				if (i != j)
11318 					state->private_objs[i] =
11319 						state->private_objs[j];
11320 
11321 				state->private_objs[j].ptr = NULL;
11322 				state->private_objs[j].state = NULL;
11323 				state->private_objs[j].old_state = NULL;
11324 				state->private_objs[j].new_state = NULL;
11325 
11326 				state->num_private_objs = j;
11327 				break;
11328 			}
11329 		}
11330 	}
11331 
11332 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11334 		struct dm_crtc_state *dm_new_crtc_state =
11335 			to_dm_crtc_state(new_crtc_state);
11336 
11337 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11338 							 UPDATE_TYPE_FULL :
11339 							 UPDATE_TYPE_FAST;
11340 	}
11341 
11342 	/* Must be success */
11343 	WARN_ON(ret);
11344 
11345 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11346 
11347 	return ret;
11348 
11349 fail:
11350 	if (ret == -EDEADLK)
11351 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11352 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11353 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11354 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11356 
11357 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11358 
11359 	return ret;
11360 }
11361 
11362 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11363 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11364 {
11365 	uint8_t dpcd_data;
11366 	bool capable = false;
11367 
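	/*
	 * DP_MSA_TIMING_PAR_IGNORED in DP_DOWN_STREAM_PORT_COUNT indicates the
	 * sink can render the stream without the MSA timing parameters, which
	 * is a prerequisite for variable refresh over DisplayPort.
	 */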
11368 	if (amdgpu_dm_connector->dc_link &&
11369 		dm_helpers_dp_read_dpcd(
11370 				NULL,
11371 				amdgpu_dm_connector->dc_link,
11372 				DP_DOWN_STREAM_PORT_COUNT,
11373 				&dpcd_data,
11374 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11376 	}
11377 
11378 	return capable;
11379 }
11380 
11381 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11382 		unsigned int offset,
11383 		unsigned int total_length,
11384 		uint8_t *data,
11385 		unsigned int length,
11386 		struct amdgpu_hdmi_vsdb_info *vsdb)
11387 {
11388 	bool res;
11389 	union dmub_rb_cmd cmd;
11390 	struct dmub_cmd_send_edid_cea *input;
11391 	struct dmub_cmd_edid_cea_output *output;
11392 
11393 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11394 		return false;
11395 
11396 	memset(&cmd, 0, sizeof(cmd));
11397 
11398 	input = &cmd.edid_cea.data.input;
11399 
11400 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11401 	cmd.edid_cea.header.sub_type = 0;
11402 	cmd.edid_cea.header.payload_bytes =
11403 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11404 	input->offset = offset;
11405 	input->length = length;
11406 	input->cea_total_length = total_length;
11407 	memcpy(input->payload, data, length);
11408 
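	/* Submit the chunk to DMUB and wait for the reply, written back into cmd. */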
11409 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11410 	if (!res) {
11411 		DRM_ERROR("EDID CEA parser failed\n");
11412 		return false;
11413 	}
11414 
11415 	output = &cmd.edid_cea.data.output;
11416 
11417 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11418 		if (!output->ack.success) {
11419 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11420 					output->ack.offset);
11421 		}
11422 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11423 		if (!output->amd_vsdb.vsdb_found)
11424 			return false;
11425 
11426 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11427 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11428 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11429 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11430 	} else {
11431 		DRM_WARN("Unknown EDID CEA parser results\n");
11432 		return false;
11433 	}
11434 
11435 	return true;
11436 }
11437 
11438 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11439 		uint8_t *edid_ext, int len,
11440 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11441 {
11442 	int i;
11443 
11444 	/* send extension block to DMCU for parsing */
11445 	for (i = 0; i < len; i += 8) {
11446 		bool res;
11447 		int offset;
11448 
		/* send 8 bytes at a time */
11450 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11451 			return false;
11452 
11453 		if (i+8 == len) {
			/* EDID block send completed; expect the result */
11455 			int version, min_rate, max_rate;
11456 
11457 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11458 			if (res) {
11459 				/* amd vsdb found */
11460 				vsdb_info->freesync_supported = 1;
11461 				vsdb_info->amd_vsdb_version = version;
11462 				vsdb_info->min_refresh_rate_hz = min_rate;
11463 				vsdb_info->max_refresh_rate_hz = max_rate;
11464 				return true;
11465 			}
11466 			/* not amd vsdb */
11467 			return false;
11468 		}
11469 
		/* check for ack */
11471 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11472 		if (!res)
11473 			return false;
11474 	}
11475 
11476 	return false;
11477 }
11478 
11479 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11480 		uint8_t *edid_ext, int len,
11481 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11482 {
11483 	int i;
11484 
	/* send extension block to DMUB for parsing */
11486 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11488 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11489 			return false;
11490 	}
11491 
11492 	return vsdb_info->freesync_supported;
11493 }
11494 
11495 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11496 		uint8_t *edid_ext, int len,
11497 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11498 {
11499 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11500 
11501 	if (adev->dm.dmub_srv)
11502 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11503 	else
11504 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11505 }
11506 
11507 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11508 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11509 {
11510 	uint8_t *edid_ext = NULL;
11511 	int i;
11512 	bool valid_vsdb_found = false;
11513 
11514 	/*----- drm_find_cea_extension() -----*/
11515 	/* No EDID or EDID extensions */
11516 	if (edid == NULL || edid->extensions == 0)
11517 		return -ENODEV;
11518 
11519 	/* Find CEA extension */
11520 	for (i = 0; i < edid->extensions; i++) {
11521 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11522 		if (edid_ext[0] == CEA_EXT)
11523 			break;
11524 	}
11525 
11526 	if (i == edid->extensions)
11527 		return -ENODEV;
11528 
11529 	/*----- cea_db_offsets() -----*/
11530 	if (edid_ext[0] != CEA_EXT)
11531 		return -ENODEV;
11532 
11533 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11534 
11535 	return valid_vsdb_found ? i : -ENODEV;
11536 }
11537 
11538 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11539 					struct edid *edid)
11540 {
11541 	int i = 0;
11542 	struct detailed_timing *timing;
11543 	struct detailed_non_pixel *data;
11544 	struct detailed_data_monitor_range *range;
11545 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11546 			to_amdgpu_dm_connector(connector);
11547 	struct dm_connector_state *dm_con_state = NULL;
11548 	struct dc_sink *sink;
11549 
11550 	struct drm_device *dev = connector->dev;
11551 	struct amdgpu_device *adev = drm_to_adev(dev);
11552 	bool freesync_capable = false;
11553 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11554 
11555 	if (!connector->state) {
11556 		DRM_ERROR("%s - Connector has no state\n", __func__);
11557 		goto update;
11558 	}
11559 
11560 	sink = amdgpu_dm_connector->dc_sink ?
11561 		amdgpu_dm_connector->dc_sink :
11562 		amdgpu_dm_connector->dc_em_sink;
11563 
11564 	if (!edid || !sink) {
11565 		dm_con_state = to_dm_connector_state(connector->state);
11566 
11567 		amdgpu_dm_connector->min_vfreq = 0;
11568 		amdgpu_dm_connector->max_vfreq = 0;
11569 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11570 		connector->display_info.monitor_range.min_vfreq = 0;
11571 		connector->display_info.monitor_range.max_vfreq = 0;
11572 		freesync_capable = false;
11573 
11574 		goto update;
11575 	}
11576 
11577 	dm_con_state = to_dm_connector_state(connector->state);
11578 
11579 	if (!adev->dm.freesync_module)
11580 		goto update;
11581 
11583 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11584 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11585 		bool edid_check_required = false;
11586 
11587 		if (edid) {
11588 			edid_check_required = is_dp_capable_without_timing_msa(
11589 						adev->dm.dc,
11590 						amdgpu_dm_connector);
11591 		}
11592 
11593 		if (edid_check_required && (edid->version > 1 ||
11594 		   (edid->version == 1 && edid->revision > 1))) {
11595 			for (i = 0; i < 4; i++) {
11596 
11597 				timing	= &edid->detailed_timings[i];
11598 				data	= &timing->data.other_data;
11599 				range	= &data->data.range;
11600 				/*
11601 				 * Check if monitor has continuous frequency mode
11602 				 */
11603 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11604 					continue;
11605 				/*
11606 				 * Check for flag range limits only. If flag == 1 then
11607 				 * no additional timing information provided.
11608 				 * Default GTF, GTF Secondary curve and CVT are not
11609 				 * supported
11610 				 */
11611 				if (range->flags != 1)
11612 					continue;
11613 
11614 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11615 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11616 				amdgpu_dm_connector->pixel_clock_mhz =
11617 					range->pixel_clock_mhz * 10;
11618 
11619 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11620 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11621 
11622 				break;
11623 			}
11624 
11625 			if (amdgpu_dm_connector->max_vfreq -
11626 			    amdgpu_dm_connector->min_vfreq > 10)
11627 				freesync_capable = true;
11630 		}
11631 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11632 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11633 		if (i >= 0 && vsdb_info.freesync_supported) {
11637 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11638 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11639 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11640 				freesync_capable = true;
11641 
11642 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11643 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11644 		}
11645 	}
11646 
11647 update:
11648 	if (dm_con_state)
11649 		dm_con_state->freesync_capable = freesync_capable;
11650 
11651 	if (connector->vrr_capable_property)
11652 		drm_connector_set_vrr_capable_property(connector,
11653 						       freesync_capable);
11654 }
11655 
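/*
 * Apply the current force_timing_sync setting to every stream in the active
 * DC state and retrigger CRTC synchronization, all under the DC lock.
 */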
11656 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11657 {
11658 	struct amdgpu_device *adev = drm_to_adev(dev);
11659 	struct dc *dc = adev->dm.dc;
11660 	int i;
11661 
11662 	mutex_lock(&adev->dm.dc_lock);
11663 	if (dc->current_state) {
11664 		for (i = 0; i < dc->current_state->stream_count; ++i)
11665 			dc->current_state->streams[i]
11666 				->triggered_crtc_reset.enabled =
11667 				adev->dm.force_timing_sync;
11668 
11669 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11670 		dc_trigger_sync(dc, dc->current_state);
11671 	}
11672 	mutex_unlock(&adev->dm.dc_lock);
11673 }
11674 
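/*
 * DC register-write callback: write the register through CGS and record the
 * access with the amdgpu_dc_wreg tracepoint.
 */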
11675 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11676 		       uint32_t value, const char *func_name)
11677 {
11678 #ifdef DM_CHECK_ADDR_0
11679 	if (address == 0) {
11680 		DC_ERR("invalid register write. address = 0\n");
11681 		return;
11682 	}
11683 #endif
11684 	cgs_write_register(ctx->cgs_device, address, value);
11685 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11686 }
11687 
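/*
 * DC register-read callback: reject reads (assert, return 0) while a DMUB
 * register-offload gather is in progress without burst writes, otherwise
 * read through CGS and record the access with the amdgpu_dc_rreg tracepoint.
 */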
11688 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11689 			  const char *func_name)
11690 {
11691 	uint32_t value;
11692 #ifdef DM_CHECK_ADDR_0
11693 	if (address == 0) {
11694 		DC_ERR("invalid register read; address = 0\n");
11695 		return 0;
11696 	}
11697 #endif
11698 
11699 	if (ctx->dmub_srv &&
11700 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11701 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11702 		ASSERT(false);
11703 		return 0;
11704 	}
11705 
11706 	value = cgs_read_register(ctx->cgs_device, address);
11707 
11708 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11709 
11710 	return value;
11711 }
11712 
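/*
 * Translate a DMUB async-to-sync access status into the caller-visible
 * result: on success AUX returns the reply length and SET_CONFIG returns 0,
 * with details in *operation_result; all failures return -1 with an error
 * code in *operation_result.
 */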
11713 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11714 						struct dc_context *ctx,
11715 						uint8_t status_type,
11716 						uint32_t *operation_result)
11717 {
11718 	struct amdgpu_device *adev = ctx->driver_context;
11719 	int return_status = -1;
11720 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11721 
11722 	if (is_cmd_aux) {
11723 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11724 			return_status = p_notify->aux_reply.length;
11725 			*operation_result = p_notify->result;
11726 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11727 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11728 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11729 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11730 		} else {
11731 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11732 		}
11733 	} else {
11734 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11735 			return_status = 0;
11736 			*operation_result = p_notify->sc_status;
11737 		} else {
11738 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11739 		}
11740 	}
11741 
11742 	return return_status;
11743 }
11744 
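/*
 * Issue a DMUB AUX or SET_CONFIG request and wait up to 10 seconds on
 * dmub_aux_transfer_done so callers see a synchronous interface. For
 * successful AUX reads the reply data is copied back into the payload.
 */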
11745 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11746 	unsigned int link_index, void *cmd_payload, void *operation_result)
11747 {
11748 	struct amdgpu_device *adev = ctx->driver_context;
11749 	int ret = 0;
11750 
11751 	if (is_cmd_aux) {
11752 		dc_process_dmub_aux_transfer_async(ctx->dc,
11753 			link_index, (struct aux_payload *)cmd_payload);
11754 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11755 					(struct set_config_cmd_payload *)cmd_payload,
11756 					adev->dm.dmub_notify)) {
11757 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11758 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11759 					(uint32_t *)operation_result);
11760 	}
11761 
11762 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11763 	if (ret == 0) {
11764 		DRM_ERROR("wait_for_completion_timeout timed out!");
11765 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11766 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11767 				(uint32_t *)operation_result);
11768 	}
11769 
11770 	if (is_cmd_aux) {
11771 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11772 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11773 
11774 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11775 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11776 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11777 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11778 				       adev->dm.dmub_notify->aux_reply.length);
11779 			}
11780 		}
11781 	}
11782 
11783 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11784 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11785 			(uint32_t *)operation_result);
11786 }
11787 
11788 /*
11789  * Check whether seamless boot is supported.
11790  *
11791  * So far we only support seamless boot on CHIP_VANGOGH.
11792  * If everything goes well, we may consider expanding
11793  * seamless boot to other ASICs.
11794  */
11795 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11796 {
11797 	switch (adev->asic_type) {
11798 	case CHIP_VANGOGH:
11799 		if (!adev->mman.keep_stolen_vga_memory)
11800 			return true;
11801 		break;
11802 	default:
11803 		break;
11804 	}
11805 
11806 	return false;
11807 }
11808