xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision 9c0c2c7aa23cbf78277557cfe8ec8bb7689225fe)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 
118 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120 
121 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123 
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
126 
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
129 
130 /**
131  * DOC: overview
132  *
133  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135  * requests into DC requests, and DC responses into DRM responses.
136  *
137  * The root control structure is &struct amdgpu_display_manager.
138  */
139 
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144 
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 {
147 	switch (link->dpcd_caps.dongle_type) {
148 	case DISPLAY_DONGLE_NONE:
149 		return DRM_MODE_SUBCONNECTOR_Native;
150 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151 		return DRM_MODE_SUBCONNECTOR_VGA;
152 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
154 		return DRM_MODE_SUBCONNECTOR_DVID;
155 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157 		return DRM_MODE_SUBCONNECTOR_HDMIA;
158 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 	default:
160 		return DRM_MODE_SUBCONNECTOR_Unknown;
161 	}
162 }
163 
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 {
166 	struct dc_link *link = aconnector->dc_link;
167 	struct drm_connector *connector = &aconnector->base;
168 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169 
170 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171 		return;
172 
173 	if (aconnector->dc_sink)
174 		subconnector = get_subconnector_type(link);
175 
176 	drm_object_property_set_value(&connector->base,
177 			connector->dev->mode_config.dp_subconnector_property,
178 			subconnector);
179 }
180 
181 /*
182  * initializes drm_device display related structures, based on the information
183  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
184  * drm_encoder, drm_mode_config
185  *
186  * Returns 0 on success
187  */
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* removes and deallocates the drm structures, created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191 
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 				struct drm_plane *plane,
194 				unsigned long possible_crtcs,
195 				const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 			       struct drm_plane *plane,
198 			       uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
201 				    uint32_t link_index,
202 				    struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 				  struct amdgpu_encoder *aencoder,
205 				  uint32_t link_index);
206 
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208 
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210 
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 				  struct drm_atomic_state *state);
213 
214 static void handle_cursor_update(struct drm_plane *plane,
215 				 struct drm_plane_state *old_plane_state);
216 
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
222 
223 static bool
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225 				 struct drm_crtc_state *new_crtc_state);
226 /*
227  * dm_vblank_get_counter
228  *
229  * @brief
230  * Get counter for number of vertical blanks
231  *
232  * @param
233  * struct amdgpu_device *adev - [in] desired amdgpu device
234  * int disp_idx - [in] which CRTC to get the counter from
235  *
236  * @return
237  * Counter for vertical blanks
238  */
239 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
240 {
241 	if (crtc >= adev->mode_info.num_crtc)
242 		return 0;
243 	else {
244 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
245 
246 		if (acrtc->dm_irq_params.stream == NULL) {
247 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
248 				  crtc);
249 			return 0;
250 		}
251 
252 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
253 	}
254 }
255 
256 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257 				  u32 *vbl, u32 *position)
258 {
259 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
260 
261 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 		return -EINVAL;
263 	else {
264 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
265 
266 		if (acrtc->dm_irq_params.stream ==  NULL) {
267 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268 				  crtc);
269 			return 0;
270 		}
271 
272 		/*
273 		 * TODO rework base driver to use values directly.
274 		 * for now parse it back into reg-format
275 		 */
276 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277 					 &v_blank_start,
278 					 &v_blank_end,
279 					 &h_position,
280 					 &v_position);
281 
282 		*position = v_position | (h_position << 16);
283 		*vbl = v_blank_start | (v_blank_end << 16);
284 	}
285 
286 	return 0;
287 }
288 
289 static bool dm_is_idle(void *handle)
290 {
291 	/* XXX todo */
292 	return true;
293 }
294 
295 static int dm_wait_for_idle(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static bool dm_check_soft_reset(void *handle)
302 {
303 	return false;
304 }
305 
306 static int dm_soft_reset(void *handle)
307 {
308 	/* XXX todo */
309 	return 0;
310 }
311 
312 static struct amdgpu_crtc *
313 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 		     int otg_inst)
315 {
316 	struct drm_device *dev = adev_to_drm(adev);
317 	struct drm_crtc *crtc;
318 	struct amdgpu_crtc *amdgpu_crtc;
319 
320 	if (WARN_ON(otg_inst == -1))
321 		return adev->mode_info.crtcs[0];
322 
323 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324 		amdgpu_crtc = to_amdgpu_crtc(crtc);
325 
326 		if (amdgpu_crtc->otg_inst == otg_inst)
327 			return amdgpu_crtc;
328 	}
329 
330 	return NULL;
331 }
332 
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 {
335 	return acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_VARIABLE ||
337 	       acrtc->dm_irq_params.freesync_config.state ==
338 		       VRR_STATE_ACTIVE_FIXED;
339 }
340 
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 {
343 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 }
346 
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348 					      struct dm_crtc_state *new_state)
349 {
350 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
351 		return true;
352 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353 		return true;
354 	else
355 		return false;
356 }
357 
358 /**
359  * dm_pflip_high_irq() - Handle pageflip interrupt
360  * @interrupt_params: ignored
361  *
362  * Handles the pageflip interrupt by notifying all interested parties
363  * that the pageflip has been completed.
364  */
365 static void dm_pflip_high_irq(void *interrupt_params)
366 {
367 	struct amdgpu_crtc *amdgpu_crtc;
368 	struct common_irq_params *irq_params = interrupt_params;
369 	struct amdgpu_device *adev = irq_params->adev;
370 	unsigned long flags;
371 	struct drm_pending_vblank_event *e;
372 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
373 	bool vrr_active;
374 
375 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376 
377 	/* IRQ could occur when in initial stage */
378 	/* TODO work and BO cleanup */
379 	if (amdgpu_crtc == NULL) {
380 		DC_LOG_PFLIP("CRTC is null, returning.\n");
381 		return;
382 	}
383 
384 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385 
386 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
387 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
388 						 amdgpu_crtc->pflip_status,
389 						 AMDGPU_FLIP_SUBMITTED,
390 						 amdgpu_crtc->crtc_id,
391 						 amdgpu_crtc);
392 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393 		return;
394 	}
395 
396 	/* page flip completed. */
397 	e = amdgpu_crtc->event;
398 	amdgpu_crtc->event = NULL;
399 
400 	WARN_ON(!e);
401 
402 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403 
404 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
405 	if (!vrr_active ||
406 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 				      &v_blank_end, &hpos, &vpos) ||
408 	    (vpos < v_blank_start)) {
409 		/* Update to correct count and vblank timestamp if racing with
410 		 * vblank irq. This also updates to the correct vblank timestamp
411 		 * even in VRR mode, as scanout is past the front-porch atm.
412 		 */
413 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414 
415 		/* Wake up userspace by sending the pageflip event with proper
416 		 * count and timestamp of vblank of flip completion.
417 		 */
418 		if (e) {
419 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420 
421 			/* Event sent, so done with vblank for this flip */
422 			drm_crtc_vblank_put(&amdgpu_crtc->base);
423 		}
424 	} else if (e) {
425 		/* VRR active and inside front-porch: vblank count and
426 		 * timestamp for pageflip event will only be up to date after
427 		 * drm_crtc_handle_vblank() has been executed from late vblank
428 		 * irq handler after start of back-porch (vline 0). We queue the
429 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 		 * updated timestamp and count, once it runs after us.
431 		 *
432 		 * We need to open-code this instead of using the helper
433 		 * drm_crtc_arm_vblank_event(), as that helper would
434 		 * call drm_crtc_accurate_vblank_count(), which we must
435 		 * not call in VRR mode while we are in front-porch!
436 		 */
437 
438 		/* sequence will be replaced by real count during send-out. */
439 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 		e->pipe = amdgpu_crtc->crtc_id;
441 
442 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443 		e = NULL;
444 	}
445 
446 	/* Keep track of vblank of this flip for flip throttling. We use the
447 	 * cooked hw counter, as that one incremented at start of this vblank
448 	 * of pageflip completion, so last_flip_vblank is the forbidden count
449 	 * for queueing new pageflips if vsync + VRR is enabled.
450 	 */
451 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453 
454 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456 
457 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
459 		     vrr_active, (int) !e);
460 }
461 
462 static void dm_vupdate_high_irq(void *interrupt_params)
463 {
464 	struct common_irq_params *irq_params = interrupt_params;
465 	struct amdgpu_device *adev = irq_params->adev;
466 	struct amdgpu_crtc *acrtc;
467 	struct drm_device *drm_dev;
468 	struct drm_vblank_crtc *vblank;
469 	ktime_t frame_duration_ns, previous_timestamp;
470 	unsigned long flags;
471 	int vrr_active;
472 
473 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474 
475 	if (acrtc) {
476 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 		drm_dev = acrtc->base.dev;
478 		vblank = &drm_dev->vblank[acrtc->base.index];
479 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 		frame_duration_ns = vblank->time - previous_timestamp;
481 
482 		if (frame_duration_ns > 0) {
483 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
484 						frame_duration_ns,
485 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 		}
488 
489 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490 			      acrtc->crtc_id,
491 			      vrr_active);
492 
493 		/* Core vblank handling is done here after end of front-porch in
494 		 * vrr mode, as vblank timestamping will give valid results
495 		 * while now done after front-porch. This will also deliver
496 		 * page-flip completion events that have been queued to us
497 		 * if a pageflip happened inside front-porch.
498 		 */
499 		if (vrr_active) {
500 			drm_crtc_handle_vblank(&acrtc->base);
501 
502 			/* BTR processing for pre-DCE12 ASICs */
503 			if (acrtc->dm_irq_params.stream &&
504 			    adev->family < AMDGPU_FAMILY_AI) {
505 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 				mod_freesync_handle_v_update(
507 				    adev->dm.freesync_module,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params);
510 
511 				dc_stream_adjust_vmin_vmax(
512 				    adev->dm.dc,
513 				    acrtc->dm_irq_params.stream,
514 				    &acrtc->dm_irq_params.vrr_params.adjust);
515 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516 			}
517 		}
518 	}
519 }
520 
521 /**
522  * dm_crtc_high_irq() - Handles CRTC interrupt
523  * @interrupt_params: used for determining the CRTC instance
524  *
525  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
526  * event handler.
527  */
528 static void dm_crtc_high_irq(void *interrupt_params)
529 {
530 	struct common_irq_params *irq_params = interrupt_params;
531 	struct amdgpu_device *adev = irq_params->adev;
532 	struct amdgpu_crtc *acrtc;
533 	unsigned long flags;
534 	int vrr_active;
535 
536 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537 	if (!acrtc)
538 		return;
539 
540 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541 
542 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 		      vrr_active, acrtc->dm_irq_params.active_planes);
544 
545 	/**
546 	 * Core vblank handling at start of front-porch is only possible
547 	 * in non-vrr mode, as only there vblank timestamping will give
548 	 * valid results while done in front-porch. Otherwise defer it
549 	 * to dm_vupdate_high_irq after end of front-porch.
550 	 */
551 	if (!vrr_active)
552 		drm_crtc_handle_vblank(&acrtc->base);
553 
554 	/**
555 	 * Following stuff must happen at start of vblank, for crc
556 	 * computation and below-the-range btr support in vrr mode.
557 	 */
558 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559 
560 	/* BTR updates need to happen before VUPDATE on Vega and above. */
561 	if (adev->family < AMDGPU_FAMILY_AI)
562 		return;
563 
564 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565 
566 	if (acrtc->dm_irq_params.stream &&
567 	    acrtc->dm_irq_params.vrr_params.supported &&
568 	    acrtc->dm_irq_params.freesync_config.state ==
569 		    VRR_STATE_ACTIVE_VARIABLE) {
570 		mod_freesync_handle_v_update(adev->dm.freesync_module,
571 					     acrtc->dm_irq_params.stream,
572 					     &acrtc->dm_irq_params.vrr_params);
573 
574 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 					   &acrtc->dm_irq_params.vrr_params.adjust);
576 	}
577 
578 	/*
579 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
580 	 * In that case, pageflip completion interrupts won't fire and pageflip
581 	 * completion events won't get delivered. Prevent this by sending
582 	 * pending pageflip events from here if a flip is still pending.
583 	 *
584 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 	 * avoid race conditions between flip programming and completion,
586 	 * which could cause too early flip completion events.
587 	 */
588 	if (adev->family >= AMDGPU_FAMILY_RV &&
589 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 	    acrtc->dm_irq_params.active_planes == 0) {
591 		if (acrtc->event) {
592 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593 			acrtc->event = NULL;
594 			drm_crtc_vblank_put(&acrtc->base);
595 		}
596 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 	}
598 
599 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 }
601 
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
604 /**
605  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606  * DCN generation ASICs
607  * @interrupt_params: interrupt parameters
608  *
609  * Used to set crc window/read out crc value at vertical line 0 position
610  */
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612 {
613 	struct common_irq_params *irq_params = interrupt_params;
614 	struct amdgpu_device *adev = irq_params->adev;
615 	struct amdgpu_crtc *acrtc;
616 
617 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618 
619 	if (!acrtc)
620 		return;
621 
622 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 }
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
625 
626 /**
627  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
628  * @adev: amdgpu_device pointer
629  * @notify: dmub notification structure
630  *
631  * Dmub AUX or SET_CONFIG command completion processing callback
632  * Copies dmub notification to DM which is to be read by AUX command.
633  * issuing thread and also signals the event to wake up the thread.
634  */
635 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
636 					struct dmub_notification *notify)
637 {
638 	if (adev->dm.dmub_notify)
639 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
640 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
641 		complete(&adev->dm.dmub_aux_transfer_done);
642 }
643 
644 /**
645  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
646  * @adev: amdgpu_device pointer
647  * @notify: dmub notification structure
648  *
649  * Dmub Hpd interrupt processing callback. Gets displayindex through the
650  * ink index and calls helper to do the processing.
651  */
652 static void dmub_hpd_callback(struct amdgpu_device *adev,
653 			      struct dmub_notification *notify)
654 {
655 	struct amdgpu_dm_connector *aconnector;
656 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
657 	struct drm_connector *connector;
658 	struct drm_connector_list_iter iter;
659 	struct dc_link *link;
660 	uint8_t link_index = 0;
661 	struct drm_device *dev = adev->dm.ddev;
662 
663 	if (adev == NULL)
664 		return;
665 
666 	if (notify == NULL) {
667 		DRM_ERROR("DMUB HPD callback notification was NULL");
668 		return;
669 	}
670 
671 	if (notify->link_index > adev->dm.dc->link_count) {
672 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
673 		return;
674 	}
675 
676 	link_index = notify->link_index;
677 	link = adev->dm.dc->links[link_index];
678 
679 	drm_connector_list_iter_begin(dev, &iter);
680 	drm_for_each_connector_iter(connector, &iter) {
681 		aconnector = to_amdgpu_dm_connector(connector);
682 		if (link && aconnector->dc_link == link) {
683 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
684 			hpd_aconnector = aconnector;
685 			break;
686 		}
687 	}
688 	drm_connector_list_iter_end(&iter);
689 
690 	if (hpd_aconnector) {
691 		if (notify->type == DMUB_NOTIFICATION_HPD)
692 			handle_hpd_irq_helper(hpd_aconnector);
693 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
694 			handle_hpd_rx_irq(hpd_aconnector);
695 	}
696 }
697 
698 /**
699  * register_dmub_notify_callback - Sets callback for DMUB notify
700  * @adev: amdgpu_device pointer
701  * @type: Type of dmub notification
702  * @callback: Dmub interrupt callback function
703  * @dmub_int_thread_offload: offload indicator
704  *
705  * API to register a dmub callback handler for a dmub notification
706  * Also sets indicator whether callback processing to be offloaded.
707  * to dmub interrupt handling thread
708  * Return: true if successfully registered, false if there is existing registration
709  */
710 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
711 					  enum dmub_notification_type type,
712 					  dmub_notify_interrupt_callback_t callback,
713 					  bool dmub_int_thread_offload)
714 {
715 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
716 		adev->dm.dmub_callback[type] = callback;
717 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
718 	} else
719 		return false;
720 
721 	return true;
722 }
723 
724 static void dm_handle_hpd_work(struct work_struct *work)
725 {
726 	struct dmub_hpd_work *dmub_hpd_wrk;
727 
728 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
729 
730 	if (!dmub_hpd_wrk->dmub_notify) {
731 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
732 		return;
733 	}
734 
735 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
736 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
737 		dmub_hpd_wrk->dmub_notify);
738 	}
739 
740 	kfree(dmub_hpd_wrk->dmub_notify);
741 	kfree(dmub_hpd_wrk);
742 
743 }
744 
745 #define DMUB_TRACE_MAX_READ 64
746 /**
747  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
748  * @interrupt_params: used for determining the Outbox instance
749  *
750  * Handles the Outbox Interrupt
751  * event handler.
752  */
753 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
754 {
755 	struct dmub_notification notify;
756 	struct common_irq_params *irq_params = interrupt_params;
757 	struct amdgpu_device *adev = irq_params->adev;
758 	struct amdgpu_display_manager *dm = &adev->dm;
759 	struct dmcub_trace_buf_entry entry = { 0 };
760 	uint32_t count = 0;
761 	struct dmub_hpd_work *dmub_hpd_wrk;
762 	struct dc_link *plink = NULL;
763 
764 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
765 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
766 
767 		do {
768 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
769 			if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
770 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
771 				continue;
772 			}
773 			if (!dm->dmub_callback[notify.type]) {
774 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
775 				continue;
776 			}
777 			if (dm->dmub_thread_offload[notify.type] == true) {
778 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
779 				if (!dmub_hpd_wrk) {
780 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
781 					return;
782 				}
783 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
784 				if (!dmub_hpd_wrk->dmub_notify) {
785 					kfree(dmub_hpd_wrk);
786 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
787 					return;
788 				}
789 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
790 				if (dmub_hpd_wrk->dmub_notify)
791 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
792 				dmub_hpd_wrk->adev = adev;
793 				if (notify.type == DMUB_NOTIFICATION_HPD) {
794 					plink = adev->dm.dc->links[notify.link_index];
795 					if (plink) {
796 						plink->hpd_status =
797 							notify.hpd_status == DP_HPD_PLUG;
798 					}
799 				}
800 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
801 			} else {
802 				dm->dmub_callback[notify.type](adev, &notify);
803 			}
804 		} while (notify.pending_notification);
805 	}
806 
807 
808 	do {
809 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
810 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
811 							entry.param0, entry.param1);
812 
813 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
814 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
815 		} else
816 			break;
817 
818 		count++;
819 
820 	} while (count <= DMUB_TRACE_MAX_READ);
821 
822 	if (count > DMUB_TRACE_MAX_READ)
823 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
824 }
825 #endif /* CONFIG_DRM_AMD_DC_DCN */
826 
827 static int dm_set_clockgating_state(void *handle,
828 		  enum amd_clockgating_state state)
829 {
830 	return 0;
831 }
832 
833 static int dm_set_powergating_state(void *handle,
834 		  enum amd_powergating_state state)
835 {
836 	return 0;
837 }
838 
839 /* Prototypes of private functions */
840 static int dm_early_init(void* handle);
841 
842 /* Allocate memory for FBC compressed data  */
843 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
844 {
845 	struct drm_device *dev = connector->dev;
846 	struct amdgpu_device *adev = drm_to_adev(dev);
847 	struct dm_compressor_info *compressor = &adev->dm.compressor;
848 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
849 	struct drm_display_mode *mode;
850 	unsigned long max_size = 0;
851 
852 	if (adev->dm.dc->fbc_compressor == NULL)
853 		return;
854 
855 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
856 		return;
857 
858 	if (compressor->bo_ptr)
859 		return;
860 
861 
862 	list_for_each_entry(mode, &connector->modes, head) {
863 		if (max_size < mode->htotal * mode->vtotal)
864 			max_size = mode->htotal * mode->vtotal;
865 	}
866 
867 	if (max_size) {
868 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
869 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
870 			    &compressor->gpu_addr, &compressor->cpu_addr);
871 
872 		if (r)
873 			DRM_ERROR("DM: Failed to initialize FBC\n");
874 		else {
875 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
876 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
877 		}
878 
879 	}
880 
881 }
882 
883 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
884 					  int pipe, bool *enabled,
885 					  unsigned char *buf, int max_bytes)
886 {
887 	struct drm_device *dev = dev_get_drvdata(kdev);
888 	struct amdgpu_device *adev = drm_to_adev(dev);
889 	struct drm_connector *connector;
890 	struct drm_connector_list_iter conn_iter;
891 	struct amdgpu_dm_connector *aconnector;
892 	int ret = 0;
893 
894 	*enabled = false;
895 
896 	mutex_lock(&adev->dm.audio_lock);
897 
898 	drm_connector_list_iter_begin(dev, &conn_iter);
899 	drm_for_each_connector_iter(connector, &conn_iter) {
900 		aconnector = to_amdgpu_dm_connector(connector);
901 		if (aconnector->audio_inst != port)
902 			continue;
903 
904 		*enabled = true;
905 		ret = drm_eld_size(connector->eld);
906 		memcpy(buf, connector->eld, min(max_bytes, ret));
907 
908 		break;
909 	}
910 	drm_connector_list_iter_end(&conn_iter);
911 
912 	mutex_unlock(&adev->dm.audio_lock);
913 
914 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
915 
916 	return ret;
917 }
918 
919 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
920 	.get_eld = amdgpu_dm_audio_component_get_eld,
921 };
922 
923 static int amdgpu_dm_audio_component_bind(struct device *kdev,
924 				       struct device *hda_kdev, void *data)
925 {
926 	struct drm_device *dev = dev_get_drvdata(kdev);
927 	struct amdgpu_device *adev = drm_to_adev(dev);
928 	struct drm_audio_component *acomp = data;
929 
930 	acomp->ops = &amdgpu_dm_audio_component_ops;
931 	acomp->dev = kdev;
932 	adev->dm.audio_component = acomp;
933 
934 	return 0;
935 }
936 
937 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
938 					  struct device *hda_kdev, void *data)
939 {
940 	struct drm_device *dev = dev_get_drvdata(kdev);
941 	struct amdgpu_device *adev = drm_to_adev(dev);
942 	struct drm_audio_component *acomp = data;
943 
944 	acomp->ops = NULL;
945 	acomp->dev = NULL;
946 	adev->dm.audio_component = NULL;
947 }
948 
949 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
950 	.bind	= amdgpu_dm_audio_component_bind,
951 	.unbind	= amdgpu_dm_audio_component_unbind,
952 };
953 
954 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
955 {
956 	int i, ret;
957 
958 	if (!amdgpu_audio)
959 		return 0;
960 
961 	adev->mode_info.audio.enabled = true;
962 
963 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
964 
965 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
966 		adev->mode_info.audio.pin[i].channels = -1;
967 		adev->mode_info.audio.pin[i].rate = -1;
968 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
969 		adev->mode_info.audio.pin[i].status_bits = 0;
970 		adev->mode_info.audio.pin[i].category_code = 0;
971 		adev->mode_info.audio.pin[i].connected = false;
972 		adev->mode_info.audio.pin[i].id =
973 			adev->dm.dc->res_pool->audios[i]->inst;
974 		adev->mode_info.audio.pin[i].offset = 0;
975 	}
976 
977 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
978 	if (ret < 0)
979 		return ret;
980 
981 	adev->dm.audio_registered = true;
982 
983 	return 0;
984 }
985 
986 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
987 {
988 	if (!amdgpu_audio)
989 		return;
990 
991 	if (!adev->mode_info.audio.enabled)
992 		return;
993 
994 	if (adev->dm.audio_registered) {
995 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
996 		adev->dm.audio_registered = false;
997 	}
998 
999 	/* TODO: Disable audio? */
1000 
1001 	adev->mode_info.audio.enabled = false;
1002 }
1003 
1004 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1005 {
1006 	struct drm_audio_component *acomp = adev->dm.audio_component;
1007 
1008 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1009 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1010 
1011 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1012 						 pin, -1);
1013 	}
1014 }
1015 
1016 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1017 {
1018 	const struct dmcub_firmware_header_v1_0 *hdr;
1019 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1020 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1021 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1022 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1023 	struct abm *abm = adev->dm.dc->res_pool->abm;
1024 	struct dmub_srv_hw_params hw_params;
1025 	enum dmub_status status;
1026 	const unsigned char *fw_inst_const, *fw_bss_data;
1027 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1028 	bool has_hw_support;
1029 	struct dc *dc = adev->dm.dc;
1030 
1031 	if (!dmub_srv)
1032 		/* DMUB isn't supported on the ASIC. */
1033 		return 0;
1034 
1035 	if (!fb_info) {
1036 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1037 		return -EINVAL;
1038 	}
1039 
1040 	if (!dmub_fw) {
1041 		/* Firmware required for DMUB support. */
1042 		DRM_ERROR("No firmware provided for DMUB.\n");
1043 		return -EINVAL;
1044 	}
1045 
1046 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1047 	if (status != DMUB_STATUS_OK) {
1048 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1049 		return -EINVAL;
1050 	}
1051 
1052 	if (!has_hw_support) {
1053 		DRM_INFO("DMUB unsupported on ASIC\n");
1054 		return 0;
1055 	}
1056 
1057 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1058 	status = dmub_srv_hw_reset(dmub_srv);
1059 	if (status != DMUB_STATUS_OK)
1060 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1061 
1062 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1063 
1064 	fw_inst_const = dmub_fw->data +
1065 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1066 			PSP_HEADER_BYTES;
1067 
1068 	fw_bss_data = dmub_fw->data +
1069 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1070 		      le32_to_cpu(hdr->inst_const_bytes);
1071 
1072 	/* Copy firmware and bios info into FB memory. */
1073 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1074 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1075 
1076 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1077 
1078 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1079 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1080 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1081 	 * will be done by dm_dmub_hw_init
1082 	 */
1083 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1084 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1085 				fw_inst_const_size);
1086 	}
1087 
1088 	if (fw_bss_data_size)
1089 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1090 		       fw_bss_data, fw_bss_data_size);
1091 
1092 	/* Copy firmware bios info into FB memory. */
1093 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1094 	       adev->bios_size);
1095 
1096 	/* Reset regions that need to be reset. */
1097 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1098 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1099 
1100 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1101 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1102 
1103 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1104 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1105 
1106 	/* Initialize hardware. */
1107 	memset(&hw_params, 0, sizeof(hw_params));
1108 	hw_params.fb_base = adev->gmc.fb_start;
1109 	hw_params.fb_offset = adev->gmc.aper_base;
1110 
1111 	/* backdoor load firmware and trigger dmub running */
1112 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1113 		hw_params.load_inst_const = true;
1114 
1115 	if (dmcu)
1116 		hw_params.psp_version = dmcu->psp_version;
1117 
1118 	for (i = 0; i < fb_info->num_fb; ++i)
1119 		hw_params.fb[i] = &fb_info->fb[i];
1120 
1121 	switch (adev->asic_type) {
1122 	case CHIP_YELLOW_CARP:
1123 		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1124 			hw_params.dpia_supported = true;
1125 #if defined(CONFIG_DRM_AMD_DC_DCN)
1126 			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1127 #endif
1128 		}
1129 		break;
1130 	default:
1131 		break;
1132 	}
1133 
1134 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1135 	if (status != DMUB_STATUS_OK) {
1136 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1137 		return -EINVAL;
1138 	}
1139 
1140 	/* Wait for firmware load to finish. */
1141 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1142 	if (status != DMUB_STATUS_OK)
1143 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1144 
1145 	/* Init DMCU and ABM if available. */
1146 	if (dmcu && abm) {
1147 		dmcu->funcs->dmcu_init(dmcu);
1148 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1149 	}
1150 
1151 	if (!adev->dm.dc->ctx->dmub_srv)
1152 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1153 	if (!adev->dm.dc->ctx->dmub_srv) {
1154 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1155 		return -ENOMEM;
1156 	}
1157 
1158 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1159 		 adev->dm.dmcub_fw_version);
1160 
1161 	return 0;
1162 }
1163 
1164 #if defined(CONFIG_DRM_AMD_DC_DCN)
1165 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1166 {
1167 	uint64_t pt_base;
1168 	uint32_t logical_addr_low;
1169 	uint32_t logical_addr_high;
1170 	uint32_t agp_base, agp_bot, agp_top;
1171 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1172 
1173 	memset(pa_config, 0, sizeof(*pa_config));
1174 
1175 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1176 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1177 
1178 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1179 		/*
1180 		 * Raven2 has a HW issue that it is unable to use the vram which
1181 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1182 		 * workaround that increase system aperture high address (add 1)
1183 		 * to get rid of the VM fault and hardware hang.
1184 		 */
1185 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1186 	else
1187 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1188 
1189 	agp_base = 0;
1190 	agp_bot = adev->gmc.agp_start >> 24;
1191 	agp_top = adev->gmc.agp_end >> 24;
1192 
1193 
1194 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1195 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1196 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1197 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1198 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1199 	page_table_base.low_part = lower_32_bits(pt_base);
1200 
1201 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1202 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1203 
1204 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1205 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1206 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1207 
1208 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1209 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1210 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1211 
1212 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1213 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1214 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1215 
1216 	pa_config->is_hvm_enabled = 0;
1217 
1218 }
1219 #endif
1220 #if defined(CONFIG_DRM_AMD_DC_DCN)
1221 static void vblank_control_worker(struct work_struct *work)
1222 {
1223 	struct vblank_control_work *vblank_work =
1224 		container_of(work, struct vblank_control_work, work);
1225 	struct amdgpu_display_manager *dm = vblank_work->dm;
1226 
1227 	mutex_lock(&dm->dc_lock);
1228 
1229 	if (vblank_work->enable)
1230 		dm->active_vblank_irq_count++;
1231 	else if(dm->active_vblank_irq_count)
1232 		dm->active_vblank_irq_count--;
1233 
1234 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1235 
1236 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1237 
1238 	/* Control PSR based on vblank requirements from OS */
1239 	if (vblank_work->stream && vblank_work->stream->link) {
1240 		if (vblank_work->enable) {
1241 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1242 				amdgpu_dm_psr_disable(vblank_work->stream);
1243 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1244 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1245 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1246 			amdgpu_dm_psr_enable(vblank_work->stream);
1247 		}
1248 	}
1249 
1250 	mutex_unlock(&dm->dc_lock);
1251 
1252 	dc_stream_release(vblank_work->stream);
1253 
1254 	kfree(vblank_work);
1255 }
1256 
1257 #endif
1258 
1259 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1260 {
1261 	struct hpd_rx_irq_offload_work *offload_work;
1262 	struct amdgpu_dm_connector *aconnector;
1263 	struct dc_link *dc_link;
1264 	struct amdgpu_device *adev;
1265 	enum dc_connection_type new_connection_type = dc_connection_none;
1266 	unsigned long flags;
1267 
1268 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1269 	aconnector = offload_work->offload_wq->aconnector;
1270 
1271 	if (!aconnector) {
1272 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1273 		goto skip;
1274 	}
1275 
1276 	adev = drm_to_adev(aconnector->base.dev);
1277 	dc_link = aconnector->dc_link;
1278 
1279 	mutex_lock(&aconnector->hpd_lock);
1280 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1281 		DRM_ERROR("KMS: Failed to detect connector\n");
1282 	mutex_unlock(&aconnector->hpd_lock);
1283 
1284 	if (new_connection_type == dc_connection_none)
1285 		goto skip;
1286 
1287 	if (amdgpu_in_reset(adev))
1288 		goto skip;
1289 
1290 	mutex_lock(&adev->dm.dc_lock);
1291 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1292 		dc_link_dp_handle_automated_test(dc_link);
1293 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1294 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1295 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1296 		dc_link_dp_handle_link_loss(dc_link);
1297 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1298 		offload_work->offload_wq->is_handling_link_loss = false;
1299 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1300 	}
1301 	mutex_unlock(&adev->dm.dc_lock);
1302 
1303 skip:
1304 	kfree(offload_work);
1305 
1306 }
1307 
1308 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1309 {
1310 	int max_caps = dc->caps.max_links;
1311 	int i = 0;
1312 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1313 
1314 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1315 
1316 	if (!hpd_rx_offload_wq)
1317 		return NULL;
1318 
1319 
1320 	for (i = 0; i < max_caps; i++) {
1321 		hpd_rx_offload_wq[i].wq =
1322 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1323 
1324 		if (hpd_rx_offload_wq[i].wq == NULL) {
1325 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1326 			return NULL;
1327 		}
1328 
1329 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1330 	}
1331 
1332 	return hpd_rx_offload_wq;
1333 }
1334 
1335 struct amdgpu_stutter_quirk {
1336 	u16 chip_vendor;
1337 	u16 chip_device;
1338 	u16 subsys_vendor;
1339 	u16 subsys_device;
1340 	u8 revision;
1341 };
1342 
1343 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1344 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1345 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1346 	{ 0, 0, 0, 0, 0 },
1347 };
1348 
1349 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1350 {
1351 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1352 
1353 	while (p && p->chip_device != 0) {
1354 		if (pdev->vendor == p->chip_vendor &&
1355 		    pdev->device == p->chip_device &&
1356 		    pdev->subsystem_vendor == p->subsys_vendor &&
1357 		    pdev->subsystem_device == p->subsys_device &&
1358 		    pdev->revision == p->revision) {
1359 			return true;
1360 		}
1361 		++p;
1362 	}
1363 	return false;
1364 }
1365 
1366 static int amdgpu_dm_init(struct amdgpu_device *adev)
1367 {
1368 	struct dc_init_data init_data;
1369 #ifdef CONFIG_DRM_AMD_DC_HDCP
1370 	struct dc_callback_init init_params;
1371 #endif
1372 	int r;
1373 
1374 	adev->dm.ddev = adev_to_drm(adev);
1375 	adev->dm.adev = adev;
1376 
1377 	/* Zero all the fields */
1378 	memset(&init_data, 0, sizeof(init_data));
1379 #ifdef CONFIG_DRM_AMD_DC_HDCP
1380 	memset(&init_params, 0, sizeof(init_params));
1381 #endif
1382 
1383 	mutex_init(&adev->dm.dc_lock);
1384 	mutex_init(&adev->dm.audio_lock);
1385 #if defined(CONFIG_DRM_AMD_DC_DCN)
1386 	spin_lock_init(&adev->dm.vblank_lock);
1387 #endif
1388 
1389 	if(amdgpu_dm_irq_init(adev)) {
1390 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1391 		goto error;
1392 	}
1393 
1394 	init_data.asic_id.chip_family = adev->family;
1395 
1396 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1397 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1398 	init_data.asic_id.chip_id = adev->pdev->device;
1399 
1400 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1401 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1402 	init_data.asic_id.atombios_base_address =
1403 		adev->mode_info.atom_context->bios;
1404 
1405 	init_data.driver = adev;
1406 
1407 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1408 
1409 	if (!adev->dm.cgs_device) {
1410 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1411 		goto error;
1412 	}
1413 
1414 	init_data.cgs_device = adev->dm.cgs_device;
1415 
1416 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1417 
1418 	switch (adev->asic_type) {
1419 	case CHIP_CARRIZO:
1420 	case CHIP_STONEY:
1421 		init_data.flags.gpu_vm_support = true;
1422 		break;
1423 	default:
1424 		switch (adev->ip_versions[DCE_HWIP][0]) {
1425 		case IP_VERSION(2, 1, 0):
1426 			init_data.flags.gpu_vm_support = true;
1427 			switch (adev->dm.dmcub_fw_version) {
1428 			case 0: /* development */
1429 			case 0x1: /* linux-firmware.git hash 6d9f399 */
1430 			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1431 				init_data.flags.disable_dmcu = false;
1432 				break;
1433 			default:
1434 				init_data.flags.disable_dmcu = true;
1435 			}
1436 			break;
1437 		case IP_VERSION(1, 0, 0):
1438 		case IP_VERSION(1, 0, 1):
1439 		case IP_VERSION(3, 0, 1):
1440 		case IP_VERSION(3, 1, 2):
1441 		case IP_VERSION(3, 1, 3):
1442 			init_data.flags.gpu_vm_support = true;
1443 			break;
1444 		case IP_VERSION(2, 0, 3):
1445 			init_data.flags.disable_dmcu = true;
1446 			break;
1447 		default:
1448 			break;
1449 		}
1450 		break;
1451 	}
1452 
1453 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1454 		init_data.flags.fbc_support = true;
1455 
1456 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1457 		init_data.flags.multi_mon_pp_mclk_switch = true;
1458 
1459 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1460 		init_data.flags.disable_fractional_pwm = true;
1461 
1462 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1463 		init_data.flags.edp_no_power_sequencing = true;
1464 
1465 #ifdef CONFIG_DRM_AMD_DC_DCN
1466 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1467 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1468 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1469 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1470 #endif
1471 
1472 	init_data.flags.power_down_display_on_boot = true;
1473 
1474 	if (check_seamless_boot_capability(adev)) {
1475 		init_data.flags.power_down_display_on_boot = false;
1476 		init_data.flags.allow_seamless_boot_optimization = true;
1477 		DRM_INFO("Seamless boot condition check passed\n");
1478 	}
1479 
1480 	INIT_LIST_HEAD(&adev->dm.da_list);
1481 	/* Display Core create. */
1482 	adev->dm.dc = dc_create(&init_data);
1483 
1484 	if (adev->dm.dc) {
1485 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1486 	} else {
1487 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1488 		goto error;
1489 	}
1490 
1491 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1492 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1493 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1494 	}
1495 
1496 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1497 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1498 	if (dm_should_disable_stutter(adev->pdev))
1499 		adev->dm.dc->debug.disable_stutter = true;
1500 
1501 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1502 		adev->dm.dc->debug.disable_stutter = true;
1503 
1504 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1505 		adev->dm.dc->debug.disable_dsc = true;
1506 		adev->dm.dc->debug.disable_dsc_edp = true;
1507 	}
1508 
1509 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1510 		adev->dm.dc->debug.disable_clock_gate = true;
1511 
1512 	r = dm_dmub_hw_init(adev);
1513 	if (r) {
1514 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1515 		goto error;
1516 	}
1517 
1518 	dc_hardware_init(adev->dm.dc);
1519 
1520 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1521 	if (!adev->dm.hpd_rx_offload_wq) {
1522 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1523 		goto error;
1524 	}
1525 
1526 #if defined(CONFIG_DRM_AMD_DC_DCN)
1527 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1528 		struct dc_phy_addr_space_config pa_config;
1529 
1530 		mmhub_read_system_context(adev, &pa_config);
1531 
1532 		// Call the DC init_memory func
1533 		dc_setup_system_context(adev->dm.dc, &pa_config);
1534 	}
1535 #endif
1536 
1537 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1538 	if (!adev->dm.freesync_module) {
1539 		DRM_ERROR(
1540 		"amdgpu: failed to initialize freesync_module.\n");
1541 	} else
1542 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1543 				adev->dm.freesync_module);
1544 
1545 	amdgpu_dm_init_color_mod();
1546 
1547 #if defined(CONFIG_DRM_AMD_DC_DCN)
1548 	if (adev->dm.dc->caps.max_links > 0) {
1549 		adev->dm.vblank_control_workqueue =
1550 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1551 		if (!adev->dm.vblank_control_workqueue)
1552 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1553 	}
1554 #endif
1555 
1556 #ifdef CONFIG_DRM_AMD_DC_HDCP
1557 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1558 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1559 
1560 		if (!adev->dm.hdcp_workqueue)
1561 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1562 		else
1563 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1564 
1565 		dc_init_callbacks(adev->dm.dc, &init_params);
1566 	}
1567 #endif
1568 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1569 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1570 #endif
1571 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1572 		init_completion(&adev->dm.dmub_aux_transfer_done);
1573 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1574 		if (!adev->dm.dmub_notify) {
1575 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1576 			goto error;
1577 		}
1578 
1579 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1580 		if (!adev->dm.delayed_hpd_wq) {
1581 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1582 			goto error;
1583 		}
1584 
1585 		amdgpu_dm_outbox_init(adev);
1586 #if defined(CONFIG_DRM_AMD_DC_DCN)
1587 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1588 			dmub_aux_setconfig_callback, false)) {
1589 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1590 			goto error;
1591 		}
1592 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1593 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1594 			goto error;
1595 		}
1596 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1597 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1598 			goto error;
1599 		}
1600 #endif /* CONFIG_DRM_AMD_DC_DCN */
1601 	}
1602 
1603 	if (amdgpu_dm_initialize_drm_device(adev)) {
1604 		DRM_ERROR(
1605 		"amdgpu: failed to initialize sw for display support.\n");
1606 		goto error;
1607 	}
1608 
1609 	/* create fake encoders for MST */
1610 	dm_dp_create_fake_mst_encoders(adev);
1611 
1612 	/* TODO: Add_display_info? */
1613 
1614 	/* TODO use dynamic cursor width */
1615 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1616 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1617 
1618 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1619 		DRM_ERROR(
1620 		"amdgpu: failed to initialize sw for display support.\n");
1621 		goto error;
1622 	}
1623 
1624 
1625 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1626 
1627 	return 0;
1628 error:
1629 	amdgpu_dm_fini(adev);
1630 
1631 	return -EINVAL;
1632 }
1633 
1634 static int amdgpu_dm_early_fini(void *handle)
1635 {
1636 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1637 
1638 	amdgpu_dm_audio_fini(adev);
1639 
1640 	return 0;
1641 }
1642 
1643 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1644 {
1645 	int i;
1646 
1647 #if defined(CONFIG_DRM_AMD_DC_DCN)
1648 	if (adev->dm.vblank_control_workqueue) {
1649 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1650 		adev->dm.vblank_control_workqueue = NULL;
1651 	}
1652 #endif
1653 
1654 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1655 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1656 	}
1657 
1658 	amdgpu_dm_destroy_drm_device(&adev->dm);
1659 
1660 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1661 	if (adev->dm.crc_rd_wrk) {
1662 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1663 		kfree(adev->dm.crc_rd_wrk);
1664 		adev->dm.crc_rd_wrk = NULL;
1665 	}
1666 #endif
1667 #ifdef CONFIG_DRM_AMD_DC_HDCP
1668 	if (adev->dm.hdcp_workqueue) {
1669 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1670 		adev->dm.hdcp_workqueue = NULL;
1671 	}
1672 
1673 	if (adev->dm.dc)
1674 		dc_deinit_callbacks(adev->dm.dc);
1675 #endif
1676 
1677 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1678 
1679 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1680 		kfree(adev->dm.dmub_notify);
1681 		adev->dm.dmub_notify = NULL;
1682 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1683 		adev->dm.delayed_hpd_wq = NULL;
1684 	}
1685 
1686 	if (adev->dm.dmub_bo)
1687 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1688 				      &adev->dm.dmub_bo_gpu_addr,
1689 				      &adev->dm.dmub_bo_cpu_addr);
1690 
1691 	if (adev->dm.hpd_rx_offload_wq) {
1692 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1693 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1694 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1695 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1696 			}
1697 		}
1698 
1699 		kfree(adev->dm.hpd_rx_offload_wq);
1700 		adev->dm.hpd_rx_offload_wq = NULL;
1701 	}
1702 
1703 	/* DC Destroy TODO: Replace destroy DAL */
1704 	if (adev->dm.dc)
1705 		dc_destroy(&adev->dm.dc);
1706 	/*
1707 	 * TODO: pageflip, vlank interrupt
1708 	 *
1709 	 * amdgpu_dm_irq_fini(adev);
1710 	 */
1711 
1712 	if (adev->dm.cgs_device) {
1713 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1714 		adev->dm.cgs_device = NULL;
1715 	}
1716 	if (adev->dm.freesync_module) {
1717 		mod_freesync_destroy(adev->dm.freesync_module);
1718 		adev->dm.freesync_module = NULL;
1719 	}
1720 
1721 	mutex_destroy(&adev->dm.audio_lock);
1722 	mutex_destroy(&adev->dm.dc_lock);
1723 
1724 	return;
1725 }
1726 
1727 static int load_dmcu_fw(struct amdgpu_device *adev)
1728 {
1729 	const char *fw_name_dmcu = NULL;
1730 	int r;
1731 	const struct dmcu_firmware_header_v1_0 *hdr;
1732 
1733 	switch(adev->asic_type) {
1734 #if defined(CONFIG_DRM_AMD_DC_SI)
1735 	case CHIP_TAHITI:
1736 	case CHIP_PITCAIRN:
1737 	case CHIP_VERDE:
1738 	case CHIP_OLAND:
1739 #endif
1740 	case CHIP_BONAIRE:
1741 	case CHIP_HAWAII:
1742 	case CHIP_KAVERI:
1743 	case CHIP_KABINI:
1744 	case CHIP_MULLINS:
1745 	case CHIP_TONGA:
1746 	case CHIP_FIJI:
1747 	case CHIP_CARRIZO:
1748 	case CHIP_STONEY:
1749 	case CHIP_POLARIS11:
1750 	case CHIP_POLARIS10:
1751 	case CHIP_POLARIS12:
1752 	case CHIP_VEGAM:
1753 	case CHIP_VEGA10:
1754 	case CHIP_VEGA12:
1755 	case CHIP_VEGA20:
1756 		return 0;
1757 	case CHIP_NAVI12:
1758 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1759 		break;
1760 	case CHIP_RAVEN:
1761 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1762 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1763 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1764 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1765 		else
1766 			return 0;
1767 		break;
1768 	default:
1769 		switch (adev->ip_versions[DCE_HWIP][0]) {
1770 		case IP_VERSION(2, 0, 2):
1771 		case IP_VERSION(2, 0, 3):
1772 		case IP_VERSION(2, 0, 0):
1773 		case IP_VERSION(2, 1, 0):
1774 		case IP_VERSION(3, 0, 0):
1775 		case IP_VERSION(3, 0, 2):
1776 		case IP_VERSION(3, 0, 3):
1777 		case IP_VERSION(3, 0, 1):
1778 		case IP_VERSION(3, 1, 2):
1779 		case IP_VERSION(3, 1, 3):
1780 			return 0;
1781 		default:
1782 			break;
1783 		}
1784 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1785 		return -EINVAL;
1786 	}
1787 
1788 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1789 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1790 		return 0;
1791 	}
1792 
1793 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1794 	if (r == -ENOENT) {
1795 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1796 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1797 		adev->dm.fw_dmcu = NULL;
1798 		return 0;
1799 	}
1800 	if (r) {
1801 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1802 			fw_name_dmcu);
1803 		return r;
1804 	}
1805 
1806 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1807 	if (r) {
1808 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1809 			fw_name_dmcu);
1810 		release_firmware(adev->dm.fw_dmcu);
1811 		adev->dm.fw_dmcu = NULL;
1812 		return r;
1813 	}
1814 
1815 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1816 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1817 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1818 	adev->firmware.fw_size +=
1819 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1820 
1821 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1822 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1823 	adev->firmware.fw_size +=
1824 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1825 
1826 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1827 
1828 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1829 
1830 	return 0;
1831 }
1832 
1833 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1834 {
1835 	struct amdgpu_device *adev = ctx;
1836 
1837 	return dm_read_reg(adev->dm.dc->ctx, address);
1838 }
1839 
1840 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1841 				     uint32_t value)
1842 {
1843 	struct amdgpu_device *adev = ctx;
1844 
1845 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1846 }
1847 
1848 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1849 {
1850 	struct dmub_srv_create_params create_params;
1851 	struct dmub_srv_region_params region_params;
1852 	struct dmub_srv_region_info region_info;
1853 	struct dmub_srv_fb_params fb_params;
1854 	struct dmub_srv_fb_info *fb_info;
1855 	struct dmub_srv *dmub_srv;
1856 	const struct dmcub_firmware_header_v1_0 *hdr;
1857 	const char *fw_name_dmub;
1858 	enum dmub_asic dmub_asic;
1859 	enum dmub_status status;
1860 	int r;
1861 
1862 	switch (adev->ip_versions[DCE_HWIP][0]) {
1863 	case IP_VERSION(2, 1, 0):
1864 		dmub_asic = DMUB_ASIC_DCN21;
1865 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1866 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1867 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1868 		break;
1869 	case IP_VERSION(3, 0, 0):
1870 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1871 			dmub_asic = DMUB_ASIC_DCN30;
1872 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1873 		} else {
1874 			dmub_asic = DMUB_ASIC_DCN30;
1875 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1876 		}
1877 		break;
1878 	case IP_VERSION(3, 0, 1):
1879 		dmub_asic = DMUB_ASIC_DCN301;
1880 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1881 		break;
1882 	case IP_VERSION(3, 0, 2):
1883 		dmub_asic = DMUB_ASIC_DCN302;
1884 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1885 		break;
1886 	case IP_VERSION(3, 0, 3):
1887 		dmub_asic = DMUB_ASIC_DCN303;
1888 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1889 		break;
1890 	case IP_VERSION(3, 1, 2):
1891 	case IP_VERSION(3, 1, 3):
1892 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1893 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1894 		break;
1895 
1896 	default:
1897 		/* ASIC doesn't support DMUB. */
1898 		return 0;
1899 	}
1900 
1901 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1902 	if (r) {
1903 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1904 		return 0;
1905 	}
1906 
1907 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1908 	if (r) {
1909 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1910 		return 0;
1911 	}
1912 
1913 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1914 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1915 
1916 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1917 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1918 			AMDGPU_UCODE_ID_DMCUB;
1919 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1920 			adev->dm.dmub_fw;
1921 		adev->firmware.fw_size +=
1922 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1923 
1924 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1925 			 adev->dm.dmcub_fw_version);
1926 	}
1927 
1928 
1929 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1930 	dmub_srv = adev->dm.dmub_srv;
1931 
1932 	if (!dmub_srv) {
1933 		DRM_ERROR("Failed to allocate DMUB service!\n");
1934 		return -ENOMEM;
1935 	}
1936 
1937 	memset(&create_params, 0, sizeof(create_params));
1938 	create_params.user_ctx = adev;
1939 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1940 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1941 	create_params.asic = dmub_asic;
1942 
1943 	/* Create the DMUB service. */
1944 	status = dmub_srv_create(dmub_srv, &create_params);
1945 	if (status != DMUB_STATUS_OK) {
1946 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1947 		return -EINVAL;
1948 	}
1949 
1950 	/* Calculate the size of all the regions for the DMUB service. */
1951 	memset(&region_params, 0, sizeof(region_params));
1952 
1953 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1954 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1955 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1956 	region_params.vbios_size = adev->bios_size;
1957 	region_params.fw_bss_data = region_params.bss_data_size ?
1958 		adev->dm.dmub_fw->data +
1959 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1960 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1961 	region_params.fw_inst_const =
1962 		adev->dm.dmub_fw->data +
1963 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1964 		PSP_HEADER_BYTES;
1965 
1966 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1967 					   &region_info);
1968 
1969 	if (status != DMUB_STATUS_OK) {
1970 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1971 		return -EINVAL;
1972 	}
1973 
1974 	/*
1975 	 * Allocate a framebuffer based on the total size of all the regions.
1976 	 * TODO: Move this into GART.
1977 	 */
1978 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1979 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1980 				    &adev->dm.dmub_bo_gpu_addr,
1981 				    &adev->dm.dmub_bo_cpu_addr);
1982 	if (r)
1983 		return r;
1984 
1985 	/* Rebase the regions on the framebuffer address. */
1986 	memset(&fb_params, 0, sizeof(fb_params));
1987 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1988 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1989 	fb_params.region_info = &region_info;
1990 
1991 	adev->dm.dmub_fb_info =
1992 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1993 	fb_info = adev->dm.dmub_fb_info;
1994 
1995 	if (!fb_info) {
1996 		DRM_ERROR(
1997 			"Failed to allocate framebuffer info for DMUB service!\n");
1998 		return -ENOMEM;
1999 	}
2000 
2001 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2002 	if (status != DMUB_STATUS_OK) {
2003 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2004 		return -EINVAL;
2005 	}
2006 
2007 	return 0;
2008 }
2009 
2010 static int dm_sw_init(void *handle)
2011 {
2012 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2013 	int r;
2014 
2015 	r = dm_dmub_sw_init(adev);
2016 	if (r)
2017 		return r;
2018 
2019 	return load_dmcu_fw(adev);
2020 }
2021 
2022 static int dm_sw_fini(void *handle)
2023 {
2024 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2025 
2026 	kfree(adev->dm.dmub_fb_info);
2027 	adev->dm.dmub_fb_info = NULL;
2028 
2029 	if (adev->dm.dmub_srv) {
2030 		dmub_srv_destroy(adev->dm.dmub_srv);
2031 		adev->dm.dmub_srv = NULL;
2032 	}
2033 
2034 	release_firmware(adev->dm.dmub_fw);
2035 	adev->dm.dmub_fw = NULL;
2036 
2037 	release_firmware(adev->dm.fw_dmcu);
2038 	adev->dm.fw_dmcu = NULL;
2039 
2040 	return 0;
2041 }
2042 
2043 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2044 {
2045 	struct amdgpu_dm_connector *aconnector;
2046 	struct drm_connector *connector;
2047 	struct drm_connector_list_iter iter;
2048 	int ret = 0;
2049 
2050 	drm_connector_list_iter_begin(dev, &iter);
2051 	drm_for_each_connector_iter(connector, &iter) {
2052 		aconnector = to_amdgpu_dm_connector(connector);
2053 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2054 		    aconnector->mst_mgr.aux) {
2055 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2056 					 aconnector,
2057 					 aconnector->base.base.id);
2058 
2059 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2060 			if (ret < 0) {
2061 				DRM_ERROR("DM_MST: Failed to start MST\n");
2062 				aconnector->dc_link->type =
2063 					dc_connection_single;
2064 				break;
2065 			}
2066 		}
2067 	}
2068 	drm_connector_list_iter_end(&iter);
2069 
2070 	return ret;
2071 }
2072 
2073 static int dm_late_init(void *handle)
2074 {
2075 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2076 
2077 	struct dmcu_iram_parameters params;
2078 	unsigned int linear_lut[16];
2079 	int i;
2080 	struct dmcu *dmcu = NULL;
2081 
2082 	dmcu = adev->dm.dc->res_pool->dmcu;
2083 
2084 	for (i = 0; i < 16; i++)
2085 		linear_lut[i] = 0xFFFF * i / 15;
2086 
2087 	params.set = 0;
2088 	params.backlight_ramping_override = false;
2089 	params.backlight_ramping_start = 0xCCCC;
2090 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2091 	params.backlight_lut_array_size = 16;
2092 	params.backlight_lut_array = linear_lut;
2093 
2094 	/* Min backlight level after ABM reduction,  Don't allow below 1%
2095 	 * 0xFFFF x 0.01 = 0x28F
2096 	 */
2097 	params.min_abm_backlight = 0x28F;
2098 	/* In the case where abm is implemented on dmcub,
2099 	* dmcu object will be null.
2100 	* ABM 2.4 and up are implemented on dmcub.
2101 	*/
2102 	if (dmcu) {
2103 		if (!dmcu_load_iram(dmcu, params))
2104 			return -EINVAL;
2105 	} else if (adev->dm.dc->ctx->dmub_srv) {
2106 		struct dc_link *edp_links[MAX_NUM_EDP];
2107 		int edp_num;
2108 
2109 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2110 		for (i = 0; i < edp_num; i++) {
2111 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2112 				return -EINVAL;
2113 		}
2114 	}
2115 
2116 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2117 }
2118 
2119 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2120 {
2121 	struct amdgpu_dm_connector *aconnector;
2122 	struct drm_connector *connector;
2123 	struct drm_connector_list_iter iter;
2124 	struct drm_dp_mst_topology_mgr *mgr;
2125 	int ret;
2126 	bool need_hotplug = false;
2127 
2128 	drm_connector_list_iter_begin(dev, &iter);
2129 	drm_for_each_connector_iter(connector, &iter) {
2130 		aconnector = to_amdgpu_dm_connector(connector);
2131 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2132 		    aconnector->mst_port)
2133 			continue;
2134 
2135 		mgr = &aconnector->mst_mgr;
2136 
2137 		if (suspend) {
2138 			drm_dp_mst_topology_mgr_suspend(mgr);
2139 		} else {
2140 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2141 			if (ret < 0) {
2142 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2143 				need_hotplug = true;
2144 			}
2145 		}
2146 	}
2147 	drm_connector_list_iter_end(&iter);
2148 
2149 	if (need_hotplug)
2150 		drm_kms_helper_hotplug_event(dev);
2151 }
2152 
2153 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2154 {
2155 	struct smu_context *smu = &adev->smu;
2156 	int ret = 0;
2157 
2158 	if (!is_support_sw_smu(adev))
2159 		return 0;
2160 
2161 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2162 	 * on window driver dc implementation.
2163 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2164 	 * should be passed to smu during boot up and resume from s3.
2165 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2166 	 * dcn20_resource_construct
2167 	 * then call pplib functions below to pass the settings to smu:
2168 	 * smu_set_watermarks_for_clock_ranges
2169 	 * smu_set_watermarks_table
2170 	 * navi10_set_watermarks_table
2171 	 * smu_write_watermarks_table
2172 	 *
2173 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2174 	 * dc has implemented different flow for window driver:
2175 	 * dc_hardware_init / dc_set_power_state
2176 	 * dcn10_init_hw
2177 	 * notify_wm_ranges
2178 	 * set_wm_ranges
2179 	 * -- Linux
2180 	 * smu_set_watermarks_for_clock_ranges
2181 	 * renoir_set_watermarks_table
2182 	 * smu_write_watermarks_table
2183 	 *
2184 	 * For Linux,
2185 	 * dc_hardware_init -> amdgpu_dm_init
2186 	 * dc_set_power_state --> dm_resume
2187 	 *
2188 	 * therefore, this function apply to navi10/12/14 but not Renoir
2189 	 * *
2190 	 */
2191 	switch (adev->ip_versions[DCE_HWIP][0]) {
2192 	case IP_VERSION(2, 0, 2):
2193 	case IP_VERSION(2, 0, 0):
2194 		break;
2195 	default:
2196 		return 0;
2197 	}
2198 
2199 	ret = smu_write_watermarks_table(smu);
2200 	if (ret) {
2201 		DRM_ERROR("Failed to update WMTABLE!\n");
2202 		return ret;
2203 	}
2204 
2205 	return 0;
2206 }
2207 
2208 /**
2209  * dm_hw_init() - Initialize DC device
2210  * @handle: The base driver device containing the amdgpu_dm device.
2211  *
2212  * Initialize the &struct amdgpu_display_manager device. This involves calling
2213  * the initializers of each DM component, then populating the struct with them.
2214  *
2215  * Although the function implies hardware initialization, both hardware and
2216  * software are initialized here. Splitting them out to their relevant init
2217  * hooks is a future TODO item.
2218  *
2219  * Some notable things that are initialized here:
2220  *
2221  * - Display Core, both software and hardware
2222  * - DC modules that we need (freesync and color management)
2223  * - DRM software states
2224  * - Interrupt sources and handlers
2225  * - Vblank support
2226  * - Debug FS entries, if enabled
2227  */
2228 static int dm_hw_init(void *handle)
2229 {
2230 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2231 	/* Create DAL display manager */
2232 	amdgpu_dm_init(adev);
2233 	amdgpu_dm_hpd_init(adev);
2234 
2235 	return 0;
2236 }
2237 
2238 /**
2239  * dm_hw_fini() - Teardown DC device
2240  * @handle: The base driver device containing the amdgpu_dm device.
2241  *
2242  * Teardown components within &struct amdgpu_display_manager that require
2243  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2244  * were loaded. Also flush IRQ workqueues and disable them.
2245  */
2246 static int dm_hw_fini(void *handle)
2247 {
2248 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2249 
2250 	amdgpu_dm_hpd_fini(adev);
2251 
2252 	amdgpu_dm_irq_fini(adev);
2253 	amdgpu_dm_fini(adev);
2254 	return 0;
2255 }
2256 
2257 
2258 static int dm_enable_vblank(struct drm_crtc *crtc);
2259 static void dm_disable_vblank(struct drm_crtc *crtc);
2260 
2261 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2262 				 struct dc_state *state, bool enable)
2263 {
2264 	enum dc_irq_source irq_source;
2265 	struct amdgpu_crtc *acrtc;
2266 	int rc = -EBUSY;
2267 	int i = 0;
2268 
2269 	for (i = 0; i < state->stream_count; i++) {
2270 		acrtc = get_crtc_by_otg_inst(
2271 				adev, state->stream_status[i].primary_otg_inst);
2272 
2273 		if (acrtc && state->stream_status[i].plane_count != 0) {
2274 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2275 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2276 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2277 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2278 			if (rc)
2279 				DRM_WARN("Failed to %s pflip interrupts\n",
2280 					 enable ? "enable" : "disable");
2281 
2282 			if (enable) {
2283 				rc = dm_enable_vblank(&acrtc->base);
2284 				if (rc)
2285 					DRM_WARN("Failed to enable vblank interrupts\n");
2286 			} else {
2287 				dm_disable_vblank(&acrtc->base);
2288 			}
2289 
2290 		}
2291 	}
2292 
2293 }
2294 
2295 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2296 {
2297 	struct dc_state *context = NULL;
2298 	enum dc_status res = DC_ERROR_UNEXPECTED;
2299 	int i;
2300 	struct dc_stream_state *del_streams[MAX_PIPES];
2301 	int del_streams_count = 0;
2302 
2303 	memset(del_streams, 0, sizeof(del_streams));
2304 
2305 	context = dc_create_state(dc);
2306 	if (context == NULL)
2307 		goto context_alloc_fail;
2308 
2309 	dc_resource_state_copy_construct_current(dc, context);
2310 
2311 	/* First remove from context all streams */
2312 	for (i = 0; i < context->stream_count; i++) {
2313 		struct dc_stream_state *stream = context->streams[i];
2314 
2315 		del_streams[del_streams_count++] = stream;
2316 	}
2317 
2318 	/* Remove all planes for removed streams and then remove the streams */
2319 	for (i = 0; i < del_streams_count; i++) {
2320 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2321 			res = DC_FAIL_DETACH_SURFACES;
2322 			goto fail;
2323 		}
2324 
2325 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2326 		if (res != DC_OK)
2327 			goto fail;
2328 	}
2329 
2330 	res = dc_commit_state(dc, context);
2331 
2332 fail:
2333 	dc_release_state(context);
2334 
2335 context_alloc_fail:
2336 	return res;
2337 }
2338 
2339 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2340 {
2341 	int i;
2342 
2343 	if (dm->hpd_rx_offload_wq) {
2344 		for (i = 0; i < dm->dc->caps.max_links; i++)
2345 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2346 	}
2347 }
2348 
2349 static int dm_suspend(void *handle)
2350 {
2351 	struct amdgpu_device *adev = handle;
2352 	struct amdgpu_display_manager *dm = &adev->dm;
2353 	int ret = 0;
2354 
2355 	if (amdgpu_in_reset(adev)) {
2356 		mutex_lock(&dm->dc_lock);
2357 
2358 #if defined(CONFIG_DRM_AMD_DC_DCN)
2359 		dc_allow_idle_optimizations(adev->dm.dc, false);
2360 #endif
2361 
2362 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2363 
2364 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2365 
2366 		amdgpu_dm_commit_zero_streams(dm->dc);
2367 
2368 		amdgpu_dm_irq_suspend(adev);
2369 
2370 		hpd_rx_irq_work_suspend(dm);
2371 
2372 		return ret;
2373 	}
2374 
2375 	WARN_ON(adev->dm.cached_state);
2376 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2377 
2378 	s3_handle_mst(adev_to_drm(adev), true);
2379 
2380 	amdgpu_dm_irq_suspend(adev);
2381 
2382 	hpd_rx_irq_work_suspend(dm);
2383 
2384 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2385 
2386 	return 0;
2387 }
2388 
2389 static struct amdgpu_dm_connector *
2390 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2391 					     struct drm_crtc *crtc)
2392 {
2393 	uint32_t i;
2394 	struct drm_connector_state *new_con_state;
2395 	struct drm_connector *connector;
2396 	struct drm_crtc *crtc_from_state;
2397 
2398 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2399 		crtc_from_state = new_con_state->crtc;
2400 
2401 		if (crtc_from_state == crtc)
2402 			return to_amdgpu_dm_connector(connector);
2403 	}
2404 
2405 	return NULL;
2406 }
2407 
2408 static void emulated_link_detect(struct dc_link *link)
2409 {
2410 	struct dc_sink_init_data sink_init_data = { 0 };
2411 	struct display_sink_capability sink_caps = { 0 };
2412 	enum dc_edid_status edid_status;
2413 	struct dc_context *dc_ctx = link->ctx;
2414 	struct dc_sink *sink = NULL;
2415 	struct dc_sink *prev_sink = NULL;
2416 
2417 	link->type = dc_connection_none;
2418 	prev_sink = link->local_sink;
2419 
2420 	if (prev_sink)
2421 		dc_sink_release(prev_sink);
2422 
2423 	switch (link->connector_signal) {
2424 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2425 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2426 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2427 		break;
2428 	}
2429 
2430 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2431 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2432 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2433 		break;
2434 	}
2435 
2436 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2437 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2438 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2439 		break;
2440 	}
2441 
2442 	case SIGNAL_TYPE_LVDS: {
2443 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2444 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2445 		break;
2446 	}
2447 
2448 	case SIGNAL_TYPE_EDP: {
2449 		sink_caps.transaction_type =
2450 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2451 		sink_caps.signal = SIGNAL_TYPE_EDP;
2452 		break;
2453 	}
2454 
2455 	case SIGNAL_TYPE_DISPLAY_PORT: {
2456 		sink_caps.transaction_type =
2457 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2458 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2459 		break;
2460 	}
2461 
2462 	default:
2463 		DC_ERROR("Invalid connector type! signal:%d\n",
2464 			link->connector_signal);
2465 		return;
2466 	}
2467 
2468 	sink_init_data.link = link;
2469 	sink_init_data.sink_signal = sink_caps.signal;
2470 
2471 	sink = dc_sink_create(&sink_init_data);
2472 	if (!sink) {
2473 		DC_ERROR("Failed to create sink!\n");
2474 		return;
2475 	}
2476 
2477 	/* dc_sink_create returns a new reference */
2478 	link->local_sink = sink;
2479 
2480 	edid_status = dm_helpers_read_local_edid(
2481 			link->ctx,
2482 			link,
2483 			sink);
2484 
2485 	if (edid_status != EDID_OK)
2486 		DC_ERROR("Failed to read EDID");
2487 
2488 }
2489 
2490 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2491 				     struct amdgpu_display_manager *dm)
2492 {
2493 	struct {
2494 		struct dc_surface_update surface_updates[MAX_SURFACES];
2495 		struct dc_plane_info plane_infos[MAX_SURFACES];
2496 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2497 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2498 		struct dc_stream_update stream_update;
2499 	} * bundle;
2500 	int k, m;
2501 
2502 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2503 
2504 	if (!bundle) {
2505 		dm_error("Failed to allocate update bundle\n");
2506 		goto cleanup;
2507 	}
2508 
2509 	for (k = 0; k < dc_state->stream_count; k++) {
2510 		bundle->stream_update.stream = dc_state->streams[k];
2511 
2512 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2513 			bundle->surface_updates[m].surface =
2514 				dc_state->stream_status->plane_states[m];
2515 			bundle->surface_updates[m].surface->force_full_update =
2516 				true;
2517 		}
2518 		dc_commit_updates_for_stream(
2519 			dm->dc, bundle->surface_updates,
2520 			dc_state->stream_status->plane_count,
2521 			dc_state->streams[k], &bundle->stream_update, dc_state);
2522 	}
2523 
2524 cleanup:
2525 	kfree(bundle);
2526 
2527 	return;
2528 }
2529 
2530 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2531 {
2532 	struct dc_stream_state *stream_state;
2533 	struct amdgpu_dm_connector *aconnector = link->priv;
2534 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2535 	struct dc_stream_update stream_update;
2536 	bool dpms_off = true;
2537 
2538 	memset(&stream_update, 0, sizeof(stream_update));
2539 	stream_update.dpms_off = &dpms_off;
2540 
2541 	mutex_lock(&adev->dm.dc_lock);
2542 	stream_state = dc_stream_find_from_link(link);
2543 
2544 	if (stream_state == NULL) {
2545 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2546 		mutex_unlock(&adev->dm.dc_lock);
2547 		return;
2548 	}
2549 
2550 	stream_update.stream = stream_state;
2551 	acrtc_state->force_dpms_off = true;
2552 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2553 				     stream_state, &stream_update,
2554 				     stream_state->ctx->dc->current_state);
2555 	mutex_unlock(&adev->dm.dc_lock);
2556 }
2557 
2558 static int dm_resume(void *handle)
2559 {
2560 	struct amdgpu_device *adev = handle;
2561 	struct drm_device *ddev = adev_to_drm(adev);
2562 	struct amdgpu_display_manager *dm = &adev->dm;
2563 	struct amdgpu_dm_connector *aconnector;
2564 	struct drm_connector *connector;
2565 	struct drm_connector_list_iter iter;
2566 	struct drm_crtc *crtc;
2567 	struct drm_crtc_state *new_crtc_state;
2568 	struct dm_crtc_state *dm_new_crtc_state;
2569 	struct drm_plane *plane;
2570 	struct drm_plane_state *new_plane_state;
2571 	struct dm_plane_state *dm_new_plane_state;
2572 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2573 	enum dc_connection_type new_connection_type = dc_connection_none;
2574 	struct dc_state *dc_state;
2575 	int i, r, j;
2576 
2577 	if (amdgpu_in_reset(adev)) {
2578 		dc_state = dm->cached_dc_state;
2579 
2580 		/*
2581 		 * The dc->current_state is backed up into dm->cached_dc_state
2582 		 * before we commit 0 streams.
2583 		 *
2584 		 * DC will clear link encoder assignments on the real state
2585 		 * but the changes won't propagate over to the copy we made
2586 		 * before the 0 streams commit.
2587 		 *
2588 		 * DC expects that link encoder assignments are *not* valid
2589 		 * when committing a state, so as a workaround it needs to be
2590 		 * cleared here.
2591 		 */
2592 		link_enc_cfg_init(dm->dc, dc_state);
2593 
2594 		if (dc_enable_dmub_notifications(adev->dm.dc))
2595 			amdgpu_dm_outbox_init(adev);
2596 
2597 		r = dm_dmub_hw_init(adev);
2598 		if (r)
2599 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2600 
2601 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2602 		dc_resume(dm->dc);
2603 
2604 		amdgpu_dm_irq_resume_early(adev);
2605 
2606 		for (i = 0; i < dc_state->stream_count; i++) {
2607 			dc_state->streams[i]->mode_changed = true;
2608 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2609 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2610 					= 0xffffffff;
2611 			}
2612 		}
2613 
2614 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2615 
2616 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2617 
2618 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2619 
2620 		dc_release_state(dm->cached_dc_state);
2621 		dm->cached_dc_state = NULL;
2622 
2623 		amdgpu_dm_irq_resume_late(adev);
2624 
2625 		mutex_unlock(&dm->dc_lock);
2626 
2627 		return 0;
2628 	}
2629 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2630 	dc_release_state(dm_state->context);
2631 	dm_state->context = dc_create_state(dm->dc);
2632 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2633 	dc_resource_state_construct(dm->dc, dm_state->context);
2634 
2635 	/* Re-enable outbox interrupts for DPIA. */
2636 	if (dc_enable_dmub_notifications(adev->dm.dc))
2637 		amdgpu_dm_outbox_init(adev);
2638 
2639 	/* Before powering on DC we need to re-initialize DMUB. */
2640 	r = dm_dmub_hw_init(adev);
2641 	if (r)
2642 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2643 
2644 	/* power on hardware */
2645 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2646 
2647 	/* program HPD filter */
2648 	dc_resume(dm->dc);
2649 
2650 	/*
2651 	 * early enable HPD Rx IRQ, should be done before set mode as short
2652 	 * pulse interrupts are used for MST
2653 	 */
2654 	amdgpu_dm_irq_resume_early(adev);
2655 
2656 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2657 	s3_handle_mst(ddev, false);
2658 
2659 	/* Do detection*/
2660 	drm_connector_list_iter_begin(ddev, &iter);
2661 	drm_for_each_connector_iter(connector, &iter) {
2662 		aconnector = to_amdgpu_dm_connector(connector);
2663 
2664 		/*
2665 		 * this is the case when traversing through already created
2666 		 * MST connectors, should be skipped
2667 		 */
2668 		if (aconnector->mst_port)
2669 			continue;
2670 
2671 		mutex_lock(&aconnector->hpd_lock);
2672 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2673 			DRM_ERROR("KMS: Failed to detect connector\n");
2674 
2675 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2676 			emulated_link_detect(aconnector->dc_link);
2677 		else
2678 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2679 
2680 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2681 			aconnector->fake_enable = false;
2682 
2683 		if (aconnector->dc_sink)
2684 			dc_sink_release(aconnector->dc_sink);
2685 		aconnector->dc_sink = NULL;
2686 		amdgpu_dm_update_connector_after_detect(aconnector);
2687 		mutex_unlock(&aconnector->hpd_lock);
2688 	}
2689 	drm_connector_list_iter_end(&iter);
2690 
2691 	/* Force mode set in atomic commit */
2692 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2693 		new_crtc_state->active_changed = true;
2694 
2695 	/*
2696 	 * atomic_check is expected to create the dc states. We need to release
2697 	 * them here, since they were duplicated as part of the suspend
2698 	 * procedure.
2699 	 */
2700 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2701 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2702 		if (dm_new_crtc_state->stream) {
2703 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2704 			dc_stream_release(dm_new_crtc_state->stream);
2705 			dm_new_crtc_state->stream = NULL;
2706 		}
2707 	}
2708 
2709 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2710 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2711 		if (dm_new_plane_state->dc_state) {
2712 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2713 			dc_plane_state_release(dm_new_plane_state->dc_state);
2714 			dm_new_plane_state->dc_state = NULL;
2715 		}
2716 	}
2717 
2718 	drm_atomic_helper_resume(ddev, dm->cached_state);
2719 
2720 	dm->cached_state = NULL;
2721 
2722 	amdgpu_dm_irq_resume_late(adev);
2723 
2724 	amdgpu_dm_smu_write_watermarks_table(adev);
2725 
2726 	return 0;
2727 }
2728 
2729 /**
2730  * DOC: DM Lifecycle
2731  *
2732  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2733  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2734  * the base driver's device list to be initialized and torn down accordingly.
2735  *
2736  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2737  */
2738 
2739 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2740 	.name = "dm",
2741 	.early_init = dm_early_init,
2742 	.late_init = dm_late_init,
2743 	.sw_init = dm_sw_init,
2744 	.sw_fini = dm_sw_fini,
2745 	.early_fini = amdgpu_dm_early_fini,
2746 	.hw_init = dm_hw_init,
2747 	.hw_fini = dm_hw_fini,
2748 	.suspend = dm_suspend,
2749 	.resume = dm_resume,
2750 	.is_idle = dm_is_idle,
2751 	.wait_for_idle = dm_wait_for_idle,
2752 	.check_soft_reset = dm_check_soft_reset,
2753 	.soft_reset = dm_soft_reset,
2754 	.set_clockgating_state = dm_set_clockgating_state,
2755 	.set_powergating_state = dm_set_powergating_state,
2756 };
2757 
2758 const struct amdgpu_ip_block_version dm_ip_block =
2759 {
2760 	.type = AMD_IP_BLOCK_TYPE_DCE,
2761 	.major = 1,
2762 	.minor = 0,
2763 	.rev = 0,
2764 	.funcs = &amdgpu_dm_funcs,
2765 };
2766 
2767 
2768 /**
2769  * DOC: atomic
2770  *
2771  * *WIP*
2772  */
2773 
2774 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2775 	.fb_create = amdgpu_display_user_framebuffer_create,
2776 	.get_format_info = amd_get_format_info,
2777 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2778 	.atomic_check = amdgpu_dm_atomic_check,
2779 	.atomic_commit = drm_atomic_helper_commit,
2780 };
2781 
2782 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2783 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2784 };
2785 
2786 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2787 {
2788 	u32 max_cll, min_cll, max, min, q, r;
2789 	struct amdgpu_dm_backlight_caps *caps;
2790 	struct amdgpu_display_manager *dm;
2791 	struct drm_connector *conn_base;
2792 	struct amdgpu_device *adev;
2793 	struct dc_link *link = NULL;
2794 	static const u8 pre_computed_values[] = {
2795 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2796 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2797 	int i;
2798 
2799 	if (!aconnector || !aconnector->dc_link)
2800 		return;
2801 
2802 	link = aconnector->dc_link;
2803 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2804 		return;
2805 
2806 	conn_base = &aconnector->base;
2807 	adev = drm_to_adev(conn_base->dev);
2808 	dm = &adev->dm;
2809 	for (i = 0; i < dm->num_of_edps; i++) {
2810 		if (link == dm->backlight_link[i])
2811 			break;
2812 	}
2813 	if (i >= dm->num_of_edps)
2814 		return;
2815 	caps = &dm->backlight_caps[i];
2816 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2817 	caps->aux_support = false;
2818 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2819 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2820 
2821 	if (caps->ext_caps->bits.oled == 1 /*||
2822 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2823 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2824 		caps->aux_support = true;
2825 
2826 	if (amdgpu_backlight == 0)
2827 		caps->aux_support = false;
2828 	else if (amdgpu_backlight == 1)
2829 		caps->aux_support = true;
2830 
2831 	/* From the specification (CTA-861-G), for calculating the maximum
2832 	 * luminance we need to use:
2833 	 *	Luminance = 50*2**(CV/32)
2834 	 * Where CV is a one-byte value.
2835 	 * For calculating this expression we may need float point precision;
2836 	 * to avoid this complexity level, we take advantage that CV is divided
2837 	 * by a constant. From the Euclids division algorithm, we know that CV
2838 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2839 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2840 	 * need to pre-compute the value of r/32. For pre-computing the values
2841 	 * We just used the following Ruby line:
2842 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2843 	 * The results of the above expressions can be verified at
2844 	 * pre_computed_values.
2845 	 */
2846 	q = max_cll >> 5;
2847 	r = max_cll % 32;
2848 	max = (1 << q) * pre_computed_values[r];
2849 
2850 	// min luminance: maxLum * (CV/255)^2 / 100
2851 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2852 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2853 
2854 	caps->aux_max_input_signal = max;
2855 	caps->aux_min_input_signal = min;
2856 }
2857 
2858 void amdgpu_dm_update_connector_after_detect(
2859 		struct amdgpu_dm_connector *aconnector)
2860 {
2861 	struct drm_connector *connector = &aconnector->base;
2862 	struct drm_device *dev = connector->dev;
2863 	struct dc_sink *sink;
2864 
2865 	/* MST handled by drm_mst framework */
2866 	if (aconnector->mst_mgr.mst_state == true)
2867 		return;
2868 
2869 	sink = aconnector->dc_link->local_sink;
2870 	if (sink)
2871 		dc_sink_retain(sink);
2872 
2873 	/*
2874 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2875 	 * the connector sink is set to either fake or physical sink depends on link status.
2876 	 * Skip if already done during boot.
2877 	 */
2878 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2879 			&& aconnector->dc_em_sink) {
2880 
2881 		/*
2882 		 * For S3 resume with headless use eml_sink to fake stream
2883 		 * because on resume connector->sink is set to NULL
2884 		 */
2885 		mutex_lock(&dev->mode_config.mutex);
2886 
2887 		if (sink) {
2888 			if (aconnector->dc_sink) {
2889 				amdgpu_dm_update_freesync_caps(connector, NULL);
2890 				/*
2891 				 * retain and release below are used to
2892 				 * bump up refcount for sink because the link doesn't point
2893 				 * to it anymore after disconnect, so on next crtc to connector
2894 				 * reshuffle by UMD we will get into unwanted dc_sink release
2895 				 */
2896 				dc_sink_release(aconnector->dc_sink);
2897 			}
2898 			aconnector->dc_sink = sink;
2899 			dc_sink_retain(aconnector->dc_sink);
2900 			amdgpu_dm_update_freesync_caps(connector,
2901 					aconnector->edid);
2902 		} else {
2903 			amdgpu_dm_update_freesync_caps(connector, NULL);
2904 			if (!aconnector->dc_sink) {
2905 				aconnector->dc_sink = aconnector->dc_em_sink;
2906 				dc_sink_retain(aconnector->dc_sink);
2907 			}
2908 		}
2909 
2910 		mutex_unlock(&dev->mode_config.mutex);
2911 
2912 		if (sink)
2913 			dc_sink_release(sink);
2914 		return;
2915 	}
2916 
2917 	/*
2918 	 * TODO: temporary guard to look for proper fix
2919 	 * if this sink is MST sink, we should not do anything
2920 	 */
2921 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2922 		dc_sink_release(sink);
2923 		return;
2924 	}
2925 
2926 	if (aconnector->dc_sink == sink) {
2927 		/*
2928 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2929 		 * Do nothing!!
2930 		 */
2931 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2932 				aconnector->connector_id);
2933 		if (sink)
2934 			dc_sink_release(sink);
2935 		return;
2936 	}
2937 
2938 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2939 		aconnector->connector_id, aconnector->dc_sink, sink);
2940 
2941 	mutex_lock(&dev->mode_config.mutex);
2942 
2943 	/*
2944 	 * 1. Update status of the drm connector
2945 	 * 2. Send an event and let userspace tell us what to do
2946 	 */
2947 	if (sink) {
2948 		/*
2949 		 * TODO: check if we still need the S3 mode update workaround.
2950 		 * If yes, put it here.
2951 		 */
2952 		if (aconnector->dc_sink) {
2953 			amdgpu_dm_update_freesync_caps(connector, NULL);
2954 			dc_sink_release(aconnector->dc_sink);
2955 		}
2956 
2957 		aconnector->dc_sink = sink;
2958 		dc_sink_retain(aconnector->dc_sink);
2959 		if (sink->dc_edid.length == 0) {
2960 			aconnector->edid = NULL;
2961 			if (aconnector->dc_link->aux_mode) {
2962 				drm_dp_cec_unset_edid(
2963 					&aconnector->dm_dp_aux.aux);
2964 			}
2965 		} else {
2966 			aconnector->edid =
2967 				(struct edid *)sink->dc_edid.raw_edid;
2968 
2969 			if (aconnector->dc_link->aux_mode)
2970 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2971 						    aconnector->edid);
2972 		}
2973 
2974 		drm_connector_update_edid_property(connector, aconnector->edid);
2975 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2976 		update_connector_ext_caps(aconnector);
2977 	} else {
2978 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2979 		amdgpu_dm_update_freesync_caps(connector, NULL);
2980 		drm_connector_update_edid_property(connector, NULL);
2981 		aconnector->num_modes = 0;
2982 		dc_sink_release(aconnector->dc_sink);
2983 		aconnector->dc_sink = NULL;
2984 		aconnector->edid = NULL;
2985 #ifdef CONFIG_DRM_AMD_DC_HDCP
2986 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2987 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2988 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2989 #endif
2990 	}
2991 
2992 	mutex_unlock(&dev->mode_config.mutex);
2993 
2994 	update_subconnector_property(aconnector);
2995 
2996 	if (sink)
2997 		dc_sink_release(sink);
2998 }
2999 
3000 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3001 {
3002 	struct drm_connector *connector = &aconnector->base;
3003 	struct drm_device *dev = connector->dev;
3004 	enum dc_connection_type new_connection_type = dc_connection_none;
3005 	struct amdgpu_device *adev = drm_to_adev(dev);
3006 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3007 	struct dm_crtc_state *dm_crtc_state = NULL;
3008 
3009 	if (adev->dm.disable_hpd_irq)
3010 		return;
3011 
3012 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3013 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3014 					dm_con_state->base.state,
3015 					dm_con_state->base.crtc));
3016 	/*
3017 	 * In case of failure or MST no need to update connector status or notify the OS
3018 	 * since (for MST case) MST does this in its own context.
3019 	 */
3020 	mutex_lock(&aconnector->hpd_lock);
3021 
3022 #ifdef CONFIG_DRM_AMD_DC_HDCP
3023 	if (adev->dm.hdcp_workqueue) {
3024 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3025 		dm_con_state->update_hdcp = true;
3026 	}
3027 #endif
3028 	if (aconnector->fake_enable)
3029 		aconnector->fake_enable = false;
3030 
3031 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3032 		DRM_ERROR("KMS: Failed to detect connector\n");
3033 
3034 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3035 		emulated_link_detect(aconnector->dc_link);
3036 
3037 		drm_modeset_lock_all(dev);
3038 		dm_restore_drm_connector_state(dev, connector);
3039 		drm_modeset_unlock_all(dev);
3040 
3041 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3042 			drm_kms_helper_connector_hotplug_event(connector);
3043 
3044 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3045 		if (new_connection_type == dc_connection_none &&
3046 		    aconnector->dc_link->type == dc_connection_none &&
3047 		    dm_crtc_state)
3048 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3049 
3050 		amdgpu_dm_update_connector_after_detect(aconnector);
3051 
3052 		drm_modeset_lock_all(dev);
3053 		dm_restore_drm_connector_state(dev, connector);
3054 		drm_modeset_unlock_all(dev);
3055 
3056 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3057 			drm_kms_helper_connector_hotplug_event(connector);
3058 	}
3059 	mutex_unlock(&aconnector->hpd_lock);
3060 
3061 }
3062 
3063 static void handle_hpd_irq(void *param)
3064 {
3065 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3066 
3067 	handle_hpd_irq_helper(aconnector);
3068 
3069 }
3070 
3071 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3072 {
3073 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3074 	uint8_t dret;
3075 	bool new_irq_handled = false;
3076 	int dpcd_addr;
3077 	int dpcd_bytes_to_read;
3078 
3079 	const int max_process_count = 30;
3080 	int process_count = 0;
3081 
3082 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3083 
3084 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3085 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3086 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3087 		dpcd_addr = DP_SINK_COUNT;
3088 	} else {
3089 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3090 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3091 		dpcd_addr = DP_SINK_COUNT_ESI;
3092 	}
3093 
3094 	dret = drm_dp_dpcd_read(
3095 		&aconnector->dm_dp_aux.aux,
3096 		dpcd_addr,
3097 		esi,
3098 		dpcd_bytes_to_read);
3099 
3100 	while (dret == dpcd_bytes_to_read &&
3101 		process_count < max_process_count) {
3102 		uint8_t retry;
3103 		dret = 0;
3104 
3105 		process_count++;
3106 
3107 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3108 		/* handle HPD short pulse irq */
3109 		if (aconnector->mst_mgr.mst_state)
3110 			drm_dp_mst_hpd_irq(
3111 				&aconnector->mst_mgr,
3112 				esi,
3113 				&new_irq_handled);
3114 
3115 		if (new_irq_handled) {
3116 			/* ACK at DPCD to notify down stream */
3117 			const int ack_dpcd_bytes_to_write =
3118 				dpcd_bytes_to_read - 1;
3119 
3120 			for (retry = 0; retry < 3; retry++) {
3121 				uint8_t wret;
3122 
3123 				wret = drm_dp_dpcd_write(
3124 					&aconnector->dm_dp_aux.aux,
3125 					dpcd_addr + 1,
3126 					&esi[1],
3127 					ack_dpcd_bytes_to_write);
3128 				if (wret == ack_dpcd_bytes_to_write)
3129 					break;
3130 			}
3131 
3132 			/* check if there is new irq to be handled */
3133 			dret = drm_dp_dpcd_read(
3134 				&aconnector->dm_dp_aux.aux,
3135 				dpcd_addr,
3136 				esi,
3137 				dpcd_bytes_to_read);
3138 
3139 			new_irq_handled = false;
3140 		} else {
3141 			break;
3142 		}
3143 	}
3144 
3145 	if (process_count == max_process_count)
3146 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3147 }
3148 
3149 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3150 							union hpd_irq_data hpd_irq_data)
3151 {
3152 	struct hpd_rx_irq_offload_work *offload_work =
3153 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3154 
3155 	if (!offload_work) {
3156 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3157 		return;
3158 	}
3159 
3160 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3161 	offload_work->data = hpd_irq_data;
3162 	offload_work->offload_wq = offload_wq;
3163 
3164 	queue_work(offload_wq->wq, &offload_work->work);
3165 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3166 }
3167 
3168 static void handle_hpd_rx_irq(void *param)
3169 {
3170 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3171 	struct drm_connector *connector = &aconnector->base;
3172 	struct drm_device *dev = connector->dev;
3173 	struct dc_link *dc_link = aconnector->dc_link;
3174 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3175 	bool result = false;
3176 	enum dc_connection_type new_connection_type = dc_connection_none;
3177 	struct amdgpu_device *adev = drm_to_adev(dev);
3178 	union hpd_irq_data hpd_irq_data;
3179 	bool link_loss = false;
3180 	bool has_left_work = false;
3181 	int idx = aconnector->base.index;
3182 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3183 
3184 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3185 
3186 	if (adev->dm.disable_hpd_irq)
3187 		return;
3188 
3189 	/*
3190 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3191 	 * conflict, after implement i2c helper, this mutex should be
3192 	 * retired.
3193 	 */
3194 	mutex_lock(&aconnector->hpd_lock);
3195 
3196 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3197 						&link_loss, true, &has_left_work);
3198 
3199 	if (!has_left_work)
3200 		goto out;
3201 
3202 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3203 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3204 		goto out;
3205 	}
3206 
3207 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3208 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3209 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3210 			dm_handle_mst_sideband_msg(aconnector);
3211 			goto out;
3212 		}
3213 
3214 		if (link_loss) {
3215 			bool skip = false;
3216 
3217 			spin_lock(&offload_wq->offload_lock);
3218 			skip = offload_wq->is_handling_link_loss;
3219 
3220 			if (!skip)
3221 				offload_wq->is_handling_link_loss = true;
3222 
3223 			spin_unlock(&offload_wq->offload_lock);
3224 
3225 			if (!skip)
3226 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3227 
3228 			goto out;
3229 		}
3230 	}
3231 
3232 out:
3233 	if (result && !is_mst_root_connector) {
3234 		/* Downstream Port status changed. */
3235 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3236 			DRM_ERROR("KMS: Failed to detect connector\n");
3237 
3238 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3239 			emulated_link_detect(dc_link);
3240 
3241 			if (aconnector->fake_enable)
3242 				aconnector->fake_enable = false;
3243 
3244 			amdgpu_dm_update_connector_after_detect(aconnector);
3245 
3246 
3247 			drm_modeset_lock_all(dev);
3248 			dm_restore_drm_connector_state(dev, connector);
3249 			drm_modeset_unlock_all(dev);
3250 
3251 			drm_kms_helper_connector_hotplug_event(connector);
3252 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3253 
3254 			if (aconnector->fake_enable)
3255 				aconnector->fake_enable = false;
3256 
3257 			amdgpu_dm_update_connector_after_detect(aconnector);
3258 
3259 
3260 			drm_modeset_lock_all(dev);
3261 			dm_restore_drm_connector_state(dev, connector);
3262 			drm_modeset_unlock_all(dev);
3263 
3264 			drm_kms_helper_connector_hotplug_event(connector);
3265 		}
3266 	}
3267 #ifdef CONFIG_DRM_AMD_DC_HDCP
3268 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3269 		if (adev->dm.hdcp_workqueue)
3270 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3271 	}
3272 #endif
3273 
3274 	if (dc_link->type != dc_connection_mst_branch)
3275 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3276 
3277 	mutex_unlock(&aconnector->hpd_lock);
3278 }
3279 
3280 static void register_hpd_handlers(struct amdgpu_device *adev)
3281 {
3282 	struct drm_device *dev = adev_to_drm(adev);
3283 	struct drm_connector *connector;
3284 	struct amdgpu_dm_connector *aconnector;
3285 	const struct dc_link *dc_link;
3286 	struct dc_interrupt_params int_params = {0};
3287 
3288 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3289 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3290 
3291 	list_for_each_entry(connector,
3292 			&dev->mode_config.connector_list, head)	{
3293 
3294 		aconnector = to_amdgpu_dm_connector(connector);
3295 		dc_link = aconnector->dc_link;
3296 
3297 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3298 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3299 			int_params.irq_source = dc_link->irq_source_hpd;
3300 
3301 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3302 					handle_hpd_irq,
3303 					(void *) aconnector);
3304 		}
3305 
3306 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3307 
3308 			/* Also register for DP short pulse (hpd_rx). */
3309 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3310 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3311 
3312 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3313 					handle_hpd_rx_irq,
3314 					(void *) aconnector);
3315 
3316 			if (adev->dm.hpd_rx_offload_wq)
3317 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3318 					aconnector;
3319 		}
3320 	}
3321 }
3322 
3323 #if defined(CONFIG_DRM_AMD_DC_SI)
3324 /* Register IRQ sources and initialize IRQ callbacks */
3325 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3326 {
3327 	struct dc *dc = adev->dm.dc;
3328 	struct common_irq_params *c_irq_params;
3329 	struct dc_interrupt_params int_params = {0};
3330 	int r;
3331 	int i;
3332 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3333 
3334 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3335 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3336 
3337 	/*
3338 	 * Actions of amdgpu_irq_add_id():
3339 	 * 1. Register a set() function with base driver.
3340 	 *    Base driver will call set() function to enable/disable an
3341 	 *    interrupt in DC hardware.
3342 	 * 2. Register amdgpu_dm_irq_handler().
3343 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3344 	 *    coming from DC hardware.
3345 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3346 	 *    for acknowledging and handling. */
3347 
3348 	/* Use VBLANK interrupt */
3349 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3350 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3351 		if (r) {
3352 			DRM_ERROR("Failed to add crtc irq id!\n");
3353 			return r;
3354 		}
3355 
3356 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3357 		int_params.irq_source =
3358 			dc_interrupt_to_irq_source(dc, i+1 , 0);
3359 
3360 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3361 
3362 		c_irq_params->adev = adev;
3363 		c_irq_params->irq_src = int_params.irq_source;
3364 
3365 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3366 				dm_crtc_high_irq, c_irq_params);
3367 	}
3368 
3369 	/* Use GRPH_PFLIP interrupt */
3370 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3371 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3372 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3373 		if (r) {
3374 			DRM_ERROR("Failed to add page flip irq id!\n");
3375 			return r;
3376 		}
3377 
3378 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3379 		int_params.irq_source =
3380 			dc_interrupt_to_irq_source(dc, i, 0);
3381 
3382 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3383 
3384 		c_irq_params->adev = adev;
3385 		c_irq_params->irq_src = int_params.irq_source;
3386 
3387 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3388 				dm_pflip_high_irq, c_irq_params);
3389 
3390 	}
3391 
3392 	/* HPD */
3393 	r = amdgpu_irq_add_id(adev, client_id,
3394 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3395 	if (r) {
3396 		DRM_ERROR("Failed to add hpd irq id!\n");
3397 		return r;
3398 	}
3399 
3400 	register_hpd_handlers(adev);
3401 
3402 	return 0;
3403 }
3404 #endif
3405 
3406 /* Register IRQ sources and initialize IRQ callbacks */
3407 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3408 {
3409 	struct dc *dc = adev->dm.dc;
3410 	struct common_irq_params *c_irq_params;
3411 	struct dc_interrupt_params int_params = {0};
3412 	int r;
3413 	int i;
3414 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3415 
3416 	if (adev->family >= AMDGPU_FAMILY_AI)
3417 		client_id = SOC15_IH_CLIENTID_DCE;
3418 
3419 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3420 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3421 
3422 	/*
3423 	 * Actions of amdgpu_irq_add_id():
3424 	 * 1. Register a set() function with base driver.
3425 	 *    Base driver will call set() function to enable/disable an
3426 	 *    interrupt in DC hardware.
3427 	 * 2. Register amdgpu_dm_irq_handler().
3428 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3429 	 *    coming from DC hardware.
3430 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3431 	 *    for acknowledging and handling. */
3432 
3433 	/* Use VBLANK interrupt */
3434 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3435 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3436 		if (r) {
3437 			DRM_ERROR("Failed to add crtc irq id!\n");
3438 			return r;
3439 		}
3440 
3441 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3442 		int_params.irq_source =
3443 			dc_interrupt_to_irq_source(dc, i, 0);
3444 
3445 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3446 
3447 		c_irq_params->adev = adev;
3448 		c_irq_params->irq_src = int_params.irq_source;
3449 
3450 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3451 				dm_crtc_high_irq, c_irq_params);
3452 	}
3453 
3454 	/* Use VUPDATE interrupt */
3455 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3456 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3457 		if (r) {
3458 			DRM_ERROR("Failed to add vupdate irq id!\n");
3459 			return r;
3460 		}
3461 
3462 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3463 		int_params.irq_source =
3464 			dc_interrupt_to_irq_source(dc, i, 0);
3465 
3466 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3467 
3468 		c_irq_params->adev = adev;
3469 		c_irq_params->irq_src = int_params.irq_source;
3470 
3471 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3472 				dm_vupdate_high_irq, c_irq_params);
3473 	}
3474 
3475 	/* Use GRPH_PFLIP interrupt */
3476 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3477 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3478 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3479 		if (r) {
3480 			DRM_ERROR("Failed to add page flip irq id!\n");
3481 			return r;
3482 		}
3483 
3484 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3485 		int_params.irq_source =
3486 			dc_interrupt_to_irq_source(dc, i, 0);
3487 
3488 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3489 
3490 		c_irq_params->adev = adev;
3491 		c_irq_params->irq_src = int_params.irq_source;
3492 
3493 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3494 				dm_pflip_high_irq, c_irq_params);
3495 
3496 	}
3497 
3498 	/* HPD */
3499 	r = amdgpu_irq_add_id(adev, client_id,
3500 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3501 	if (r) {
3502 		DRM_ERROR("Failed to add hpd irq id!\n");
3503 		return r;
3504 	}
3505 
3506 	register_hpd_handlers(adev);
3507 
3508 	return 0;
3509 }
3510 
3511 #if defined(CONFIG_DRM_AMD_DC_DCN)
3512 /* Register IRQ sources and initialize IRQ callbacks */
3513 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3514 {
3515 	struct dc *dc = adev->dm.dc;
3516 	struct common_irq_params *c_irq_params;
3517 	struct dc_interrupt_params int_params = {0};
3518 	int r;
3519 	int i;
3520 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3521 	static const unsigned int vrtl_int_srcid[] = {
3522 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3523 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3524 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3525 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3526 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3527 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3528 	};
3529 #endif
3530 
3531 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3532 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3533 
3534 	/*
3535 	 * Actions of amdgpu_irq_add_id():
3536 	 * 1. Register a set() function with base driver.
3537 	 *    Base driver will call set() function to enable/disable an
3538 	 *    interrupt in DC hardware.
3539 	 * 2. Register amdgpu_dm_irq_handler().
3540 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3541 	 *    coming from DC hardware.
3542 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3543 	 *    for acknowledging and handling.
3544 	 */
3545 
3546 	/* Use VSTARTUP interrupt */
3547 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3548 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3549 			i++) {
3550 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3551 
3552 		if (r) {
3553 			DRM_ERROR("Failed to add crtc irq id!\n");
3554 			return r;
3555 		}
3556 
3557 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3558 		int_params.irq_source =
3559 			dc_interrupt_to_irq_source(dc, i, 0);
3560 
3561 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3562 
3563 		c_irq_params->adev = adev;
3564 		c_irq_params->irq_src = int_params.irq_source;
3565 
3566 		amdgpu_dm_irq_register_interrupt(
3567 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3568 	}
3569 
3570 	/* Use otg vertical line interrupt */
3571 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3572 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3573 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3574 				vrtl_int_srcid[i], &adev->vline0_irq);
3575 
3576 		if (r) {
3577 			DRM_ERROR("Failed to add vline0 irq id!\n");
3578 			return r;
3579 		}
3580 
3581 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3582 		int_params.irq_source =
3583 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3584 
3585 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3586 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3587 			break;
3588 		}
3589 
3590 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3591 					- DC_IRQ_SOURCE_DC1_VLINE0];
3592 
3593 		c_irq_params->adev = adev;
3594 		c_irq_params->irq_src = int_params.irq_source;
3595 
3596 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3597 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3598 	}
3599 #endif
3600 
3601 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3602 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3603 	 * to trigger at end of each vblank, regardless of state of the lock,
3604 	 * matching DCE behaviour.
3605 	 */
3606 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3607 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3608 	     i++) {
3609 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3610 
3611 		if (r) {
3612 			DRM_ERROR("Failed to add vupdate irq id!\n");
3613 			return r;
3614 		}
3615 
3616 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3617 		int_params.irq_source =
3618 			dc_interrupt_to_irq_source(dc, i, 0);
3619 
3620 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3621 
3622 		c_irq_params->adev = adev;
3623 		c_irq_params->irq_src = int_params.irq_source;
3624 
3625 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3626 				dm_vupdate_high_irq, c_irq_params);
3627 	}
3628 
3629 	/* Use GRPH_PFLIP interrupt */
3630 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3631 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3632 			i++) {
3633 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3634 		if (r) {
3635 			DRM_ERROR("Failed to add page flip irq id!\n");
3636 			return r;
3637 		}
3638 
3639 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3640 		int_params.irq_source =
3641 			dc_interrupt_to_irq_source(dc, i, 0);
3642 
3643 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3644 
3645 		c_irq_params->adev = adev;
3646 		c_irq_params->irq_src = int_params.irq_source;
3647 
3648 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3649 				dm_pflip_high_irq, c_irq_params);
3650 
3651 	}
3652 
3653 	/* HPD */
3654 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3655 			&adev->hpd_irq);
3656 	if (r) {
3657 		DRM_ERROR("Failed to add hpd irq id!\n");
3658 		return r;
3659 	}
3660 
3661 	register_hpd_handlers(adev);
3662 
3663 	return 0;
3664 }
3665 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3666 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3667 {
3668 	struct dc *dc = adev->dm.dc;
3669 	struct common_irq_params *c_irq_params;
3670 	struct dc_interrupt_params int_params = {0};
3671 	int r, i;
3672 
3673 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3674 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3675 
3676 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3677 			&adev->dmub_outbox_irq);
3678 	if (r) {
3679 		DRM_ERROR("Failed to add outbox irq id!\n");
3680 		return r;
3681 	}
3682 
3683 	if (dc->ctx->dmub_srv) {
3684 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3685 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3686 		int_params.irq_source =
3687 		dc_interrupt_to_irq_source(dc, i, 0);
3688 
3689 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3690 
3691 		c_irq_params->adev = adev;
3692 		c_irq_params->irq_src = int_params.irq_source;
3693 
3694 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3695 				dm_dmub_outbox1_low_irq, c_irq_params);
3696 	}
3697 
3698 	return 0;
3699 }
3700 #endif
3701 
3702 /*
3703  * Acquires the lock for the atomic state object and returns
3704  * the new atomic state.
3705  *
3706  * This should only be called during atomic check.
3707  */
3708 static int dm_atomic_get_state(struct drm_atomic_state *state,
3709 			       struct dm_atomic_state **dm_state)
3710 {
3711 	struct drm_device *dev = state->dev;
3712 	struct amdgpu_device *adev = drm_to_adev(dev);
3713 	struct amdgpu_display_manager *dm = &adev->dm;
3714 	struct drm_private_state *priv_state;
3715 
3716 	if (*dm_state)
3717 		return 0;
3718 
3719 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3720 	if (IS_ERR(priv_state))
3721 		return PTR_ERR(priv_state);
3722 
3723 	*dm_state = to_dm_atomic_state(priv_state);
3724 
3725 	return 0;
3726 }
3727 
3728 static struct dm_atomic_state *
3729 dm_atomic_get_new_state(struct drm_atomic_state *state)
3730 {
3731 	struct drm_device *dev = state->dev;
3732 	struct amdgpu_device *adev = drm_to_adev(dev);
3733 	struct amdgpu_display_manager *dm = &adev->dm;
3734 	struct drm_private_obj *obj;
3735 	struct drm_private_state *new_obj_state;
3736 	int i;
3737 
3738 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3739 		if (obj->funcs == dm->atomic_obj.funcs)
3740 			return to_dm_atomic_state(new_obj_state);
3741 	}
3742 
3743 	return NULL;
3744 }
3745 
3746 static struct drm_private_state *
3747 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3748 {
3749 	struct dm_atomic_state *old_state, *new_state;
3750 
3751 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3752 	if (!new_state)
3753 		return NULL;
3754 
3755 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3756 
3757 	old_state = to_dm_atomic_state(obj->state);
3758 
3759 	if (old_state && old_state->context)
3760 		new_state->context = dc_copy_state(old_state->context);
3761 
3762 	if (!new_state->context) {
3763 		kfree(new_state);
3764 		return NULL;
3765 	}
3766 
3767 	return &new_state->base;
3768 }
3769 
3770 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3771 				    struct drm_private_state *state)
3772 {
3773 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3774 
3775 	if (dm_state && dm_state->context)
3776 		dc_release_state(dm_state->context);
3777 
3778 	kfree(dm_state);
3779 }
3780 
3781 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3782 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3783 	.atomic_destroy_state = dm_atomic_destroy_state,
3784 };
3785 
3786 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3787 {
3788 	struct dm_atomic_state *state;
3789 	int r;
3790 
3791 	adev->mode_info.mode_config_initialized = true;
3792 
3793 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3794 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3795 
3796 	adev_to_drm(adev)->mode_config.max_width = 16384;
3797 	adev_to_drm(adev)->mode_config.max_height = 16384;
3798 
3799 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3800 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3801 	/* indicates support for immediate flip */
3802 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3803 
3804 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3805 
3806 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3807 	if (!state)
3808 		return -ENOMEM;
3809 
3810 	state->context = dc_create_state(adev->dm.dc);
3811 	if (!state->context) {
3812 		kfree(state);
3813 		return -ENOMEM;
3814 	}
3815 
3816 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3817 
3818 	drm_atomic_private_obj_init(adev_to_drm(adev),
3819 				    &adev->dm.atomic_obj,
3820 				    &state->base,
3821 				    &dm_atomic_state_funcs);
3822 
3823 	r = amdgpu_display_modeset_create_props(adev);
3824 	if (r) {
3825 		dc_release_state(state->context);
3826 		kfree(state);
3827 		return r;
3828 	}
3829 
3830 	r = amdgpu_dm_audio_init(adev);
3831 	if (r) {
3832 		dc_release_state(state->context);
3833 		kfree(state);
3834 		return r;
3835 	}
3836 
3837 	return 0;
3838 }
3839 
3840 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3841 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3842 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3843 
3844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3845 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3846 
3847 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3848 					    int bl_idx)
3849 {
3850 #if defined(CONFIG_ACPI)
3851 	struct amdgpu_dm_backlight_caps caps;
3852 
3853 	memset(&caps, 0, sizeof(caps));
3854 
3855 	if (dm->backlight_caps[bl_idx].caps_valid)
3856 		return;
3857 
3858 	amdgpu_acpi_get_backlight_caps(&caps);
3859 	if (caps.caps_valid) {
3860 		dm->backlight_caps[bl_idx].caps_valid = true;
3861 		if (caps.aux_support)
3862 			return;
3863 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3864 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3865 	} else {
3866 		dm->backlight_caps[bl_idx].min_input_signal =
3867 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3868 		dm->backlight_caps[bl_idx].max_input_signal =
3869 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3870 	}
3871 #else
3872 	if (dm->backlight_caps[bl_idx].aux_support)
3873 		return;
3874 
3875 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3876 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3877 #endif
3878 }
3879 
3880 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3881 				unsigned *min, unsigned *max)
3882 {
3883 	if (!caps)
3884 		return 0;
3885 
3886 	if (caps->aux_support) {
3887 		// Firmware limits are in nits, DC API wants millinits.
3888 		*max = 1000 * caps->aux_max_input_signal;
3889 		*min = 1000 * caps->aux_min_input_signal;
3890 	} else {
3891 		// Firmware limits are 8-bit, PWM control is 16-bit.
3892 		*max = 0x101 * caps->max_input_signal;
3893 		*min = 0x101 * caps->min_input_signal;
3894 	}
3895 	return 1;
3896 }
3897 
3898 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3899 					uint32_t brightness)
3900 {
3901 	unsigned min, max;
3902 
3903 	if (!get_brightness_range(caps, &min, &max))
3904 		return brightness;
3905 
3906 	// Rescale 0..255 to min..max
3907 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3908 				       AMDGPU_MAX_BL_LEVEL);
3909 }
3910 
3911 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3912 				      uint32_t brightness)
3913 {
3914 	unsigned min, max;
3915 
3916 	if (!get_brightness_range(caps, &min, &max))
3917 		return brightness;
3918 
3919 	if (brightness < min)
3920 		return 0;
3921 	// Rescale min..max to 0..255
3922 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3923 				 max - min);
3924 }
3925 
3926 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3927 					 int bl_idx,
3928 					 u32 user_brightness)
3929 {
3930 	struct amdgpu_dm_backlight_caps caps;
3931 	struct dc_link *link;
3932 	u32 brightness;
3933 	bool rc;
3934 
3935 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3936 	caps = dm->backlight_caps[bl_idx];
3937 
3938 	dm->brightness[bl_idx] = user_brightness;
3939 	/* update scratch register */
3940 	if (bl_idx == 0)
3941 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3942 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3943 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3944 
3945 	/* Change brightness based on AUX property */
3946 	if (caps.aux_support) {
3947 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3948 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3949 		if (!rc)
3950 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3951 	} else {
3952 		rc = dc_link_set_backlight_level(link, brightness, 0);
3953 		if (!rc)
3954 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3955 	}
3956 
3957 	return rc ? 0 : 1;
3958 }
3959 
3960 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3961 {
3962 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3963 	int i;
3964 
3965 	for (i = 0; i < dm->num_of_edps; i++) {
3966 		if (bd == dm->backlight_dev[i])
3967 			break;
3968 	}
3969 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3970 		i = 0;
3971 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3972 
3973 	return 0;
3974 }
3975 
3976 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3977 					 int bl_idx)
3978 {
3979 	struct amdgpu_dm_backlight_caps caps;
3980 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3981 
3982 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3983 	caps = dm->backlight_caps[bl_idx];
3984 
3985 	if (caps.aux_support) {
3986 		u32 avg, peak;
3987 		bool rc;
3988 
3989 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3990 		if (!rc)
3991 			return dm->brightness[bl_idx];
3992 		return convert_brightness_to_user(&caps, avg);
3993 	} else {
3994 		int ret = dc_link_get_backlight_level(link);
3995 
3996 		if (ret == DC_ERROR_UNEXPECTED)
3997 			return dm->brightness[bl_idx];
3998 		return convert_brightness_to_user(&caps, ret);
3999 	}
4000 }
4001 
4002 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4003 {
4004 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4005 	int i;
4006 
4007 	for (i = 0; i < dm->num_of_edps; i++) {
4008 		if (bd == dm->backlight_dev[i])
4009 			break;
4010 	}
4011 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4012 		i = 0;
4013 	return amdgpu_dm_backlight_get_level(dm, i);
4014 }
4015 
4016 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4017 	.options = BL_CORE_SUSPENDRESUME,
4018 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4019 	.update_status	= amdgpu_dm_backlight_update_status,
4020 };
4021 
4022 static void
4023 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4024 {
4025 	char bl_name[16];
4026 	struct backlight_properties props = { 0 };
4027 
4028 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4029 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4030 
4031 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4032 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4033 	props.type = BACKLIGHT_RAW;
4034 
4035 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4036 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4037 
4038 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4039 								       adev_to_drm(dm->adev)->dev,
4040 								       dm,
4041 								       &amdgpu_dm_backlight_ops,
4042 								       &props);
4043 
4044 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4045 		DRM_ERROR("DM: Backlight registration failed!\n");
4046 	else
4047 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4048 }
4049 #endif
4050 
4051 static int initialize_plane(struct amdgpu_display_manager *dm,
4052 			    struct amdgpu_mode_info *mode_info, int plane_id,
4053 			    enum drm_plane_type plane_type,
4054 			    const struct dc_plane_cap *plane_cap)
4055 {
4056 	struct drm_plane *plane;
4057 	unsigned long possible_crtcs;
4058 	int ret = 0;
4059 
4060 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4061 	if (!plane) {
4062 		DRM_ERROR("KMS: Failed to allocate plane\n");
4063 		return -ENOMEM;
4064 	}
4065 	plane->type = plane_type;
4066 
4067 	/*
4068 	 * HACK: IGT tests expect that the primary plane for a CRTC
4069 	 * can only have one possible CRTC. Only expose support for
4070 	 * any CRTC if they're not going to be used as a primary plane
4071 	 * for a CRTC - like overlay or underlay planes.
4072 	 */
4073 	possible_crtcs = 1 << plane_id;
4074 	if (plane_id >= dm->dc->caps.max_streams)
4075 		possible_crtcs = 0xff;
4076 
4077 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4078 
4079 	if (ret) {
4080 		DRM_ERROR("KMS: Failed to initialize plane\n");
4081 		kfree(plane);
4082 		return ret;
4083 	}
4084 
4085 	if (mode_info)
4086 		mode_info->planes[plane_id] = plane;
4087 
4088 	return ret;
4089 }
4090 
4091 
4092 static void register_backlight_device(struct amdgpu_display_manager *dm,
4093 				      struct dc_link *link)
4094 {
4095 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4096 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4097 
4098 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4099 	    link->type != dc_connection_none) {
4100 		/*
4101 		 * Event if registration failed, we should continue with
4102 		 * DM initialization because not having a backlight control
4103 		 * is better then a black screen.
4104 		 */
4105 		if (!dm->backlight_dev[dm->num_of_edps])
4106 			amdgpu_dm_register_backlight_device(dm);
4107 
4108 		if (dm->backlight_dev[dm->num_of_edps]) {
4109 			dm->backlight_link[dm->num_of_edps] = link;
4110 			dm->num_of_edps++;
4111 		}
4112 	}
4113 #endif
4114 }
4115 
4116 
4117 /*
4118  * In this architecture, the association
4119  * connector -> encoder -> crtc
4120  * id not really requried. The crtc and connector will hold the
4121  * display_index as an abstraction to use with DAL component
4122  *
4123  * Returns 0 on success
4124  */
4125 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4126 {
4127 	struct amdgpu_display_manager *dm = &adev->dm;
4128 	int32_t i;
4129 	struct amdgpu_dm_connector *aconnector = NULL;
4130 	struct amdgpu_encoder *aencoder = NULL;
4131 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4132 	uint32_t link_cnt;
4133 	int32_t primary_planes;
4134 	enum dc_connection_type new_connection_type = dc_connection_none;
4135 	const struct dc_plane_cap *plane;
4136 	bool psr_feature_enabled = false;
4137 
4138 	dm->display_indexes_num = dm->dc->caps.max_streams;
4139 	/* Update the actual used number of crtc */
4140 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4141 
4142 	link_cnt = dm->dc->caps.max_links;
4143 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4144 		DRM_ERROR("DM: Failed to initialize mode config\n");
4145 		return -EINVAL;
4146 	}
4147 
4148 	/* There is one primary plane per CRTC */
4149 	primary_planes = dm->dc->caps.max_streams;
4150 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4151 
4152 	/*
4153 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4154 	 * Order is reversed to match iteration order in atomic check.
4155 	 */
4156 	for (i = (primary_planes - 1); i >= 0; i--) {
4157 		plane = &dm->dc->caps.planes[i];
4158 
4159 		if (initialize_plane(dm, mode_info, i,
4160 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4161 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4162 			goto fail;
4163 		}
4164 	}
4165 
4166 	/*
4167 	 * Initialize overlay planes, index starting after primary planes.
4168 	 * These planes have a higher DRM index than the primary planes since
4169 	 * they should be considered as having a higher z-order.
4170 	 * Order is reversed to match iteration order in atomic check.
4171 	 *
4172 	 * Only support DCN for now, and only expose one so we don't encourage
4173 	 * userspace to use up all the pipes.
4174 	 */
4175 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4176 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4177 
4178 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4179 			continue;
4180 
4181 		if (!plane->blends_with_above || !plane->blends_with_below)
4182 			continue;
4183 
4184 		if (!plane->pixel_format_support.argb8888)
4185 			continue;
4186 
4187 		if (initialize_plane(dm, NULL, primary_planes + i,
4188 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4189 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4190 			goto fail;
4191 		}
4192 
4193 		/* Only create one overlay plane. */
4194 		break;
4195 	}
4196 
4197 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4198 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4199 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4200 			goto fail;
4201 		}
4202 
4203 #if defined(CONFIG_DRM_AMD_DC_DCN)
4204 	/* Use Outbox interrupt */
4205 	switch (adev->ip_versions[DCE_HWIP][0]) {
4206 	case IP_VERSION(3, 0, 0):
4207 	case IP_VERSION(3, 1, 2):
4208 	case IP_VERSION(3, 1, 3):
4209 	case IP_VERSION(2, 1, 0):
4210 		if (register_outbox_irq_handlers(dm->adev)) {
4211 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4212 			goto fail;
4213 		}
4214 		break;
4215 	default:
4216 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4217 			      adev->ip_versions[DCE_HWIP][0]);
4218 	}
4219 
4220 	/* Determine whether to enable PSR support by default. */
4221 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4222 		switch (adev->ip_versions[DCE_HWIP][0]) {
4223 		case IP_VERSION(3, 1, 2):
4224 		case IP_VERSION(3, 1, 3):
4225 			psr_feature_enabled = true;
4226 			break;
4227 		default:
4228 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4229 			break;
4230 		}
4231 	}
4232 #endif
4233 
4234 	/* loops over all connectors on the board */
4235 	for (i = 0; i < link_cnt; i++) {
4236 		struct dc_link *link = NULL;
4237 
4238 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4239 			DRM_ERROR(
4240 				"KMS: Cannot support more than %d display indexes\n",
4241 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4242 			continue;
4243 		}
4244 
4245 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4246 		if (!aconnector)
4247 			goto fail;
4248 
4249 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4250 		if (!aencoder)
4251 			goto fail;
4252 
4253 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4254 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4255 			goto fail;
4256 		}
4257 
4258 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4259 			DRM_ERROR("KMS: Failed to initialize connector\n");
4260 			goto fail;
4261 		}
4262 
4263 		link = dc_get_link_at_index(dm->dc, i);
4264 
4265 		if (!dc_link_detect_sink(link, &new_connection_type))
4266 			DRM_ERROR("KMS: Failed to detect connector\n");
4267 
4268 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4269 			emulated_link_detect(link);
4270 			amdgpu_dm_update_connector_after_detect(aconnector);
4271 
4272 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4273 			amdgpu_dm_update_connector_after_detect(aconnector);
4274 			register_backlight_device(dm, link);
4275 			if (dm->num_of_edps)
4276 				update_connector_ext_caps(aconnector);
4277 			if (psr_feature_enabled)
4278 				amdgpu_dm_set_psr_caps(link);
4279 		}
4280 
4281 
4282 	}
4283 
4284 	/*
4285 	 * Disable vblank IRQs aggressively for power-saving.
4286 	 *
4287 	 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4288 	 * is also supported.
4289 	 */
4290 	adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4291 
4292 	/* Software is initialized. Now we can register interrupt handlers. */
4293 	switch (adev->asic_type) {
4294 #if defined(CONFIG_DRM_AMD_DC_SI)
4295 	case CHIP_TAHITI:
4296 	case CHIP_PITCAIRN:
4297 	case CHIP_VERDE:
4298 	case CHIP_OLAND:
4299 		if (dce60_register_irq_handlers(dm->adev)) {
4300 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4301 			goto fail;
4302 		}
4303 		break;
4304 #endif
4305 	case CHIP_BONAIRE:
4306 	case CHIP_HAWAII:
4307 	case CHIP_KAVERI:
4308 	case CHIP_KABINI:
4309 	case CHIP_MULLINS:
4310 	case CHIP_TONGA:
4311 	case CHIP_FIJI:
4312 	case CHIP_CARRIZO:
4313 	case CHIP_STONEY:
4314 	case CHIP_POLARIS11:
4315 	case CHIP_POLARIS10:
4316 	case CHIP_POLARIS12:
4317 	case CHIP_VEGAM:
4318 	case CHIP_VEGA10:
4319 	case CHIP_VEGA12:
4320 	case CHIP_VEGA20:
4321 		if (dce110_register_irq_handlers(dm->adev)) {
4322 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4323 			goto fail;
4324 		}
4325 		break;
4326 	default:
4327 #if defined(CONFIG_DRM_AMD_DC_DCN)
4328 		switch (adev->ip_versions[DCE_HWIP][0]) {
4329 		case IP_VERSION(1, 0, 0):
4330 		case IP_VERSION(1, 0, 1):
4331 		case IP_VERSION(2, 0, 2):
4332 		case IP_VERSION(2, 0, 3):
4333 		case IP_VERSION(2, 0, 0):
4334 		case IP_VERSION(2, 1, 0):
4335 		case IP_VERSION(3, 0, 0):
4336 		case IP_VERSION(3, 0, 2):
4337 		case IP_VERSION(3, 0, 3):
4338 		case IP_VERSION(3, 0, 1):
4339 		case IP_VERSION(3, 1, 2):
4340 		case IP_VERSION(3, 1, 3):
4341 			if (dcn10_register_irq_handlers(dm->adev)) {
4342 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4343 				goto fail;
4344 			}
4345 			break;
4346 		default:
4347 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4348 					adev->ip_versions[DCE_HWIP][0]);
4349 			goto fail;
4350 		}
4351 #endif
4352 		break;
4353 	}
4354 
4355 	return 0;
4356 fail:
4357 	kfree(aencoder);
4358 	kfree(aconnector);
4359 
4360 	return -EINVAL;
4361 }
4362 
4363 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4364 {
4365 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4366 	return;
4367 }
4368 
4369 /******************************************************************************
4370  * amdgpu_display_funcs functions
4371  *****************************************************************************/
4372 
4373 /*
4374  * dm_bandwidth_update - program display watermarks
4375  *
4376  * @adev: amdgpu_device pointer
4377  *
4378  * Calculate and program the display watermarks and line buffer allocation.
4379  */
4380 static void dm_bandwidth_update(struct amdgpu_device *adev)
4381 {
4382 	/* TODO: implement later */
4383 }
4384 
4385 static const struct amdgpu_display_funcs dm_display_funcs = {
4386 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4387 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4388 	.backlight_set_level = NULL, /* never called for DC */
4389 	.backlight_get_level = NULL, /* never called for DC */
4390 	.hpd_sense = NULL,/* called unconditionally */
4391 	.hpd_set_polarity = NULL, /* called unconditionally */
4392 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4393 	.page_flip_get_scanoutpos =
4394 		dm_crtc_get_scanoutpos,/* called unconditionally */
4395 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4396 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4397 };
4398 
4399 #if defined(CONFIG_DEBUG_KERNEL_DC)
4400 
4401 static ssize_t s3_debug_store(struct device *device,
4402 			      struct device_attribute *attr,
4403 			      const char *buf,
4404 			      size_t count)
4405 {
4406 	int ret;
4407 	int s3_state;
4408 	struct drm_device *drm_dev = dev_get_drvdata(device);
4409 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4410 
4411 	ret = kstrtoint(buf, 0, &s3_state);
4412 
4413 	if (ret == 0) {
4414 		if (s3_state) {
4415 			dm_resume(adev);
4416 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4417 		} else
4418 			dm_suspend(adev);
4419 	}
4420 
4421 	return ret == 0 ? count : 0;
4422 }
4423 
4424 DEVICE_ATTR_WO(s3_debug);
4425 
4426 #endif
4427 
4428 static int dm_early_init(void *handle)
4429 {
4430 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4431 
4432 	switch (adev->asic_type) {
4433 #if defined(CONFIG_DRM_AMD_DC_SI)
4434 	case CHIP_TAHITI:
4435 	case CHIP_PITCAIRN:
4436 	case CHIP_VERDE:
4437 		adev->mode_info.num_crtc = 6;
4438 		adev->mode_info.num_hpd = 6;
4439 		adev->mode_info.num_dig = 6;
4440 		break;
4441 	case CHIP_OLAND:
4442 		adev->mode_info.num_crtc = 2;
4443 		adev->mode_info.num_hpd = 2;
4444 		adev->mode_info.num_dig = 2;
4445 		break;
4446 #endif
4447 	case CHIP_BONAIRE:
4448 	case CHIP_HAWAII:
4449 		adev->mode_info.num_crtc = 6;
4450 		adev->mode_info.num_hpd = 6;
4451 		adev->mode_info.num_dig = 6;
4452 		break;
4453 	case CHIP_KAVERI:
4454 		adev->mode_info.num_crtc = 4;
4455 		adev->mode_info.num_hpd = 6;
4456 		adev->mode_info.num_dig = 7;
4457 		break;
4458 	case CHIP_KABINI:
4459 	case CHIP_MULLINS:
4460 		adev->mode_info.num_crtc = 2;
4461 		adev->mode_info.num_hpd = 6;
4462 		adev->mode_info.num_dig = 6;
4463 		break;
4464 	case CHIP_FIJI:
4465 	case CHIP_TONGA:
4466 		adev->mode_info.num_crtc = 6;
4467 		adev->mode_info.num_hpd = 6;
4468 		adev->mode_info.num_dig = 7;
4469 		break;
4470 	case CHIP_CARRIZO:
4471 		adev->mode_info.num_crtc = 3;
4472 		adev->mode_info.num_hpd = 6;
4473 		adev->mode_info.num_dig = 9;
4474 		break;
4475 	case CHIP_STONEY:
4476 		adev->mode_info.num_crtc = 2;
4477 		adev->mode_info.num_hpd = 6;
4478 		adev->mode_info.num_dig = 9;
4479 		break;
4480 	case CHIP_POLARIS11:
4481 	case CHIP_POLARIS12:
4482 		adev->mode_info.num_crtc = 5;
4483 		adev->mode_info.num_hpd = 5;
4484 		adev->mode_info.num_dig = 5;
4485 		break;
4486 	case CHIP_POLARIS10:
4487 	case CHIP_VEGAM:
4488 		adev->mode_info.num_crtc = 6;
4489 		adev->mode_info.num_hpd = 6;
4490 		adev->mode_info.num_dig = 6;
4491 		break;
4492 	case CHIP_VEGA10:
4493 	case CHIP_VEGA12:
4494 	case CHIP_VEGA20:
4495 		adev->mode_info.num_crtc = 6;
4496 		adev->mode_info.num_hpd = 6;
4497 		adev->mode_info.num_dig = 6;
4498 		break;
4499 	default:
4500 #if defined(CONFIG_DRM_AMD_DC_DCN)
4501 		switch (adev->ip_versions[DCE_HWIP][0]) {
4502 		case IP_VERSION(2, 0, 2):
4503 		case IP_VERSION(3, 0, 0):
4504 			adev->mode_info.num_crtc = 6;
4505 			adev->mode_info.num_hpd = 6;
4506 			adev->mode_info.num_dig = 6;
4507 			break;
4508 		case IP_VERSION(2, 0, 0):
4509 		case IP_VERSION(3, 0, 2):
4510 			adev->mode_info.num_crtc = 5;
4511 			adev->mode_info.num_hpd = 5;
4512 			adev->mode_info.num_dig = 5;
4513 			break;
4514 		case IP_VERSION(2, 0, 3):
4515 		case IP_VERSION(3, 0, 3):
4516 			adev->mode_info.num_crtc = 2;
4517 			adev->mode_info.num_hpd = 2;
4518 			adev->mode_info.num_dig = 2;
4519 			break;
4520 		case IP_VERSION(1, 0, 0):
4521 		case IP_VERSION(1, 0, 1):
4522 		case IP_VERSION(3, 0, 1):
4523 		case IP_VERSION(2, 1, 0):
4524 		case IP_VERSION(3, 1, 2):
4525 		case IP_VERSION(3, 1, 3):
4526 			adev->mode_info.num_crtc = 4;
4527 			adev->mode_info.num_hpd = 4;
4528 			adev->mode_info.num_dig = 4;
4529 			break;
4530 		default:
4531 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4532 					adev->ip_versions[DCE_HWIP][0]);
4533 			return -EINVAL;
4534 		}
4535 #endif
4536 		break;
4537 	}
4538 
4539 	amdgpu_dm_set_irq_funcs(adev);
4540 
4541 	if (adev->mode_info.funcs == NULL)
4542 		adev->mode_info.funcs = &dm_display_funcs;
4543 
4544 	/*
4545 	 * Note: Do NOT change adev->audio_endpt_rreg and
4546 	 * adev->audio_endpt_wreg because they are initialised in
4547 	 * amdgpu_device_init()
4548 	 */
4549 #if defined(CONFIG_DEBUG_KERNEL_DC)
4550 	device_create_file(
4551 		adev_to_drm(adev)->dev,
4552 		&dev_attr_s3_debug);
4553 #endif
4554 
4555 	return 0;
4556 }
4557 
4558 static bool modeset_required(struct drm_crtc_state *crtc_state,
4559 			     struct dc_stream_state *new_stream,
4560 			     struct dc_stream_state *old_stream)
4561 {
4562 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4563 }
4564 
4565 static bool modereset_required(struct drm_crtc_state *crtc_state)
4566 {
4567 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4568 }
4569 
4570 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4571 {
4572 	drm_encoder_cleanup(encoder);
4573 	kfree(encoder);
4574 }
4575 
4576 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4577 	.destroy = amdgpu_dm_encoder_destroy,
4578 };
4579 
4580 
4581 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4582 					 struct drm_framebuffer *fb,
4583 					 int *min_downscale, int *max_upscale)
4584 {
4585 	struct amdgpu_device *adev = drm_to_adev(dev);
4586 	struct dc *dc = adev->dm.dc;
4587 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4588 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4589 
4590 	switch (fb->format->format) {
4591 	case DRM_FORMAT_P010:
4592 	case DRM_FORMAT_NV12:
4593 	case DRM_FORMAT_NV21:
4594 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4595 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4596 		break;
4597 
4598 	case DRM_FORMAT_XRGB16161616F:
4599 	case DRM_FORMAT_ARGB16161616F:
4600 	case DRM_FORMAT_XBGR16161616F:
4601 	case DRM_FORMAT_ABGR16161616F:
4602 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4603 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4604 		break;
4605 
4606 	default:
4607 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4608 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4609 		break;
4610 	}
4611 
4612 	/*
4613 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4614 	 * scaling factor of 1.0 == 1000 units.
4615 	 */
4616 	if (*max_upscale == 1)
4617 		*max_upscale = 1000;
4618 
4619 	if (*min_downscale == 1)
4620 		*min_downscale = 1000;
4621 }
4622 
4623 
4624 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4625 				const struct drm_plane_state *state,
4626 				struct dc_scaling_info *scaling_info)
4627 {
4628 	int scale_w, scale_h, min_downscale, max_upscale;
4629 
4630 	memset(scaling_info, 0, sizeof(*scaling_info));
4631 
4632 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4633 	scaling_info->src_rect.x = state->src_x >> 16;
4634 	scaling_info->src_rect.y = state->src_y >> 16;
4635 
4636 	/*
4637 	 * For reasons we don't (yet) fully understand a non-zero
4638 	 * src_y coordinate into an NV12 buffer can cause a
4639 	 * system hang on DCN1x.
4640 	 * To avoid hangs (and maybe be overly cautious)
4641 	 * let's reject both non-zero src_x and src_y.
4642 	 *
4643 	 * We currently know of only one use-case to reproduce a
4644 	 * scenario with non-zero src_x and src_y for NV12, which
4645 	 * is to gesture the YouTube Android app into full screen
4646 	 * on ChromeOS.
4647 	 */
4648 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4649 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4650 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4651 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4652 		return -EINVAL;
4653 
4654 	scaling_info->src_rect.width = state->src_w >> 16;
4655 	if (scaling_info->src_rect.width == 0)
4656 		return -EINVAL;
4657 
4658 	scaling_info->src_rect.height = state->src_h >> 16;
4659 	if (scaling_info->src_rect.height == 0)
4660 		return -EINVAL;
4661 
4662 	scaling_info->dst_rect.x = state->crtc_x;
4663 	scaling_info->dst_rect.y = state->crtc_y;
4664 
4665 	if (state->crtc_w == 0)
4666 		return -EINVAL;
4667 
4668 	scaling_info->dst_rect.width = state->crtc_w;
4669 
4670 	if (state->crtc_h == 0)
4671 		return -EINVAL;
4672 
4673 	scaling_info->dst_rect.height = state->crtc_h;
4674 
4675 	/* DRM doesn't specify clipping on destination output. */
4676 	scaling_info->clip_rect = scaling_info->dst_rect;
4677 
4678 	/* Validate scaling per-format with DC plane caps */
4679 	if (state->plane && state->plane->dev && state->fb) {
4680 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4681 					     &min_downscale, &max_upscale);
4682 	} else {
4683 		min_downscale = 250;
4684 		max_upscale = 16000;
4685 	}
4686 
4687 	scale_w = scaling_info->dst_rect.width * 1000 /
4688 		  scaling_info->src_rect.width;
4689 
4690 	if (scale_w < min_downscale || scale_w > max_upscale)
4691 		return -EINVAL;
4692 
4693 	scale_h = scaling_info->dst_rect.height * 1000 /
4694 		  scaling_info->src_rect.height;
4695 
4696 	if (scale_h < min_downscale || scale_h > max_upscale)
4697 		return -EINVAL;
4698 
4699 	/*
4700 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4701 	 * assume reasonable defaults based on the format.
4702 	 */
4703 
4704 	return 0;
4705 }
4706 
4707 static void
4708 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4709 				 uint64_t tiling_flags)
4710 {
4711 	/* Fill GFX8 params */
4712 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4713 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4714 
4715 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4716 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4717 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4718 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4719 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4720 
4721 		/* XXX fix me for VI */
4722 		tiling_info->gfx8.num_banks = num_banks;
4723 		tiling_info->gfx8.array_mode =
4724 				DC_ARRAY_2D_TILED_THIN1;
4725 		tiling_info->gfx8.tile_split = tile_split;
4726 		tiling_info->gfx8.bank_width = bankw;
4727 		tiling_info->gfx8.bank_height = bankh;
4728 		tiling_info->gfx8.tile_aspect = mtaspect;
4729 		tiling_info->gfx8.tile_mode =
4730 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4731 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4732 			== DC_ARRAY_1D_TILED_THIN1) {
4733 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4734 	}
4735 
4736 	tiling_info->gfx8.pipe_config =
4737 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4738 }
4739 
4740 static void
4741 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4742 				  union dc_tiling_info *tiling_info)
4743 {
4744 	tiling_info->gfx9.num_pipes =
4745 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4746 	tiling_info->gfx9.num_banks =
4747 		adev->gfx.config.gb_addr_config_fields.num_banks;
4748 	tiling_info->gfx9.pipe_interleave =
4749 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4750 	tiling_info->gfx9.num_shader_engines =
4751 		adev->gfx.config.gb_addr_config_fields.num_se;
4752 	tiling_info->gfx9.max_compressed_frags =
4753 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4754 	tiling_info->gfx9.num_rb_per_se =
4755 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4756 	tiling_info->gfx9.shaderEnable = 1;
4757 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4758 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4759 }
4760 
4761 static int
4762 validate_dcc(struct amdgpu_device *adev,
4763 	     const enum surface_pixel_format format,
4764 	     const enum dc_rotation_angle rotation,
4765 	     const union dc_tiling_info *tiling_info,
4766 	     const struct dc_plane_dcc_param *dcc,
4767 	     const struct dc_plane_address *address,
4768 	     const struct plane_size *plane_size)
4769 {
4770 	struct dc *dc = adev->dm.dc;
4771 	struct dc_dcc_surface_param input;
4772 	struct dc_surface_dcc_cap output;
4773 
4774 	memset(&input, 0, sizeof(input));
4775 	memset(&output, 0, sizeof(output));
4776 
4777 	if (!dcc->enable)
4778 		return 0;
4779 
4780 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4781 	    !dc->cap_funcs.get_dcc_compression_cap)
4782 		return -EINVAL;
4783 
4784 	input.format = format;
4785 	input.surface_size.width = plane_size->surface_size.width;
4786 	input.surface_size.height = plane_size->surface_size.height;
4787 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4788 
4789 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4790 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4791 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4792 		input.scan = SCAN_DIRECTION_VERTICAL;
4793 
4794 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4795 		return -EINVAL;
4796 
4797 	if (!output.capable)
4798 		return -EINVAL;
4799 
4800 	if (dcc->independent_64b_blks == 0 &&
4801 	    output.grph.rgb.independent_64b_blks != 0)
4802 		return -EINVAL;
4803 
4804 	return 0;
4805 }
4806 
4807 static bool
4808 modifier_has_dcc(uint64_t modifier)
4809 {
4810 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4811 }
4812 
4813 static unsigned
4814 modifier_gfx9_swizzle_mode(uint64_t modifier)
4815 {
4816 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4817 		return 0;
4818 
4819 	return AMD_FMT_MOD_GET(TILE, modifier);
4820 }
4821 
4822 static const struct drm_format_info *
4823 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4824 {
4825 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4826 }
4827 
4828 static void
4829 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4830 				    union dc_tiling_info *tiling_info,
4831 				    uint64_t modifier)
4832 {
4833 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4834 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4835 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4836 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4837 
4838 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4839 
4840 	if (!IS_AMD_FMT_MOD(modifier))
4841 		return;
4842 
4843 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4844 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4845 
4846 	if (adev->family >= AMDGPU_FAMILY_NV) {
4847 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4848 	} else {
4849 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4850 
4851 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4852 	}
4853 }
4854 
4855 enum dm_micro_swizzle {
4856 	MICRO_SWIZZLE_Z = 0,
4857 	MICRO_SWIZZLE_S = 1,
4858 	MICRO_SWIZZLE_D = 2,
4859 	MICRO_SWIZZLE_R = 3
4860 };
4861 
4862 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4863 					  uint32_t format,
4864 					  uint64_t modifier)
4865 {
4866 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4867 	const struct drm_format_info *info = drm_format_info(format);
4868 	int i;
4869 
4870 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4871 
4872 	if (!info)
4873 		return false;
4874 
4875 	/*
4876 	 * We always have to allow these modifiers:
4877 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4878 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4879 	 */
4880 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4881 	    modifier == DRM_FORMAT_MOD_INVALID) {
4882 		return true;
4883 	}
4884 
4885 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4886 	for (i = 0; i < plane->modifier_count; i++) {
4887 		if (modifier == plane->modifiers[i])
4888 			break;
4889 	}
4890 	if (i == plane->modifier_count)
4891 		return false;
4892 
4893 	/*
4894 	 * For D swizzle the canonical modifier depends on the bpp, so check
4895 	 * it here.
4896 	 */
4897 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4898 	    adev->family >= AMDGPU_FAMILY_NV) {
4899 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4900 			return false;
4901 	}
4902 
4903 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4904 	    info->cpp[0] < 8)
4905 		return false;
4906 
4907 	if (modifier_has_dcc(modifier)) {
4908 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4909 		if (info->cpp[0] != 4)
4910 			return false;
4911 		/* We support multi-planar formats, but not when combined with
4912 		 * additional DCC metadata planes. */
4913 		if (info->num_planes > 1)
4914 			return false;
4915 	}
4916 
4917 	return true;
4918 }
4919 
4920 static void
4921 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4922 {
4923 	if (!*mods)
4924 		return;
4925 
4926 	if (*cap - *size < 1) {
4927 		uint64_t new_cap = *cap * 2;
4928 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4929 
4930 		if (!new_mods) {
4931 			kfree(*mods);
4932 			*mods = NULL;
4933 			return;
4934 		}
4935 
4936 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4937 		kfree(*mods);
4938 		*mods = new_mods;
4939 		*cap = new_cap;
4940 	}
4941 
4942 	(*mods)[*size] = mod;
4943 	*size += 1;
4944 }
4945 
4946 static void
4947 add_gfx9_modifiers(const struct amdgpu_device *adev,
4948 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4949 {
4950 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4951 	int pipe_xor_bits = min(8, pipes +
4952 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4953 	int bank_xor_bits = min(8 - pipe_xor_bits,
4954 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4955 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4956 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4957 
4958 
4959 	if (adev->family == AMDGPU_FAMILY_RV) {
4960 		/* Raven2 and later */
4961 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4962 
4963 		/*
4964 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4965 		 * doesn't support _D on DCN
4966 		 */
4967 
4968 		if (has_constant_encode) {
4969 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4970 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4971 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4972 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4973 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4974 				    AMD_FMT_MOD_SET(DCC, 1) |
4975 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4976 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4977 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4978 		}
4979 
4980 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4981 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4982 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4983 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4984 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4985 			    AMD_FMT_MOD_SET(DCC, 1) |
4986 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4987 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4988 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4989 
4990 		if (has_constant_encode) {
4991 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4992 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4993 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4994 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4995 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4996 				    AMD_FMT_MOD_SET(DCC, 1) |
4997 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4998 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4999 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5000 
5001 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5002 				    AMD_FMT_MOD_SET(RB, rb) |
5003 				    AMD_FMT_MOD_SET(PIPE, pipes));
5004 		}
5005 
5006 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5007 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5008 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5009 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5010 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5011 			    AMD_FMT_MOD_SET(DCC, 1) |
5012 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5013 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5014 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5015 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5016 			    AMD_FMT_MOD_SET(RB, rb) |
5017 			    AMD_FMT_MOD_SET(PIPE, pipes));
5018 	}
5019 
5020 	/*
5021 	 * Only supported for 64bpp on Raven, will be filtered on format in
5022 	 * dm_plane_format_mod_supported.
5023 	 */
5024 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5025 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5026 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5027 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5028 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5029 
5030 	if (adev->family == AMDGPU_FAMILY_RV) {
5031 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5032 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5033 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5034 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5035 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5036 	}
5037 
5038 	/*
5039 	 * Only supported for 64bpp on Raven, will be filtered on format in
5040 	 * dm_plane_format_mod_supported.
5041 	 */
5042 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5043 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5044 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5045 
5046 	if (adev->family == AMDGPU_FAMILY_RV) {
5047 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5048 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5049 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5050 	}
5051 }
5052 
5053 static void
5054 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5055 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5056 {
5057 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5058 
5059 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5060 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5061 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5062 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5063 		    AMD_FMT_MOD_SET(DCC, 1) |
5064 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5065 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5066 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5067 
5068 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5069 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5070 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5071 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5072 		    AMD_FMT_MOD_SET(DCC, 1) |
5073 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5074 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5075 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5076 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5077 
5078 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5080 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5081 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5082 
5083 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5084 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5085 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5086 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5087 
5088 
5089 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5090 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5091 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5092 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5093 
5094 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5095 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5096 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5097 }
5098 
5099 static void
5100 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5101 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5102 {
5103 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5104 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5105 
5106 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5107 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5108 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5109 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5110 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5111 		    AMD_FMT_MOD_SET(DCC, 1) |
5112 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5113 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5114 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5115 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5116 
5117 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5118 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5119 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5120 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5121 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5122 		    AMD_FMT_MOD_SET(DCC, 1) |
5123 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5124 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5125 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5126 
5127 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5128 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5129 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5130 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5131 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5132 		    AMD_FMT_MOD_SET(DCC, 1) |
5133 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5134 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5135 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5136 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5137 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5138 
5139 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5140 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5141 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5142 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5143 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5144 		    AMD_FMT_MOD_SET(DCC, 1) |
5145 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5146 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5147 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5148 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5149 
5150 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5151 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5152 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5153 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5154 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5155 
5156 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5157 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5158 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5159 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5160 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5161 
5162 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5163 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5164 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5165 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5166 
5167 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5168 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5169 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5170 }
5171 
5172 static int
5173 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5174 {
5175 	uint64_t size = 0, capacity = 128;
5176 	*mods = NULL;
5177 
5178 	/* We have not hooked up any pre-GFX9 modifiers. */
5179 	if (adev->family < AMDGPU_FAMILY_AI)
5180 		return 0;
5181 
5182 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5183 
5184 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5185 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5186 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5187 		return *mods ? 0 : -ENOMEM;
5188 	}
5189 
5190 	switch (adev->family) {
5191 	case AMDGPU_FAMILY_AI:
5192 	case AMDGPU_FAMILY_RV:
5193 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5194 		break;
5195 	case AMDGPU_FAMILY_NV:
5196 	case AMDGPU_FAMILY_VGH:
5197 	case AMDGPU_FAMILY_YC:
5198 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5199 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5200 		else
5201 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5202 		break;
5203 	}
5204 
5205 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5206 
5207 	/* INVALID marks the end of the list. */
5208 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5209 
5210 	if (!*mods)
5211 		return -ENOMEM;
5212 
5213 	return 0;
5214 }
5215 
5216 static int
5217 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5218 					  const struct amdgpu_framebuffer *afb,
5219 					  const enum surface_pixel_format format,
5220 					  const enum dc_rotation_angle rotation,
5221 					  const struct plane_size *plane_size,
5222 					  union dc_tiling_info *tiling_info,
5223 					  struct dc_plane_dcc_param *dcc,
5224 					  struct dc_plane_address *address,
5225 					  const bool force_disable_dcc)
5226 {
5227 	const uint64_t modifier = afb->base.modifier;
5228 	int ret = 0;
5229 
5230 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5231 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5232 
5233 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5234 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5235 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5236 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5237 
5238 		dcc->enable = 1;
5239 		dcc->meta_pitch = afb->base.pitches[1];
5240 		dcc->independent_64b_blks = independent_64b_blks;
5241 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5242 			if (independent_64b_blks && independent_128b_blks)
5243 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5244 			else if (independent_128b_blks)
5245 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5246 			else if (independent_64b_blks && !independent_128b_blks)
5247 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5248 			else
5249 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5250 		} else {
5251 			if (independent_64b_blks)
5252 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5253 			else
5254 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5255 		}
5256 
5257 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5258 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5259 	}
5260 
5261 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5262 	if (ret)
5263 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5264 
5265 	return ret;
5266 }
5267 
5268 static int
5269 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5270 			     const struct amdgpu_framebuffer *afb,
5271 			     const enum surface_pixel_format format,
5272 			     const enum dc_rotation_angle rotation,
5273 			     const uint64_t tiling_flags,
5274 			     union dc_tiling_info *tiling_info,
5275 			     struct plane_size *plane_size,
5276 			     struct dc_plane_dcc_param *dcc,
5277 			     struct dc_plane_address *address,
5278 			     bool tmz_surface,
5279 			     bool force_disable_dcc)
5280 {
5281 	const struct drm_framebuffer *fb = &afb->base;
5282 	int ret;
5283 
5284 	memset(tiling_info, 0, sizeof(*tiling_info));
5285 	memset(plane_size, 0, sizeof(*plane_size));
5286 	memset(dcc, 0, sizeof(*dcc));
5287 	memset(address, 0, sizeof(*address));
5288 
5289 	address->tmz_surface = tmz_surface;
5290 
5291 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5292 		uint64_t addr = afb->address + fb->offsets[0];
5293 
5294 		plane_size->surface_size.x = 0;
5295 		plane_size->surface_size.y = 0;
5296 		plane_size->surface_size.width = fb->width;
5297 		plane_size->surface_size.height = fb->height;
5298 		plane_size->surface_pitch =
5299 			fb->pitches[0] / fb->format->cpp[0];
5300 
5301 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5302 		address->grph.addr.low_part = lower_32_bits(addr);
5303 		address->grph.addr.high_part = upper_32_bits(addr);
5304 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5305 		uint64_t luma_addr = afb->address + fb->offsets[0];
5306 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5307 
5308 		plane_size->surface_size.x = 0;
5309 		plane_size->surface_size.y = 0;
5310 		plane_size->surface_size.width = fb->width;
5311 		plane_size->surface_size.height = fb->height;
5312 		plane_size->surface_pitch =
5313 			fb->pitches[0] / fb->format->cpp[0];
5314 
5315 		plane_size->chroma_size.x = 0;
5316 		plane_size->chroma_size.y = 0;
5317 		/* TODO: set these based on surface format */
5318 		plane_size->chroma_size.width = fb->width / 2;
5319 		plane_size->chroma_size.height = fb->height / 2;
5320 
5321 		plane_size->chroma_pitch =
5322 			fb->pitches[1] / fb->format->cpp[1];
5323 
5324 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5325 		address->video_progressive.luma_addr.low_part =
5326 			lower_32_bits(luma_addr);
5327 		address->video_progressive.luma_addr.high_part =
5328 			upper_32_bits(luma_addr);
5329 		address->video_progressive.chroma_addr.low_part =
5330 			lower_32_bits(chroma_addr);
5331 		address->video_progressive.chroma_addr.high_part =
5332 			upper_32_bits(chroma_addr);
5333 	}
5334 
5335 	if (adev->family >= AMDGPU_FAMILY_AI) {
5336 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5337 								rotation, plane_size,
5338 								tiling_info, dcc,
5339 								address,
5340 								force_disable_dcc);
5341 		if (ret)
5342 			return ret;
5343 	} else {
5344 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5345 	}
5346 
5347 	return 0;
5348 }
5349 
5350 static void
5351 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5352 			       bool *per_pixel_alpha, bool *global_alpha,
5353 			       int *global_alpha_value)
5354 {
5355 	*per_pixel_alpha = false;
5356 	*global_alpha = false;
5357 	*global_alpha_value = 0xff;
5358 
5359 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5360 		return;
5361 
5362 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5363 		static const uint32_t alpha_formats[] = {
5364 			DRM_FORMAT_ARGB8888,
5365 			DRM_FORMAT_RGBA8888,
5366 			DRM_FORMAT_ABGR8888,
5367 		};
5368 		uint32_t format = plane_state->fb->format->format;
5369 		unsigned int i;
5370 
5371 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5372 			if (format == alpha_formats[i]) {
5373 				*per_pixel_alpha = true;
5374 				break;
5375 			}
5376 		}
5377 	}
5378 
5379 	if (plane_state->alpha < 0xffff) {
5380 		*global_alpha = true;
5381 		*global_alpha_value = plane_state->alpha >> 8;
5382 	}
5383 }
5384 
5385 static int
5386 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5387 			    const enum surface_pixel_format format,
5388 			    enum dc_color_space *color_space)
5389 {
5390 	bool full_range;
5391 
5392 	*color_space = COLOR_SPACE_SRGB;
5393 
5394 	/* DRM color properties only affect non-RGB formats. */
5395 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5396 		return 0;
5397 
5398 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5399 
5400 	switch (plane_state->color_encoding) {
5401 	case DRM_COLOR_YCBCR_BT601:
5402 		if (full_range)
5403 			*color_space = COLOR_SPACE_YCBCR601;
5404 		else
5405 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5406 		break;
5407 
5408 	case DRM_COLOR_YCBCR_BT709:
5409 		if (full_range)
5410 			*color_space = COLOR_SPACE_YCBCR709;
5411 		else
5412 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5413 		break;
5414 
5415 	case DRM_COLOR_YCBCR_BT2020:
5416 		if (full_range)
5417 			*color_space = COLOR_SPACE_2020_YCBCR;
5418 		else
5419 			return -EINVAL;
5420 		break;
5421 
5422 	default:
5423 		return -EINVAL;
5424 	}
5425 
5426 	return 0;
5427 }
5428 
5429 static int
5430 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5431 			    const struct drm_plane_state *plane_state,
5432 			    const uint64_t tiling_flags,
5433 			    struct dc_plane_info *plane_info,
5434 			    struct dc_plane_address *address,
5435 			    bool tmz_surface,
5436 			    bool force_disable_dcc)
5437 {
5438 	const struct drm_framebuffer *fb = plane_state->fb;
5439 	const struct amdgpu_framebuffer *afb =
5440 		to_amdgpu_framebuffer(plane_state->fb);
5441 	int ret;
5442 
5443 	memset(plane_info, 0, sizeof(*plane_info));
5444 
5445 	switch (fb->format->format) {
5446 	case DRM_FORMAT_C8:
5447 		plane_info->format =
5448 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5449 		break;
5450 	case DRM_FORMAT_RGB565:
5451 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5452 		break;
5453 	case DRM_FORMAT_XRGB8888:
5454 	case DRM_FORMAT_ARGB8888:
5455 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5456 		break;
5457 	case DRM_FORMAT_XRGB2101010:
5458 	case DRM_FORMAT_ARGB2101010:
5459 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5460 		break;
5461 	case DRM_FORMAT_XBGR2101010:
5462 	case DRM_FORMAT_ABGR2101010:
5463 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5464 		break;
5465 	case DRM_FORMAT_XBGR8888:
5466 	case DRM_FORMAT_ABGR8888:
5467 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5468 		break;
5469 	case DRM_FORMAT_NV21:
5470 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5471 		break;
5472 	case DRM_FORMAT_NV12:
5473 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5474 		break;
5475 	case DRM_FORMAT_P010:
5476 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5477 		break;
5478 	case DRM_FORMAT_XRGB16161616F:
5479 	case DRM_FORMAT_ARGB16161616F:
5480 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5481 		break;
5482 	case DRM_FORMAT_XBGR16161616F:
5483 	case DRM_FORMAT_ABGR16161616F:
5484 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5485 		break;
5486 	case DRM_FORMAT_XRGB16161616:
5487 	case DRM_FORMAT_ARGB16161616:
5488 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5489 		break;
5490 	case DRM_FORMAT_XBGR16161616:
5491 	case DRM_FORMAT_ABGR16161616:
5492 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5493 		break;
5494 	default:
5495 		DRM_ERROR(
5496 			"Unsupported screen format %p4cc\n",
5497 			&fb->format->format);
5498 		return -EINVAL;
5499 	}
5500 
5501 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5502 	case DRM_MODE_ROTATE_0:
5503 		plane_info->rotation = ROTATION_ANGLE_0;
5504 		break;
5505 	case DRM_MODE_ROTATE_90:
5506 		plane_info->rotation = ROTATION_ANGLE_90;
5507 		break;
5508 	case DRM_MODE_ROTATE_180:
5509 		plane_info->rotation = ROTATION_ANGLE_180;
5510 		break;
5511 	case DRM_MODE_ROTATE_270:
5512 		plane_info->rotation = ROTATION_ANGLE_270;
5513 		break;
5514 	default:
5515 		plane_info->rotation = ROTATION_ANGLE_0;
5516 		break;
5517 	}
5518 
5519 	plane_info->visible = true;
5520 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5521 
5522 	plane_info->layer_index = 0;
5523 
5524 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5525 					  &plane_info->color_space);
5526 	if (ret)
5527 		return ret;
5528 
5529 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5530 					   plane_info->rotation, tiling_flags,
5531 					   &plane_info->tiling_info,
5532 					   &plane_info->plane_size,
5533 					   &plane_info->dcc, address, tmz_surface,
5534 					   force_disable_dcc);
5535 	if (ret)
5536 		return ret;
5537 
5538 	fill_blending_from_plane_state(
5539 		plane_state, &plane_info->per_pixel_alpha,
5540 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5541 
5542 	return 0;
5543 }
5544 
5545 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5546 				    struct dc_plane_state *dc_plane_state,
5547 				    struct drm_plane_state *plane_state,
5548 				    struct drm_crtc_state *crtc_state)
5549 {
5550 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5551 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5552 	struct dc_scaling_info scaling_info;
5553 	struct dc_plane_info plane_info;
5554 	int ret;
5555 	bool force_disable_dcc = false;
5556 
5557 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5558 	if (ret)
5559 		return ret;
5560 
5561 	dc_plane_state->src_rect = scaling_info.src_rect;
5562 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5563 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5564 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5565 
5566 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5567 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5568 					  afb->tiling_flags,
5569 					  &plane_info,
5570 					  &dc_plane_state->address,
5571 					  afb->tmz_surface,
5572 					  force_disable_dcc);
5573 	if (ret)
5574 		return ret;
5575 
5576 	dc_plane_state->format = plane_info.format;
5577 	dc_plane_state->color_space = plane_info.color_space;
5578 	dc_plane_state->format = plane_info.format;
5579 	dc_plane_state->plane_size = plane_info.plane_size;
5580 	dc_plane_state->rotation = plane_info.rotation;
5581 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5582 	dc_plane_state->stereo_format = plane_info.stereo_format;
5583 	dc_plane_state->tiling_info = plane_info.tiling_info;
5584 	dc_plane_state->visible = plane_info.visible;
5585 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5586 	dc_plane_state->global_alpha = plane_info.global_alpha;
5587 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5588 	dc_plane_state->dcc = plane_info.dcc;
5589 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5590 	dc_plane_state->flip_int_enabled = true;
5591 
5592 	/*
5593 	 * Always set input transfer function, since plane state is refreshed
5594 	 * every time.
5595 	 */
5596 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5597 	if (ret)
5598 		return ret;
5599 
5600 	return 0;
5601 }
5602 
5603 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5604 					   const struct dm_connector_state *dm_state,
5605 					   struct dc_stream_state *stream)
5606 {
5607 	enum amdgpu_rmx_type rmx_type;
5608 
5609 	struct rect src = { 0 }; /* viewport in composition space*/
5610 	struct rect dst = { 0 }; /* stream addressable area */
5611 
5612 	/* no mode. nothing to be done */
5613 	if (!mode)
5614 		return;
5615 
5616 	/* Full screen scaling by default */
5617 	src.width = mode->hdisplay;
5618 	src.height = mode->vdisplay;
5619 	dst.width = stream->timing.h_addressable;
5620 	dst.height = stream->timing.v_addressable;
5621 
5622 	if (dm_state) {
5623 		rmx_type = dm_state->scaling;
5624 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5625 			if (src.width * dst.height <
5626 					src.height * dst.width) {
5627 				/* height needs less upscaling/more downscaling */
5628 				dst.width = src.width *
5629 						dst.height / src.height;
5630 			} else {
5631 				/* width needs less upscaling/more downscaling */
5632 				dst.height = src.height *
5633 						dst.width / src.width;
5634 			}
5635 		} else if (rmx_type == RMX_CENTER) {
5636 			dst = src;
5637 		}
5638 
5639 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5640 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5641 
5642 		if (dm_state->underscan_enable) {
5643 			dst.x += dm_state->underscan_hborder / 2;
5644 			dst.y += dm_state->underscan_vborder / 2;
5645 			dst.width -= dm_state->underscan_hborder;
5646 			dst.height -= dm_state->underscan_vborder;
5647 		}
5648 	}
5649 
5650 	stream->src = src;
5651 	stream->dst = dst;
5652 
5653 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5654 		      dst.x, dst.y, dst.width, dst.height);
5655 
5656 }
5657 
5658 static enum dc_color_depth
5659 convert_color_depth_from_display_info(const struct drm_connector *connector,
5660 				      bool is_y420, int requested_bpc)
5661 {
5662 	uint8_t bpc;
5663 
5664 	if (is_y420) {
5665 		bpc = 8;
5666 
5667 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5668 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5669 			bpc = 16;
5670 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5671 			bpc = 12;
5672 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5673 			bpc = 10;
5674 	} else {
5675 		bpc = (uint8_t)connector->display_info.bpc;
5676 		/* Assume 8 bpc by default if no bpc is specified. */
5677 		bpc = bpc ? bpc : 8;
5678 	}
5679 
5680 	if (requested_bpc > 0) {
5681 		/*
5682 		 * Cap display bpc based on the user requested value.
5683 		 *
5684 		 * The value for state->max_bpc may not correctly updated
5685 		 * depending on when the connector gets added to the state
5686 		 * or if this was called outside of atomic check, so it
5687 		 * can't be used directly.
5688 		 */
5689 		bpc = min_t(u8, bpc, requested_bpc);
5690 
5691 		/* Round down to the nearest even number. */
5692 		bpc = bpc - (bpc & 1);
5693 	}
5694 
5695 	switch (bpc) {
5696 	case 0:
5697 		/*
5698 		 * Temporary Work around, DRM doesn't parse color depth for
5699 		 * EDID revision before 1.4
5700 		 * TODO: Fix edid parsing
5701 		 */
5702 		return COLOR_DEPTH_888;
5703 	case 6:
5704 		return COLOR_DEPTH_666;
5705 	case 8:
5706 		return COLOR_DEPTH_888;
5707 	case 10:
5708 		return COLOR_DEPTH_101010;
5709 	case 12:
5710 		return COLOR_DEPTH_121212;
5711 	case 14:
5712 		return COLOR_DEPTH_141414;
5713 	case 16:
5714 		return COLOR_DEPTH_161616;
5715 	default:
5716 		return COLOR_DEPTH_UNDEFINED;
5717 	}
5718 }
5719 
5720 static enum dc_aspect_ratio
5721 get_aspect_ratio(const struct drm_display_mode *mode_in)
5722 {
5723 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5724 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5725 }
5726 
5727 static enum dc_color_space
5728 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5729 {
5730 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5731 
5732 	switch (dc_crtc_timing->pixel_encoding)	{
5733 	case PIXEL_ENCODING_YCBCR422:
5734 	case PIXEL_ENCODING_YCBCR444:
5735 	case PIXEL_ENCODING_YCBCR420:
5736 	{
5737 		/*
5738 		 * 27030khz is the separation point between HDTV and SDTV
5739 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5740 		 * respectively
5741 		 */
5742 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5743 			if (dc_crtc_timing->flags.Y_ONLY)
5744 				color_space =
5745 					COLOR_SPACE_YCBCR709_LIMITED;
5746 			else
5747 				color_space = COLOR_SPACE_YCBCR709;
5748 		} else {
5749 			if (dc_crtc_timing->flags.Y_ONLY)
5750 				color_space =
5751 					COLOR_SPACE_YCBCR601_LIMITED;
5752 			else
5753 				color_space = COLOR_SPACE_YCBCR601;
5754 		}
5755 
5756 	}
5757 	break;
5758 	case PIXEL_ENCODING_RGB:
5759 		color_space = COLOR_SPACE_SRGB;
5760 		break;
5761 
5762 	default:
5763 		WARN_ON(1);
5764 		break;
5765 	}
5766 
5767 	return color_space;
5768 }
5769 
5770 static bool adjust_colour_depth_from_display_info(
5771 	struct dc_crtc_timing *timing_out,
5772 	const struct drm_display_info *info)
5773 {
5774 	enum dc_color_depth depth = timing_out->display_color_depth;
5775 	int normalized_clk;
5776 	do {
5777 		normalized_clk = timing_out->pix_clk_100hz / 10;
5778 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5779 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5780 			normalized_clk /= 2;
5781 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5782 		switch (depth) {
5783 		case COLOR_DEPTH_888:
5784 			break;
5785 		case COLOR_DEPTH_101010:
5786 			normalized_clk = (normalized_clk * 30) / 24;
5787 			break;
5788 		case COLOR_DEPTH_121212:
5789 			normalized_clk = (normalized_clk * 36) / 24;
5790 			break;
5791 		case COLOR_DEPTH_161616:
5792 			normalized_clk = (normalized_clk * 48) / 24;
5793 			break;
5794 		default:
5795 			/* The above depths are the only ones valid for HDMI. */
5796 			return false;
5797 		}
5798 		if (normalized_clk <= info->max_tmds_clock) {
5799 			timing_out->display_color_depth = depth;
5800 			return true;
5801 		}
5802 	} while (--depth > COLOR_DEPTH_666);
5803 	return false;
5804 }
5805 
5806 static void fill_stream_properties_from_drm_display_mode(
5807 	struct dc_stream_state *stream,
5808 	const struct drm_display_mode *mode_in,
5809 	const struct drm_connector *connector,
5810 	const struct drm_connector_state *connector_state,
5811 	const struct dc_stream_state *old_stream,
5812 	int requested_bpc)
5813 {
5814 	struct dc_crtc_timing *timing_out = &stream->timing;
5815 	const struct drm_display_info *info = &connector->display_info;
5816 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5817 	struct hdmi_vendor_infoframe hv_frame;
5818 	struct hdmi_avi_infoframe avi_frame;
5819 
5820 	memset(&hv_frame, 0, sizeof(hv_frame));
5821 	memset(&avi_frame, 0, sizeof(avi_frame));
5822 
5823 	timing_out->h_border_left = 0;
5824 	timing_out->h_border_right = 0;
5825 	timing_out->v_border_top = 0;
5826 	timing_out->v_border_bottom = 0;
5827 	/* TODO: un-hardcode */
5828 	if (drm_mode_is_420_only(info, mode_in)
5829 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5830 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5831 	else if (drm_mode_is_420_also(info, mode_in)
5832 			&& aconnector->force_yuv420_output)
5833 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5834 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5835 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5836 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5837 	else
5838 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5839 
5840 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5841 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5842 		connector,
5843 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5844 		requested_bpc);
5845 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5846 	timing_out->hdmi_vic = 0;
5847 
5848 	if(old_stream) {
5849 		timing_out->vic = old_stream->timing.vic;
5850 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5851 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5852 	} else {
5853 		timing_out->vic = drm_match_cea_mode(mode_in);
5854 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5855 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5856 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5857 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5858 	}
5859 
5860 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5861 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5862 		timing_out->vic = avi_frame.video_code;
5863 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5864 		timing_out->hdmi_vic = hv_frame.vic;
5865 	}
5866 
5867 	if (is_freesync_video_mode(mode_in, aconnector)) {
5868 		timing_out->h_addressable = mode_in->hdisplay;
5869 		timing_out->h_total = mode_in->htotal;
5870 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5871 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5872 		timing_out->v_total = mode_in->vtotal;
5873 		timing_out->v_addressable = mode_in->vdisplay;
5874 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5875 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5876 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5877 	} else {
5878 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5879 		timing_out->h_total = mode_in->crtc_htotal;
5880 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5881 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5882 		timing_out->v_total = mode_in->crtc_vtotal;
5883 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5884 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5885 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5886 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5887 	}
5888 
5889 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5890 
5891 	stream->output_color_space = get_output_color_space(timing_out);
5892 
5893 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5894 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5895 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5896 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5897 		    drm_mode_is_420_also(info, mode_in) &&
5898 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5899 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5900 			adjust_colour_depth_from_display_info(timing_out, info);
5901 		}
5902 	}
5903 }
5904 
5905 static void fill_audio_info(struct audio_info *audio_info,
5906 			    const struct drm_connector *drm_connector,
5907 			    const struct dc_sink *dc_sink)
5908 {
5909 	int i = 0;
5910 	int cea_revision = 0;
5911 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5912 
5913 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5914 	audio_info->product_id = edid_caps->product_id;
5915 
5916 	cea_revision = drm_connector->display_info.cea_rev;
5917 
5918 	strscpy(audio_info->display_name,
5919 		edid_caps->display_name,
5920 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5921 
5922 	if (cea_revision >= 3) {
5923 		audio_info->mode_count = edid_caps->audio_mode_count;
5924 
5925 		for (i = 0; i < audio_info->mode_count; ++i) {
5926 			audio_info->modes[i].format_code =
5927 					(enum audio_format_code)
5928 					(edid_caps->audio_modes[i].format_code);
5929 			audio_info->modes[i].channel_count =
5930 					edid_caps->audio_modes[i].channel_count;
5931 			audio_info->modes[i].sample_rates.all =
5932 					edid_caps->audio_modes[i].sample_rate;
5933 			audio_info->modes[i].sample_size =
5934 					edid_caps->audio_modes[i].sample_size;
5935 		}
5936 	}
5937 
5938 	audio_info->flags.all = edid_caps->speaker_flags;
5939 
5940 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5941 	if (drm_connector->latency_present[0]) {
5942 		audio_info->video_latency = drm_connector->video_latency[0];
5943 		audio_info->audio_latency = drm_connector->audio_latency[0];
5944 	}
5945 
5946 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5947 
5948 }
5949 
5950 static void
5951 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5952 				      struct drm_display_mode *dst_mode)
5953 {
5954 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5955 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5956 	dst_mode->crtc_clock = src_mode->crtc_clock;
5957 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5958 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5959 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5960 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5961 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5962 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5963 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5964 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5965 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5966 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5967 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5968 }
5969 
5970 static void
5971 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5972 					const struct drm_display_mode *native_mode,
5973 					bool scale_enabled)
5974 {
5975 	if (scale_enabled) {
5976 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5977 	} else if (native_mode->clock == drm_mode->clock &&
5978 			native_mode->htotal == drm_mode->htotal &&
5979 			native_mode->vtotal == drm_mode->vtotal) {
5980 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5981 	} else {
5982 		/* no scaling nor amdgpu inserted, no need to patch */
5983 	}
5984 }
5985 
5986 static struct dc_sink *
5987 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5988 {
5989 	struct dc_sink_init_data sink_init_data = { 0 };
5990 	struct dc_sink *sink = NULL;
5991 	sink_init_data.link = aconnector->dc_link;
5992 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5993 
5994 	sink = dc_sink_create(&sink_init_data);
5995 	if (!sink) {
5996 		DRM_ERROR("Failed to create sink!\n");
5997 		return NULL;
5998 	}
5999 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6000 
6001 	return sink;
6002 }
6003 
6004 static void set_multisync_trigger_params(
6005 		struct dc_stream_state *stream)
6006 {
6007 	struct dc_stream_state *master = NULL;
6008 
6009 	if (stream->triggered_crtc_reset.enabled) {
6010 		master = stream->triggered_crtc_reset.event_source;
6011 		stream->triggered_crtc_reset.event =
6012 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6013 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6014 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6015 	}
6016 }
6017 
6018 static void set_master_stream(struct dc_stream_state *stream_set[],
6019 			      int stream_count)
6020 {
6021 	int j, highest_rfr = 0, master_stream = 0;
6022 
6023 	for (j = 0;  j < stream_count; j++) {
6024 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6025 			int refresh_rate = 0;
6026 
6027 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6028 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6029 			if (refresh_rate > highest_rfr) {
6030 				highest_rfr = refresh_rate;
6031 				master_stream = j;
6032 			}
6033 		}
6034 	}
6035 	for (j = 0;  j < stream_count; j++) {
6036 		if (stream_set[j])
6037 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6038 	}
6039 }
6040 
6041 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6042 {
6043 	int i = 0;
6044 	struct dc_stream_state *stream;
6045 
6046 	if (context->stream_count < 2)
6047 		return;
6048 	for (i = 0; i < context->stream_count ; i++) {
6049 		if (!context->streams[i])
6050 			continue;
6051 		/*
6052 		 * TODO: add a function to read AMD VSDB bits and set
6053 		 * crtc_sync_master.multi_sync_enabled flag
6054 		 * For now it's set to false
6055 		 */
6056 	}
6057 
6058 	set_master_stream(context->streams, context->stream_count);
6059 
6060 	for (i = 0; i < context->stream_count ; i++) {
6061 		stream = context->streams[i];
6062 
6063 		if (!stream)
6064 			continue;
6065 
6066 		set_multisync_trigger_params(stream);
6067 	}
6068 }
6069 
6070 #if defined(CONFIG_DRM_AMD_DC_DCN)
6071 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6072 							struct dc_sink *sink, struct dc_stream_state *stream,
6073 							struct dsc_dec_dpcd_caps *dsc_caps)
6074 {
6075 	stream->timing.flags.DSC = 0;
6076 
6077 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6078 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6079 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6080 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6081 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6082 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6083 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6084 				dsc_caps);
6085 	}
6086 }
6087 
6088 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6089 				    struct dc_sink *sink, struct dc_stream_state *stream,
6090 				    struct dsc_dec_dpcd_caps *dsc_caps,
6091 				    uint32_t max_dsc_target_bpp_limit_override)
6092 {
6093 	const struct dc_link_settings *verified_link_cap = NULL;
6094 	uint32_t link_bw_in_kbps;
6095 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6096 	struct dc *dc = sink->ctx->dc;
6097 	struct dc_dsc_bw_range bw_range = {0};
6098 	struct dc_dsc_config dsc_cfg = {0};
6099 
6100 	verified_link_cap = dc_link_get_link_cap(stream->link);
6101 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6102 	edp_min_bpp_x16 = 8 * 16;
6103 	edp_max_bpp_x16 = 8 * 16;
6104 
6105 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6106 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6107 
6108 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6109 		edp_min_bpp_x16 = edp_max_bpp_x16;
6110 
6111 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6112 				dc->debug.dsc_min_slice_height_override,
6113 				edp_min_bpp_x16, edp_max_bpp_x16,
6114 				dsc_caps,
6115 				&stream->timing,
6116 				&bw_range)) {
6117 
6118 		if (bw_range.max_kbps < link_bw_in_kbps) {
6119 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6120 					dsc_caps,
6121 					dc->debug.dsc_min_slice_height_override,
6122 					max_dsc_target_bpp_limit_override,
6123 					0,
6124 					&stream->timing,
6125 					&dsc_cfg)) {
6126 				stream->timing.dsc_cfg = dsc_cfg;
6127 				stream->timing.flags.DSC = 1;
6128 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6129 			}
6130 			return;
6131 		}
6132 	}
6133 
6134 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6135 				dsc_caps,
6136 				dc->debug.dsc_min_slice_height_override,
6137 				max_dsc_target_bpp_limit_override,
6138 				link_bw_in_kbps,
6139 				&stream->timing,
6140 				&dsc_cfg)) {
6141 		stream->timing.dsc_cfg = dsc_cfg;
6142 		stream->timing.flags.DSC = 1;
6143 	}
6144 }
6145 
6146 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6147 										struct dc_sink *sink, struct dc_stream_state *stream,
6148 										struct dsc_dec_dpcd_caps *dsc_caps)
6149 {
6150 	struct drm_connector *drm_connector = &aconnector->base;
6151 	uint32_t link_bandwidth_kbps;
6152 	uint32_t max_dsc_target_bpp_limit_override = 0;
6153 	struct dc *dc = sink->ctx->dc;
6154 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6155 	uint32_t dsc_max_supported_bw_in_kbps;
6156 
6157 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6158 							dc_link_get_link_cap(aconnector->dc_link));
6159 
6160 	if (stream->link && stream->link->local_sink)
6161 		max_dsc_target_bpp_limit_override =
6162 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6163 
6164 	/* Set DSC policy according to dsc_clock_en */
6165 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6166 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6167 
6168 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6169 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6170 
6171 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6172 
6173 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6174 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6175 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6176 						dsc_caps,
6177 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6178 						max_dsc_target_bpp_limit_override,
6179 						link_bandwidth_kbps,
6180 						&stream->timing,
6181 						&stream->timing.dsc_cfg)) {
6182 				stream->timing.flags.DSC = 1;
6183 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6184 								 __func__, drm_connector->name);
6185 			}
6186 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6187 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6188 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6189 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6190 
6191 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6192 					max_supported_bw_in_kbps > 0 &&
6193 					dsc_max_supported_bw_in_kbps > 0)
6194 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6195 						dsc_caps,
6196 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6197 						max_dsc_target_bpp_limit_override,
6198 						dsc_max_supported_bw_in_kbps,
6199 						&stream->timing,
6200 						&stream->timing.dsc_cfg)) {
6201 					stream->timing.flags.DSC = 1;
6202 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6203 									 __func__, drm_connector->name);
6204 				}
6205 		}
6206 	}
6207 
6208 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6209 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6210 		stream->timing.flags.DSC = 1;
6211 
6212 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6213 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6214 
6215 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6216 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6217 
6218 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6219 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6220 }
6221 #endif /* CONFIG_DRM_AMD_DC_DCN */
6222 
6223 /**
6224  * DOC: FreeSync Video
6225  *
6226  * When a userspace application wants to play a video, the content follows a
6227  * standard format definition that usually specifies the FPS for that format.
6228  * The below list illustrates some video format and the expected FPS,
6229  * respectively:
6230  *
6231  * - TV/NTSC (23.976 FPS)
6232  * - Cinema (24 FPS)
6233  * - TV/PAL (25 FPS)
6234  * - TV/NTSC (29.97 FPS)
6235  * - TV/NTSC (30 FPS)
6236  * - Cinema HFR (48 FPS)
6237  * - TV/PAL (50 FPS)
6238  * - Commonly used (60 FPS)
6239  * - Multiples of 24 (48,72,96,120 FPS)
6240  *
6241  * The list of standards video format is not huge and can be added to the
6242  * connector modeset list beforehand. With that, userspace can leverage
6243  * FreeSync to extends the front porch in order to attain the target refresh
6244  * rate. Such a switch will happen seamlessly, without screen blanking or
6245  * reprogramming of the output in any other way. If the userspace requests a
6246  * modesetting change compatible with FreeSync modes that only differ in the
6247  * refresh rate, DC will skip the full update and avoid blink during the
6248  * transition. For example, the video player can change the modesetting from
6249  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6250  * causing any display blink. This same concept can be applied to a mode
6251  * setting change.
6252  */
6253 static struct drm_display_mode *
6254 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6255 			  bool use_probed_modes)
6256 {
6257 	struct drm_display_mode *m, *m_pref = NULL;
6258 	u16 current_refresh, highest_refresh;
6259 	struct list_head *list_head = use_probed_modes ?
6260 						    &aconnector->base.probed_modes :
6261 						    &aconnector->base.modes;
6262 
6263 	if (aconnector->freesync_vid_base.clock != 0)
6264 		return &aconnector->freesync_vid_base;
6265 
6266 	/* Find the preferred mode */
6267 	list_for_each_entry (m, list_head, head) {
6268 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6269 			m_pref = m;
6270 			break;
6271 		}
6272 	}
6273 
6274 	if (!m_pref) {
6275 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6276 		m_pref = list_first_entry_or_null(
6277 			&aconnector->base.modes, struct drm_display_mode, head);
6278 		if (!m_pref) {
6279 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6280 			return NULL;
6281 		}
6282 	}
6283 
6284 	highest_refresh = drm_mode_vrefresh(m_pref);
6285 
6286 	/*
6287 	 * Find the mode with highest refresh rate with same resolution.
6288 	 * For some monitors, preferred mode is not the mode with highest
6289 	 * supported refresh rate.
6290 	 */
6291 	list_for_each_entry (m, list_head, head) {
6292 		current_refresh  = drm_mode_vrefresh(m);
6293 
6294 		if (m->hdisplay == m_pref->hdisplay &&
6295 		    m->vdisplay == m_pref->vdisplay &&
6296 		    highest_refresh < current_refresh) {
6297 			highest_refresh = current_refresh;
6298 			m_pref = m;
6299 		}
6300 	}
6301 
6302 	aconnector->freesync_vid_base = *m_pref;
6303 	return m_pref;
6304 }
6305 
6306 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6307 				   struct amdgpu_dm_connector *aconnector)
6308 {
6309 	struct drm_display_mode *high_mode;
6310 	int timing_diff;
6311 
6312 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6313 	if (!high_mode || !mode)
6314 		return false;
6315 
6316 	timing_diff = high_mode->vtotal - mode->vtotal;
6317 
6318 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6319 	    high_mode->hdisplay != mode->hdisplay ||
6320 	    high_mode->vdisplay != mode->vdisplay ||
6321 	    high_mode->hsync_start != mode->hsync_start ||
6322 	    high_mode->hsync_end != mode->hsync_end ||
6323 	    high_mode->htotal != mode->htotal ||
6324 	    high_mode->hskew != mode->hskew ||
6325 	    high_mode->vscan != mode->vscan ||
6326 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6327 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6328 		return false;
6329 	else
6330 		return true;
6331 }
6332 
6333 static struct dc_stream_state *
6334 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6335 		       const struct drm_display_mode *drm_mode,
6336 		       const struct dm_connector_state *dm_state,
6337 		       const struct dc_stream_state *old_stream,
6338 		       int requested_bpc)
6339 {
6340 	struct drm_display_mode *preferred_mode = NULL;
6341 	struct drm_connector *drm_connector;
6342 	const struct drm_connector_state *con_state =
6343 		dm_state ? &dm_state->base : NULL;
6344 	struct dc_stream_state *stream = NULL;
6345 	struct drm_display_mode mode = *drm_mode;
6346 	struct drm_display_mode saved_mode;
6347 	struct drm_display_mode *freesync_mode = NULL;
6348 	bool native_mode_found = false;
6349 	bool recalculate_timing = false;
6350 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6351 	int mode_refresh;
6352 	int preferred_refresh = 0;
6353 #if defined(CONFIG_DRM_AMD_DC_DCN)
6354 	struct dsc_dec_dpcd_caps dsc_caps;
6355 #endif
6356 	struct dc_sink *sink = NULL;
6357 
6358 	memset(&saved_mode, 0, sizeof(saved_mode));
6359 
6360 	if (aconnector == NULL) {
6361 		DRM_ERROR("aconnector is NULL!\n");
6362 		return stream;
6363 	}
6364 
6365 	drm_connector = &aconnector->base;
6366 
6367 	if (!aconnector->dc_sink) {
6368 		sink = create_fake_sink(aconnector);
6369 		if (!sink)
6370 			return stream;
6371 	} else {
6372 		sink = aconnector->dc_sink;
6373 		dc_sink_retain(sink);
6374 	}
6375 
6376 	stream = dc_create_stream_for_sink(sink);
6377 
6378 	if (stream == NULL) {
6379 		DRM_ERROR("Failed to create stream for sink!\n");
6380 		goto finish;
6381 	}
6382 
6383 	stream->dm_stream_context = aconnector;
6384 
6385 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6386 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6387 
6388 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6389 		/* Search for preferred mode */
6390 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6391 			native_mode_found = true;
6392 			break;
6393 		}
6394 	}
6395 	if (!native_mode_found)
6396 		preferred_mode = list_first_entry_or_null(
6397 				&aconnector->base.modes,
6398 				struct drm_display_mode,
6399 				head);
6400 
6401 	mode_refresh = drm_mode_vrefresh(&mode);
6402 
6403 	if (preferred_mode == NULL) {
6404 		/*
6405 		 * This may not be an error, the use case is when we have no
6406 		 * usermode calls to reset and set mode upon hotplug. In this
6407 		 * case, we call set mode ourselves to restore the previous mode
6408 		 * and the modelist may not be filled in in time.
6409 		 */
6410 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6411 	} else {
6412 		recalculate_timing = amdgpu_freesync_vid_mode &&
6413 				 is_freesync_video_mode(&mode, aconnector);
6414 		if (recalculate_timing) {
6415 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6416 			saved_mode = mode;
6417 			mode = *freesync_mode;
6418 		} else {
6419 			decide_crtc_timing_for_drm_display_mode(
6420 				&mode, preferred_mode, scale);
6421 
6422 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6423 		}
6424 	}
6425 
6426 	if (recalculate_timing)
6427 		drm_mode_set_crtcinfo(&saved_mode, 0);
6428 	else if (!dm_state)
6429 		drm_mode_set_crtcinfo(&mode, 0);
6430 
6431        /*
6432 	* If scaling is enabled and refresh rate didn't change
6433 	* we copy the vic and polarities of the old timings
6434 	*/
6435 	if (!scale || mode_refresh != preferred_refresh)
6436 		fill_stream_properties_from_drm_display_mode(
6437 			stream, &mode, &aconnector->base, con_state, NULL,
6438 			requested_bpc);
6439 	else
6440 		fill_stream_properties_from_drm_display_mode(
6441 			stream, &mode, &aconnector->base, con_state, old_stream,
6442 			requested_bpc);
6443 
6444 #if defined(CONFIG_DRM_AMD_DC_DCN)
6445 	/* SST DSC determination policy */
6446 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6447 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6448 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6449 #endif
6450 
6451 	update_stream_scaling_settings(&mode, dm_state, stream);
6452 
6453 	fill_audio_info(
6454 		&stream->audio_info,
6455 		drm_connector,
6456 		sink);
6457 
6458 	update_stream_signal(stream, sink);
6459 
6460 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6461 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6462 
6463 	if (stream->link->psr_settings.psr_feature_enabled) {
6464 		//
6465 		// should decide stream support vsc sdp colorimetry capability
6466 		// before building vsc info packet
6467 		//
6468 		stream->use_vsc_sdp_for_colorimetry = false;
6469 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6470 			stream->use_vsc_sdp_for_colorimetry =
6471 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6472 		} else {
6473 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6474 				stream->use_vsc_sdp_for_colorimetry = true;
6475 		}
6476 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6477 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6478 
6479 	}
6480 finish:
6481 	dc_sink_release(sink);
6482 
6483 	return stream;
6484 }
6485 
6486 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6487 {
6488 	drm_crtc_cleanup(crtc);
6489 	kfree(crtc);
6490 }
6491 
6492 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6493 				  struct drm_crtc_state *state)
6494 {
6495 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6496 
6497 	/* TODO Destroy dc_stream objects are stream object is flattened */
6498 	if (cur->stream)
6499 		dc_stream_release(cur->stream);
6500 
6501 
6502 	__drm_atomic_helper_crtc_destroy_state(state);
6503 
6504 
6505 	kfree(state);
6506 }
6507 
6508 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6509 {
6510 	struct dm_crtc_state *state;
6511 
6512 	if (crtc->state)
6513 		dm_crtc_destroy_state(crtc, crtc->state);
6514 
6515 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6516 	if (WARN_ON(!state))
6517 		return;
6518 
6519 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6520 }
6521 
6522 static struct drm_crtc_state *
6523 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6524 {
6525 	struct dm_crtc_state *state, *cur;
6526 
6527 	cur = to_dm_crtc_state(crtc->state);
6528 
6529 	if (WARN_ON(!crtc->state))
6530 		return NULL;
6531 
6532 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6533 	if (!state)
6534 		return NULL;
6535 
6536 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6537 
6538 	if (cur->stream) {
6539 		state->stream = cur->stream;
6540 		dc_stream_retain(state->stream);
6541 	}
6542 
6543 	state->active_planes = cur->active_planes;
6544 	state->vrr_infopacket = cur->vrr_infopacket;
6545 	state->abm_level = cur->abm_level;
6546 	state->vrr_supported = cur->vrr_supported;
6547 	state->freesync_config = cur->freesync_config;
6548 	state->cm_has_degamma = cur->cm_has_degamma;
6549 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6550 	state->force_dpms_off = cur->force_dpms_off;
6551 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6552 
6553 	return &state->base;
6554 }
6555 
6556 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6557 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6558 {
6559 	crtc_debugfs_init(crtc);
6560 
6561 	return 0;
6562 }
6563 #endif
6564 
6565 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6566 {
6567 	enum dc_irq_source irq_source;
6568 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6569 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6570 	int rc;
6571 
6572 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6573 
6574 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6575 
6576 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6577 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6578 	return rc;
6579 }
6580 
6581 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6582 {
6583 	enum dc_irq_source irq_source;
6584 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6585 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6586 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6587 #if defined(CONFIG_DRM_AMD_DC_DCN)
6588 	struct amdgpu_display_manager *dm = &adev->dm;
6589 	struct vblank_control_work *work;
6590 #endif
6591 	int rc = 0;
6592 
6593 	if (enable) {
6594 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6595 		if (amdgpu_dm_vrr_active(acrtc_state))
6596 			rc = dm_set_vupdate_irq(crtc, true);
6597 	} else {
6598 		/* vblank irq off -> vupdate irq off */
6599 		rc = dm_set_vupdate_irq(crtc, false);
6600 	}
6601 
6602 	if (rc)
6603 		return rc;
6604 
6605 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6606 
6607 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6608 		return -EBUSY;
6609 
6610 	if (amdgpu_in_reset(adev))
6611 		return 0;
6612 
6613 #if defined(CONFIG_DRM_AMD_DC_DCN)
6614 	if (dm->vblank_control_workqueue) {
6615 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6616 		if (!work)
6617 			return -ENOMEM;
6618 
6619 		INIT_WORK(&work->work, vblank_control_worker);
6620 		work->dm = dm;
6621 		work->acrtc = acrtc;
6622 		work->enable = enable;
6623 
6624 		if (acrtc_state->stream) {
6625 			dc_stream_retain(acrtc_state->stream);
6626 			work->stream = acrtc_state->stream;
6627 		}
6628 
6629 		queue_work(dm->vblank_control_workqueue, &work->work);
6630 	}
6631 #endif
6632 
6633 	return 0;
6634 }
6635 
6636 static int dm_enable_vblank(struct drm_crtc *crtc)
6637 {
6638 	return dm_set_vblank(crtc, true);
6639 }
6640 
6641 static void dm_disable_vblank(struct drm_crtc *crtc)
6642 {
6643 	dm_set_vblank(crtc, false);
6644 }
6645 
6646 /* Implemented only the options currently availible for the driver */
6647 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6648 	.reset = dm_crtc_reset_state,
6649 	.destroy = amdgpu_dm_crtc_destroy,
6650 	.set_config = drm_atomic_helper_set_config,
6651 	.page_flip = drm_atomic_helper_page_flip,
6652 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6653 	.atomic_destroy_state = dm_crtc_destroy_state,
6654 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6655 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6656 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6657 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6658 	.enable_vblank = dm_enable_vblank,
6659 	.disable_vblank = dm_disable_vblank,
6660 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6661 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6662 	.late_register = amdgpu_dm_crtc_late_register,
6663 #endif
6664 };
6665 
6666 static enum drm_connector_status
6667 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6668 {
6669 	bool connected;
6670 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6671 
6672 	/*
6673 	 * Notes:
6674 	 * 1. This interface is NOT called in context of HPD irq.
6675 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6676 	 * makes it a bad place for *any* MST-related activity.
6677 	 */
6678 
6679 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6680 	    !aconnector->fake_enable)
6681 		connected = (aconnector->dc_sink != NULL);
6682 	else
6683 		connected = (aconnector->base.force == DRM_FORCE_ON);
6684 
6685 	update_subconnector_property(aconnector);
6686 
6687 	return (connected ? connector_status_connected :
6688 			connector_status_disconnected);
6689 }
6690 
6691 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6692 					    struct drm_connector_state *connector_state,
6693 					    struct drm_property *property,
6694 					    uint64_t val)
6695 {
6696 	struct drm_device *dev = connector->dev;
6697 	struct amdgpu_device *adev = drm_to_adev(dev);
6698 	struct dm_connector_state *dm_old_state =
6699 		to_dm_connector_state(connector->state);
6700 	struct dm_connector_state *dm_new_state =
6701 		to_dm_connector_state(connector_state);
6702 
6703 	int ret = -EINVAL;
6704 
6705 	if (property == dev->mode_config.scaling_mode_property) {
6706 		enum amdgpu_rmx_type rmx_type;
6707 
6708 		switch (val) {
6709 		case DRM_MODE_SCALE_CENTER:
6710 			rmx_type = RMX_CENTER;
6711 			break;
6712 		case DRM_MODE_SCALE_ASPECT:
6713 			rmx_type = RMX_ASPECT;
6714 			break;
6715 		case DRM_MODE_SCALE_FULLSCREEN:
6716 			rmx_type = RMX_FULL;
6717 			break;
6718 		case DRM_MODE_SCALE_NONE:
6719 		default:
6720 			rmx_type = RMX_OFF;
6721 			break;
6722 		}
6723 
6724 		if (dm_old_state->scaling == rmx_type)
6725 			return 0;
6726 
6727 		dm_new_state->scaling = rmx_type;
6728 		ret = 0;
6729 	} else if (property == adev->mode_info.underscan_hborder_property) {
6730 		dm_new_state->underscan_hborder = val;
6731 		ret = 0;
6732 	} else if (property == adev->mode_info.underscan_vborder_property) {
6733 		dm_new_state->underscan_vborder = val;
6734 		ret = 0;
6735 	} else if (property == adev->mode_info.underscan_property) {
6736 		dm_new_state->underscan_enable = val;
6737 		ret = 0;
6738 	} else if (property == adev->mode_info.abm_level_property) {
6739 		dm_new_state->abm_level = val;
6740 		ret = 0;
6741 	}
6742 
6743 	return ret;
6744 }
6745 
6746 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6747 					    const struct drm_connector_state *state,
6748 					    struct drm_property *property,
6749 					    uint64_t *val)
6750 {
6751 	struct drm_device *dev = connector->dev;
6752 	struct amdgpu_device *adev = drm_to_adev(dev);
6753 	struct dm_connector_state *dm_state =
6754 		to_dm_connector_state(state);
6755 	int ret = -EINVAL;
6756 
6757 	if (property == dev->mode_config.scaling_mode_property) {
6758 		switch (dm_state->scaling) {
6759 		case RMX_CENTER:
6760 			*val = DRM_MODE_SCALE_CENTER;
6761 			break;
6762 		case RMX_ASPECT:
6763 			*val = DRM_MODE_SCALE_ASPECT;
6764 			break;
6765 		case RMX_FULL:
6766 			*val = DRM_MODE_SCALE_FULLSCREEN;
6767 			break;
6768 		case RMX_OFF:
6769 		default:
6770 			*val = DRM_MODE_SCALE_NONE;
6771 			break;
6772 		}
6773 		ret = 0;
6774 	} else if (property == adev->mode_info.underscan_hborder_property) {
6775 		*val = dm_state->underscan_hborder;
6776 		ret = 0;
6777 	} else if (property == adev->mode_info.underscan_vborder_property) {
6778 		*val = dm_state->underscan_vborder;
6779 		ret = 0;
6780 	} else if (property == adev->mode_info.underscan_property) {
6781 		*val = dm_state->underscan_enable;
6782 		ret = 0;
6783 	} else if (property == adev->mode_info.abm_level_property) {
6784 		*val = dm_state->abm_level;
6785 		ret = 0;
6786 	}
6787 
6788 	return ret;
6789 }
6790 
6791 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6792 {
6793 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6794 
6795 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6796 }
6797 
6798 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6799 {
6800 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6801 	const struct dc_link *link = aconnector->dc_link;
6802 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6803 	struct amdgpu_display_manager *dm = &adev->dm;
6804 	int i;
6805 
6806 	/*
6807 	 * Call only if mst_mgr was iniitalized before since it's not done
6808 	 * for all connector types.
6809 	 */
6810 	if (aconnector->mst_mgr.dev)
6811 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6812 
6813 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6814 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6815 	for (i = 0; i < dm->num_of_edps; i++) {
6816 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6817 			backlight_device_unregister(dm->backlight_dev[i]);
6818 			dm->backlight_dev[i] = NULL;
6819 		}
6820 	}
6821 #endif
6822 
6823 	if (aconnector->dc_em_sink)
6824 		dc_sink_release(aconnector->dc_em_sink);
6825 	aconnector->dc_em_sink = NULL;
6826 	if (aconnector->dc_sink)
6827 		dc_sink_release(aconnector->dc_sink);
6828 	aconnector->dc_sink = NULL;
6829 
6830 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6831 	drm_connector_unregister(connector);
6832 	drm_connector_cleanup(connector);
6833 	if (aconnector->i2c) {
6834 		i2c_del_adapter(&aconnector->i2c->base);
6835 		kfree(aconnector->i2c);
6836 	}
6837 	kfree(aconnector->dm_dp_aux.aux.name);
6838 
6839 	kfree(connector);
6840 }
6841 
6842 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6843 {
6844 	struct dm_connector_state *state =
6845 		to_dm_connector_state(connector->state);
6846 
6847 	if (connector->state)
6848 		__drm_atomic_helper_connector_destroy_state(connector->state);
6849 
6850 	kfree(state);
6851 
6852 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6853 
6854 	if (state) {
6855 		state->scaling = RMX_OFF;
6856 		state->underscan_enable = false;
6857 		state->underscan_hborder = 0;
6858 		state->underscan_vborder = 0;
6859 		state->base.max_requested_bpc = 8;
6860 		state->vcpi_slots = 0;
6861 		state->pbn = 0;
6862 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6863 			state->abm_level = amdgpu_dm_abm_level;
6864 
6865 		__drm_atomic_helper_connector_reset(connector, &state->base);
6866 	}
6867 }
6868 
6869 struct drm_connector_state *
6870 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6871 {
6872 	struct dm_connector_state *state =
6873 		to_dm_connector_state(connector->state);
6874 
6875 	struct dm_connector_state *new_state =
6876 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6877 
6878 	if (!new_state)
6879 		return NULL;
6880 
6881 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6882 
6883 	new_state->freesync_capable = state->freesync_capable;
6884 	new_state->abm_level = state->abm_level;
6885 	new_state->scaling = state->scaling;
6886 	new_state->underscan_enable = state->underscan_enable;
6887 	new_state->underscan_hborder = state->underscan_hborder;
6888 	new_state->underscan_vborder = state->underscan_vborder;
6889 	new_state->vcpi_slots = state->vcpi_slots;
6890 	new_state->pbn = state->pbn;
6891 	return &new_state->base;
6892 }
6893 
6894 static int
6895 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6896 {
6897 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6898 		to_amdgpu_dm_connector(connector);
6899 	int r;
6900 
6901 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6902 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6903 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6904 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6905 		if (r)
6906 			return r;
6907 	}
6908 
6909 #if defined(CONFIG_DEBUG_FS)
6910 	connector_debugfs_init(amdgpu_dm_connector);
6911 #endif
6912 
6913 	return 0;
6914 }
6915 
6916 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6917 	.reset = amdgpu_dm_connector_funcs_reset,
6918 	.detect = amdgpu_dm_connector_detect,
6919 	.fill_modes = drm_helper_probe_single_connector_modes,
6920 	.destroy = amdgpu_dm_connector_destroy,
6921 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6922 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6923 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6924 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6925 	.late_register = amdgpu_dm_connector_late_register,
6926 	.early_unregister = amdgpu_dm_connector_unregister
6927 };
6928 
6929 static int get_modes(struct drm_connector *connector)
6930 {
6931 	return amdgpu_dm_connector_get_modes(connector);
6932 }
6933 
6934 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6935 {
6936 	struct dc_sink_init_data init_params = {
6937 			.link = aconnector->dc_link,
6938 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6939 	};
6940 	struct edid *edid;
6941 
6942 	if (!aconnector->base.edid_blob_ptr) {
6943 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6944 				aconnector->base.name);
6945 
6946 		aconnector->base.force = DRM_FORCE_OFF;
6947 		aconnector->base.override_edid = false;
6948 		return;
6949 	}
6950 
6951 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6952 
6953 	aconnector->edid = edid;
6954 
6955 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6956 		aconnector->dc_link,
6957 		(uint8_t *)edid,
6958 		(edid->extensions + 1) * EDID_LENGTH,
6959 		&init_params);
6960 
6961 	if (aconnector->base.force == DRM_FORCE_ON) {
6962 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6963 		aconnector->dc_link->local_sink :
6964 		aconnector->dc_em_sink;
6965 		dc_sink_retain(aconnector->dc_sink);
6966 	}
6967 }
6968 
6969 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6970 {
6971 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6972 
6973 	/*
6974 	 * In case of headless boot with force on for DP managed connector
6975 	 * Those settings have to be != 0 to get initial modeset
6976 	 */
6977 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6978 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6979 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6980 	}
6981 
6982 
6983 	aconnector->base.override_edid = true;
6984 	create_eml_sink(aconnector);
6985 }
6986 
6987 static struct dc_stream_state *
6988 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6989 				const struct drm_display_mode *drm_mode,
6990 				const struct dm_connector_state *dm_state,
6991 				const struct dc_stream_state *old_stream)
6992 {
6993 	struct drm_connector *connector = &aconnector->base;
6994 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6995 	struct dc_stream_state *stream;
6996 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6997 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6998 	enum dc_status dc_result = DC_OK;
6999 
7000 	do {
7001 		stream = create_stream_for_sink(aconnector, drm_mode,
7002 						dm_state, old_stream,
7003 						requested_bpc);
7004 		if (stream == NULL) {
7005 			DRM_ERROR("Failed to create stream for sink!\n");
7006 			break;
7007 		}
7008 
7009 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7010 
7011 		if (dc_result != DC_OK) {
7012 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7013 				      drm_mode->hdisplay,
7014 				      drm_mode->vdisplay,
7015 				      drm_mode->clock,
7016 				      dc_result,
7017 				      dc_status_to_str(dc_result));
7018 
7019 			dc_stream_release(stream);
7020 			stream = NULL;
7021 			requested_bpc -= 2; /* lower bpc to retry validation */
7022 		}
7023 
7024 	} while (stream == NULL && requested_bpc >= 6);
7025 
7026 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7027 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7028 
7029 		aconnector->force_yuv420_output = true;
7030 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7031 						dm_state, old_stream);
7032 		aconnector->force_yuv420_output = false;
7033 	}
7034 
7035 	return stream;
7036 }
7037 
7038 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7039 				   struct drm_display_mode *mode)
7040 {
7041 	int result = MODE_ERROR;
7042 	struct dc_sink *dc_sink;
7043 	/* TODO: Unhardcode stream count */
7044 	struct dc_stream_state *stream;
7045 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7046 
7047 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7048 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7049 		return result;
7050 
7051 	/*
7052 	 * Only run this the first time mode_valid is called to initilialize
7053 	 * EDID mgmt
7054 	 */
7055 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7056 		!aconnector->dc_em_sink)
7057 		handle_edid_mgmt(aconnector);
7058 
7059 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7060 
7061 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7062 				aconnector->base.force != DRM_FORCE_ON) {
7063 		DRM_ERROR("dc_sink is NULL!\n");
7064 		goto fail;
7065 	}
7066 
7067 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7068 	if (stream) {
7069 		dc_stream_release(stream);
7070 		result = MODE_OK;
7071 	}
7072 
7073 fail:
7074 	/* TODO: error handling*/
7075 	return result;
7076 }
7077 
7078 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7079 				struct dc_info_packet *out)
7080 {
7081 	struct hdmi_drm_infoframe frame;
7082 	unsigned char buf[30]; /* 26 + 4 */
7083 	ssize_t len;
7084 	int ret, i;
7085 
7086 	memset(out, 0, sizeof(*out));
7087 
7088 	if (!state->hdr_output_metadata)
7089 		return 0;
7090 
7091 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7092 	if (ret)
7093 		return ret;
7094 
7095 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7096 	if (len < 0)
7097 		return (int)len;
7098 
7099 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7100 	if (len != 30)
7101 		return -EINVAL;
7102 
7103 	/* Prepare the infopacket for DC. */
7104 	switch (state->connector->connector_type) {
7105 	case DRM_MODE_CONNECTOR_HDMIA:
7106 		out->hb0 = 0x87; /* type */
7107 		out->hb1 = 0x01; /* version */
7108 		out->hb2 = 0x1A; /* length */
7109 		out->sb[0] = buf[3]; /* checksum */
7110 		i = 1;
7111 		break;
7112 
7113 	case DRM_MODE_CONNECTOR_DisplayPort:
7114 	case DRM_MODE_CONNECTOR_eDP:
7115 		out->hb0 = 0x00; /* sdp id, zero */
7116 		out->hb1 = 0x87; /* type */
7117 		out->hb2 = 0x1D; /* payload len - 1 */
7118 		out->hb3 = (0x13 << 2); /* sdp version */
7119 		out->sb[0] = 0x01; /* version */
7120 		out->sb[1] = 0x1A; /* length */
7121 		i = 2;
7122 		break;
7123 
7124 	default:
7125 		return -EINVAL;
7126 	}
7127 
7128 	memcpy(&out->sb[i], &buf[4], 26);
7129 	out->valid = true;
7130 
7131 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7132 		       sizeof(out->sb), false);
7133 
7134 	return 0;
7135 }
7136 
7137 static int
7138 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7139 				 struct drm_atomic_state *state)
7140 {
7141 	struct drm_connector_state *new_con_state =
7142 		drm_atomic_get_new_connector_state(state, conn);
7143 	struct drm_connector_state *old_con_state =
7144 		drm_atomic_get_old_connector_state(state, conn);
7145 	struct drm_crtc *crtc = new_con_state->crtc;
7146 	struct drm_crtc_state *new_crtc_state;
7147 	int ret;
7148 
7149 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7150 
7151 	if (!crtc)
7152 		return 0;
7153 
7154 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7155 		struct dc_info_packet hdr_infopacket;
7156 
7157 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7158 		if (ret)
7159 			return ret;
7160 
7161 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7162 		if (IS_ERR(new_crtc_state))
7163 			return PTR_ERR(new_crtc_state);
7164 
7165 		/*
7166 		 * DC considers the stream backends changed if the
7167 		 * static metadata changes. Forcing the modeset also
7168 		 * gives a simple way for userspace to switch from
7169 		 * 8bpc to 10bpc when setting the metadata to enter
7170 		 * or exit HDR.
7171 		 *
7172 		 * Changing the static metadata after it's been
7173 		 * set is permissible, however. So only force a
7174 		 * modeset if we're entering or exiting HDR.
7175 		 */
7176 		new_crtc_state->mode_changed =
7177 			!old_con_state->hdr_output_metadata ||
7178 			!new_con_state->hdr_output_metadata;
7179 	}
7180 
7181 	return 0;
7182 }
7183 
7184 static const struct drm_connector_helper_funcs
7185 amdgpu_dm_connector_helper_funcs = {
7186 	/*
7187 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7188 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7189 	 * are missing after user start lightdm. So we need to renew modes list.
7190 	 * in get_modes call back, not just return the modes count
7191 	 */
7192 	.get_modes = get_modes,
7193 	.mode_valid = amdgpu_dm_connector_mode_valid,
7194 	.atomic_check = amdgpu_dm_connector_atomic_check,
7195 };
7196 
7197 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7198 {
7199 }
7200 
7201 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7202 {
7203 	struct drm_atomic_state *state = new_crtc_state->state;
7204 	struct drm_plane *plane;
7205 	int num_active = 0;
7206 
7207 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7208 		struct drm_plane_state *new_plane_state;
7209 
7210 		/* Cursor planes are "fake". */
7211 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7212 			continue;
7213 
7214 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7215 
7216 		if (!new_plane_state) {
7217 			/*
7218 			 * The plane is enable on the CRTC and hasn't changed
7219 			 * state. This means that it previously passed
7220 			 * validation and is therefore enabled.
7221 			 */
7222 			num_active += 1;
7223 			continue;
7224 		}
7225 
7226 		/* We need a framebuffer to be considered enabled. */
7227 		num_active += (new_plane_state->fb != NULL);
7228 	}
7229 
7230 	return num_active;
7231 }
7232 
7233 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7234 					 struct drm_crtc_state *new_crtc_state)
7235 {
7236 	struct dm_crtc_state *dm_new_crtc_state =
7237 		to_dm_crtc_state(new_crtc_state);
7238 
7239 	dm_new_crtc_state->active_planes = 0;
7240 
7241 	if (!dm_new_crtc_state->stream)
7242 		return;
7243 
7244 	dm_new_crtc_state->active_planes =
7245 		count_crtc_active_planes(new_crtc_state);
7246 }
7247 
7248 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7249 				       struct drm_atomic_state *state)
7250 {
7251 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7252 									  crtc);
7253 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7254 	struct dc *dc = adev->dm.dc;
7255 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7256 	int ret = -EINVAL;
7257 
7258 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7259 
7260 	dm_update_crtc_active_planes(crtc, crtc_state);
7261 
7262 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7263 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7264 		return ret;
7265 	}
7266 
7267 	/*
7268 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7269 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7270 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7271 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7272 	 */
7273 	if (crtc_state->enable &&
7274 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7275 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7276 		return -EINVAL;
7277 	}
7278 
7279 	/* In some use cases, like reset, no stream is attached */
7280 	if (!dm_crtc_state->stream)
7281 		return 0;
7282 
7283 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7284 		return 0;
7285 
7286 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7287 	return ret;
7288 }
7289 
7290 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7291 				      const struct drm_display_mode *mode,
7292 				      struct drm_display_mode *adjusted_mode)
7293 {
7294 	return true;
7295 }
7296 
7297 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7298 	.disable = dm_crtc_helper_disable,
7299 	.atomic_check = dm_crtc_helper_atomic_check,
7300 	.mode_fixup = dm_crtc_helper_mode_fixup,
7301 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7302 };
7303 
7304 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7305 {
7306 
7307 }
7308 
7309 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7310 {
7311 	switch (display_color_depth) {
7312 		case COLOR_DEPTH_666:
7313 			return 6;
7314 		case COLOR_DEPTH_888:
7315 			return 8;
7316 		case COLOR_DEPTH_101010:
7317 			return 10;
7318 		case COLOR_DEPTH_121212:
7319 			return 12;
7320 		case COLOR_DEPTH_141414:
7321 			return 14;
7322 		case COLOR_DEPTH_161616:
7323 			return 16;
7324 		default:
7325 			break;
7326 		}
7327 	return 0;
7328 }
7329 
7330 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7331 					  struct drm_crtc_state *crtc_state,
7332 					  struct drm_connector_state *conn_state)
7333 {
7334 	struct drm_atomic_state *state = crtc_state->state;
7335 	struct drm_connector *connector = conn_state->connector;
7336 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7337 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7338 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7339 	struct drm_dp_mst_topology_mgr *mst_mgr;
7340 	struct drm_dp_mst_port *mst_port;
7341 	enum dc_color_depth color_depth;
7342 	int clock, bpp = 0;
7343 	bool is_y420 = false;
7344 
7345 	if (!aconnector->port || !aconnector->dc_sink)
7346 		return 0;
7347 
7348 	mst_port = aconnector->port;
7349 	mst_mgr = &aconnector->mst_port->mst_mgr;
7350 
7351 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7352 		return 0;
7353 
7354 	if (!state->duplicated) {
7355 		int max_bpc = conn_state->max_requested_bpc;
7356 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7357 				aconnector->force_yuv420_output;
7358 		color_depth = convert_color_depth_from_display_info(connector,
7359 								    is_y420,
7360 								    max_bpc);
7361 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7362 		clock = adjusted_mode->clock;
7363 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7364 	}
7365 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7366 									   mst_mgr,
7367 									   mst_port,
7368 									   dm_new_connector_state->pbn,
7369 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7370 	if (dm_new_connector_state->vcpi_slots < 0) {
7371 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7372 		return dm_new_connector_state->vcpi_slots;
7373 	}
7374 	return 0;
7375 }
7376 
7377 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7378 	.disable = dm_encoder_helper_disable,
7379 	.atomic_check = dm_encoder_helper_atomic_check
7380 };
7381 
7382 #if defined(CONFIG_DRM_AMD_DC_DCN)
7383 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7384 					    struct dc_state *dc_state,
7385 					    struct dsc_mst_fairness_vars *vars)
7386 {
7387 	struct dc_stream_state *stream = NULL;
7388 	struct drm_connector *connector;
7389 	struct drm_connector_state *new_con_state;
7390 	struct amdgpu_dm_connector *aconnector;
7391 	struct dm_connector_state *dm_conn_state;
7392 	int i, j;
7393 	int vcpi, pbn_div, pbn, slot_num = 0;
7394 
7395 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7396 
7397 		aconnector = to_amdgpu_dm_connector(connector);
7398 
7399 		if (!aconnector->port)
7400 			continue;
7401 
7402 		if (!new_con_state || !new_con_state->crtc)
7403 			continue;
7404 
7405 		dm_conn_state = to_dm_connector_state(new_con_state);
7406 
7407 		for (j = 0; j < dc_state->stream_count; j++) {
7408 			stream = dc_state->streams[j];
7409 			if (!stream)
7410 				continue;
7411 
7412 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7413 				break;
7414 
7415 			stream = NULL;
7416 		}
7417 
7418 		if (!stream)
7419 			continue;
7420 
7421 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7422 		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
7423 		for (j = 0; j < dc_state->stream_count; j++) {
7424 			if (vars[j].aconnector == aconnector) {
7425 				pbn = vars[j].pbn;
7426 				break;
7427 			}
7428 		}
7429 
7430 		if (j == dc_state->stream_count)
7431 			continue;
7432 
7433 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7434 
7435 		if (stream->timing.flags.DSC != 1) {
7436 			dm_conn_state->pbn = pbn;
7437 			dm_conn_state->vcpi_slots = slot_num;
7438 
7439 			drm_dp_mst_atomic_enable_dsc(state,
7440 						     aconnector->port,
7441 						     dm_conn_state->pbn,
7442 						     0,
7443 						     false);
7444 			continue;
7445 		}
7446 
7447 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7448 						    aconnector->port,
7449 						    pbn, pbn_div,
7450 						    true);
7451 		if (vcpi < 0)
7452 			return vcpi;
7453 
7454 		dm_conn_state->pbn = pbn;
7455 		dm_conn_state->vcpi_slots = vcpi;
7456 	}
7457 	return 0;
7458 }
7459 #endif
7460 
7461 static void dm_drm_plane_reset(struct drm_plane *plane)
7462 {
7463 	struct dm_plane_state *amdgpu_state = NULL;
7464 
7465 	if (plane->state)
7466 		plane->funcs->atomic_destroy_state(plane, plane->state);
7467 
7468 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7469 	WARN_ON(amdgpu_state == NULL);
7470 
7471 	if (amdgpu_state)
7472 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7473 }
7474 
7475 static struct drm_plane_state *
7476 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7477 {
7478 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7479 
7480 	old_dm_plane_state = to_dm_plane_state(plane->state);
7481 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7482 	if (!dm_plane_state)
7483 		return NULL;
7484 
7485 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7486 
7487 	if (old_dm_plane_state->dc_state) {
7488 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7489 		dc_plane_state_retain(dm_plane_state->dc_state);
7490 	}
7491 
7492 	return &dm_plane_state->base;
7493 }
7494 
7495 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7496 				struct drm_plane_state *state)
7497 {
7498 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7499 
7500 	if (dm_plane_state->dc_state)
7501 		dc_plane_state_release(dm_plane_state->dc_state);
7502 
7503 	drm_atomic_helper_plane_destroy_state(plane, state);
7504 }
7505 
7506 static const struct drm_plane_funcs dm_plane_funcs = {
7507 	.update_plane	= drm_atomic_helper_update_plane,
7508 	.disable_plane	= drm_atomic_helper_disable_plane,
7509 	.destroy	= drm_primary_helper_destroy,
7510 	.reset = dm_drm_plane_reset,
7511 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7512 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7513 	.format_mod_supported = dm_plane_format_mod_supported,
7514 };
7515 
7516 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7517 				      struct drm_plane_state *new_state)
7518 {
7519 	struct amdgpu_framebuffer *afb;
7520 	struct drm_gem_object *obj;
7521 	struct amdgpu_device *adev;
7522 	struct amdgpu_bo *rbo;
7523 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7524 	struct list_head list;
7525 	struct ttm_validate_buffer tv;
7526 	struct ww_acquire_ctx ticket;
7527 	uint32_t domain;
7528 	int r;
7529 
7530 	if (!new_state->fb) {
7531 		DRM_DEBUG_KMS("No FB bound\n");
7532 		return 0;
7533 	}
7534 
7535 	afb = to_amdgpu_framebuffer(new_state->fb);
7536 	obj = new_state->fb->obj[0];
7537 	rbo = gem_to_amdgpu_bo(obj);
7538 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7539 	INIT_LIST_HEAD(&list);
7540 
7541 	tv.bo = &rbo->tbo;
7542 	tv.num_shared = 1;
7543 	list_add(&tv.head, &list);
7544 
7545 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7546 	if (r) {
7547 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7548 		return r;
7549 	}
7550 
7551 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7552 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7553 	else
7554 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7555 
7556 	r = amdgpu_bo_pin(rbo, domain);
7557 	if (unlikely(r != 0)) {
7558 		if (r != -ERESTARTSYS)
7559 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7560 		ttm_eu_backoff_reservation(&ticket, &list);
7561 		return r;
7562 	}
7563 
7564 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7565 	if (unlikely(r != 0)) {
7566 		amdgpu_bo_unpin(rbo);
7567 		ttm_eu_backoff_reservation(&ticket, &list);
7568 		DRM_ERROR("%p bind failed\n", rbo);
7569 		return r;
7570 	}
7571 
7572 	ttm_eu_backoff_reservation(&ticket, &list);
7573 
7574 	afb->address = amdgpu_bo_gpu_offset(rbo);
7575 
7576 	amdgpu_bo_ref(rbo);
7577 
7578 	/**
7579 	 * We don't do surface updates on planes that have been newly created,
7580 	 * but we also don't have the afb->address during atomic check.
7581 	 *
7582 	 * Fill in buffer attributes depending on the address here, but only on
7583 	 * newly created planes since they're not being used by DC yet and this
7584 	 * won't modify global state.
7585 	 */
7586 	dm_plane_state_old = to_dm_plane_state(plane->state);
7587 	dm_plane_state_new = to_dm_plane_state(new_state);
7588 
7589 	if (dm_plane_state_new->dc_state &&
7590 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7591 		struct dc_plane_state *plane_state =
7592 			dm_plane_state_new->dc_state;
7593 		bool force_disable_dcc = !plane_state->dcc.enable;
7594 
7595 		fill_plane_buffer_attributes(
7596 			adev, afb, plane_state->format, plane_state->rotation,
7597 			afb->tiling_flags,
7598 			&plane_state->tiling_info, &plane_state->plane_size,
7599 			&plane_state->dcc, &plane_state->address,
7600 			afb->tmz_surface, force_disable_dcc);
7601 	}
7602 
7603 	return 0;
7604 }
7605 
7606 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7607 				       struct drm_plane_state *old_state)
7608 {
7609 	struct amdgpu_bo *rbo;
7610 	int r;
7611 
7612 	if (!old_state->fb)
7613 		return;
7614 
7615 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7616 	r = amdgpu_bo_reserve(rbo, false);
7617 	if (unlikely(r)) {
7618 		DRM_ERROR("failed to reserve rbo before unpin\n");
7619 		return;
7620 	}
7621 
7622 	amdgpu_bo_unpin(rbo);
7623 	amdgpu_bo_unreserve(rbo);
7624 	amdgpu_bo_unref(&rbo);
7625 }
7626 
7627 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7628 				       struct drm_crtc_state *new_crtc_state)
7629 {
7630 	struct drm_framebuffer *fb = state->fb;
7631 	int min_downscale, max_upscale;
7632 	int min_scale = 0;
7633 	int max_scale = INT_MAX;
7634 
7635 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7636 	if (fb && state->crtc) {
7637 		/* Validate viewport to cover the case when only the position changes */
7638 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7639 			int viewport_width = state->crtc_w;
7640 			int viewport_height = state->crtc_h;
7641 
7642 			if (state->crtc_x < 0)
7643 				viewport_width += state->crtc_x;
7644 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7645 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7646 
7647 			if (state->crtc_y < 0)
7648 				viewport_height += state->crtc_y;
7649 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7650 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7651 
7652 			if (viewport_width < 0 || viewport_height < 0) {
7653 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7654 				return -EINVAL;
7655 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7656 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7657 				return -EINVAL;
7658 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7659 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7660 				return -EINVAL;
7661 			}
7662 
7663 		}
7664 
7665 		/* Get min/max allowed scaling factors from plane caps. */
7666 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7667 					     &min_downscale, &max_upscale);
7668 		/*
7669 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7670 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7671 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7672 		 */
7673 		min_scale = (1000 << 16) / max_upscale;
7674 		max_scale = (1000 << 16) / min_downscale;
7675 	}
7676 
7677 	return drm_atomic_helper_check_plane_state(
7678 		state, new_crtc_state, min_scale, max_scale, true, true);
7679 }
7680 
7681 static int dm_plane_atomic_check(struct drm_plane *plane,
7682 				 struct drm_atomic_state *state)
7683 {
7684 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7685 										 plane);
7686 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7687 	struct dc *dc = adev->dm.dc;
7688 	struct dm_plane_state *dm_plane_state;
7689 	struct dc_scaling_info scaling_info;
7690 	struct drm_crtc_state *new_crtc_state;
7691 	int ret;
7692 
7693 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7694 
7695 	dm_plane_state = to_dm_plane_state(new_plane_state);
7696 
7697 	if (!dm_plane_state->dc_state)
7698 		return 0;
7699 
7700 	new_crtc_state =
7701 		drm_atomic_get_new_crtc_state(state,
7702 					      new_plane_state->crtc);
7703 	if (!new_crtc_state)
7704 		return -EINVAL;
7705 
7706 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7707 	if (ret)
7708 		return ret;
7709 
7710 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7711 	if (ret)
7712 		return ret;
7713 
7714 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7715 		return 0;
7716 
7717 	return -EINVAL;
7718 }
7719 
7720 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7721 				       struct drm_atomic_state *state)
7722 {
7723 	/* Only support async updates on cursor planes. */
7724 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7725 		return -EINVAL;
7726 
7727 	return 0;
7728 }
7729 
7730 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7731 					 struct drm_atomic_state *state)
7732 {
7733 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7734 									   plane);
7735 	struct drm_plane_state *old_state =
7736 		drm_atomic_get_old_plane_state(state, plane);
7737 
7738 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7739 
7740 	swap(plane->state->fb, new_state->fb);
7741 
7742 	plane->state->src_x = new_state->src_x;
7743 	plane->state->src_y = new_state->src_y;
7744 	plane->state->src_w = new_state->src_w;
7745 	plane->state->src_h = new_state->src_h;
7746 	plane->state->crtc_x = new_state->crtc_x;
7747 	plane->state->crtc_y = new_state->crtc_y;
7748 	plane->state->crtc_w = new_state->crtc_w;
7749 	plane->state->crtc_h = new_state->crtc_h;
7750 
7751 	handle_cursor_update(plane, old_state);
7752 }
7753 
7754 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7755 	.prepare_fb = dm_plane_helper_prepare_fb,
7756 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7757 	.atomic_check = dm_plane_atomic_check,
7758 	.atomic_async_check = dm_plane_atomic_async_check,
7759 	.atomic_async_update = dm_plane_atomic_async_update
7760 };
7761 
7762 /*
7763  * TODO: these are currently initialized to rgb formats only.
7764  * For future use cases we should either initialize them dynamically based on
7765  * plane capabilities, or initialize this array to all formats, so internal drm
7766  * check will succeed, and let DC implement proper check
7767  */
7768 static const uint32_t rgb_formats[] = {
7769 	DRM_FORMAT_XRGB8888,
7770 	DRM_FORMAT_ARGB8888,
7771 	DRM_FORMAT_RGBA8888,
7772 	DRM_FORMAT_XRGB2101010,
7773 	DRM_FORMAT_XBGR2101010,
7774 	DRM_FORMAT_ARGB2101010,
7775 	DRM_FORMAT_ABGR2101010,
7776 	DRM_FORMAT_XRGB16161616,
7777 	DRM_FORMAT_XBGR16161616,
7778 	DRM_FORMAT_ARGB16161616,
7779 	DRM_FORMAT_ABGR16161616,
7780 	DRM_FORMAT_XBGR8888,
7781 	DRM_FORMAT_ABGR8888,
7782 	DRM_FORMAT_RGB565,
7783 };
7784 
7785 static const uint32_t overlay_formats[] = {
7786 	DRM_FORMAT_XRGB8888,
7787 	DRM_FORMAT_ARGB8888,
7788 	DRM_FORMAT_RGBA8888,
7789 	DRM_FORMAT_XBGR8888,
7790 	DRM_FORMAT_ABGR8888,
7791 	DRM_FORMAT_RGB565
7792 };
7793 
7794 static const u32 cursor_formats[] = {
7795 	DRM_FORMAT_ARGB8888
7796 };
7797 
7798 static int get_plane_formats(const struct drm_plane *plane,
7799 			     const struct dc_plane_cap *plane_cap,
7800 			     uint32_t *formats, int max_formats)
7801 {
7802 	int i, num_formats = 0;
7803 
7804 	/*
7805 	 * TODO: Query support for each group of formats directly from
7806 	 * DC plane caps. This will require adding more formats to the
7807 	 * caps list.
7808 	 */
7809 
7810 	switch (plane->type) {
7811 	case DRM_PLANE_TYPE_PRIMARY:
7812 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7813 			if (num_formats >= max_formats)
7814 				break;
7815 
7816 			formats[num_formats++] = rgb_formats[i];
7817 		}
7818 
7819 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7820 			formats[num_formats++] = DRM_FORMAT_NV12;
7821 		if (plane_cap && plane_cap->pixel_format_support.p010)
7822 			formats[num_formats++] = DRM_FORMAT_P010;
7823 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7824 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7825 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7826 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7827 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7828 		}
7829 		break;
7830 
7831 	case DRM_PLANE_TYPE_OVERLAY:
7832 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7833 			if (num_formats >= max_formats)
7834 				break;
7835 
7836 			formats[num_formats++] = overlay_formats[i];
7837 		}
7838 		break;
7839 
7840 	case DRM_PLANE_TYPE_CURSOR:
7841 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7842 			if (num_formats >= max_formats)
7843 				break;
7844 
7845 			formats[num_formats++] = cursor_formats[i];
7846 		}
7847 		break;
7848 	}
7849 
7850 	return num_formats;
7851 }
7852 
7853 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7854 				struct drm_plane *plane,
7855 				unsigned long possible_crtcs,
7856 				const struct dc_plane_cap *plane_cap)
7857 {
7858 	uint32_t formats[32];
7859 	int num_formats;
7860 	int res = -EPERM;
7861 	unsigned int supported_rotations;
7862 	uint64_t *modifiers = NULL;
7863 
7864 	num_formats = get_plane_formats(plane, plane_cap, formats,
7865 					ARRAY_SIZE(formats));
7866 
7867 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7868 	if (res)
7869 		return res;
7870 
7871 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7872 				       &dm_plane_funcs, formats, num_formats,
7873 				       modifiers, plane->type, NULL);
7874 	kfree(modifiers);
7875 	if (res)
7876 		return res;
7877 
7878 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7879 	    plane_cap && plane_cap->per_pixel_alpha) {
7880 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7881 					  BIT(DRM_MODE_BLEND_PREMULTI);
7882 
7883 		drm_plane_create_alpha_property(plane);
7884 		drm_plane_create_blend_mode_property(plane, blend_caps);
7885 	}
7886 
7887 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7888 	    plane_cap &&
7889 	    (plane_cap->pixel_format_support.nv12 ||
7890 	     plane_cap->pixel_format_support.p010)) {
7891 		/* This only affects YUV formats. */
7892 		drm_plane_create_color_properties(
7893 			plane,
7894 			BIT(DRM_COLOR_YCBCR_BT601) |
7895 			BIT(DRM_COLOR_YCBCR_BT709) |
7896 			BIT(DRM_COLOR_YCBCR_BT2020),
7897 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7898 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7899 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7900 	}
7901 
7902 	supported_rotations =
7903 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7904 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7905 
7906 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7907 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7908 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7909 						   supported_rotations);
7910 
7911 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7912 
7913 	/* Create (reset) the plane state */
7914 	if (plane->funcs->reset)
7915 		plane->funcs->reset(plane);
7916 
7917 	return 0;
7918 }
7919 
7920 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7921 			       struct drm_plane *plane,
7922 			       uint32_t crtc_index)
7923 {
7924 	struct amdgpu_crtc *acrtc = NULL;
7925 	struct drm_plane *cursor_plane;
7926 
7927 	int res = -ENOMEM;
7928 
7929 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7930 	if (!cursor_plane)
7931 		goto fail;
7932 
7933 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7934 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7935 
7936 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7937 	if (!acrtc)
7938 		goto fail;
7939 
7940 	res = drm_crtc_init_with_planes(
7941 			dm->ddev,
7942 			&acrtc->base,
7943 			plane,
7944 			cursor_plane,
7945 			&amdgpu_dm_crtc_funcs, NULL);
7946 
7947 	if (res)
7948 		goto fail;
7949 
7950 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7951 
7952 	/* Create (reset) the plane state */
7953 	if (acrtc->base.funcs->reset)
7954 		acrtc->base.funcs->reset(&acrtc->base);
7955 
7956 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7957 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7958 
7959 	acrtc->crtc_id = crtc_index;
7960 	acrtc->base.enabled = false;
7961 	acrtc->otg_inst = -1;
7962 
7963 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7964 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7965 				   true, MAX_COLOR_LUT_ENTRIES);
7966 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7967 
7968 	return 0;
7969 
7970 fail:
7971 	kfree(acrtc);
7972 	kfree(cursor_plane);
7973 	return res;
7974 }
7975 
7976 
7977 static int to_drm_connector_type(enum signal_type st)
7978 {
7979 	switch (st) {
7980 	case SIGNAL_TYPE_HDMI_TYPE_A:
7981 		return DRM_MODE_CONNECTOR_HDMIA;
7982 	case SIGNAL_TYPE_EDP:
7983 		return DRM_MODE_CONNECTOR_eDP;
7984 	case SIGNAL_TYPE_LVDS:
7985 		return DRM_MODE_CONNECTOR_LVDS;
7986 	case SIGNAL_TYPE_RGB:
7987 		return DRM_MODE_CONNECTOR_VGA;
7988 	case SIGNAL_TYPE_DISPLAY_PORT:
7989 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7990 		return DRM_MODE_CONNECTOR_DisplayPort;
7991 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7992 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7993 		return DRM_MODE_CONNECTOR_DVID;
7994 	case SIGNAL_TYPE_VIRTUAL:
7995 		return DRM_MODE_CONNECTOR_VIRTUAL;
7996 
7997 	default:
7998 		return DRM_MODE_CONNECTOR_Unknown;
7999 	}
8000 }
8001 
8002 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8003 {
8004 	struct drm_encoder *encoder;
8005 
8006 	/* There is only one encoder per connector */
8007 	drm_connector_for_each_possible_encoder(connector, encoder)
8008 		return encoder;
8009 
8010 	return NULL;
8011 }
8012 
8013 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8014 {
8015 	struct drm_encoder *encoder;
8016 	struct amdgpu_encoder *amdgpu_encoder;
8017 
8018 	encoder = amdgpu_dm_connector_to_encoder(connector);
8019 
8020 	if (encoder == NULL)
8021 		return;
8022 
8023 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8024 
8025 	amdgpu_encoder->native_mode.clock = 0;
8026 
8027 	if (!list_empty(&connector->probed_modes)) {
8028 		struct drm_display_mode *preferred_mode = NULL;
8029 
8030 		list_for_each_entry(preferred_mode,
8031 				    &connector->probed_modes,
8032 				    head) {
8033 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8034 				amdgpu_encoder->native_mode = *preferred_mode;
8035 
8036 			break;
8037 		}
8038 
8039 	}
8040 }
8041 
8042 static struct drm_display_mode *
8043 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8044 			     char *name,
8045 			     int hdisplay, int vdisplay)
8046 {
8047 	struct drm_device *dev = encoder->dev;
8048 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8049 	struct drm_display_mode *mode = NULL;
8050 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8051 
8052 	mode = drm_mode_duplicate(dev, native_mode);
8053 
8054 	if (mode == NULL)
8055 		return NULL;
8056 
8057 	mode->hdisplay = hdisplay;
8058 	mode->vdisplay = vdisplay;
8059 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8060 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8061 
8062 	return mode;
8063 
8064 }
8065 
8066 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8067 						 struct drm_connector *connector)
8068 {
8069 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8070 	struct drm_display_mode *mode = NULL;
8071 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8072 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8073 				to_amdgpu_dm_connector(connector);
8074 	int i;
8075 	int n;
8076 	struct mode_size {
8077 		char name[DRM_DISPLAY_MODE_LEN];
8078 		int w;
8079 		int h;
8080 	} common_modes[] = {
8081 		{  "640x480",  640,  480},
8082 		{  "800x600",  800,  600},
8083 		{ "1024x768", 1024,  768},
8084 		{ "1280x720", 1280,  720},
8085 		{ "1280x800", 1280,  800},
8086 		{"1280x1024", 1280, 1024},
8087 		{ "1440x900", 1440,  900},
8088 		{"1680x1050", 1680, 1050},
8089 		{"1600x1200", 1600, 1200},
8090 		{"1920x1080", 1920, 1080},
8091 		{"1920x1200", 1920, 1200}
8092 	};
8093 
8094 	n = ARRAY_SIZE(common_modes);
8095 
8096 	for (i = 0; i < n; i++) {
8097 		struct drm_display_mode *curmode = NULL;
8098 		bool mode_existed = false;
8099 
8100 		if (common_modes[i].w > native_mode->hdisplay ||
8101 		    common_modes[i].h > native_mode->vdisplay ||
8102 		   (common_modes[i].w == native_mode->hdisplay &&
8103 		    common_modes[i].h == native_mode->vdisplay))
8104 			continue;
8105 
8106 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8107 			if (common_modes[i].w == curmode->hdisplay &&
8108 			    common_modes[i].h == curmode->vdisplay) {
8109 				mode_existed = true;
8110 				break;
8111 			}
8112 		}
8113 
8114 		if (mode_existed)
8115 			continue;
8116 
8117 		mode = amdgpu_dm_create_common_mode(encoder,
8118 				common_modes[i].name, common_modes[i].w,
8119 				common_modes[i].h);
8120 		drm_mode_probed_add(connector, mode);
8121 		amdgpu_dm_connector->num_modes++;
8122 	}
8123 }
8124 
8125 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8126 {
8127 	struct drm_encoder *encoder;
8128 	struct amdgpu_encoder *amdgpu_encoder;
8129 	const struct drm_display_mode *native_mode;
8130 
8131 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8132 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8133 		return;
8134 
8135 	encoder = amdgpu_dm_connector_to_encoder(connector);
8136 	if (!encoder)
8137 		return;
8138 
8139 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8140 
8141 	native_mode = &amdgpu_encoder->native_mode;
8142 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8143 		return;
8144 
8145 	drm_connector_set_panel_orientation_with_quirk(connector,
8146 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8147 						       native_mode->hdisplay,
8148 						       native_mode->vdisplay);
8149 }
8150 
8151 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8152 					      struct edid *edid)
8153 {
8154 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8155 			to_amdgpu_dm_connector(connector);
8156 
8157 	if (edid) {
8158 		/* empty probed_modes */
8159 		INIT_LIST_HEAD(&connector->probed_modes);
8160 		amdgpu_dm_connector->num_modes =
8161 				drm_add_edid_modes(connector, edid);
8162 
8163 		/* sorting the probed modes before calling function
8164 		 * amdgpu_dm_get_native_mode() since EDID can have
8165 		 * more than one preferred mode. The modes that are
8166 		 * later in the probed mode list could be of higher
8167 		 * and preferred resolution. For example, 3840x2160
8168 		 * resolution in base EDID preferred timing and 4096x2160
8169 		 * preferred resolution in DID extension block later.
8170 		 */
8171 		drm_mode_sort(&connector->probed_modes);
8172 		amdgpu_dm_get_native_mode(connector);
8173 
8174 		/* Freesync capabilities are reset by calling
8175 		 * drm_add_edid_modes() and need to be
8176 		 * restored here.
8177 		 */
8178 		amdgpu_dm_update_freesync_caps(connector, edid);
8179 
8180 		amdgpu_set_panel_orientation(connector);
8181 	} else {
8182 		amdgpu_dm_connector->num_modes = 0;
8183 	}
8184 }
8185 
8186 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8187 			      struct drm_display_mode *mode)
8188 {
8189 	struct drm_display_mode *m;
8190 
8191 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8192 		if (drm_mode_equal(m, mode))
8193 			return true;
8194 	}
8195 
8196 	return false;
8197 }
8198 
8199 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8200 {
8201 	const struct drm_display_mode *m;
8202 	struct drm_display_mode *new_mode;
8203 	uint i;
8204 	uint32_t new_modes_count = 0;
8205 
8206 	/* Standard FPS values
8207 	 *
8208 	 * 23.976       - TV/NTSC
8209 	 * 24 	        - Cinema
8210 	 * 25 	        - TV/PAL
8211 	 * 29.97        - TV/NTSC
8212 	 * 30 	        - TV/NTSC
8213 	 * 48 	        - Cinema HFR
8214 	 * 50 	        - TV/PAL
8215 	 * 60 	        - Commonly used
8216 	 * 48,72,96,120 - Multiples of 24
8217 	 */
8218 	static const uint32_t common_rates[] = {
8219 		23976, 24000, 25000, 29970, 30000,
8220 		48000, 50000, 60000, 72000, 96000, 120000
8221 	};
8222 
8223 	/*
8224 	 * Find mode with highest refresh rate with the same resolution
8225 	 * as the preferred mode. Some monitors report a preferred mode
8226 	 * with lower resolution than the highest refresh rate supported.
8227 	 */
8228 
8229 	m = get_highest_refresh_rate_mode(aconnector, true);
8230 	if (!m)
8231 		return 0;
8232 
8233 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8234 		uint64_t target_vtotal, target_vtotal_diff;
8235 		uint64_t num, den;
8236 
8237 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8238 			continue;
8239 
8240 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8241 		    common_rates[i] > aconnector->max_vfreq * 1000)
8242 			continue;
8243 
8244 		num = (unsigned long long)m->clock * 1000 * 1000;
8245 		den = common_rates[i] * (unsigned long long)m->htotal;
8246 		target_vtotal = div_u64(num, den);
8247 		target_vtotal_diff = target_vtotal - m->vtotal;
8248 
8249 		/* Check for illegal modes */
8250 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8251 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8252 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8253 			continue;
8254 
8255 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8256 		if (!new_mode)
8257 			goto out;
8258 
8259 		new_mode->vtotal += (u16)target_vtotal_diff;
8260 		new_mode->vsync_start += (u16)target_vtotal_diff;
8261 		new_mode->vsync_end += (u16)target_vtotal_diff;
8262 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8263 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8264 
8265 		if (!is_duplicate_mode(aconnector, new_mode)) {
8266 			drm_mode_probed_add(&aconnector->base, new_mode);
8267 			new_modes_count += 1;
8268 		} else
8269 			drm_mode_destroy(aconnector->base.dev, new_mode);
8270 	}
8271  out:
8272 	return new_modes_count;
8273 }
8274 
8275 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8276 						   struct edid *edid)
8277 {
8278 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8279 		to_amdgpu_dm_connector(connector);
8280 
8281 	if (!(amdgpu_freesync_vid_mode && edid))
8282 		return;
8283 
8284 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8285 		amdgpu_dm_connector->num_modes +=
8286 			add_fs_modes(amdgpu_dm_connector);
8287 }
8288 
8289 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8290 {
8291 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8292 			to_amdgpu_dm_connector(connector);
8293 	struct drm_encoder *encoder;
8294 	struct edid *edid = amdgpu_dm_connector->edid;
8295 
8296 	encoder = amdgpu_dm_connector_to_encoder(connector);
8297 
8298 	if (!drm_edid_is_valid(edid)) {
8299 		amdgpu_dm_connector->num_modes =
8300 				drm_add_modes_noedid(connector, 640, 480);
8301 	} else {
8302 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8303 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8304 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8305 	}
8306 	amdgpu_dm_fbc_init(connector);
8307 
8308 	return amdgpu_dm_connector->num_modes;
8309 }
8310 
8311 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8312 				     struct amdgpu_dm_connector *aconnector,
8313 				     int connector_type,
8314 				     struct dc_link *link,
8315 				     int link_index)
8316 {
8317 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8318 
8319 	/*
8320 	 * Some of the properties below require access to state, like bpc.
8321 	 * Allocate some default initial connector state with our reset helper.
8322 	 */
8323 	if (aconnector->base.funcs->reset)
8324 		aconnector->base.funcs->reset(&aconnector->base);
8325 
8326 	aconnector->connector_id = link_index;
8327 	aconnector->dc_link = link;
8328 	aconnector->base.interlace_allowed = false;
8329 	aconnector->base.doublescan_allowed = false;
8330 	aconnector->base.stereo_allowed = false;
8331 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8332 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8333 	aconnector->audio_inst = -1;
8334 	mutex_init(&aconnector->hpd_lock);
8335 
8336 	/*
8337 	 * configure support HPD hot plug connector_>polled default value is 0
8338 	 * which means HPD hot plug not supported
8339 	 */
8340 	switch (connector_type) {
8341 	case DRM_MODE_CONNECTOR_HDMIA:
8342 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8343 		aconnector->base.ycbcr_420_allowed =
8344 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8345 		break;
8346 	case DRM_MODE_CONNECTOR_DisplayPort:
8347 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8348 		link->link_enc = dp_get_link_enc(link);
8349 		ASSERT(link->link_enc);
8350 		if (link->link_enc)
8351 			aconnector->base.ycbcr_420_allowed =
8352 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8353 		break;
8354 	case DRM_MODE_CONNECTOR_DVID:
8355 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8356 		break;
8357 	default:
8358 		break;
8359 	}
8360 
8361 	drm_object_attach_property(&aconnector->base.base,
8362 				dm->ddev->mode_config.scaling_mode_property,
8363 				DRM_MODE_SCALE_NONE);
8364 
8365 	drm_object_attach_property(&aconnector->base.base,
8366 				adev->mode_info.underscan_property,
8367 				UNDERSCAN_OFF);
8368 	drm_object_attach_property(&aconnector->base.base,
8369 				adev->mode_info.underscan_hborder_property,
8370 				0);
8371 	drm_object_attach_property(&aconnector->base.base,
8372 				adev->mode_info.underscan_vborder_property,
8373 				0);
8374 
8375 	if (!aconnector->mst_port)
8376 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8377 
8378 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8379 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8380 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8381 
8382 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8383 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8384 		drm_object_attach_property(&aconnector->base.base,
8385 				adev->mode_info.abm_level_property, 0);
8386 	}
8387 
8388 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8389 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8390 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8391 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8392 
8393 		if (!aconnector->mst_port)
8394 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8395 
8396 #ifdef CONFIG_DRM_AMD_DC_HDCP
8397 		if (adev->dm.hdcp_workqueue)
8398 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8399 #endif
8400 	}
8401 }
8402 
8403 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8404 			      struct i2c_msg *msgs, int num)
8405 {
8406 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8407 	struct ddc_service *ddc_service = i2c->ddc_service;
8408 	struct i2c_command cmd;
8409 	int i;
8410 	int result = -EIO;
8411 
8412 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8413 
8414 	if (!cmd.payloads)
8415 		return result;
8416 
8417 	cmd.number_of_payloads = num;
8418 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8419 	cmd.speed = 100;
8420 
8421 	for (i = 0; i < num; i++) {
8422 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8423 		cmd.payloads[i].address = msgs[i].addr;
8424 		cmd.payloads[i].length = msgs[i].len;
8425 		cmd.payloads[i].data = msgs[i].buf;
8426 	}
8427 
8428 	if (dc_submit_i2c(
8429 			ddc_service->ctx->dc,
8430 			ddc_service->ddc_pin->hw_info.ddc_channel,
8431 			&cmd))
8432 		result = num;
8433 
8434 	kfree(cmd.payloads);
8435 	return result;
8436 }
8437 
8438 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8439 {
8440 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8441 }
8442 
8443 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8444 	.master_xfer = amdgpu_dm_i2c_xfer,
8445 	.functionality = amdgpu_dm_i2c_func,
8446 };
8447 
8448 static struct amdgpu_i2c_adapter *
8449 create_i2c(struct ddc_service *ddc_service,
8450 	   int link_index,
8451 	   int *res)
8452 {
8453 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8454 	struct amdgpu_i2c_adapter *i2c;
8455 
8456 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8457 	if (!i2c)
8458 		return NULL;
8459 	i2c->base.owner = THIS_MODULE;
8460 	i2c->base.class = I2C_CLASS_DDC;
8461 	i2c->base.dev.parent = &adev->pdev->dev;
8462 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8463 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8464 	i2c_set_adapdata(&i2c->base, i2c);
8465 	i2c->ddc_service = ddc_service;
8466 	if (i2c->ddc_service->ddc_pin)
8467 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8468 
8469 	return i2c;
8470 }
8471 
8472 
8473 /*
8474  * Note: this function assumes that dc_link_detect() was called for the
8475  * dc_link which will be represented by this aconnector.
8476  */
8477 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8478 				    struct amdgpu_dm_connector *aconnector,
8479 				    uint32_t link_index,
8480 				    struct amdgpu_encoder *aencoder)
8481 {
8482 	int res = 0;
8483 	int connector_type;
8484 	struct dc *dc = dm->dc;
8485 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8486 	struct amdgpu_i2c_adapter *i2c;
8487 
8488 	link->priv = aconnector;
8489 
8490 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8491 
8492 	i2c = create_i2c(link->ddc, link->link_index, &res);
8493 	if (!i2c) {
8494 		DRM_ERROR("Failed to create i2c adapter data\n");
8495 		return -ENOMEM;
8496 	}
8497 
8498 	aconnector->i2c = i2c;
8499 	res = i2c_add_adapter(&i2c->base);
8500 
8501 	if (res) {
8502 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8503 		goto out_free;
8504 	}
8505 
8506 	connector_type = to_drm_connector_type(link->connector_signal);
8507 
8508 	res = drm_connector_init_with_ddc(
8509 			dm->ddev,
8510 			&aconnector->base,
8511 			&amdgpu_dm_connector_funcs,
8512 			connector_type,
8513 			&i2c->base);
8514 
8515 	if (res) {
8516 		DRM_ERROR("connector_init failed\n");
8517 		aconnector->connector_id = -1;
8518 		goto out_free;
8519 	}
8520 
8521 	drm_connector_helper_add(
8522 			&aconnector->base,
8523 			&amdgpu_dm_connector_helper_funcs);
8524 
8525 	amdgpu_dm_connector_init_helper(
8526 		dm,
8527 		aconnector,
8528 		connector_type,
8529 		link,
8530 		link_index);
8531 
8532 	drm_connector_attach_encoder(
8533 		&aconnector->base, &aencoder->base);
8534 
8535 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8536 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8537 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8538 
8539 out_free:
8540 	if (res) {
8541 		kfree(i2c);
8542 		aconnector->i2c = NULL;
8543 	}
8544 	return res;
8545 }
8546 
8547 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8548 {
8549 	switch (adev->mode_info.num_crtc) {
8550 	case 1:
8551 		return 0x1;
8552 	case 2:
8553 		return 0x3;
8554 	case 3:
8555 		return 0x7;
8556 	case 4:
8557 		return 0xf;
8558 	case 5:
8559 		return 0x1f;
8560 	case 6:
8561 	default:
8562 		return 0x3f;
8563 	}
8564 }
8565 
8566 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8567 				  struct amdgpu_encoder *aencoder,
8568 				  uint32_t link_index)
8569 {
8570 	struct amdgpu_device *adev = drm_to_adev(dev);
8571 
8572 	int res = drm_encoder_init(dev,
8573 				   &aencoder->base,
8574 				   &amdgpu_dm_encoder_funcs,
8575 				   DRM_MODE_ENCODER_TMDS,
8576 				   NULL);
8577 
8578 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8579 
8580 	if (!res)
8581 		aencoder->encoder_id = link_index;
8582 	else
8583 		aencoder->encoder_id = -1;
8584 
8585 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8586 
8587 	return res;
8588 }
8589 
8590 static void manage_dm_interrupts(struct amdgpu_device *adev,
8591 				 struct amdgpu_crtc *acrtc,
8592 				 bool enable)
8593 {
8594 	/*
8595 	 * We have no guarantee that the frontend index maps to the same
8596 	 * backend index - some even map to more than one.
8597 	 *
8598 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8599 	 */
8600 	int irq_type =
8601 		amdgpu_display_crtc_idx_to_irq_type(
8602 			adev,
8603 			acrtc->crtc_id);
8604 
8605 	if (enable) {
8606 		drm_crtc_vblank_on(&acrtc->base);
8607 		amdgpu_irq_get(
8608 			adev,
8609 			&adev->pageflip_irq,
8610 			irq_type);
8611 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8612 		amdgpu_irq_get(
8613 			adev,
8614 			&adev->vline0_irq,
8615 			irq_type);
8616 #endif
8617 	} else {
8618 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8619 		amdgpu_irq_put(
8620 			adev,
8621 			&adev->vline0_irq,
8622 			irq_type);
8623 #endif
8624 		amdgpu_irq_put(
8625 			adev,
8626 			&adev->pageflip_irq,
8627 			irq_type);
8628 		drm_crtc_vblank_off(&acrtc->base);
8629 	}
8630 }
8631 
8632 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8633 				      struct amdgpu_crtc *acrtc)
8634 {
8635 	int irq_type =
8636 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8637 
8638 	/**
8639 	 * This reads the current state for the IRQ and force reapplies
8640 	 * the setting to hardware.
8641 	 */
8642 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8643 }
8644 
8645 static bool
8646 is_scaling_state_different(const struct dm_connector_state *dm_state,
8647 			   const struct dm_connector_state *old_dm_state)
8648 {
8649 	if (dm_state->scaling != old_dm_state->scaling)
8650 		return true;
8651 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8652 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8653 			return true;
8654 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8655 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8656 			return true;
8657 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8658 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8659 		return true;
8660 	return false;
8661 }
8662 
8663 #ifdef CONFIG_DRM_AMD_DC_HDCP
8664 static bool is_content_protection_different(struct drm_connector_state *state,
8665 					    const struct drm_connector_state *old_state,
8666 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8667 {
8668 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8669 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8670 
8671 	/* Handle: Type0/1 change */
8672 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8673 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8674 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8675 		return true;
8676 	}
8677 
8678 	/* CP is being re enabled, ignore this
8679 	 *
8680 	 * Handles:	ENABLED -> DESIRED
8681 	 */
8682 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8683 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8684 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8685 		return false;
8686 	}
8687 
8688 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8689 	 *
8690 	 * Handles:	UNDESIRED -> ENABLED
8691 	 */
8692 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8693 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8694 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8695 
8696 	/* Stream removed and re-enabled
8697 	 *
8698 	 * Can sometimes overlap with the HPD case,
8699 	 * thus set update_hdcp to false to avoid
8700 	 * setting HDCP multiple times.
8701 	 *
8702 	 * Handles:	DESIRED -> DESIRED (Special case)
8703 	 */
8704 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8705 		state->crtc && state->crtc->enabled &&
8706 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8707 		dm_con_state->update_hdcp = false;
8708 		return true;
8709 	}
8710 
8711 	/* Hot-plug, headless s3, dpms
8712 	 *
8713 	 * Only start HDCP if the display is connected/enabled.
8714 	 * update_hdcp flag will be set to false until the next
8715 	 * HPD comes in.
8716 	 *
8717 	 * Handles:	DESIRED -> DESIRED (Special case)
8718 	 */
8719 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8720 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8721 		dm_con_state->update_hdcp = false;
8722 		return true;
8723 	}
8724 
8725 	/*
8726 	 * Handles:	UNDESIRED -> UNDESIRED
8727 	 *		DESIRED -> DESIRED
8728 	 *		ENABLED -> ENABLED
8729 	 */
8730 	if (old_state->content_protection == state->content_protection)
8731 		return false;
8732 
8733 	/*
8734 	 * Handles:	UNDESIRED -> DESIRED
8735 	 *		DESIRED -> UNDESIRED
8736 	 *		ENABLED -> UNDESIRED
8737 	 */
8738 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8739 		return true;
8740 
8741 	/*
8742 	 * Handles:	DESIRED -> ENABLED
8743 	 */
8744 	return false;
8745 }
8746 
8747 #endif
8748 static void remove_stream(struct amdgpu_device *adev,
8749 			  struct amdgpu_crtc *acrtc,
8750 			  struct dc_stream_state *stream)
8751 {
8752 	/* this is the update mode case */
8753 
8754 	acrtc->otg_inst = -1;
8755 	acrtc->enabled = false;
8756 }
8757 
8758 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8759 			       struct dc_cursor_position *position)
8760 {
8761 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8762 	int x, y;
8763 	int xorigin = 0, yorigin = 0;
8764 
8765 	if (!crtc || !plane->state->fb)
8766 		return 0;
8767 
8768 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8769 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8770 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8771 			  __func__,
8772 			  plane->state->crtc_w,
8773 			  plane->state->crtc_h);
8774 		return -EINVAL;
8775 	}
8776 
8777 	x = plane->state->crtc_x;
8778 	y = plane->state->crtc_y;
8779 
8780 	if (x <= -amdgpu_crtc->max_cursor_width ||
8781 	    y <= -amdgpu_crtc->max_cursor_height)
8782 		return 0;
8783 
8784 	if (x < 0) {
8785 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8786 		x = 0;
8787 	}
8788 	if (y < 0) {
8789 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8790 		y = 0;
8791 	}
8792 	position->enable = true;
8793 	position->translate_by_source = true;
8794 	position->x = x;
8795 	position->y = y;
8796 	position->x_hotspot = xorigin;
8797 	position->y_hotspot = yorigin;
8798 
8799 	return 0;
8800 }
8801 
8802 static void handle_cursor_update(struct drm_plane *plane,
8803 				 struct drm_plane_state *old_plane_state)
8804 {
8805 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8806 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8807 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8808 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8809 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8810 	uint64_t address = afb ? afb->address : 0;
8811 	struct dc_cursor_position position = {0};
8812 	struct dc_cursor_attributes attributes;
8813 	int ret;
8814 
8815 	if (!plane->state->fb && !old_plane_state->fb)
8816 		return;
8817 
8818 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8819 		      __func__,
8820 		      amdgpu_crtc->crtc_id,
8821 		      plane->state->crtc_w,
8822 		      plane->state->crtc_h);
8823 
8824 	ret = get_cursor_position(plane, crtc, &position);
8825 	if (ret)
8826 		return;
8827 
8828 	if (!position.enable) {
8829 		/* turn off cursor */
8830 		if (crtc_state && crtc_state->stream) {
8831 			mutex_lock(&adev->dm.dc_lock);
8832 			dc_stream_set_cursor_position(crtc_state->stream,
8833 						      &position);
8834 			mutex_unlock(&adev->dm.dc_lock);
8835 		}
8836 		return;
8837 	}
8838 
8839 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8840 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8841 
8842 	memset(&attributes, 0, sizeof(attributes));
8843 	attributes.address.high_part = upper_32_bits(address);
8844 	attributes.address.low_part  = lower_32_bits(address);
8845 	attributes.width             = plane->state->crtc_w;
8846 	attributes.height            = plane->state->crtc_h;
8847 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8848 	attributes.rotation_angle    = 0;
8849 	attributes.attribute_flags.value = 0;
8850 
8851 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8852 
8853 	if (crtc_state->stream) {
8854 		mutex_lock(&adev->dm.dc_lock);
8855 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8856 							 &attributes))
8857 			DRM_ERROR("DC failed to set cursor attributes\n");
8858 
8859 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8860 						   &position))
8861 			DRM_ERROR("DC failed to set cursor position\n");
8862 		mutex_unlock(&adev->dm.dc_lock);
8863 	}
8864 }
8865 
8866 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8867 {
8868 
8869 	assert_spin_locked(&acrtc->base.dev->event_lock);
8870 	WARN_ON(acrtc->event);
8871 
8872 	acrtc->event = acrtc->base.state->event;
8873 
8874 	/* Set the flip status */
8875 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8876 
8877 	/* Mark this event as consumed */
8878 	acrtc->base.state->event = NULL;
8879 
8880 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8881 		     acrtc->crtc_id);
8882 }
8883 
8884 static void update_freesync_state_on_stream(
8885 	struct amdgpu_display_manager *dm,
8886 	struct dm_crtc_state *new_crtc_state,
8887 	struct dc_stream_state *new_stream,
8888 	struct dc_plane_state *surface,
8889 	u32 flip_timestamp_in_us)
8890 {
8891 	struct mod_vrr_params vrr_params;
8892 	struct dc_info_packet vrr_infopacket = {0};
8893 	struct amdgpu_device *adev = dm->adev;
8894 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8895 	unsigned long flags;
8896 	bool pack_sdp_v1_3 = false;
8897 
8898 	if (!new_stream)
8899 		return;
8900 
8901 	/*
8902 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8903 	 * For now it's sufficient to just guard against these conditions.
8904 	 */
8905 
8906 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8907 		return;
8908 
8909 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8910         vrr_params = acrtc->dm_irq_params.vrr_params;
8911 
8912 	if (surface) {
8913 		mod_freesync_handle_preflip(
8914 			dm->freesync_module,
8915 			surface,
8916 			new_stream,
8917 			flip_timestamp_in_us,
8918 			&vrr_params);
8919 
8920 		if (adev->family < AMDGPU_FAMILY_AI &&
8921 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8922 			mod_freesync_handle_v_update(dm->freesync_module,
8923 						     new_stream, &vrr_params);
8924 
8925 			/* Need to call this before the frame ends. */
8926 			dc_stream_adjust_vmin_vmax(dm->dc,
8927 						   new_crtc_state->stream,
8928 						   &vrr_params.adjust);
8929 		}
8930 	}
8931 
8932 	mod_freesync_build_vrr_infopacket(
8933 		dm->freesync_module,
8934 		new_stream,
8935 		&vrr_params,
8936 		PACKET_TYPE_VRR,
8937 		TRANSFER_FUNC_UNKNOWN,
8938 		&vrr_infopacket,
8939 		pack_sdp_v1_3);
8940 
8941 	new_crtc_state->freesync_timing_changed |=
8942 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8943 			&vrr_params.adjust,
8944 			sizeof(vrr_params.adjust)) != 0);
8945 
8946 	new_crtc_state->freesync_vrr_info_changed |=
8947 		(memcmp(&new_crtc_state->vrr_infopacket,
8948 			&vrr_infopacket,
8949 			sizeof(vrr_infopacket)) != 0);
8950 
8951 	acrtc->dm_irq_params.vrr_params = vrr_params;
8952 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8953 
8954 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8955 	new_stream->vrr_infopacket = vrr_infopacket;
8956 
8957 	if (new_crtc_state->freesync_vrr_info_changed)
8958 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8959 			      new_crtc_state->base.crtc->base.id,
8960 			      (int)new_crtc_state->base.vrr_enabled,
8961 			      (int)vrr_params.state);
8962 
8963 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8964 }
8965 
8966 static void update_stream_irq_parameters(
8967 	struct amdgpu_display_manager *dm,
8968 	struct dm_crtc_state *new_crtc_state)
8969 {
8970 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8971 	struct mod_vrr_params vrr_params;
8972 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8973 	struct amdgpu_device *adev = dm->adev;
8974 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8975 	unsigned long flags;
8976 
8977 	if (!new_stream)
8978 		return;
8979 
8980 	/*
8981 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8982 	 * For now it's sufficient to just guard against these conditions.
8983 	 */
8984 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8985 		return;
8986 
8987 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8988 	vrr_params = acrtc->dm_irq_params.vrr_params;
8989 
8990 	if (new_crtc_state->vrr_supported &&
8991 	    config.min_refresh_in_uhz &&
8992 	    config.max_refresh_in_uhz) {
8993 		/*
8994 		 * if freesync compatible mode was set, config.state will be set
8995 		 * in atomic check
8996 		 */
8997 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8998 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8999 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9000 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9001 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9002 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9003 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9004 		} else {
9005 			config.state = new_crtc_state->base.vrr_enabled ?
9006 						     VRR_STATE_ACTIVE_VARIABLE :
9007 						     VRR_STATE_INACTIVE;
9008 		}
9009 	} else {
9010 		config.state = VRR_STATE_UNSUPPORTED;
9011 	}
9012 
9013 	mod_freesync_build_vrr_params(dm->freesync_module,
9014 				      new_stream,
9015 				      &config, &vrr_params);
9016 
9017 	new_crtc_state->freesync_timing_changed |=
9018 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9019 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9020 
9021 	new_crtc_state->freesync_config = config;
9022 	/* Copy state for access from DM IRQ handler */
9023 	acrtc->dm_irq_params.freesync_config = config;
9024 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9025 	acrtc->dm_irq_params.vrr_params = vrr_params;
9026 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9027 }
9028 
9029 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9030 					    struct dm_crtc_state *new_state)
9031 {
9032 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9033 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9034 
9035 	if (!old_vrr_active && new_vrr_active) {
9036 		/* Transition VRR inactive -> active:
9037 		 * While VRR is active, we must not disable vblank irq, as a
9038 		 * reenable after disable would compute bogus vblank/pflip
9039 		 * timestamps if it likely happened inside display front-porch.
9040 		 *
9041 		 * We also need vupdate irq for the actual core vblank handling
9042 		 * at end of vblank.
9043 		 */
9044 		dm_set_vupdate_irq(new_state->base.crtc, true);
9045 		drm_crtc_vblank_get(new_state->base.crtc);
9046 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9047 				 __func__, new_state->base.crtc->base.id);
9048 	} else if (old_vrr_active && !new_vrr_active) {
9049 		/* Transition VRR active -> inactive:
9050 		 * Allow vblank irq disable again for fixed refresh rate.
9051 		 */
9052 		dm_set_vupdate_irq(new_state->base.crtc, false);
9053 		drm_crtc_vblank_put(new_state->base.crtc);
9054 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9055 				 __func__, new_state->base.crtc->base.id);
9056 	}
9057 }
9058 
9059 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9060 {
9061 	struct drm_plane *plane;
9062 	struct drm_plane_state *old_plane_state;
9063 	int i;
9064 
9065 	/*
9066 	 * TODO: Make this per-stream so we don't issue redundant updates for
9067 	 * commits with multiple streams.
9068 	 */
9069 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9070 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9071 			handle_cursor_update(plane, old_plane_state);
9072 }
9073 
9074 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9075 				    struct dc_state *dc_state,
9076 				    struct drm_device *dev,
9077 				    struct amdgpu_display_manager *dm,
9078 				    struct drm_crtc *pcrtc,
9079 				    bool wait_for_vblank)
9080 {
9081 	uint32_t i;
9082 	uint64_t timestamp_ns;
9083 	struct drm_plane *plane;
9084 	struct drm_plane_state *old_plane_state, *new_plane_state;
9085 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9086 	struct drm_crtc_state *new_pcrtc_state =
9087 			drm_atomic_get_new_crtc_state(state, pcrtc);
9088 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9089 	struct dm_crtc_state *dm_old_crtc_state =
9090 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9091 	int planes_count = 0, vpos, hpos;
9092 	long r;
9093 	unsigned long flags;
9094 	struct amdgpu_bo *abo;
9095 	uint32_t target_vblank, last_flip_vblank;
9096 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9097 	bool pflip_present = false;
9098 	struct {
9099 		struct dc_surface_update surface_updates[MAX_SURFACES];
9100 		struct dc_plane_info plane_infos[MAX_SURFACES];
9101 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9102 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9103 		struct dc_stream_update stream_update;
9104 	} *bundle;
9105 
9106 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9107 
9108 	if (!bundle) {
9109 		dm_error("Failed to allocate update bundle\n");
9110 		goto cleanup;
9111 	}
9112 
9113 	/*
9114 	 * Disable the cursor first if we're disabling all the planes.
9115 	 * It'll remain on the screen after the planes are re-enabled
9116 	 * if we don't.
9117 	 */
9118 	if (acrtc_state->active_planes == 0)
9119 		amdgpu_dm_commit_cursors(state);
9120 
9121 	/* update planes when needed */
9122 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9123 		struct drm_crtc *crtc = new_plane_state->crtc;
9124 		struct drm_crtc_state *new_crtc_state;
9125 		struct drm_framebuffer *fb = new_plane_state->fb;
9126 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9127 		bool plane_needs_flip;
9128 		struct dc_plane_state *dc_plane;
9129 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9130 
9131 		/* Cursor plane is handled after stream updates */
9132 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9133 			continue;
9134 
9135 		if (!fb || !crtc || pcrtc != crtc)
9136 			continue;
9137 
9138 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9139 		if (!new_crtc_state->active)
9140 			continue;
9141 
9142 		dc_plane = dm_new_plane_state->dc_state;
9143 
9144 		bundle->surface_updates[planes_count].surface = dc_plane;
9145 		if (new_pcrtc_state->color_mgmt_changed) {
9146 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9147 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9148 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9149 		}
9150 
9151 		fill_dc_scaling_info(dm->adev, new_plane_state,
9152 				     &bundle->scaling_infos[planes_count]);
9153 
9154 		bundle->surface_updates[planes_count].scaling_info =
9155 			&bundle->scaling_infos[planes_count];
9156 
9157 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9158 
9159 		pflip_present = pflip_present || plane_needs_flip;
9160 
9161 		if (!plane_needs_flip) {
9162 			planes_count += 1;
9163 			continue;
9164 		}
9165 
9166 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9167 
9168 		/*
9169 		 * Wait for all fences on this FB. Do limited wait to avoid
9170 		 * deadlock during GPU reset when this fence will not signal
9171 		 * but we hold reservation lock for the BO.
9172 		 */
9173 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9174 					  msecs_to_jiffies(5000));
9175 		if (unlikely(r <= 0))
9176 			DRM_ERROR("Waiting for fences timed out!");
9177 
9178 		fill_dc_plane_info_and_addr(
9179 			dm->adev, new_plane_state,
9180 			afb->tiling_flags,
9181 			&bundle->plane_infos[planes_count],
9182 			&bundle->flip_addrs[planes_count].address,
9183 			afb->tmz_surface, false);
9184 
9185 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9186 				 new_plane_state->plane->index,
9187 				 bundle->plane_infos[planes_count].dcc.enable);
9188 
9189 		bundle->surface_updates[planes_count].plane_info =
9190 			&bundle->plane_infos[planes_count];
9191 
9192 		/*
9193 		 * Only allow immediate flips for fast updates that don't
9194 		 * change FB pitch, DCC state, rotation or mirroing.
9195 		 */
9196 		bundle->flip_addrs[planes_count].flip_immediate =
9197 			crtc->state->async_flip &&
9198 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9199 
9200 		timestamp_ns = ktime_get_ns();
9201 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9202 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9203 		bundle->surface_updates[planes_count].surface = dc_plane;
9204 
9205 		if (!bundle->surface_updates[planes_count].surface) {
9206 			DRM_ERROR("No surface for CRTC: id=%d\n",
9207 					acrtc_attach->crtc_id);
9208 			continue;
9209 		}
9210 
9211 		if (plane == pcrtc->primary)
9212 			update_freesync_state_on_stream(
9213 				dm,
9214 				acrtc_state,
9215 				acrtc_state->stream,
9216 				dc_plane,
9217 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9218 
9219 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9220 				 __func__,
9221 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9222 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9223 
9224 		planes_count += 1;
9225 
9226 	}
9227 
9228 	if (pflip_present) {
9229 		if (!vrr_active) {
9230 			/* Use old throttling in non-vrr fixed refresh rate mode
9231 			 * to keep flip scheduling based on target vblank counts
9232 			 * working in a backwards compatible way, e.g., for
9233 			 * clients using the GLX_OML_sync_control extension or
9234 			 * DRI3/Present extension with defined target_msc.
9235 			 */
9236 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9237 		}
9238 		else {
9239 			/* For variable refresh rate mode only:
9240 			 * Get vblank of last completed flip to avoid > 1 vrr
9241 			 * flips per video frame by use of throttling, but allow
9242 			 * flip programming anywhere in the possibly large
9243 			 * variable vrr vblank interval for fine-grained flip
9244 			 * timing control and more opportunity to avoid stutter
9245 			 * on late submission of flips.
9246 			 */
9247 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9248 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9249 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9250 		}
9251 
9252 		target_vblank = last_flip_vblank + wait_for_vblank;
9253 
9254 		/*
9255 		 * Wait until we're out of the vertical blank period before the one
9256 		 * targeted by the flip
9257 		 */
9258 		while ((acrtc_attach->enabled &&
9259 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9260 							    0, &vpos, &hpos, NULL,
9261 							    NULL, &pcrtc->hwmode)
9262 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9263 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9264 			(int)(target_vblank -
9265 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9266 			usleep_range(1000, 1100);
9267 		}
9268 
9269 		/**
9270 		 * Prepare the flip event for the pageflip interrupt to handle.
9271 		 *
9272 		 * This only works in the case where we've already turned on the
9273 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9274 		 * from 0 -> n planes we have to skip a hardware generated event
9275 		 * and rely on sending it from software.
9276 		 */
9277 		if (acrtc_attach->base.state->event &&
9278 		    acrtc_state->active_planes > 0 &&
9279 		    !acrtc_state->force_dpms_off) {
9280 			drm_crtc_vblank_get(pcrtc);
9281 
9282 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9283 
9284 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9285 			prepare_flip_isr(acrtc_attach);
9286 
9287 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9288 		}
9289 
9290 		if (acrtc_state->stream) {
9291 			if (acrtc_state->freesync_vrr_info_changed)
9292 				bundle->stream_update.vrr_infopacket =
9293 					&acrtc_state->stream->vrr_infopacket;
9294 		}
9295 	}
9296 
9297 	/* Update the planes if changed or disable if we don't have any. */
9298 	if ((planes_count || acrtc_state->active_planes == 0) &&
9299 		acrtc_state->stream) {
9300 #if defined(CONFIG_DRM_AMD_DC_DCN)
9301 		/*
9302 		 * If PSR or idle optimizations are enabled then flush out
9303 		 * any pending work before hardware programming.
9304 		 */
9305 		if (dm->vblank_control_workqueue)
9306 			flush_workqueue(dm->vblank_control_workqueue);
9307 #endif
9308 
9309 		bundle->stream_update.stream = acrtc_state->stream;
9310 		if (new_pcrtc_state->mode_changed) {
9311 			bundle->stream_update.src = acrtc_state->stream->src;
9312 			bundle->stream_update.dst = acrtc_state->stream->dst;
9313 		}
9314 
9315 		if (new_pcrtc_state->color_mgmt_changed) {
9316 			/*
9317 			 * TODO: This isn't fully correct since we've actually
9318 			 * already modified the stream in place.
9319 			 */
9320 			bundle->stream_update.gamut_remap =
9321 				&acrtc_state->stream->gamut_remap_matrix;
9322 			bundle->stream_update.output_csc_transform =
9323 				&acrtc_state->stream->csc_color_matrix;
9324 			bundle->stream_update.out_transfer_func =
9325 				acrtc_state->stream->out_transfer_func;
9326 		}
9327 
9328 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9329 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9330 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9331 
9332 		/*
9333 		 * If FreeSync state on the stream has changed then we need to
9334 		 * re-adjust the min/max bounds now that DC doesn't handle this
9335 		 * as part of commit.
9336 		 */
9337 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9338 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9339 			dc_stream_adjust_vmin_vmax(
9340 				dm->dc, acrtc_state->stream,
9341 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9342 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9343 		}
9344 		mutex_lock(&dm->dc_lock);
9345 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9346 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9347 			amdgpu_dm_psr_disable(acrtc_state->stream);
9348 
9349 		dc_commit_updates_for_stream(dm->dc,
9350 						     bundle->surface_updates,
9351 						     planes_count,
9352 						     acrtc_state->stream,
9353 						     &bundle->stream_update,
9354 						     dc_state);
9355 
9356 		/**
9357 		 * Enable or disable the interrupts on the backend.
9358 		 *
9359 		 * Most pipes are put into power gating when unused.
9360 		 *
9361 		 * When power gating is enabled on a pipe we lose the
9362 		 * interrupt enablement state when power gating is disabled.
9363 		 *
9364 		 * So we need to update the IRQ control state in hardware
9365 		 * whenever the pipe turns on (since it could be previously
9366 		 * power gated) or off (since some pipes can't be power gated
9367 		 * on some ASICs).
9368 		 */
9369 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9370 			dm_update_pflip_irq_state(drm_to_adev(dev),
9371 						  acrtc_attach);
9372 
9373 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9374 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9375 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9376 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9377 
9378 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9379 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9380 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9381 			struct amdgpu_dm_connector *aconn =
9382 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9383 
9384 			if (aconn->psr_skip_count > 0)
9385 				aconn->psr_skip_count--;
9386 
9387 			/* Allow PSR when skip count is 0. */
9388 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9389 		} else {
9390 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9391 		}
9392 
9393 		mutex_unlock(&dm->dc_lock);
9394 	}
9395 
9396 	/*
9397 	 * Update cursor state *after* programming all the planes.
9398 	 * This avoids redundant programming in the case where we're going
9399 	 * to be disabling a single plane - those pipes are being disabled.
9400 	 */
9401 	if (acrtc_state->active_planes)
9402 		amdgpu_dm_commit_cursors(state);
9403 
9404 cleanup:
9405 	kfree(bundle);
9406 }
9407 
9408 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9409 				   struct drm_atomic_state *state)
9410 {
9411 	struct amdgpu_device *adev = drm_to_adev(dev);
9412 	struct amdgpu_dm_connector *aconnector;
9413 	struct drm_connector *connector;
9414 	struct drm_connector_state *old_con_state, *new_con_state;
9415 	struct drm_crtc_state *new_crtc_state;
9416 	struct dm_crtc_state *new_dm_crtc_state;
9417 	const struct dc_stream_status *status;
9418 	int i, inst;
9419 
9420 	/* Notify device removals. */
9421 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9422 		if (old_con_state->crtc != new_con_state->crtc) {
9423 			/* CRTC changes require notification. */
9424 			goto notify;
9425 		}
9426 
9427 		if (!new_con_state->crtc)
9428 			continue;
9429 
9430 		new_crtc_state = drm_atomic_get_new_crtc_state(
9431 			state, new_con_state->crtc);
9432 
9433 		if (!new_crtc_state)
9434 			continue;
9435 
9436 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9437 			continue;
9438 
9439 	notify:
9440 		aconnector = to_amdgpu_dm_connector(connector);
9441 
9442 		mutex_lock(&adev->dm.audio_lock);
9443 		inst = aconnector->audio_inst;
9444 		aconnector->audio_inst = -1;
9445 		mutex_unlock(&adev->dm.audio_lock);
9446 
9447 		amdgpu_dm_audio_eld_notify(adev, inst);
9448 	}
9449 
9450 	/* Notify audio device additions. */
9451 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9452 		if (!new_con_state->crtc)
9453 			continue;
9454 
9455 		new_crtc_state = drm_atomic_get_new_crtc_state(
9456 			state, new_con_state->crtc);
9457 
9458 		if (!new_crtc_state)
9459 			continue;
9460 
9461 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9462 			continue;
9463 
9464 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9465 		if (!new_dm_crtc_state->stream)
9466 			continue;
9467 
9468 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9469 		if (!status)
9470 			continue;
9471 
9472 		aconnector = to_amdgpu_dm_connector(connector);
9473 
9474 		mutex_lock(&adev->dm.audio_lock);
9475 		inst = status->audio_inst;
9476 		aconnector->audio_inst = inst;
9477 		mutex_unlock(&adev->dm.audio_lock);
9478 
9479 		amdgpu_dm_audio_eld_notify(adev, inst);
9480 	}
9481 }
9482 
9483 /*
9484  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9485  * @crtc_state: the DRM CRTC state
9486  * @stream_state: the DC stream state.
9487  *
9488  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9489  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9490  */
9491 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9492 						struct dc_stream_state *stream_state)
9493 {
9494 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9495 }
9496 
9497 /**
9498  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9499  * @state: The atomic state to commit
9500  *
9501  * This will tell DC to commit the constructed DC state from atomic_check,
9502  * programming the hardware. Any failures here implies a hardware failure, since
9503  * atomic check should have filtered anything non-kosher.
9504  */
9505 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9506 {
9507 	struct drm_device *dev = state->dev;
9508 	struct amdgpu_device *adev = drm_to_adev(dev);
9509 	struct amdgpu_display_manager *dm = &adev->dm;
9510 	struct dm_atomic_state *dm_state;
9511 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9512 	uint32_t i, j;
9513 	struct drm_crtc *crtc;
9514 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9515 	unsigned long flags;
9516 	bool wait_for_vblank = true;
9517 	struct drm_connector *connector;
9518 	struct drm_connector_state *old_con_state, *new_con_state;
9519 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9520 	int crtc_disable_count = 0;
9521 	bool mode_set_reset_required = false;
9522 
9523 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9524 
9525 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9526 
9527 	dm_state = dm_atomic_get_new_state(state);
9528 	if (dm_state && dm_state->context) {
9529 		dc_state = dm_state->context;
9530 	} else {
9531 		/* No state changes, retain current state. */
9532 		dc_state_temp = dc_create_state(dm->dc);
9533 		ASSERT(dc_state_temp);
9534 		dc_state = dc_state_temp;
9535 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9536 	}
9537 
9538 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9539 				       new_crtc_state, i) {
9540 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9541 
9542 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9543 
9544 		if (old_crtc_state->active &&
9545 		    (!new_crtc_state->active ||
9546 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9547 			manage_dm_interrupts(adev, acrtc, false);
9548 			dc_stream_release(dm_old_crtc_state->stream);
9549 		}
9550 	}
9551 
9552 	drm_atomic_helper_calc_timestamping_constants(state);
9553 
9554 	/* update changed items */
9555 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9556 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9557 
9558 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9559 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9560 
9561 		DRM_DEBUG_ATOMIC(
9562 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9563 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9564 			"connectors_changed:%d\n",
9565 			acrtc->crtc_id,
9566 			new_crtc_state->enable,
9567 			new_crtc_state->active,
9568 			new_crtc_state->planes_changed,
9569 			new_crtc_state->mode_changed,
9570 			new_crtc_state->active_changed,
9571 			new_crtc_state->connectors_changed);
9572 
9573 		/* Disable cursor if disabling crtc */
9574 		if (old_crtc_state->active && !new_crtc_state->active) {
9575 			struct dc_cursor_position position;
9576 
9577 			memset(&position, 0, sizeof(position));
9578 			mutex_lock(&dm->dc_lock);
9579 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9580 			mutex_unlock(&dm->dc_lock);
9581 		}
9582 
9583 		/* Copy all transient state flags into dc state */
9584 		if (dm_new_crtc_state->stream) {
9585 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9586 							    dm_new_crtc_state->stream);
9587 		}
9588 
9589 		/* handles headless hotplug case, updating new_state and
9590 		 * aconnector as needed
9591 		 */
9592 
9593 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9594 
9595 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9596 
9597 			if (!dm_new_crtc_state->stream) {
9598 				/*
9599 				 * this could happen because of issues with
9600 				 * userspace notifications delivery.
9601 				 * In this case userspace tries to set mode on
9602 				 * display which is disconnected in fact.
9603 				 * dc_sink is NULL in this case on aconnector.
9604 				 * We expect reset mode will come soon.
9605 				 *
9606 				 * This can also happen when unplug is done
9607 				 * during resume sequence ended
9608 				 *
9609 				 * In this case, we want to pretend we still
9610 				 * have a sink to keep the pipe running so that
9611 				 * hw state is consistent with the sw state
9612 				 */
9613 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9614 						__func__, acrtc->base.base.id);
9615 				continue;
9616 			}
9617 
9618 			if (dm_old_crtc_state->stream)
9619 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9620 
9621 			pm_runtime_get_noresume(dev->dev);
9622 
9623 			acrtc->enabled = true;
9624 			acrtc->hw_mode = new_crtc_state->mode;
9625 			crtc->hwmode = new_crtc_state->mode;
9626 			mode_set_reset_required = true;
9627 		} else if (modereset_required(new_crtc_state)) {
9628 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9629 			/* i.e. reset mode */
9630 			if (dm_old_crtc_state->stream)
9631 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9632 
9633 			mode_set_reset_required = true;
9634 		}
9635 	} /* for_each_crtc_in_state() */
9636 
9637 	if (dc_state) {
9638 		/* if there mode set or reset, disable eDP PSR */
9639 		if (mode_set_reset_required) {
9640 #if defined(CONFIG_DRM_AMD_DC_DCN)
9641 			if (dm->vblank_control_workqueue)
9642 				flush_workqueue(dm->vblank_control_workqueue);
9643 #endif
9644 			amdgpu_dm_psr_disable_all(dm);
9645 		}
9646 
9647 		dm_enable_per_frame_crtc_master_sync(dc_state);
9648 		mutex_lock(&dm->dc_lock);
9649 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9650 #if defined(CONFIG_DRM_AMD_DC_DCN)
9651                /* Allow idle optimization when vblank count is 0 for display off */
9652                if (dm->active_vblank_irq_count == 0)
9653                    dc_allow_idle_optimizations(dm->dc,true);
9654 #endif
9655 		mutex_unlock(&dm->dc_lock);
9656 	}
9657 
9658 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9659 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9660 
9661 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9662 
9663 		if (dm_new_crtc_state->stream != NULL) {
9664 			const struct dc_stream_status *status =
9665 					dc_stream_get_status(dm_new_crtc_state->stream);
9666 
9667 			if (!status)
9668 				status = dc_stream_get_status_from_state(dc_state,
9669 									 dm_new_crtc_state->stream);
9670 			if (!status)
9671 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9672 			else
9673 				acrtc->otg_inst = status->primary_otg_inst;
9674 		}
9675 	}
9676 #ifdef CONFIG_DRM_AMD_DC_HDCP
9677 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9678 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9679 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9680 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9681 
9682 		new_crtc_state = NULL;
9683 
9684 		if (acrtc)
9685 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9686 
9687 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9688 
9689 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9690 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9691 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9692 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9693 			dm_new_con_state->update_hdcp = true;
9694 			continue;
9695 		}
9696 
9697 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9698 			hdcp_update_display(
9699 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9700 				new_con_state->hdcp_content_type,
9701 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9702 	}
9703 #endif
9704 
9705 	/* Handle connector state changes */
9706 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9707 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9708 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9709 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9710 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9711 		struct dc_stream_update stream_update;
9712 		struct dc_info_packet hdr_packet;
9713 		struct dc_stream_status *status = NULL;
9714 		bool abm_changed, hdr_changed, scaling_changed;
9715 
9716 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9717 		memset(&stream_update, 0, sizeof(stream_update));
9718 
9719 		if (acrtc) {
9720 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9721 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9722 		}
9723 
9724 		/* Skip any modesets/resets */
9725 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9726 			continue;
9727 
9728 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9729 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9730 
9731 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9732 							     dm_old_con_state);
9733 
9734 		abm_changed = dm_new_crtc_state->abm_level !=
9735 			      dm_old_crtc_state->abm_level;
9736 
9737 		hdr_changed =
9738 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9739 
9740 		if (!scaling_changed && !abm_changed && !hdr_changed)
9741 			continue;
9742 
9743 		stream_update.stream = dm_new_crtc_state->stream;
9744 		if (scaling_changed) {
9745 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9746 					dm_new_con_state, dm_new_crtc_state->stream);
9747 
9748 			stream_update.src = dm_new_crtc_state->stream->src;
9749 			stream_update.dst = dm_new_crtc_state->stream->dst;
9750 		}
9751 
9752 		if (abm_changed) {
9753 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9754 
9755 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9756 		}
9757 
9758 		if (hdr_changed) {
9759 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9760 			stream_update.hdr_static_metadata = &hdr_packet;
9761 		}
9762 
9763 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9764 
9765 		if (WARN_ON(!status))
9766 			continue;
9767 
9768 		WARN_ON(!status->plane_count);
9769 
9770 		/*
9771 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9772 		 * Here we create an empty update on each plane.
9773 		 * To fix this, DC should permit updating only stream properties.
9774 		 */
9775 		for (j = 0; j < status->plane_count; j++)
9776 			dummy_updates[j].surface = status->plane_states[0];
9777 
9778 
9779 		mutex_lock(&dm->dc_lock);
9780 		dc_commit_updates_for_stream(dm->dc,
9781 						     dummy_updates,
9782 						     status->plane_count,
9783 						     dm_new_crtc_state->stream,
9784 						     &stream_update,
9785 						     dc_state);
9786 		mutex_unlock(&dm->dc_lock);
9787 	}
9788 
9789 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9790 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9791 				      new_crtc_state, i) {
9792 		if (old_crtc_state->active && !new_crtc_state->active)
9793 			crtc_disable_count++;
9794 
9795 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9796 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9797 
9798 		/* For freesync config update on crtc state and params for irq */
9799 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9800 
9801 		/* Handle vrr on->off / off->on transitions */
9802 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9803 						dm_new_crtc_state);
9804 	}
9805 
9806 	/**
9807 	 * Enable interrupts for CRTCs that are newly enabled or went through
9808 	 * a modeset. It was intentionally deferred until after the front end
9809 	 * state was modified to wait until the OTG was on and so the IRQ
9810 	 * handlers didn't access stale or invalid state.
9811 	 */
9812 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9813 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9814 #ifdef CONFIG_DEBUG_FS
9815 		bool configure_crc = false;
9816 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9817 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9818 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9819 #endif
9820 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9821 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9822 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9823 #endif
9824 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9825 
9826 		if (new_crtc_state->active &&
9827 		    (!old_crtc_state->active ||
9828 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9829 			dc_stream_retain(dm_new_crtc_state->stream);
9830 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9831 			manage_dm_interrupts(adev, acrtc, true);
9832 
9833 #ifdef CONFIG_DEBUG_FS
9834 			/**
9835 			 * Frontend may have changed so reapply the CRC capture
9836 			 * settings for the stream.
9837 			 */
9838 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9839 
9840 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9841 				configure_crc = true;
9842 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9843 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9844 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9845 					acrtc->dm_irq_params.crc_window.update_win = true;
9846 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9847 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9848 					crc_rd_wrk->crtc = crtc;
9849 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9850 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9851 				}
9852 #endif
9853 			}
9854 
9855 			if (configure_crc)
9856 				if (amdgpu_dm_crtc_configure_crc_source(
9857 					crtc, dm_new_crtc_state, cur_crc_src))
9858 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9859 #endif
9860 		}
9861 	}
9862 
9863 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9864 		if (new_crtc_state->async_flip)
9865 			wait_for_vblank = false;
9866 
9867 	/* update planes when needed per crtc*/
9868 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9869 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9870 
9871 		if (dm_new_crtc_state->stream)
9872 			amdgpu_dm_commit_planes(state, dc_state, dev,
9873 						dm, crtc, wait_for_vblank);
9874 	}
9875 
9876 	/* Update audio instances for each connector. */
9877 	amdgpu_dm_commit_audio(dev, state);
9878 
9879 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9880 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9881 	/* restore the backlight level */
9882 	for (i = 0; i < dm->num_of_edps; i++) {
9883 		if (dm->backlight_dev[i] &&
9884 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9885 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9886 	}
9887 #endif
9888 	/*
9889 	 * send vblank event on all events not handled in flip and
9890 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9891 	 */
9892 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9893 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9894 
9895 		if (new_crtc_state->event)
9896 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9897 
9898 		new_crtc_state->event = NULL;
9899 	}
9900 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9901 
9902 	/* Signal HW programming completion */
9903 	drm_atomic_helper_commit_hw_done(state);
9904 
9905 	if (wait_for_vblank)
9906 		drm_atomic_helper_wait_for_flip_done(dev, state);
9907 
9908 	drm_atomic_helper_cleanup_planes(dev, state);
9909 
9910 	/* return the stolen vga memory back to VRAM */
9911 	if (!adev->mman.keep_stolen_vga_memory)
9912 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9913 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9914 
9915 	/*
9916 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9917 	 * so we can put the GPU into runtime suspend if we're not driving any
9918 	 * displays anymore
9919 	 */
9920 	for (i = 0; i < crtc_disable_count; i++)
9921 		pm_runtime_put_autosuspend(dev->dev);
9922 	pm_runtime_mark_last_busy(dev->dev);
9923 
9924 	if (dc_state_temp)
9925 		dc_release_state(dc_state_temp);
9926 }
9927 
9928 
9929 static int dm_force_atomic_commit(struct drm_connector *connector)
9930 {
9931 	int ret = 0;
9932 	struct drm_device *ddev = connector->dev;
9933 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9934 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9935 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9936 	struct drm_connector_state *conn_state;
9937 	struct drm_crtc_state *crtc_state;
9938 	struct drm_plane_state *plane_state;
9939 
9940 	if (!state)
9941 		return -ENOMEM;
9942 
9943 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9944 
9945 	/* Construct an atomic state to restore previous display setting */
9946 
9947 	/*
9948 	 * Attach connectors to drm_atomic_state
9949 	 */
9950 	conn_state = drm_atomic_get_connector_state(state, connector);
9951 
9952 	ret = PTR_ERR_OR_ZERO(conn_state);
9953 	if (ret)
9954 		goto out;
9955 
9956 	/* Attach crtc to drm_atomic_state*/
9957 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9958 
9959 	ret = PTR_ERR_OR_ZERO(crtc_state);
9960 	if (ret)
9961 		goto out;
9962 
9963 	/* force a restore */
9964 	crtc_state->mode_changed = true;
9965 
9966 	/* Attach plane to drm_atomic_state */
9967 	plane_state = drm_atomic_get_plane_state(state, plane);
9968 
9969 	ret = PTR_ERR_OR_ZERO(plane_state);
9970 	if (ret)
9971 		goto out;
9972 
9973 	/* Call commit internally with the state we just constructed */
9974 	ret = drm_atomic_commit(state);
9975 
9976 out:
9977 	drm_atomic_state_put(state);
9978 	if (ret)
9979 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9980 
9981 	return ret;
9982 }
9983 
9984 /*
9985  * This function handles all cases when set mode does not come upon hotplug.
9986  * This includes when a display is unplugged then plugged back into the
9987  * same port and when running without usermode desktop manager supprot
9988  */
9989 void dm_restore_drm_connector_state(struct drm_device *dev,
9990 				    struct drm_connector *connector)
9991 {
9992 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9993 	struct amdgpu_crtc *disconnected_acrtc;
9994 	struct dm_crtc_state *acrtc_state;
9995 
9996 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9997 		return;
9998 
9999 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10000 	if (!disconnected_acrtc)
10001 		return;
10002 
10003 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10004 	if (!acrtc_state->stream)
10005 		return;
10006 
10007 	/*
10008 	 * If the previous sink is not released and different from the current,
10009 	 * we deduce we are in a state where we can not rely on usermode call
10010 	 * to turn on the display, so we do it here
10011 	 */
10012 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10013 		dm_force_atomic_commit(&aconnector->base);
10014 }
10015 
10016 /*
10017  * Grabs all modesetting locks to serialize against any blocking commits,
10018  * Waits for completion of all non blocking commits.
10019  */
10020 static int do_aquire_global_lock(struct drm_device *dev,
10021 				 struct drm_atomic_state *state)
10022 {
10023 	struct drm_crtc *crtc;
10024 	struct drm_crtc_commit *commit;
10025 	long ret;
10026 
10027 	/*
10028 	 * Adding all modeset locks to aquire_ctx will
10029 	 * ensure that when the framework release it the
10030 	 * extra locks we are locking here will get released to
10031 	 */
10032 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10033 	if (ret)
10034 		return ret;
10035 
10036 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10037 		spin_lock(&crtc->commit_lock);
10038 		commit = list_first_entry_or_null(&crtc->commit_list,
10039 				struct drm_crtc_commit, commit_entry);
10040 		if (commit)
10041 			drm_crtc_commit_get(commit);
10042 		spin_unlock(&crtc->commit_lock);
10043 
10044 		if (!commit)
10045 			continue;
10046 
10047 		/*
10048 		 * Make sure all pending HW programming completed and
10049 		 * page flips done
10050 		 */
10051 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10052 
10053 		if (ret > 0)
10054 			ret = wait_for_completion_interruptible_timeout(
10055 					&commit->flip_done, 10*HZ);
10056 
10057 		if (ret == 0)
10058 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10059 				  "timed out\n", crtc->base.id, crtc->name);
10060 
10061 		drm_crtc_commit_put(commit);
10062 	}
10063 
10064 	return ret < 0 ? ret : 0;
10065 }
10066 
10067 static void get_freesync_config_for_crtc(
10068 	struct dm_crtc_state *new_crtc_state,
10069 	struct dm_connector_state *new_con_state)
10070 {
10071 	struct mod_freesync_config config = {0};
10072 	struct amdgpu_dm_connector *aconnector =
10073 			to_amdgpu_dm_connector(new_con_state->base.connector);
10074 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10075 	int vrefresh = drm_mode_vrefresh(mode);
10076 	bool fs_vid_mode = false;
10077 
10078 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10079 					vrefresh >= aconnector->min_vfreq &&
10080 					vrefresh <= aconnector->max_vfreq;
10081 
10082 	if (new_crtc_state->vrr_supported) {
10083 		new_crtc_state->stream->ignore_msa_timing_param = true;
10084 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10085 
10086 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10087 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10088 		config.vsif_supported = true;
10089 		config.btr = true;
10090 
10091 		if (fs_vid_mode) {
10092 			config.state = VRR_STATE_ACTIVE_FIXED;
10093 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10094 			goto out;
10095 		} else if (new_crtc_state->base.vrr_enabled) {
10096 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10097 		} else {
10098 			config.state = VRR_STATE_INACTIVE;
10099 		}
10100 	}
10101 out:
10102 	new_crtc_state->freesync_config = config;
10103 }
10104 
10105 static void reset_freesync_config_for_crtc(
10106 	struct dm_crtc_state *new_crtc_state)
10107 {
10108 	new_crtc_state->vrr_supported = false;
10109 
10110 	memset(&new_crtc_state->vrr_infopacket, 0,
10111 	       sizeof(new_crtc_state->vrr_infopacket));
10112 }
10113 
10114 static bool
10115 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10116 				 struct drm_crtc_state *new_crtc_state)
10117 {
10118 	struct drm_display_mode old_mode, new_mode;
10119 
10120 	if (!old_crtc_state || !new_crtc_state)
10121 		return false;
10122 
10123 	old_mode = old_crtc_state->mode;
10124 	new_mode = new_crtc_state->mode;
10125 
10126 	if (old_mode.clock       == new_mode.clock &&
10127 	    old_mode.hdisplay    == new_mode.hdisplay &&
10128 	    old_mode.vdisplay    == new_mode.vdisplay &&
10129 	    old_mode.htotal      == new_mode.htotal &&
10130 	    old_mode.vtotal      != new_mode.vtotal &&
10131 	    old_mode.hsync_start == new_mode.hsync_start &&
10132 	    old_mode.vsync_start != new_mode.vsync_start &&
10133 	    old_mode.hsync_end   == new_mode.hsync_end &&
10134 	    old_mode.vsync_end   != new_mode.vsync_end &&
10135 	    old_mode.hskew       == new_mode.hskew &&
10136 	    old_mode.vscan       == new_mode.vscan &&
10137 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10138 	    (new_mode.vsync_end - new_mode.vsync_start))
10139 		return true;
10140 
10141 	return false;
10142 }
10143 
10144 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10145 	uint64_t num, den, res;
10146 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10147 
10148 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10149 
10150 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10151 	den = (unsigned long long)new_crtc_state->mode.htotal *
10152 	      (unsigned long long)new_crtc_state->mode.vtotal;
10153 
10154 	res = div_u64(num, den);
10155 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10156 }
10157 
10158 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10159 				struct drm_atomic_state *state,
10160 				struct drm_crtc *crtc,
10161 				struct drm_crtc_state *old_crtc_state,
10162 				struct drm_crtc_state *new_crtc_state,
10163 				bool enable,
10164 				bool *lock_and_validation_needed)
10165 {
10166 	struct dm_atomic_state *dm_state = NULL;
10167 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10168 	struct dc_stream_state *new_stream;
10169 	int ret = 0;
10170 
10171 	/*
10172 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10173 	 * update changed items
10174 	 */
10175 	struct amdgpu_crtc *acrtc = NULL;
10176 	struct amdgpu_dm_connector *aconnector = NULL;
10177 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10178 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10179 
10180 	new_stream = NULL;
10181 
10182 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10183 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10184 	acrtc = to_amdgpu_crtc(crtc);
10185 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10186 
10187 	/* TODO This hack should go away */
10188 	if (aconnector && enable) {
10189 		/* Make sure fake sink is created in plug-in scenario */
10190 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10191 							    &aconnector->base);
10192 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10193 							    &aconnector->base);
10194 
10195 		if (IS_ERR(drm_new_conn_state)) {
10196 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10197 			goto fail;
10198 		}
10199 
10200 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10201 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10202 
10203 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10204 			goto skip_modeset;
10205 
10206 		new_stream = create_validate_stream_for_sink(aconnector,
10207 							     &new_crtc_state->mode,
10208 							     dm_new_conn_state,
10209 							     dm_old_crtc_state->stream);
10210 
10211 		/*
10212 		 * we can have no stream on ACTION_SET if a display
10213 		 * was disconnected during S3, in this case it is not an
10214 		 * error, the OS will be updated after detection, and
10215 		 * will do the right thing on next atomic commit
10216 		 */
10217 
10218 		if (!new_stream) {
10219 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10220 					__func__, acrtc->base.base.id);
10221 			ret = -ENOMEM;
10222 			goto fail;
10223 		}
10224 
10225 		/*
10226 		 * TODO: Check VSDB bits to decide whether this should
10227 		 * be enabled or not.
10228 		 */
10229 		new_stream->triggered_crtc_reset.enabled =
10230 			dm->force_timing_sync;
10231 
10232 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10233 
10234 		ret = fill_hdr_info_packet(drm_new_conn_state,
10235 					   &new_stream->hdr_static_metadata);
10236 		if (ret)
10237 			goto fail;
10238 
10239 		/*
10240 		 * If we already removed the old stream from the context
10241 		 * (and set the new stream to NULL) then we can't reuse
10242 		 * the old stream even if the stream and scaling are unchanged.
10243 		 * We'll hit the BUG_ON and black screen.
10244 		 *
10245 		 * TODO: Refactor this function to allow this check to work
10246 		 * in all conditions.
10247 		 */
10248 		if (amdgpu_freesync_vid_mode &&
10249 		    dm_new_crtc_state->stream &&
10250 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10251 			goto skip_modeset;
10252 
10253 		if (dm_new_crtc_state->stream &&
10254 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10255 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10256 			new_crtc_state->mode_changed = false;
10257 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10258 					 new_crtc_state->mode_changed);
10259 		}
10260 	}
10261 
10262 	/* mode_changed flag may get updated above, need to check again */
10263 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10264 		goto skip_modeset;
10265 
10266 	DRM_DEBUG_ATOMIC(
10267 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10268 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10269 		"connectors_changed:%d\n",
10270 		acrtc->crtc_id,
10271 		new_crtc_state->enable,
10272 		new_crtc_state->active,
10273 		new_crtc_state->planes_changed,
10274 		new_crtc_state->mode_changed,
10275 		new_crtc_state->active_changed,
10276 		new_crtc_state->connectors_changed);
10277 
10278 	/* Remove stream for any changed/disabled CRTC */
10279 	if (!enable) {
10280 
10281 		if (!dm_old_crtc_state->stream)
10282 			goto skip_modeset;
10283 
10284 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10285 		    is_timing_unchanged_for_freesync(new_crtc_state,
10286 						     old_crtc_state)) {
10287 			new_crtc_state->mode_changed = false;
10288 			DRM_DEBUG_DRIVER(
10289 				"Mode change not required for front porch change, "
10290 				"setting mode_changed to %d",
10291 				new_crtc_state->mode_changed);
10292 
10293 			set_freesync_fixed_config(dm_new_crtc_state);
10294 
10295 			goto skip_modeset;
10296 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10297 			   is_freesync_video_mode(&new_crtc_state->mode,
10298 						  aconnector)) {
10299 			struct drm_display_mode *high_mode;
10300 
10301 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10302 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10303 				set_freesync_fixed_config(dm_new_crtc_state);
10304 			}
10305 		}
10306 
10307 		ret = dm_atomic_get_state(state, &dm_state);
10308 		if (ret)
10309 			goto fail;
10310 
10311 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10312 				crtc->base.id);
10313 
10314 		/* i.e. reset mode */
10315 		if (dc_remove_stream_from_ctx(
10316 				dm->dc,
10317 				dm_state->context,
10318 				dm_old_crtc_state->stream) != DC_OK) {
10319 			ret = -EINVAL;
10320 			goto fail;
10321 		}
10322 
10323 		dc_stream_release(dm_old_crtc_state->stream);
10324 		dm_new_crtc_state->stream = NULL;
10325 
10326 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10327 
10328 		*lock_and_validation_needed = true;
10329 
10330 	} else {/* Add stream for any updated/enabled CRTC */
10331 		/*
10332 		 * Quick fix to prevent NULL pointer on new_stream when
10333 		 * added MST connectors not found in existing crtc_state in the chained mode
10334 		 * TODO: need to dig out the root cause of that
10335 		 */
10336 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10337 			goto skip_modeset;
10338 
10339 		if (modereset_required(new_crtc_state))
10340 			goto skip_modeset;
10341 
10342 		if (modeset_required(new_crtc_state, new_stream,
10343 				     dm_old_crtc_state->stream)) {
10344 
10345 			WARN_ON(dm_new_crtc_state->stream);
10346 
10347 			ret = dm_atomic_get_state(state, &dm_state);
10348 			if (ret)
10349 				goto fail;
10350 
10351 			dm_new_crtc_state->stream = new_stream;
10352 
10353 			dc_stream_retain(new_stream);
10354 
10355 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10356 					 crtc->base.id);
10357 
10358 			if (dc_add_stream_to_ctx(
10359 					dm->dc,
10360 					dm_state->context,
10361 					dm_new_crtc_state->stream) != DC_OK) {
10362 				ret = -EINVAL;
10363 				goto fail;
10364 			}
10365 
10366 			*lock_and_validation_needed = true;
10367 		}
10368 	}
10369 
10370 skip_modeset:
10371 	/* Release extra reference */
10372 	if (new_stream)
10373 		 dc_stream_release(new_stream);
10374 
10375 	/*
10376 	 * We want to do dc stream updates that do not require a
10377 	 * full modeset below.
10378 	 */
10379 	if (!(enable && aconnector && new_crtc_state->active))
10380 		return 0;
10381 	/*
10382 	 * Given above conditions, the dc state cannot be NULL because:
10383 	 * 1. We're in the process of enabling CRTCs (just been added
10384 	 *    to the dc context, or already is on the context)
10385 	 * 2. Has a valid connector attached, and
10386 	 * 3. Is currently active and enabled.
10387 	 * => The dc stream state currently exists.
10388 	 */
10389 	BUG_ON(dm_new_crtc_state->stream == NULL);
10390 
10391 	/* Scaling or underscan settings */
10392 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10393 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10394 		update_stream_scaling_settings(
10395 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10396 
10397 	/* ABM settings */
10398 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10399 
10400 	/*
10401 	 * Color management settings. We also update color properties
10402 	 * when a modeset is needed, to ensure it gets reprogrammed.
10403 	 */
10404 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10405 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10406 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10407 		if (ret)
10408 			goto fail;
10409 	}
10410 
10411 	/* Update Freesync settings. */
10412 	get_freesync_config_for_crtc(dm_new_crtc_state,
10413 				     dm_new_conn_state);
10414 
10415 	return ret;
10416 
10417 fail:
10418 	if (new_stream)
10419 		dc_stream_release(new_stream);
10420 	return ret;
10421 }
10422 
10423 static bool should_reset_plane(struct drm_atomic_state *state,
10424 			       struct drm_plane *plane,
10425 			       struct drm_plane_state *old_plane_state,
10426 			       struct drm_plane_state *new_plane_state)
10427 {
10428 	struct drm_plane *other;
10429 	struct drm_plane_state *old_other_state, *new_other_state;
10430 	struct drm_crtc_state *new_crtc_state;
10431 	int i;
10432 
10433 	/*
10434 	 * TODO: Remove this hack once the checks below are sufficient
10435 	 * enough to determine when we need to reset all the planes on
10436 	 * the stream.
10437 	 */
10438 	if (state->allow_modeset)
10439 		return true;
10440 
10441 	/* Exit early if we know that we're adding or removing the plane. */
10442 	if (old_plane_state->crtc != new_plane_state->crtc)
10443 		return true;
10444 
10445 	/* old crtc == new_crtc == NULL, plane not in context. */
10446 	if (!new_plane_state->crtc)
10447 		return false;
10448 
10449 	new_crtc_state =
10450 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10451 
10452 	if (!new_crtc_state)
10453 		return true;
10454 
10455 	/* CRTC Degamma changes currently require us to recreate planes. */
10456 	if (new_crtc_state->color_mgmt_changed)
10457 		return true;
10458 
10459 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10460 		return true;
10461 
10462 	/*
10463 	 * If there are any new primary or overlay planes being added or
10464 	 * removed then the z-order can potentially change. To ensure
10465 	 * correct z-order and pipe acquisition the current DC architecture
10466 	 * requires us to remove and recreate all existing planes.
10467 	 *
10468 	 * TODO: Come up with a more elegant solution for this.
10469 	 */
10470 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10471 		struct amdgpu_framebuffer *old_afb, *new_afb;
10472 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10473 			continue;
10474 
10475 		if (old_other_state->crtc != new_plane_state->crtc &&
10476 		    new_other_state->crtc != new_plane_state->crtc)
10477 			continue;
10478 
10479 		if (old_other_state->crtc != new_other_state->crtc)
10480 			return true;
10481 
10482 		/* Src/dst size and scaling updates. */
10483 		if (old_other_state->src_w != new_other_state->src_w ||
10484 		    old_other_state->src_h != new_other_state->src_h ||
10485 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10486 		    old_other_state->crtc_h != new_other_state->crtc_h)
10487 			return true;
10488 
10489 		/* Rotation / mirroring updates. */
10490 		if (old_other_state->rotation != new_other_state->rotation)
10491 			return true;
10492 
10493 		/* Blending updates. */
10494 		if (old_other_state->pixel_blend_mode !=
10495 		    new_other_state->pixel_blend_mode)
10496 			return true;
10497 
10498 		/* Alpha updates. */
10499 		if (old_other_state->alpha != new_other_state->alpha)
10500 			return true;
10501 
10502 		/* Colorspace changes. */
10503 		if (old_other_state->color_range != new_other_state->color_range ||
10504 		    old_other_state->color_encoding != new_other_state->color_encoding)
10505 			return true;
10506 
10507 		/* Framebuffer checks fall at the end. */
10508 		if (!old_other_state->fb || !new_other_state->fb)
10509 			continue;
10510 
10511 		/* Pixel format changes can require bandwidth updates. */
10512 		if (old_other_state->fb->format != new_other_state->fb->format)
10513 			return true;
10514 
10515 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10516 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10517 
10518 		/* Tiling and DCC changes also require bandwidth updates. */
10519 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10520 		    old_afb->base.modifier != new_afb->base.modifier)
10521 			return true;
10522 	}
10523 
10524 	return false;
10525 }
10526 
10527 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10528 			      struct drm_plane_state *new_plane_state,
10529 			      struct drm_framebuffer *fb)
10530 {
10531 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10532 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10533 	unsigned int pitch;
10534 	bool linear;
10535 
10536 	if (fb->width > new_acrtc->max_cursor_width ||
10537 	    fb->height > new_acrtc->max_cursor_height) {
10538 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10539 				 new_plane_state->fb->width,
10540 				 new_plane_state->fb->height);
10541 		return -EINVAL;
10542 	}
10543 	if (new_plane_state->src_w != fb->width << 16 ||
10544 	    new_plane_state->src_h != fb->height << 16) {
10545 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10546 		return -EINVAL;
10547 	}
10548 
10549 	/* Pitch in pixels */
10550 	pitch = fb->pitches[0] / fb->format->cpp[0];
10551 
10552 	if (fb->width != pitch) {
10553 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10554 				 fb->width, pitch);
10555 		return -EINVAL;
10556 	}
10557 
10558 	switch (pitch) {
10559 	case 64:
10560 	case 128:
10561 	case 256:
10562 		/* FB pitch is supported by cursor plane */
10563 		break;
10564 	default:
10565 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10566 		return -EINVAL;
10567 	}
10568 
10569 	/* Core DRM takes care of checking FB modifiers, so we only need to
10570 	 * check tiling flags when the FB doesn't have a modifier. */
10571 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10572 		if (adev->family < AMDGPU_FAMILY_AI) {
10573 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10574 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10575 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10576 		} else {
10577 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10578 		}
10579 		if (!linear) {
10580 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10581 			return -EINVAL;
10582 		}
10583 	}
10584 
10585 	return 0;
10586 }
10587 
10588 static int dm_update_plane_state(struct dc *dc,
10589 				 struct drm_atomic_state *state,
10590 				 struct drm_plane *plane,
10591 				 struct drm_plane_state *old_plane_state,
10592 				 struct drm_plane_state *new_plane_state,
10593 				 bool enable,
10594 				 bool *lock_and_validation_needed)
10595 {
10596 
10597 	struct dm_atomic_state *dm_state = NULL;
10598 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10599 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10600 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10601 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10602 	struct amdgpu_crtc *new_acrtc;
10603 	bool needs_reset;
10604 	int ret = 0;
10605 
10606 
10607 	new_plane_crtc = new_plane_state->crtc;
10608 	old_plane_crtc = old_plane_state->crtc;
10609 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10610 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10611 
10612 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10613 		if (!enable || !new_plane_crtc ||
10614 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10615 			return 0;
10616 
10617 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10618 
10619 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10620 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10621 			return -EINVAL;
10622 		}
10623 
10624 		if (new_plane_state->fb) {
10625 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10626 						 new_plane_state->fb);
10627 			if (ret)
10628 				return ret;
10629 		}
10630 
10631 		return 0;
10632 	}
10633 
10634 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10635 					 new_plane_state);
10636 
10637 	/* Remove any changed/removed planes */
10638 	if (!enable) {
10639 		if (!needs_reset)
10640 			return 0;
10641 
10642 		if (!old_plane_crtc)
10643 			return 0;
10644 
10645 		old_crtc_state = drm_atomic_get_old_crtc_state(
10646 				state, old_plane_crtc);
10647 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10648 
10649 		if (!dm_old_crtc_state->stream)
10650 			return 0;
10651 
10652 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10653 				plane->base.id, old_plane_crtc->base.id);
10654 
10655 		ret = dm_atomic_get_state(state, &dm_state);
10656 		if (ret)
10657 			return ret;
10658 
10659 		if (!dc_remove_plane_from_context(
10660 				dc,
10661 				dm_old_crtc_state->stream,
10662 				dm_old_plane_state->dc_state,
10663 				dm_state->context)) {
10664 
10665 			return -EINVAL;
10666 		}
10667 
10668 
10669 		dc_plane_state_release(dm_old_plane_state->dc_state);
10670 		dm_new_plane_state->dc_state = NULL;
10671 
10672 		*lock_and_validation_needed = true;
10673 
10674 	} else { /* Add new planes */
10675 		struct dc_plane_state *dc_new_plane_state;
10676 
10677 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10678 			return 0;
10679 
10680 		if (!new_plane_crtc)
10681 			return 0;
10682 
10683 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10684 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10685 
10686 		if (!dm_new_crtc_state->stream)
10687 			return 0;
10688 
10689 		if (!needs_reset)
10690 			return 0;
10691 
10692 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10693 		if (ret)
10694 			return ret;
10695 
10696 		WARN_ON(dm_new_plane_state->dc_state);
10697 
10698 		dc_new_plane_state = dc_create_plane_state(dc);
10699 		if (!dc_new_plane_state)
10700 			return -ENOMEM;
10701 
10702 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10703 				 plane->base.id, new_plane_crtc->base.id);
10704 
10705 		ret = fill_dc_plane_attributes(
10706 			drm_to_adev(new_plane_crtc->dev),
10707 			dc_new_plane_state,
10708 			new_plane_state,
10709 			new_crtc_state);
10710 		if (ret) {
10711 			dc_plane_state_release(dc_new_plane_state);
10712 			return ret;
10713 		}
10714 
10715 		ret = dm_atomic_get_state(state, &dm_state);
10716 		if (ret) {
10717 			dc_plane_state_release(dc_new_plane_state);
10718 			return ret;
10719 		}
10720 
10721 		/*
10722 		 * Any atomic check errors that occur after this will
10723 		 * not need a release. The plane state will be attached
10724 		 * to the stream, and therefore part of the atomic
10725 		 * state. It'll be released when the atomic state is
10726 		 * cleaned.
10727 		 */
10728 		if (!dc_add_plane_to_context(
10729 				dc,
10730 				dm_new_crtc_state->stream,
10731 				dc_new_plane_state,
10732 				dm_state->context)) {
10733 
10734 			dc_plane_state_release(dc_new_plane_state);
10735 			return -EINVAL;
10736 		}
10737 
10738 		dm_new_plane_state->dc_state = dc_new_plane_state;
10739 
10740 		/* Tell DC to do a full surface update every time there
10741 		 * is a plane change. Inefficient, but works for now.
10742 		 */
10743 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10744 
10745 		*lock_and_validation_needed = true;
10746 	}
10747 
10748 
10749 	return ret;
10750 }
10751 
10752 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10753 				       int *src_w, int *src_h)
10754 {
10755 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10756 	case DRM_MODE_ROTATE_90:
10757 	case DRM_MODE_ROTATE_270:
10758 		*src_w = plane_state->src_h >> 16;
10759 		*src_h = plane_state->src_w >> 16;
10760 		break;
10761 	case DRM_MODE_ROTATE_0:
10762 	case DRM_MODE_ROTATE_180:
10763 	default:
10764 		*src_w = plane_state->src_w >> 16;
10765 		*src_h = plane_state->src_h >> 16;
10766 		break;
10767 	}
10768 }
10769 
10770 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10771 				struct drm_crtc *crtc,
10772 				struct drm_crtc_state *new_crtc_state)
10773 {
10774 	struct drm_plane *cursor = crtc->cursor, *underlying;
10775 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10776 	int i;
10777 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10778 	int cursor_src_w, cursor_src_h;
10779 	int underlying_src_w, underlying_src_h;
10780 
10781 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10782 	 * cursor per pipe but it's going to inherit the scaling and
10783 	 * positioning from the underlying pipe. Check the cursor plane's
10784 	 * blending properties match the underlying planes'. */
10785 
10786 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10787 	if (!new_cursor_state || !new_cursor_state->fb) {
10788 		return 0;
10789 	}
10790 
10791 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10792 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10793 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10794 
10795 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10796 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10797 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10798 			continue;
10799 
10800 		/* Ignore disabled planes */
10801 		if (!new_underlying_state->fb)
10802 			continue;
10803 
10804 		dm_get_oriented_plane_size(new_underlying_state,
10805 					   &underlying_src_w, &underlying_src_h);
10806 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10807 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10808 
10809 		if (cursor_scale_w != underlying_scale_w ||
10810 		    cursor_scale_h != underlying_scale_h) {
10811 			drm_dbg_atomic(crtc->dev,
10812 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10813 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10814 			return -EINVAL;
10815 		}
10816 
10817 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10818 		if (new_underlying_state->crtc_x <= 0 &&
10819 		    new_underlying_state->crtc_y <= 0 &&
10820 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10821 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10822 			break;
10823 	}
10824 
10825 	return 0;
10826 }
10827 
10828 #if defined(CONFIG_DRM_AMD_DC_DCN)
10829 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10830 {
10831 	struct drm_connector *connector;
10832 	struct drm_connector_state *conn_state;
10833 	struct amdgpu_dm_connector *aconnector = NULL;
10834 	int i;
10835 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10836 		if (conn_state->crtc != crtc)
10837 			continue;
10838 
10839 		aconnector = to_amdgpu_dm_connector(connector);
10840 		if (!aconnector->port || !aconnector->mst_port)
10841 			aconnector = NULL;
10842 		else
10843 			break;
10844 	}
10845 
10846 	if (!aconnector)
10847 		return 0;
10848 
10849 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10850 }
10851 #endif
10852 
10853 /**
10854  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10855  * @dev: The DRM device
10856  * @state: The atomic state to commit
10857  *
10858  * Validate that the given atomic state is programmable by DC into hardware.
10859  * This involves constructing a &struct dc_state reflecting the new hardware
10860  * state we wish to commit, then querying DC to see if it is programmable. It's
10861  * important not to modify the existing DC state. Otherwise, atomic_check
10862  * may unexpectedly commit hardware changes.
10863  *
10864  * When validating the DC state, it's important that the right locks are
10865  * acquired. For full updates case which removes/adds/updates streams on one
10866  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10867  * that any such full update commit will wait for completion of any outstanding
10868  * flip using DRMs synchronization events.
10869  *
10870  * Note that DM adds the affected connectors for all CRTCs in state, when that
10871  * might not seem necessary. This is because DC stream creation requires the
10872  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10873  * be possible but non-trivial - a possible TODO item.
10874  *
10875  * Return: -Error code if validation failed.
10876  */
10877 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10878 				  struct drm_atomic_state *state)
10879 {
10880 	struct amdgpu_device *adev = drm_to_adev(dev);
10881 	struct dm_atomic_state *dm_state = NULL;
10882 	struct dc *dc = adev->dm.dc;
10883 	struct drm_connector *connector;
10884 	struct drm_connector_state *old_con_state, *new_con_state;
10885 	struct drm_crtc *crtc;
10886 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10887 	struct drm_plane *plane;
10888 	struct drm_plane_state *old_plane_state, *new_plane_state;
10889 	enum dc_status status;
10890 	int ret, i;
10891 	bool lock_and_validation_needed = false;
10892 	struct dm_crtc_state *dm_old_crtc_state;
10893 #if defined(CONFIG_DRM_AMD_DC_DCN)
10894 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10895 	struct drm_dp_mst_topology_state *mst_state;
10896 	struct drm_dp_mst_topology_mgr *mgr;
10897 #endif
10898 
10899 	trace_amdgpu_dm_atomic_check_begin(state);
10900 
10901 	ret = drm_atomic_helper_check_modeset(dev, state);
10902 	if (ret) {
10903 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10904 		goto fail;
10905 	}
10906 
10907 	/* Check connector changes */
10908 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10909 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10910 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10911 
10912 		/* Skip connectors that are disabled or part of modeset already. */
10913 		if (!old_con_state->crtc && !new_con_state->crtc)
10914 			continue;
10915 
10916 		if (!new_con_state->crtc)
10917 			continue;
10918 
10919 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10920 		if (IS_ERR(new_crtc_state)) {
10921 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10922 			ret = PTR_ERR(new_crtc_state);
10923 			goto fail;
10924 		}
10925 
10926 		if (dm_old_con_state->abm_level !=
10927 		    dm_new_con_state->abm_level)
10928 			new_crtc_state->connectors_changed = true;
10929 	}
10930 
10931 #if defined(CONFIG_DRM_AMD_DC_DCN)
10932 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10933 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10934 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10935 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10936 				if (ret) {
10937 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10938 					goto fail;
10939 				}
10940 			}
10941 		}
10942 	}
10943 #endif
10944 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10945 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10946 
10947 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10948 		    !new_crtc_state->color_mgmt_changed &&
10949 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10950 			dm_old_crtc_state->dsc_force_changed == false)
10951 			continue;
10952 
10953 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10954 		if (ret) {
10955 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10956 			goto fail;
10957 		}
10958 
10959 		if (!new_crtc_state->enable)
10960 			continue;
10961 
10962 		ret = drm_atomic_add_affected_connectors(state, crtc);
10963 		if (ret) {
10964 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10965 			goto fail;
10966 		}
10967 
10968 		ret = drm_atomic_add_affected_planes(state, crtc);
10969 		if (ret) {
10970 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10971 			goto fail;
10972 		}
10973 
10974 		if (dm_old_crtc_state->dsc_force_changed)
10975 			new_crtc_state->mode_changed = true;
10976 	}
10977 
10978 	/*
10979 	 * Add all primary and overlay planes on the CRTC to the state
10980 	 * whenever a plane is enabled to maintain correct z-ordering
10981 	 * and to enable fast surface updates.
10982 	 */
10983 	drm_for_each_crtc(crtc, dev) {
10984 		bool modified = false;
10985 
10986 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10987 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10988 				continue;
10989 
10990 			if (new_plane_state->crtc == crtc ||
10991 			    old_plane_state->crtc == crtc) {
10992 				modified = true;
10993 				break;
10994 			}
10995 		}
10996 
10997 		if (!modified)
10998 			continue;
10999 
11000 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11001 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11002 				continue;
11003 
11004 			new_plane_state =
11005 				drm_atomic_get_plane_state(state, plane);
11006 
11007 			if (IS_ERR(new_plane_state)) {
11008 				ret = PTR_ERR(new_plane_state);
11009 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11010 				goto fail;
11011 			}
11012 		}
11013 	}
11014 
11015 	/* Remove exiting planes if they are modified */
11016 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11017 		ret = dm_update_plane_state(dc, state, plane,
11018 					    old_plane_state,
11019 					    new_plane_state,
11020 					    false,
11021 					    &lock_and_validation_needed);
11022 		if (ret) {
11023 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11024 			goto fail;
11025 		}
11026 	}
11027 
11028 	/* Disable all crtcs which require disable */
11029 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11030 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11031 					   old_crtc_state,
11032 					   new_crtc_state,
11033 					   false,
11034 					   &lock_and_validation_needed);
11035 		if (ret) {
11036 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11037 			goto fail;
11038 		}
11039 	}
11040 
11041 	/* Enable all crtcs which require enable */
11042 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11043 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11044 					   old_crtc_state,
11045 					   new_crtc_state,
11046 					   true,
11047 					   &lock_and_validation_needed);
11048 		if (ret) {
11049 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11050 			goto fail;
11051 		}
11052 	}
11053 
11054 	/* Add new/modified planes */
11055 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11056 		ret = dm_update_plane_state(dc, state, plane,
11057 					    old_plane_state,
11058 					    new_plane_state,
11059 					    true,
11060 					    &lock_and_validation_needed);
11061 		if (ret) {
11062 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11063 			goto fail;
11064 		}
11065 	}
11066 
11067 	/* Run this here since we want to validate the streams we created */
11068 	ret = drm_atomic_helper_check_planes(dev, state);
11069 	if (ret) {
11070 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11071 		goto fail;
11072 	}
11073 
11074 	/* Check cursor planes scaling */
11075 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11076 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11077 		if (ret) {
11078 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11079 			goto fail;
11080 		}
11081 	}
11082 
11083 	if (state->legacy_cursor_update) {
11084 		/*
11085 		 * This is a fast cursor update coming from the plane update
11086 		 * helper, check if it can be done asynchronously for better
11087 		 * performance.
11088 		 */
11089 		state->async_update =
11090 			!drm_atomic_helper_async_check(dev, state);
11091 
11092 		/*
11093 		 * Skip the remaining global validation if this is an async
11094 		 * update. Cursor updates can be done without affecting
11095 		 * state or bandwidth calcs and this avoids the performance
11096 		 * penalty of locking the private state object and
11097 		 * allocating a new dc_state.
11098 		 */
11099 		if (state->async_update)
11100 			return 0;
11101 	}
11102 
11103 	/* Check scaling and underscan changes*/
11104 	/* TODO Removed scaling changes validation due to inability to commit
11105 	 * new stream into context w\o causing full reset. Need to
11106 	 * decide how to handle.
11107 	 */
11108 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11109 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11110 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11111 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11112 
11113 		/* Skip any modesets/resets */
11114 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11115 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11116 			continue;
11117 
11118 		/* Skip any thing not scale or underscan changes */
11119 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11120 			continue;
11121 
11122 		lock_and_validation_needed = true;
11123 	}
11124 
11125 #if defined(CONFIG_DRM_AMD_DC_DCN)
11126 	/* set the slot info for each mst_state based on the link encoding format */
11127 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11128 		struct amdgpu_dm_connector *aconnector;
11129 		struct drm_connector *connector;
11130 		struct drm_connector_list_iter iter;
11131 		u8 link_coding_cap;
11132 
11133 		if (!mgr->mst_state )
11134 			continue;
11135 
11136 		drm_connector_list_iter_begin(dev, &iter);
11137 		drm_for_each_connector_iter(connector, &iter) {
11138 			int id = connector->index;
11139 
11140 			if (id == mst_state->mgr->conn_base_id) {
11141 				aconnector = to_amdgpu_dm_connector(connector);
11142 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11143 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11144 
11145 				break;
11146 			}
11147 		}
11148 		drm_connector_list_iter_end(&iter);
11149 
11150 	}
11151 #endif
11152 	/**
11153 	 * Streams and planes are reset when there are changes that affect
11154 	 * bandwidth. Anything that affects bandwidth needs to go through
11155 	 * DC global validation to ensure that the configuration can be applied
11156 	 * to hardware.
11157 	 *
11158 	 * We have to currently stall out here in atomic_check for outstanding
11159 	 * commits to finish in this case because our IRQ handlers reference
11160 	 * DRM state directly - we can end up disabling interrupts too early
11161 	 * if we don't.
11162 	 *
11163 	 * TODO: Remove this stall and drop DM state private objects.
11164 	 */
11165 	if (lock_and_validation_needed) {
11166 		ret = dm_atomic_get_state(state, &dm_state);
11167 		if (ret) {
11168 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11169 			goto fail;
11170 		}
11171 
11172 		ret = do_aquire_global_lock(dev, state);
11173 		if (ret) {
11174 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11175 			goto fail;
11176 		}
11177 
11178 #if defined(CONFIG_DRM_AMD_DC_DCN)
11179 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11180 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11181 			goto fail;
11182 		}
11183 
11184 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11185 		if (ret) {
11186 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11187 			goto fail;
11188 		}
11189 #endif
11190 
11191 		/*
11192 		 * Perform validation of MST topology in the state:
11193 		 * We need to perform MST atomic check before calling
11194 		 * dc_validate_global_state(), or there is a chance
11195 		 * to get stuck in an infinite loop and hang eventually.
11196 		 */
11197 		ret = drm_dp_mst_atomic_check(state);
11198 		if (ret) {
11199 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11200 			goto fail;
11201 		}
11202 		status = dc_validate_global_state(dc, dm_state->context, true);
11203 		if (status != DC_OK) {
11204 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11205 				       dc_status_to_str(status), status);
11206 			ret = -EINVAL;
11207 			goto fail;
11208 		}
11209 	} else {
11210 		/*
11211 		 * The commit is a fast update. Fast updates shouldn't change
11212 		 * the DC context, affect global validation, and can have their
11213 		 * commit work done in parallel with other commits not touching
11214 		 * the same resource. If we have a new DC context as part of
11215 		 * the DM atomic state from validation we need to free it and
11216 		 * retain the existing one instead.
11217 		 *
11218 		 * Furthermore, since the DM atomic state only contains the DC
11219 		 * context and can safely be annulled, we can free the state
11220 		 * and clear the associated private object now to free
11221 		 * some memory and avoid a possible use-after-free later.
11222 		 */
11223 
11224 		for (i = 0; i < state->num_private_objs; i++) {
11225 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11226 
11227 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11228 				int j = state->num_private_objs-1;
11229 
11230 				dm_atomic_destroy_state(obj,
11231 						state->private_objs[i].state);
11232 
11233 				/* If i is not at the end of the array then the
11234 				 * last element needs to be moved to where i was
11235 				 * before the array can safely be truncated.
11236 				 */
11237 				if (i != j)
11238 					state->private_objs[i] =
11239 						state->private_objs[j];
11240 
11241 				state->private_objs[j].ptr = NULL;
11242 				state->private_objs[j].state = NULL;
11243 				state->private_objs[j].old_state = NULL;
11244 				state->private_objs[j].new_state = NULL;
11245 
11246 				state->num_private_objs = j;
11247 				break;
11248 			}
11249 		}
11250 	}
11251 
11252 	/* Store the overall update type for use later in atomic check. */
11253 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11254 		struct dm_crtc_state *dm_new_crtc_state =
11255 			to_dm_crtc_state(new_crtc_state);
11256 
11257 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11258 							 UPDATE_TYPE_FULL :
11259 							 UPDATE_TYPE_FAST;
11260 	}
11261 
11262 	/* Must be success */
11263 	WARN_ON(ret);
11264 
11265 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11266 
11267 	return ret;
11268 
11269 fail:
11270 	if (ret == -EDEADLK)
11271 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11272 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11273 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11274 	else
11275 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11276 
11277 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11278 
11279 	return ret;
11280 }
11281 
11282 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11283 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11284 {
11285 	uint8_t dpcd_data;
11286 	bool capable = false;
11287 
11288 	if (amdgpu_dm_connector->dc_link &&
11289 		dm_helpers_dp_read_dpcd(
11290 				NULL,
11291 				amdgpu_dm_connector->dc_link,
11292 				DP_DOWN_STREAM_PORT_COUNT,
11293 				&dpcd_data,
11294 				sizeof(dpcd_data))) {
11295 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11296 	}
11297 
11298 	return capable;
11299 }
11300 
11301 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11302 		unsigned int offset,
11303 		unsigned int total_length,
11304 		uint8_t *data,
11305 		unsigned int length,
11306 		struct amdgpu_hdmi_vsdb_info *vsdb)
11307 {
11308 	bool res;
11309 	union dmub_rb_cmd cmd;
11310 	struct dmub_cmd_send_edid_cea *input;
11311 	struct dmub_cmd_edid_cea_output *output;
11312 
11313 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11314 		return false;
11315 
11316 	memset(&cmd, 0, sizeof(cmd));
11317 
11318 	input = &cmd.edid_cea.data.input;
11319 
11320 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11321 	cmd.edid_cea.header.sub_type = 0;
11322 	cmd.edid_cea.header.payload_bytes =
11323 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11324 	input->offset = offset;
11325 	input->length = length;
11326 	input->cea_total_length = total_length;
11327 	memcpy(input->payload, data, length);
11328 
11329 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11330 	if (!res) {
11331 		DRM_ERROR("EDID CEA parser failed\n");
11332 		return false;
11333 	}
11334 
11335 	output = &cmd.edid_cea.data.output;
11336 
11337 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11338 		if (!output->ack.success) {
11339 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11340 					output->ack.offset);
11341 		}
11342 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11343 		if (!output->amd_vsdb.vsdb_found)
11344 			return false;
11345 
11346 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11347 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11348 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11349 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11350 	} else {
11351 		DRM_WARN("Unknown EDID CEA parser results\n");
11352 		return false;
11353 	}
11354 
11355 	return true;
11356 }
11357 
11358 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11359 		uint8_t *edid_ext, int len,
11360 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11361 {
11362 	int i;
11363 
11364 	/* send extension block to DMCU for parsing */
11365 	for (i = 0; i < len; i += 8) {
11366 		bool res;
11367 		int offset;
11368 
11369 		/* send 8 bytes a time */
11370 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11371 			return false;
11372 
11373 		if (i+8 == len) {
11374 			/* EDID block sent completed, expect result */
11375 			int version, min_rate, max_rate;
11376 
11377 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11378 			if (res) {
11379 				/* amd vsdb found */
11380 				vsdb_info->freesync_supported = 1;
11381 				vsdb_info->amd_vsdb_version = version;
11382 				vsdb_info->min_refresh_rate_hz = min_rate;
11383 				vsdb_info->max_refresh_rate_hz = max_rate;
11384 				return true;
11385 			}
11386 			/* not amd vsdb */
11387 			return false;
11388 		}
11389 
11390 		/* check for ack*/
11391 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11392 		if (!res)
11393 			return false;
11394 	}
11395 
11396 	return false;
11397 }
11398 
11399 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11400 		uint8_t *edid_ext, int len,
11401 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11402 {
11403 	int i;
11404 
11405 	/* send extension block to DMCU for parsing */
11406 	for (i = 0; i < len; i += 8) {
11407 		/* send 8 bytes a time */
11408 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11409 			return false;
11410 	}
11411 
11412 	return vsdb_info->freesync_supported;
11413 }
11414 
11415 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11416 		uint8_t *edid_ext, int len,
11417 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11418 {
11419 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11420 
11421 	if (adev->dm.dmub_srv)
11422 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11423 	else
11424 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11425 }
11426 
11427 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11428 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11429 {
11430 	uint8_t *edid_ext = NULL;
11431 	int i;
11432 	bool valid_vsdb_found = false;
11433 
11434 	/*----- drm_find_cea_extension() -----*/
11435 	/* No EDID or EDID extensions */
11436 	if (edid == NULL || edid->extensions == 0)
11437 		return -ENODEV;
11438 
11439 	/* Find CEA extension */
11440 	for (i = 0; i < edid->extensions; i++) {
11441 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11442 		if (edid_ext[0] == CEA_EXT)
11443 			break;
11444 	}
11445 
11446 	if (i == edid->extensions)
11447 		return -ENODEV;
11448 
11449 	/*----- cea_db_offsets() -----*/
11450 	if (edid_ext[0] != CEA_EXT)
11451 		return -ENODEV;
11452 
11453 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11454 
11455 	return valid_vsdb_found ? i : -ENODEV;
11456 }
11457 
11458 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11459 					struct edid *edid)
11460 {
11461 	int i = 0;
11462 	struct detailed_timing *timing;
11463 	struct detailed_non_pixel *data;
11464 	struct detailed_data_monitor_range *range;
11465 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11466 			to_amdgpu_dm_connector(connector);
11467 	struct dm_connector_state *dm_con_state = NULL;
11468 	struct dc_sink *sink;
11469 
11470 	struct drm_device *dev = connector->dev;
11471 	struct amdgpu_device *adev = drm_to_adev(dev);
11472 	bool freesync_capable = false;
11473 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11474 
11475 	if (!connector->state) {
11476 		DRM_ERROR("%s - Connector has no state", __func__);
11477 		goto update;
11478 	}
11479 
11480 	sink = amdgpu_dm_connector->dc_sink ?
11481 		amdgpu_dm_connector->dc_sink :
11482 		amdgpu_dm_connector->dc_em_sink;
11483 
11484 	if (!edid || !sink) {
11485 		dm_con_state = to_dm_connector_state(connector->state);
11486 
11487 		amdgpu_dm_connector->min_vfreq = 0;
11488 		amdgpu_dm_connector->max_vfreq = 0;
11489 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11490 		connector->display_info.monitor_range.min_vfreq = 0;
11491 		connector->display_info.monitor_range.max_vfreq = 0;
11492 		freesync_capable = false;
11493 
11494 		goto update;
11495 	}
11496 
11497 	dm_con_state = to_dm_connector_state(connector->state);
11498 
11499 	if (!adev->dm.freesync_module)
11500 		goto update;
11501 
11502 
11503 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11504 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11505 		bool edid_check_required = false;
11506 
11507 		if (edid) {
11508 			edid_check_required = is_dp_capable_without_timing_msa(
11509 						adev->dm.dc,
11510 						amdgpu_dm_connector);
11511 		}
11512 
11513 		if (edid_check_required == true && (edid->version > 1 ||
11514 		   (edid->version == 1 && edid->revision > 1))) {
11515 			for (i = 0; i < 4; i++) {
11516 
11517 				timing	= &edid->detailed_timings[i];
11518 				data	= &timing->data.other_data;
11519 				range	= &data->data.range;
11520 				/*
11521 				 * Check if monitor has continuous frequency mode
11522 				 */
11523 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11524 					continue;
11525 				/*
11526 				 * Check for flag range limits only. If flag == 1 then
11527 				 * no additional timing information provided.
11528 				 * Default GTF, GTF Secondary curve and CVT are not
11529 				 * supported
11530 				 */
11531 				if (range->flags != 1)
11532 					continue;
11533 
11534 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11535 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11536 				amdgpu_dm_connector->pixel_clock_mhz =
11537 					range->pixel_clock_mhz * 10;
11538 
11539 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11540 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11541 
11542 				break;
11543 			}
11544 
11545 			if (amdgpu_dm_connector->max_vfreq -
11546 			    amdgpu_dm_connector->min_vfreq > 10) {
11547 
11548 				freesync_capable = true;
11549 			}
11550 		}
11551 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11552 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11553 		if (i >= 0 && vsdb_info.freesync_supported) {
11554 			timing  = &edid->detailed_timings[i];
11555 			data    = &timing->data.other_data;
11556 
11557 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11558 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11559 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11560 				freesync_capable = true;
11561 
11562 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11563 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11564 		}
11565 	}
11566 
11567 update:
11568 	if (dm_con_state)
11569 		dm_con_state->freesync_capable = freesync_capable;
11570 
11571 	if (connector->vrr_capable_property)
11572 		drm_connector_set_vrr_capable_property(connector,
11573 						       freesync_capable);
11574 }
11575 
11576 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11577 {
11578 	struct amdgpu_device *adev = drm_to_adev(dev);
11579 	struct dc *dc = adev->dm.dc;
11580 	int i;
11581 
11582 	mutex_lock(&adev->dm.dc_lock);
11583 	if (dc->current_state) {
11584 		for (i = 0; i < dc->current_state->stream_count; ++i)
11585 			dc->current_state->streams[i]
11586 				->triggered_crtc_reset.enabled =
11587 				adev->dm.force_timing_sync;
11588 
11589 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11590 		dc_trigger_sync(dc, dc->current_state);
11591 	}
11592 	mutex_unlock(&adev->dm.dc_lock);
11593 }
11594 
11595 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11596 		       uint32_t value, const char *func_name)
11597 {
11598 #ifdef DM_CHECK_ADDR_0
11599 	if (address == 0) {
11600 		DC_ERR("invalid register write. address = 0");
11601 		return;
11602 	}
11603 #endif
11604 	cgs_write_register(ctx->cgs_device, address, value);
11605 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11606 }
11607 
11608 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11609 			  const char *func_name)
11610 {
11611 	uint32_t value;
11612 #ifdef DM_CHECK_ADDR_0
11613 	if (address == 0) {
11614 		DC_ERR("invalid register read; address = 0\n");
11615 		return 0;
11616 	}
11617 #endif
11618 
11619 	if (ctx->dmub_srv &&
11620 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11621 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11622 		ASSERT(false);
11623 		return 0;
11624 	}
11625 
11626 	value = cgs_read_register(ctx->cgs_device, address);
11627 
11628 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11629 
11630 	return value;
11631 }
11632 
11633 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11634 						struct dc_context *ctx,
11635 						uint8_t status_type,
11636 						uint32_t *operation_result)
11637 {
11638 	struct amdgpu_device *adev = ctx->driver_context;
11639 	int return_status = -1;
11640 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11641 
11642 	if (is_cmd_aux) {
11643 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11644 			return_status = p_notify->aux_reply.length;
11645 			*operation_result = p_notify->result;
11646 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11647 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11648 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11649 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11650 		} else {
11651 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11652 		}
11653 	} else {
11654 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11655 			return_status = 0;
11656 			*operation_result = p_notify->sc_status;
11657 		} else {
11658 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11659 		}
11660 	}
11661 
11662 	return return_status;
11663 }
11664 
11665 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11666 	unsigned int link_index, void *cmd_payload, void *operation_result)
11667 {
11668 	struct amdgpu_device *adev = ctx->driver_context;
11669 	int ret = 0;
11670 
11671 	if (is_cmd_aux) {
11672 		dc_process_dmub_aux_transfer_async(ctx->dc,
11673 			link_index, (struct aux_payload *)cmd_payload);
11674 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11675 					(struct set_config_cmd_payload *)cmd_payload,
11676 					adev->dm.dmub_notify)) {
11677 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11678 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11679 					(uint32_t *)operation_result);
11680 	}
11681 
11682 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11683 	if (ret == 0) {
11684 		DRM_ERROR("wait_for_completion_timeout timeout!");
11685 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11686 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11687 				(uint32_t *)operation_result);
11688 	}
11689 
11690 	if (is_cmd_aux) {
11691 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11692 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11693 
11694 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11695 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11696 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11697 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11698 				       adev->dm.dmub_notify->aux_reply.length);
11699 			}
11700 		}
11701 	}
11702 
11703 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11704 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11705 			(uint32_t *)operation_result);
11706 }
11707 
11708 /*
11709  * Check whether seamless boot is supported.
11710  *
11711  * So far we only support seamless boot on CHIP_VANGOGH.
11712  * If everything goes well, we may consider expanding
11713  * seamless boot to other ASICs.
11714  */
11715 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11716 {
11717 	switch (adev->asic_type) {
11718 	case CHIP_VANGOGH:
11719 		if (!adev->mman.keep_stolen_vga_memory)
11720 			return true;
11721 		break;
11722 	default:
11723 		break;
11724 	}
11725 
11726 	return false;
11727 }
11728