1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86 #include <drm/drm_gem_atomic_helper.h>
87 
88 #if defined(CONFIG_DRM_AMD_DC_DCN)
89 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
90 
91 #include "dcn/dcn_1_0_offset.h"
92 #include "dcn/dcn_1_0_sh_mask.h"
93 #include "soc15_hw_ip.h"
94 #include "vega10_ip_offset.h"
95 
96 #include "soc15_common.h"
97 #endif
98 
99 #include "modules/inc/mod_freesync.h"
100 #include "modules/power/power_helpers.h"
101 #include "modules/inc/mod_info_packet.h"
102 
103 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
105 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
107 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
109 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
111 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
113 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
115 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
117 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
119 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
121 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
123 
124 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
125 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
126 
127 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
128 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
129 
130 /* Number of bytes in PSP header for firmware. */
131 #define PSP_HEADER_BYTES 0x100
132 
133 /* Number of bytes in PSP footer for firmware. */
134 #define PSP_FOOTER_BYTES 0x100
135 
136 /**
137  * DOC: overview
138  *
139  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
140  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
141  * requests into DC requests, and DC responses into DRM responses.
142  *
143  * The root control structure is &struct amdgpu_display_manager.
144  */
145 
146 /* basic init/fini API */
147 static int amdgpu_dm_init(struct amdgpu_device *adev);
148 static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector);
150 
151 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
152 {
153 	switch (link->dpcd_caps.dongle_type) {
154 	case DISPLAY_DONGLE_NONE:
155 		return DRM_MODE_SUBCONNECTOR_Native;
156 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
157 		return DRM_MODE_SUBCONNECTOR_VGA;
158 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
159 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
160 		return DRM_MODE_SUBCONNECTOR_DVID;
161 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
162 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
163 		return DRM_MODE_SUBCONNECTOR_HDMIA;
164 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
165 	default:
166 		return DRM_MODE_SUBCONNECTOR_Unknown;
167 	}
168 }
169 
170 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
171 {
172 	struct dc_link *link = aconnector->dc_link;
173 	struct drm_connector *connector = &aconnector->base;
174 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
175 
176 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
177 		return;
178 
179 	if (aconnector->dc_sink)
180 		subconnector = get_subconnector_type(link);
181 
182 	drm_object_property_set_value(&connector->base,
183 			connector->dev->mode_config.dp_subconnector_property,
184 			subconnector);
185 }
186 
187 /*
188  * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
190  * drm_encoder, drm_mode_config
191  *
192  * Returns 0 on success
193  */
194 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
195 /* removes and deallocates the drm structures, created by the above function */
196 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
197 
198 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
199 				struct drm_plane *plane,
200 				unsigned long possible_crtcs,
201 				const struct dc_plane_cap *plane_cap);
202 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
203 			       struct drm_plane *plane,
204 			       uint32_t link_index);
205 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
206 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
207 				    uint32_t link_index,
208 				    struct amdgpu_encoder *amdgpu_encoder);
209 static int amdgpu_dm_encoder_init(struct drm_device *dev,
210 				  struct amdgpu_encoder *aencoder,
211 				  uint32_t link_index);
212 
213 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
214 
215 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
216 
217 static int amdgpu_dm_atomic_check(struct drm_device *dev,
218 				  struct drm_atomic_state *state);
219 
220 static void handle_cursor_update(struct drm_plane *plane,
221 				 struct drm_plane_state *old_plane_state);
222 
223 static const struct drm_format_info *
224 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
225 
226 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
227 static void handle_hpd_rx_irq(void *param);
228 
229 static bool
230 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
231 				 struct drm_crtc_state *new_crtc_state);
232 /*
233  * dm_vblank_get_counter
234  *
235  * @brief
236  * Get counter for number of vertical blanks
237  *
238  * @param
239  * struct amdgpu_device *adev - [in] desired amdgpu device
240  * int disp_idx - [in] which CRTC to get the counter from
241  *
242  * @return
243  * Counter for vertical blanks
244  */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
261 
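/*
 * dm_crtc_get_scanoutpos() - Return the current scanout position for a CRTC.
 *
 * Reports the vblank start/end and the current h/v scanout position of the
 * stream, packed into register-style 16-bit high/low pairs as expected by
 * the base driver.
 */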
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
294 
295 static bool dm_is_idle(void *handle)
296 {
297 	/* XXX todo */
298 	return true;
299 }
300 
301 static int dm_wait_for_idle(void *handle)
302 {
303 	/* XXX todo */
304 	return 0;
305 }
306 
307 static bool dm_check_soft_reset(void *handle)
308 {
309 	return false;
310 }
311 
312 static int dm_soft_reset(void *handle)
313 {
314 	/* XXX todo */
315 	return 0;
316 }
317 
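/*
 * Look up the amdgpu_crtc whose OTG (output timing generator) instance
 * matches otg_inst. Falls back to the first CRTC if otg_inst is -1.
 */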
318 static struct amdgpu_crtc *
319 get_crtc_by_otg_inst(struct amdgpu_device *adev,
320 		     int otg_inst)
321 {
322 	struct drm_device *dev = adev_to_drm(adev);
323 	struct drm_crtc *crtc;
324 	struct amdgpu_crtc *amdgpu_crtc;
325 
326 	if (WARN_ON(otg_inst == -1))
327 		return adev->mode_info.crtcs[0];
328 
329 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
330 		amdgpu_crtc = to_amdgpu_crtc(crtc);
331 
332 		if (amdgpu_crtc->otg_inst == otg_inst)
333 			return amdgpu_crtc;
334 	}
335 
336 	return NULL;
337 }
338 
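/* Check whether VRR (FreeSync) is active using the interrupt-safe copy of the freesync config. */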
339 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
340 {
341 	return acrtc->dm_irq_params.freesync_config.state ==
342 		       VRR_STATE_ACTIVE_VARIABLE ||
343 	       acrtc->dm_irq_params.freesync_config.state ==
344 		       VRR_STATE_ACTIVE_FIXED;
345 }
346 
347 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
348 {
349 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
350 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
351 }
352 
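/*
 * A DC timing adjustment is needed when entering fixed-rate VRR or whenever
 * the VRR active state changes between the old and new CRTC states.
 */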
353 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
354 					      struct dm_crtc_state *new_state)
355 {
356 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
357 		return true;
358 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
359 		return true;
360 	else
361 		return false;
362 }
363 
364 /**
365  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
367  *
368  * Handles the pageflip interrupt by notifying all interested parties
369  * that the pageflip has been completed.
370  */
371 static void dm_pflip_high_irq(void *interrupt_params)
372 {
373 	struct amdgpu_crtc *amdgpu_crtc;
374 	struct common_irq_params *irq_params = interrupt_params;
375 	struct amdgpu_device *adev = irq_params->adev;
376 	unsigned long flags;
377 	struct drm_pending_vblank_event *e;
378 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
379 	bool vrr_active;
380 
381 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
382 
383 	/* IRQ could occur when in initial stage */
384 	/* TODO work and BO cleanup */
385 	if (amdgpu_crtc == NULL) {
386 		DC_LOG_PFLIP("CRTC is null, returning.\n");
387 		return;
388 	}
389 
390 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
391 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
394 						 amdgpu_crtc->pflip_status,
395 						 AMDGPU_FLIP_SUBMITTED,
396 						 amdgpu_crtc->crtc_id,
397 						 amdgpu_crtc);
398 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
399 		return;
400 	}
401 
402 	/* page flip completed. */
403 	e = amdgpu_crtc->event;
404 	amdgpu_crtc->event = NULL;
405 
406 	WARN_ON(!e);
407 
408 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
409 
410 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
411 	if (!vrr_active ||
412 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
413 				      &v_blank_end, &hpos, &vpos) ||
414 	    (vpos < v_blank_start)) {
415 		/* Update to correct count and vblank timestamp if racing with
416 		 * vblank irq. This also updates to the correct vblank timestamp
417 		 * even in VRR mode, as scanout is past the front-porch atm.
418 		 */
419 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
420 
421 		/* Wake up userspace by sending the pageflip event with proper
422 		 * count and timestamp of vblank of flip completion.
423 		 */
424 		if (e) {
425 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
426 
427 			/* Event sent, so done with vblank for this flip */
428 			drm_crtc_vblank_put(&amdgpu_crtc->base);
429 		}
430 	} else if (e) {
431 		/* VRR active and inside front-porch: vblank count and
432 		 * timestamp for pageflip event will only be up to date after
433 		 * drm_crtc_handle_vblank() has been executed from late vblank
434 		 * irq handler after start of back-porch (vline 0). We queue the
435 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
436 		 * updated timestamp and count, once it runs after us.
437 		 *
438 		 * We need to open-code this instead of using the helper
439 		 * drm_crtc_arm_vblank_event(), as that helper would
440 		 * call drm_crtc_accurate_vblank_count(), which we must
441 		 * not call in VRR mode while we are in front-porch!
442 		 */
443 
444 		/* sequence will be replaced by real count during send-out. */
445 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
446 		e->pipe = amdgpu_crtc->crtc_id;
447 
448 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
449 		e = NULL;
450 	}
451 
452 	/* Keep track of vblank of this flip for flip throttling. We use the
453 	 * cooked hw counter, as that one incremented at start of this vblank
454 	 * of pageflip completion, so last_flip_vblank is the forbidden count
455 	 * for queueing new pageflips if vsync + VRR is enabled.
456 	 */
457 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
458 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
459 
460 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
461 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
462 
463 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
464 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
465 		     vrr_active, (int) !e);
466 }
467 
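/**
 * dm_vupdate_high_irq() - Handles VUpdate interrupt
 * @interrupt_params: used for determining the VUpdate instance
 *
 * Handles the VUpdate interrupt, which fires after the end of the front
 * porch. Tracks the measured refresh rate and, in VRR mode, performs core
 * vblank handling plus BTR processing for pre-DCE12 ASICs.
 */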
468 static void dm_vupdate_high_irq(void *interrupt_params)
469 {
470 	struct common_irq_params *irq_params = interrupt_params;
471 	struct amdgpu_device *adev = irq_params->adev;
472 	struct amdgpu_crtc *acrtc;
473 	struct drm_device *drm_dev;
474 	struct drm_vblank_crtc *vblank;
475 	ktime_t frame_duration_ns, previous_timestamp;
476 	unsigned long flags;
477 	int vrr_active;
478 
479 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
480 
481 	if (acrtc) {
482 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
483 		drm_dev = acrtc->base.dev;
484 		vblank = &drm_dev->vblank[acrtc->base.index];
485 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
486 		frame_duration_ns = vblank->time - previous_timestamp;
487 
488 		if (frame_duration_ns > 0) {
489 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
490 						frame_duration_ns,
491 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
492 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
493 		}
494 
495 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
496 			      acrtc->crtc_id,
497 			      vrr_active);
498 
499 		/* Core vblank handling is done here after end of front-porch in
500 		 * vrr mode, as vblank timestamping will give valid results
501 		 * while now done after front-porch. This will also deliver
502 		 * page-flip completion events that have been queued to us
503 		 * if a pageflip happened inside front-porch.
504 		 */
505 		if (vrr_active) {
506 			drm_crtc_handle_vblank(&acrtc->base);
507 
508 			/* BTR processing for pre-DCE12 ASICs */
509 			if (acrtc->dm_irq_params.stream &&
510 			    adev->family < AMDGPU_FAMILY_AI) {
511 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
512 				mod_freesync_handle_v_update(
513 				    adev->dm.freesync_module,
514 				    acrtc->dm_irq_params.stream,
515 				    &acrtc->dm_irq_params.vrr_params);
516 
517 				dc_stream_adjust_vmin_vmax(
518 				    adev->dm.dc,
519 				    acrtc->dm_irq_params.stream,
520 				    &acrtc->dm_irq_params.vrr_params.adjust);
521 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
522 			}
523 		}
524 	}
525 }
526 
527 /**
528  * dm_crtc_high_irq() - Handles CRTC interrupt
529  * @interrupt_params: used for determining the CRTC instance
530  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
532  * event handler.
533  */
534 static void dm_crtc_high_irq(void *interrupt_params)
535 {
536 	struct common_irq_params *irq_params = interrupt_params;
537 	struct amdgpu_device *adev = irq_params->adev;
538 	struct amdgpu_crtc *acrtc;
539 	unsigned long flags;
540 	int vrr_active;
541 
542 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
543 	if (!acrtc)
544 		return;
545 
546 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
547 
548 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
549 		      vrr_active, acrtc->dm_irq_params.active_planes);
550 
551 	/**
552 	 * Core vblank handling at start of front-porch is only possible
553 	 * in non-vrr mode, as only there vblank timestamping will give
554 	 * valid results while done in front-porch. Otherwise defer it
555 	 * to dm_vupdate_high_irq after end of front-porch.
556 	 */
557 	if (!vrr_active)
558 		drm_crtc_handle_vblank(&acrtc->base);
559 
560 	/**
561 	 * Following stuff must happen at start of vblank, for crc
562 	 * computation and below-the-range btr support in vrr mode.
563 	 */
564 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
565 
566 	/* BTR updates need to happen before VUPDATE on Vega and above. */
567 	if (adev->family < AMDGPU_FAMILY_AI)
568 		return;
569 
570 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
571 
572 	if (acrtc->dm_irq_params.stream &&
573 	    acrtc->dm_irq_params.vrr_params.supported &&
574 	    acrtc->dm_irq_params.freesync_config.state ==
575 		    VRR_STATE_ACTIVE_VARIABLE) {
576 		mod_freesync_handle_v_update(adev->dm.freesync_module,
577 					     acrtc->dm_irq_params.stream,
578 					     &acrtc->dm_irq_params.vrr_params);
579 
580 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
581 					   &acrtc->dm_irq_params.vrr_params.adjust);
582 	}
583 
584 	/*
585 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
586 	 * In that case, pageflip completion interrupts won't fire and pageflip
587 	 * completion events won't get delivered. Prevent this by sending
588 	 * pending pageflip events from here if a flip is still pending.
589 	 *
590 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
591 	 * avoid race conditions between flip programming and completion,
592 	 * which could cause too early flip completion events.
593 	 */
594 	if (adev->family >= AMDGPU_FAMILY_RV &&
595 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
596 	    acrtc->dm_irq_params.active_planes == 0) {
597 		if (acrtc->event) {
598 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
599 			acrtc->event = NULL;
600 			drm_crtc_vblank_put(&acrtc->base);
601 		}
602 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
603 	}
604 
605 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
606 }
607 
608 #if defined(CONFIG_DRM_AMD_DC_DCN)
609 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
610 /**
611  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
612  * DCN generation ASICs
613  * @interrupt_params: interrupt parameters
614  *
615  * Used to set crc window/read out crc value at vertical line 0 position
616  */
617 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
618 {
619 	struct common_irq_params *irq_params = interrupt_params;
620 	struct amdgpu_device *adev = irq_params->adev;
621 	struct amdgpu_crtc *acrtc;
622 
623 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
624 
625 	if (!acrtc)
626 		return;
627 
628 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
629 }
630 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
631 
632 /**
633  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
634  * @adev: amdgpu_device pointer
635  * @notify: dmub notification structure
636  *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread, and also signals the event to wake up that thread.
640  */
641 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
642 					struct dmub_notification *notify)
643 {
644 	if (adev->dm.dmub_notify)
645 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
646 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
647 		complete(&adev->dm.dmub_aux_transfer_done);
648 }
649 
650 /**
651  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
652  * @adev: amdgpu_device pointer
653  * @notify: dmub notification structure
654  *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
657  */
658 static void dmub_hpd_callback(struct amdgpu_device *adev,
659 			      struct dmub_notification *notify)
660 {
661 	struct amdgpu_dm_connector *aconnector;
662 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
663 	struct drm_connector *connector;
664 	struct drm_connector_list_iter iter;
665 	struct dc_link *link;
666 	uint8_t link_index = 0;
667 	struct drm_device *dev;
668 
669 	if (adev == NULL)
670 		return;
671 
672 	if (notify == NULL) {
673 		DRM_ERROR("DMUB HPD callback notification was NULL");
674 		return;
675 	}
676 
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
679 		return;
680 	}
681 
682 	link_index = notify->link_index;
683 	link = adev->dm.dc->links[link_index];
684 	dev = adev->dm.ddev;
685 
686 	drm_connector_list_iter_begin(dev, &iter);
687 	drm_for_each_connector_iter(connector, &iter) {
688 		aconnector = to_amdgpu_dm_connector(connector);
689 		if (link && aconnector->dc_link == link) {
690 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
691 			hpd_aconnector = aconnector;
692 			break;
693 		}
694 	}
695 	drm_connector_list_iter_end(&iter);
696 
697 	if (hpd_aconnector) {
698 		if (notify->type == DMUB_NOTIFICATION_HPD)
699 			handle_hpd_irq_helper(hpd_aconnector);
700 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
701 			handle_hpd_rx_irq(hpd_aconnector);
702 	}
703 }
704 
705 /**
706  * register_dmub_notify_callback - Sets callback for DMUB notify
707  * @adev: amdgpu_device pointer
708  * @type: Type of dmub notification
709  * @callback: Dmub interrupt callback function
710  * @dmub_int_thread_offload: offload indicator
711  *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether callback processing is to be offloaded
 * to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if the callback is NULL or
 * the notification type is out of range
716  */
717 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
718 					  enum dmub_notification_type type,
719 					  dmub_notify_interrupt_callback_t callback,
720 					  bool dmub_int_thread_offload)
721 {
	if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
		return false;

	adev->dm.dmub_callback[type] = callback;
	adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

	return true;
729 }
730 
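/*
 * Deferred work handler for DMUB HPD notifications: dispatches the saved
 * notification to the registered DM callback outside of interrupt context.
 */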
731 static void dm_handle_hpd_work(struct work_struct *work)
732 {
733 	struct dmub_hpd_work *dmub_hpd_wrk;
734 
735 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
736 
737 	if (!dmub_hpd_wrk->dmub_notify) {
738 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
739 		return;
740 	}
741 
742 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
743 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
744 		dmub_hpd_wrk->dmub_notify);
745 	}
746 
747 	kfree(dmub_hpd_wrk->dmub_notify);
748 	kfree(dmub_hpd_wrk);
749 
750 }
751 
752 #define DMUB_TRACE_MAX_READ 64
753 /**
754  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
755  * @interrupt_params: used for determining the Outbox instance
756  *
 * Handles the Outbox interrupt by reading pending DMUB notifications and
 * DMCUB trace buffer entries.
759  */
760 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
761 {
762 	struct dmub_notification notify;
763 	struct common_irq_params *irq_params = interrupt_params;
764 	struct amdgpu_device *adev = irq_params->adev;
765 	struct amdgpu_display_manager *dm = &adev->dm;
766 	struct dmcub_trace_buf_entry entry = { 0 };
767 	uint32_t count = 0;
768 	struct dmub_hpd_work *dmub_hpd_wrk;
769 	struct dc_link *plink = NULL;
770 
771 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
772 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
773 
774 		do {
775 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
777 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
778 				continue;
779 			}
780 			if (!dm->dmub_callback[notify.type]) {
781 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
782 				continue;
783 			}
784 			if (dm->dmub_thread_offload[notify.type] == true) {
785 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
786 				if (!dmub_hpd_wrk) {
787 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
788 					return;
789 				}
790 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
791 				if (!dmub_hpd_wrk->dmub_notify) {
792 					kfree(dmub_hpd_wrk);
793 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
794 					return;
795 				}
796 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
799 				dmub_hpd_wrk->adev = adev;
800 				if (notify.type == DMUB_NOTIFICATION_HPD) {
801 					plink = adev->dm.dc->links[notify.link_index];
802 					if (plink) {
803 						plink->hpd_status =
804 							notify.hpd_status == DP_HPD_PLUG;
805 					}
806 				}
807 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
808 			} else {
809 				dm->dmub_callback[notify.type](adev, &notify);
810 			}
811 		} while (notify.pending_notification);
812 	}
813 
814 
815 	do {
816 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
817 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
818 							entry.param0, entry.param1);
819 
820 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
821 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
822 		} else
823 			break;
824 
825 		count++;
826 
827 	} while (count <= DMUB_TRACE_MAX_READ);
828 
829 	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
831 }
832 #endif /* CONFIG_DRM_AMD_DC_DCN */
833 
834 static int dm_set_clockgating_state(void *handle,
835 		  enum amd_clockgating_state state)
836 {
837 	return 0;
838 }
839 
840 static int dm_set_powergating_state(void *handle,
841 		  enum amd_powergating_state state)
842 {
843 	return 0;
844 }
845 
846 /* Prototypes of private functions */
static int dm_early_init(void *handle);
848 
849 /* Allocate memory for FBC compressed data  */
850 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
851 {
852 	struct drm_device *dev = connector->dev;
853 	struct amdgpu_device *adev = drm_to_adev(dev);
854 	struct dm_compressor_info *compressor = &adev->dm.compressor;
855 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
856 	struct drm_display_mode *mode;
857 	unsigned long max_size = 0;
858 
859 	if (adev->dm.dc->fbc_compressor == NULL)
860 		return;
861 
862 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
863 		return;
864 
865 	if (compressor->bo_ptr)
866 		return;
867 
868 
869 	list_for_each_entry(mode, &connector->modes, head) {
870 		if (max_size < mode->htotal * mode->vtotal)
871 			max_size = mode->htotal * mode->vtotal;
872 	}
873 
874 	if (max_size) {
875 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
876 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
877 			    &compressor->gpu_addr, &compressor->cpu_addr);
878 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
889 
890 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
891 					  int pipe, bool *enabled,
892 					  unsigned char *buf, int max_bytes)
893 {
894 	struct drm_device *dev = dev_get_drvdata(kdev);
895 	struct amdgpu_device *adev = drm_to_adev(dev);
896 	struct drm_connector *connector;
897 	struct drm_connector_list_iter conn_iter;
898 	struct amdgpu_dm_connector *aconnector;
899 	int ret = 0;
900 
901 	*enabled = false;
902 
903 	mutex_lock(&adev->dm.audio_lock);
904 
905 	drm_connector_list_iter_begin(dev, &conn_iter);
906 	drm_for_each_connector_iter(connector, &conn_iter) {
907 		aconnector = to_amdgpu_dm_connector(connector);
908 		if (aconnector->audio_inst != port)
909 			continue;
910 
911 		*enabled = true;
912 		ret = drm_eld_size(connector->eld);
913 		memcpy(buf, connector->eld, min(max_bytes, ret));
914 
915 		break;
916 	}
917 	drm_connector_list_iter_end(&conn_iter);
918 
919 	mutex_unlock(&adev->dm.audio_lock);
920 
921 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
922 
923 	return ret;
924 }
925 
926 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
927 	.get_eld = amdgpu_dm_audio_component_get_eld,
928 };
929 
930 static int amdgpu_dm_audio_component_bind(struct device *kdev,
931 				       struct device *hda_kdev, void *data)
932 {
933 	struct drm_device *dev = dev_get_drvdata(kdev);
934 	struct amdgpu_device *adev = drm_to_adev(dev);
935 	struct drm_audio_component *acomp = data;
936 
937 	acomp->ops = &amdgpu_dm_audio_component_ops;
938 	acomp->dev = kdev;
939 	adev->dm.audio_component = acomp;
940 
941 	return 0;
942 }
943 
944 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
945 					  struct device *hda_kdev, void *data)
946 {
947 	struct drm_device *dev = dev_get_drvdata(kdev);
948 	struct amdgpu_device *adev = drm_to_adev(dev);
949 	struct drm_audio_component *acomp = data;
950 
951 	acomp->ops = NULL;
952 	acomp->dev = NULL;
953 	adev->dm.audio_component = NULL;
954 }
955 
956 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
957 	.bind	= amdgpu_dm_audio_component_bind,
958 	.unbind	= amdgpu_dm_audio_component_unbind,
959 };
960 
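/*
 * Initialize the audio pin state from the DC resource pool and register the
 * DM audio component so that the ELD for each connector can be queried.
 */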
961 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
962 {
963 	int i, ret;
964 
965 	if (!amdgpu_audio)
966 		return 0;
967 
968 	adev->mode_info.audio.enabled = true;
969 
970 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
971 
972 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
973 		adev->mode_info.audio.pin[i].channels = -1;
974 		adev->mode_info.audio.pin[i].rate = -1;
975 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
976 		adev->mode_info.audio.pin[i].status_bits = 0;
977 		adev->mode_info.audio.pin[i].category_code = 0;
978 		adev->mode_info.audio.pin[i].connected = false;
979 		adev->mode_info.audio.pin[i].id =
980 			adev->dm.dc->res_pool->audios[i]->inst;
981 		adev->mode_info.audio.pin[i].offset = 0;
982 	}
983 
984 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
985 	if (ret < 0)
986 		return ret;
987 
988 	adev->dm.audio_registered = true;
989 
990 	return 0;
991 }
992 
993 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
994 {
995 	if (!amdgpu_audio)
996 		return;
997 
998 	if (!adev->mode_info.audio.enabled)
999 		return;
1000 
1001 	if (adev->dm.audio_registered) {
1002 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1003 		adev->dm.audio_registered = false;
1004 	}
1005 
1006 	/* TODO: Disable audio? */
1007 
1008 	adev->mode_info.audio.enabled = false;
1009 }
1010 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1012 {
1013 	struct drm_audio_component *acomp = adev->dm.audio_component;
1014 
1015 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1016 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1017 
1018 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1019 						 pin, -1);
1020 	}
1021 }
1022 
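/**
 * dm_dmub_hw_init() - Initialize DMUB hardware
 * @adev: amdgpu_device pointer
 *
 * Copies the DMUB firmware, BSS data and VBIOS into framebuffer memory,
 * initializes the DMCUB hardware, waits for the firmware auto-load to
 * finish and creates the DC-side DMUB service.
 *
 * Return: 0 on success (or when DMUB is not supported), negative errno on
 * failure.
 */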
1023 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1024 {
1025 	const struct dmcub_firmware_header_v1_0 *hdr;
1026 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1027 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1028 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1029 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1030 	struct abm *abm = adev->dm.dc->res_pool->abm;
1031 	struct dmub_srv_hw_params hw_params;
1032 	enum dmub_status status;
1033 	const unsigned char *fw_inst_const, *fw_bss_data;
1034 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1035 	bool has_hw_support;
1036 
1037 	if (!dmub_srv)
1038 		/* DMUB isn't supported on the ASIC. */
1039 		return 0;
1040 
1041 	if (!fb_info) {
1042 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1043 		return -EINVAL;
1044 	}
1045 
1046 	if (!dmub_fw) {
1047 		/* Firmware required for DMUB support. */
1048 		DRM_ERROR("No firmware provided for DMUB.\n");
1049 		return -EINVAL;
1050 	}
1051 
1052 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1053 	if (status != DMUB_STATUS_OK) {
1054 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1055 		return -EINVAL;
1056 	}
1057 
1058 	if (!has_hw_support) {
1059 		DRM_INFO("DMUB unsupported on ASIC\n");
1060 		return 0;
1061 	}
1062 
1063 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1064 	status = dmub_srv_hw_reset(dmub_srv);
1065 	if (status != DMUB_STATUS_OK)
1066 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1067 
1068 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1069 
1070 	fw_inst_const = dmub_fw->data +
1071 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1072 			PSP_HEADER_BYTES;
1073 
1074 	fw_bss_data = dmub_fw->data +
1075 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1076 		      le32_to_cpu(hdr->inst_const_bytes);
1077 
1078 	/* Copy firmware and bios info into FB memory. */
1079 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1080 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1081 
1082 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1083 
1084 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1085 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1086 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1087 	 * will be done by dm_dmub_hw_init
1088 	 */
1089 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1090 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1091 				fw_inst_const_size);
1092 	}
1093 
1094 	if (fw_bss_data_size)
1095 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1096 		       fw_bss_data, fw_bss_data_size);
1097 
1098 	/* Copy firmware bios info into FB memory. */
1099 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1100 	       adev->bios_size);
1101 
1102 	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1105 
1106 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1107 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1108 
1109 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1110 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1111 
1112 	/* Initialize hardware. */
1113 	memset(&hw_params, 0, sizeof(hw_params));
1114 	hw_params.fb_base = adev->gmc.fb_start;
1115 	hw_params.fb_offset = adev->gmc.aper_base;
1116 
1117 	/* backdoor load firmware and trigger dmub running */
1118 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1119 		hw_params.load_inst_const = true;
1120 
1121 	if (dmcu)
1122 		hw_params.psp_version = dmcu->psp_version;
1123 
1124 	for (i = 0; i < fb_info->num_fb; ++i)
1125 		hw_params.fb[i] = &fb_info->fb[i];
1126 
1127 	switch (adev->ip_versions[DCE_HWIP][0]) {
1128 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1129 		hw_params.dpia_supported = true;
1130 #if defined(CONFIG_DRM_AMD_DC_DCN)
1131 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1132 #endif
1133 		break;
1134 	default:
1135 		break;
1136 	}
1137 
1138 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1139 	if (status != DMUB_STATUS_OK) {
1140 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1141 		return -EINVAL;
1142 	}
1143 
1144 	/* Wait for firmware load to finish. */
1145 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1146 	if (status != DMUB_STATUS_OK)
1147 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1148 
1149 	/* Init DMCU and ABM if available. */
1150 	if (dmcu && abm) {
1151 		dmcu->funcs->dmcu_init(dmcu);
1152 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1153 	}
1154 
1155 	if (!adev->dm.dc->ctx->dmub_srv)
1156 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1157 	if (!adev->dm.dc->ctx->dmub_srv) {
1158 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1159 		return -ENOMEM;
1160 	}
1161 
1162 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1163 		 adev->dm.dmcub_fw_version);
1164 
1165 	return 0;
1166 }
1167 
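/*
 * Re-initialize DMUB hardware on resume: if the firmware is already loaded,
 * just wait for the auto-load to finish; otherwise perform a full hardware
 * init.
 */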
1168 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1169 {
1170 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1171 	enum dmub_status status;
1172 	bool init;
1173 
1174 	if (!dmub_srv) {
1175 		/* DMUB isn't supported on the ASIC. */
1176 		return;
1177 	}
1178 
1179 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1180 	if (status != DMUB_STATUS_OK)
1181 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1182 
1183 	if (status == DMUB_STATUS_OK && init) {
1184 		/* Wait for firmware load to finish. */
1185 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1186 		if (status != DMUB_STATUS_OK)
1187 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1188 	} else {
1189 		/* Perform the full hardware initialization. */
1190 		dm_dmub_hw_init(adev);
1191 	}
1192 }
1193 
1194 #if defined(CONFIG_DRM_AMD_DC_DCN)
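/*
 * Translate the GMC framebuffer/AGP apertures and GART page table addresses
 * into the physical address space configuration consumed by
 * dc_setup_system_context().
 */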
1195 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1196 {
1197 	uint64_t pt_base;
1198 	uint32_t logical_addr_low;
1199 	uint32_t logical_addr_high;
1200 	uint32_t agp_base, agp_bot, agp_top;
1201 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1202 
1203 	memset(pa_config, 0, sizeof(*pa_config));
1204 
1205 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1206 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1207 
1208 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1209 		/*
1210 		 * Raven2 has a HW issue that it is unable to use the vram which
1211 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1212 		 * workaround that increase system aperture high address (add 1)
1213 		 * to get rid of the VM fault and hardware hang.
1214 		 */
1215 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1216 	else
1217 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1218 
1219 	agp_base = 0;
1220 	agp_bot = adev->gmc.agp_start >> 24;
1221 	agp_top = adev->gmc.agp_end >> 24;
1222 
1223 
1224 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1225 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1226 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1227 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1228 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1229 	page_table_base.low_part = lower_32_bits(pt_base);
1230 
1231 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1232 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1233 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1235 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1236 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1237 
1238 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1239 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1240 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1241 
1242 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1243 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1244 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1245 
1246 	pa_config->is_hvm_enabled = 0;
1247 
1248 }
1249 #endif
1250 #if defined(CONFIG_DRM_AMD_DC_DCN)
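/*
 * Deferred worker that tracks how many CRTCs have vblank interrupts enabled,
 * allows DC idle optimizations (MALL) only when none do, and enables or
 * disables PSR based on the OS vblank requirements.
 */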
1251 static void vblank_control_worker(struct work_struct *work)
1252 {
1253 	struct vblank_control_work *vblank_work =
1254 		container_of(work, struct vblank_control_work, work);
1255 	struct amdgpu_display_manager *dm = vblank_work->dm;
1256 
1257 	mutex_lock(&dm->dc_lock);
1258 
1259 	if (vblank_work->enable)
1260 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1262 		dm->active_vblank_irq_count--;
1263 
1264 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1265 
1266 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1267 
1268 	/* Control PSR based on vblank requirements from OS */
1269 	if (vblank_work->stream && vblank_work->stream->link) {
1270 		if (vblank_work->enable) {
1271 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1272 				amdgpu_dm_psr_disable(vblank_work->stream);
1273 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1274 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1275 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1276 			amdgpu_dm_psr_enable(vblank_work->stream);
1277 		}
1278 	}
1279 
1280 	mutex_unlock(&dm->dc_lock);
1281 
1282 	dc_stream_release(vblank_work->stream);
1283 
1284 	kfree(vblank_work);
1285 }
1286 
1287 #endif
1288 
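/*
 * Deferred HPD RX handler: re-detects the sink and, when not in GPU reset,
 * services automated test requests or recovers from DP link loss.
 */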
1289 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1290 {
1291 	struct hpd_rx_irq_offload_work *offload_work;
1292 	struct amdgpu_dm_connector *aconnector;
1293 	struct dc_link *dc_link;
1294 	struct amdgpu_device *adev;
1295 	enum dc_connection_type new_connection_type = dc_connection_none;
1296 	unsigned long flags;
1297 
1298 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1299 	aconnector = offload_work->offload_wq->aconnector;
1300 
1301 	if (!aconnector) {
1302 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1303 		goto skip;
1304 	}
1305 
1306 	adev = drm_to_adev(aconnector->base.dev);
1307 	dc_link = aconnector->dc_link;
1308 
1309 	mutex_lock(&aconnector->hpd_lock);
1310 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1311 		DRM_ERROR("KMS: Failed to detect connector\n");
1312 	mutex_unlock(&aconnector->hpd_lock);
1313 
1314 	if (new_connection_type == dc_connection_none)
1315 		goto skip;
1316 
1317 	if (amdgpu_in_reset(adev))
1318 		goto skip;
1319 
1320 	mutex_lock(&adev->dm.dc_lock);
1321 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1322 		dc_link_dp_handle_automated_test(dc_link);
1323 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1324 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1325 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1326 		dc_link_dp_handle_link_loss(dc_link);
1327 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1328 		offload_work->offload_wq->is_handling_link_loss = false;
1329 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1330 	}
1331 	mutex_unlock(&adev->dm.dc_lock);
1332 
1333 skip:
1334 	kfree(offload_work);
1335 
1336 }
1337 
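/*
 * Allocate one single-threaded HPD RX offload workqueue per link so that
 * long-running HPD RX interrupt work can be handled outside of IRQ context.
 */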
1338 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1339 {
1340 	int max_caps = dc->caps.max_links;
1341 	int i = 0;
1342 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1343 
1344 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1345 
1346 	if (!hpd_rx_offload_wq)
1347 		return NULL;
1348 
1349 
	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_free_all_wq;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_free_all_wq:
	/* Free any workqueues already created before the failure, then the array. */
	while (i--)
		destroy_workqueue(hpd_rx_offload_wq[i].wq);
	kfree(hpd_rx_offload_wq);

	return NULL;
1363 }
1364 
1365 struct amdgpu_stutter_quirk {
1366 	u16 chip_vendor;
1367 	u16 chip_device;
1368 	u16 subsys_vendor;
1369 	u16 subsys_device;
1370 	u8 revision;
1371 };
1372 
1373 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1374 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1375 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1376 	{ 0, 0, 0, 0, 0 },
1377 };
1378 
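/* Check whether this device matches an entry in the stutter quirk list. */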
1379 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1380 {
1381 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1382 
1383 	while (p && p->chip_device != 0) {
1384 		if (pdev->vendor == p->chip_vendor &&
1385 		    pdev->device == p->chip_device &&
1386 		    pdev->subsystem_vendor == p->subsys_vendor &&
1387 		    pdev->subsystem_device == p->subsys_device &&
1388 		    pdev->revision == p->revision) {
1389 			return true;
1390 		}
1391 		++p;
1392 	}
1393 	return false;
1394 }
1395 
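/**
 * amdgpu_dm_init() - Initialize the display manager
 * @adev: amdgpu_device pointer
 *
 * Creates the DC core, brings up DM IRQ and DMUB support, registers DMUB
 * notification callbacks, and initializes the DRM-side display structures.
 *
 * Return: 0 on success, -EINVAL on failure.
 */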
1396 static int amdgpu_dm_init(struct amdgpu_device *adev)
1397 {
1398 	struct dc_init_data init_data;
1399 #ifdef CONFIG_DRM_AMD_DC_HDCP
1400 	struct dc_callback_init init_params;
1401 #endif
1402 	int r;
1403 
1404 	adev->dm.ddev = adev_to_drm(adev);
1405 	adev->dm.adev = adev;
1406 
1407 	/* Zero all the fields */
1408 	memset(&init_data, 0, sizeof(init_data));
1409 #ifdef CONFIG_DRM_AMD_DC_HDCP
1410 	memset(&init_params, 0, sizeof(init_params));
1411 #endif
1412 
1413 	mutex_init(&adev->dm.dc_lock);
1414 	mutex_init(&adev->dm.audio_lock);
1415 #if defined(CONFIG_DRM_AMD_DC_DCN)
1416 	spin_lock_init(&adev->dm.vblank_lock);
1417 #endif
1418 
	if (amdgpu_dm_irq_init(adev)) {
1420 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1421 		goto error;
1422 	}
1423 
1424 	init_data.asic_id.chip_family = adev->family;
1425 
1426 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1427 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1428 	init_data.asic_id.chip_id = adev->pdev->device;
1429 
1430 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1431 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1432 	init_data.asic_id.atombios_base_address =
1433 		adev->mode_info.atom_context->bios;
1434 
1435 	init_data.driver = adev;
1436 
1437 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1438 
1439 	if (!adev->dm.cgs_device) {
1440 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1441 		goto error;
1442 	}
1443 
1444 	init_data.cgs_device = adev->dm.cgs_device;
1445 
1446 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1447 
1448 	switch (adev->ip_versions[DCE_HWIP][0]) {
1449 	case IP_VERSION(2, 1, 0):
1450 		switch (adev->dm.dmcub_fw_version) {
1451 		case 0: /* development */
1452 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1453 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1454 			init_data.flags.disable_dmcu = false;
1455 			break;
1456 		default:
1457 			init_data.flags.disable_dmcu = true;
1458 		}
1459 		break;
1460 	case IP_VERSION(2, 0, 3):
1461 		init_data.flags.disable_dmcu = true;
1462 		break;
1463 	default:
1464 		break;
1465 	}
1466 
1467 	switch (adev->asic_type) {
1468 	case CHIP_CARRIZO:
1469 	case CHIP_STONEY:
1470 		init_data.flags.gpu_vm_support = true;
1471 		break;
1472 	default:
1473 		switch (adev->ip_versions[DCE_HWIP][0]) {
1474 		case IP_VERSION(1, 0, 0):
1475 		case IP_VERSION(1, 0, 1):
1476 			/* enable S/G on PCO and RV2 */
1477 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1478 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1479 				init_data.flags.gpu_vm_support = true;
1480 			break;
1481 		case IP_VERSION(2, 1, 0):
1482 		case IP_VERSION(3, 0, 1):
1483 		case IP_VERSION(3, 1, 2):
1484 		case IP_VERSION(3, 1, 3):
1485 		case IP_VERSION(3, 1, 5):
1486 		case IP_VERSION(3, 1, 6):
1487 			init_data.flags.gpu_vm_support = true;
1488 			break;
1489 		default:
1490 			break;
1491 		}
1492 		break;
1493 	}
1494 
1495 	if (init_data.flags.gpu_vm_support)
1496 		adev->mode_info.gpu_vm_support = true;
1497 
1498 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1499 		init_data.flags.fbc_support = true;
1500 
1501 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1502 		init_data.flags.multi_mon_pp_mclk_switch = true;
1503 
1504 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1505 		init_data.flags.disable_fractional_pwm = true;
1506 
1507 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1508 		init_data.flags.edp_no_power_sequencing = true;
1509 
1510 #ifdef CONFIG_DRM_AMD_DC_DCN
1511 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1512 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1513 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1514 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1515 #endif
1516 
1517 	init_data.flags.seamless_boot_edp_requested = false;
1518 
1519 	if (check_seamless_boot_capability(adev)) {
1520 		init_data.flags.seamless_boot_edp_requested = true;
1521 		init_data.flags.allow_seamless_boot_optimization = true;
1522 		DRM_INFO("Seamless boot condition check passed\n");
1523 	}
1524 
1525 	INIT_LIST_HEAD(&adev->dm.da_list);
1526 	/* Display Core create. */
1527 	adev->dm.dc = dc_create(&init_data);
1528 
1529 	if (adev->dm.dc) {
1530 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1531 	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1533 		goto error;
1534 	}
1535 
1536 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1537 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1538 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1539 	}
1540 
1541 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter =
			!(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1543 	if (dm_should_disable_stutter(adev->pdev))
1544 		adev->dm.dc->debug.disable_stutter = true;
1545 
1546 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1547 		adev->dm.dc->debug.disable_stutter = true;
1548 
1549 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1550 		adev->dm.dc->debug.disable_dsc = true;
1551 		adev->dm.dc->debug.disable_dsc_edp = true;
1552 	}
1553 
1554 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1555 		adev->dm.dc->debug.disable_clock_gate = true;
1556 
1557 	r = dm_dmub_hw_init(adev);
1558 	if (r) {
1559 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1560 		goto error;
1561 	}
1562 
1563 	dc_hardware_init(adev->dm.dc);
1564 
1565 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1566 	if (!adev->dm.hpd_rx_offload_wq) {
1567 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1568 		goto error;
1569 	}
1570 
1571 #if defined(CONFIG_DRM_AMD_DC_DCN)
1572 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1573 		struct dc_phy_addr_space_config pa_config;
1574 
1575 		mmhub_read_system_context(adev, &pa_config);
1576 
1577 		// Call the DC init_memory func
1578 		dc_setup_system_context(adev->dm.dc, &pa_config);
1579 	}
1580 #endif
1581 
1582 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
1589 
1590 	amdgpu_dm_init_color_mod();
1591 
1592 #if defined(CONFIG_DRM_AMD_DC_DCN)
1593 	if (adev->dm.dc->caps.max_links > 0) {
1594 		adev->dm.vblank_control_workqueue =
1595 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1596 		if (!adev->dm.vblank_control_workqueue)
1597 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1598 	}
1599 #endif
1600 
1601 #ifdef CONFIG_DRM_AMD_DC_HDCP
1602 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1603 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1604 
1605 		if (!adev->dm.hdcp_workqueue)
1606 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1607 		else
1608 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1609 
1610 		dc_init_callbacks(adev->dm.dc, &init_params);
1611 	}
1612 #endif
1613 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1614 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1615 #endif
1616 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1617 		init_completion(&adev->dm.dmub_aux_transfer_done);
1618 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1619 		if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1621 			goto error;
1622 		}
1623 
1624 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1625 		if (!adev->dm.delayed_hpd_wq) {
1626 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1627 			goto error;
1628 		}
1629 
1630 		amdgpu_dm_outbox_init(adev);
1631 #if defined(CONFIG_DRM_AMD_DC_DCN)
1632 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1633 			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1635 			goto error;
1636 		}
1637 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1639 			goto error;
1640 		}
1641 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1642 			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1643 			goto error;
1644 		}
1645 #endif /* CONFIG_DRM_AMD_DC_DCN */
1646 	}
1647 
1648 	if (amdgpu_dm_initialize_drm_device(adev)) {
1649 		DRM_ERROR(
1650 		"amdgpu: failed to initialize sw for display support.\n");
1651 		goto error;
1652 	}
1653 
1654 	/* create fake encoders for MST */
1655 	dm_dp_create_fake_mst_encoders(adev);
1656 
1657 	/* TODO: Add_display_info? */
1658 
1659 	/* TODO use dynamic cursor width */
1660 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1661 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1662 
1663 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1664 		DRM_ERROR(
1665 		"amdgpu: failed to initialize vblank support for display.\n");
1666 		goto error;
1667 	}
1668 
1669 
1670 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1671 
1672 	return 0;
1673 error:
1674 	amdgpu_dm_fini(adev);
1675 
1676 	return -EINVAL;
1677 }
1678 
1679 static int amdgpu_dm_early_fini(void *handle)
1680 {
1681 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1682 
1683 	amdgpu_dm_audio_fini(adev);
1684 
1685 	return 0;
1686 }
1687 
1688 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1689 {
1690 	int i;
1691 
1692 #if defined(CONFIG_DRM_AMD_DC_DCN)
1693 	if (adev->dm.vblank_control_workqueue) {
1694 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1695 		adev->dm.vblank_control_workqueue = NULL;
1696 	}
1697 #endif
1698 
1699 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1700 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1701 	}
1702 
1703 	amdgpu_dm_destroy_drm_device(&adev->dm);
1704 
1705 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1706 	if (adev->dm.crc_rd_wrk) {
1707 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1708 		kfree(adev->dm.crc_rd_wrk);
1709 		adev->dm.crc_rd_wrk = NULL;
1710 	}
1711 #endif
1712 #ifdef CONFIG_DRM_AMD_DC_HDCP
1713 	if (adev->dm.hdcp_workqueue) {
1714 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1715 		adev->dm.hdcp_workqueue = NULL;
1716 	}
1717 
1718 	if (adev->dm.dc)
1719 		dc_deinit_callbacks(adev->dm.dc);
1720 #endif
1721 
1722 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1723 
1724 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1725 		kfree(adev->dm.dmub_notify);
1726 		adev->dm.dmub_notify = NULL;
1727 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1728 		adev->dm.delayed_hpd_wq = NULL;
1729 	}
1730 
1731 	if (adev->dm.dmub_bo)
1732 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1733 				      &adev->dm.dmub_bo_gpu_addr,
1734 				      &adev->dm.dmub_bo_cpu_addr);
1735 
1736 	if (adev->dm.hpd_rx_offload_wq) {
1737 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1738 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1739 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1740 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1741 			}
1742 		}
1743 
1744 		kfree(adev->dm.hpd_rx_offload_wq);
1745 		adev->dm.hpd_rx_offload_wq = NULL;
1746 	}
1747 
1748 	/* DC Destroy TODO: Replace destroy DAL */
1749 	if (adev->dm.dc)
1750 		dc_destroy(&adev->dm.dc);
1751 	/*
1752 	 * TODO: pageflip, vlank interrupt
1753 	 *
1754 	 * amdgpu_dm_irq_fini(adev);
1755 	 */
1756 
1757 	if (adev->dm.cgs_device) {
1758 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1759 		adev->dm.cgs_device = NULL;
1760 	}
1761 	if (adev->dm.freesync_module) {
1762 		mod_freesync_destroy(adev->dm.freesync_module);
1763 		adev->dm.freesync_module = NULL;
1764 	}
1765 
1766 	mutex_destroy(&adev->dm.audio_lock);
1767 	mutex_destroy(&adev->dm.dc_lock);
1768 
1769 	return;
1770 }
1771 
1772 static int load_dmcu_fw(struct amdgpu_device *adev)
1773 {
1774 	const char *fw_name_dmcu = NULL;
1775 	int r;
1776 	const struct dmcu_firmware_header_v1_0 *hdr;
1777 
1778 	switch (adev->asic_type) {
1779 #if defined(CONFIG_DRM_AMD_DC_SI)
1780 	case CHIP_TAHITI:
1781 	case CHIP_PITCAIRN:
1782 	case CHIP_VERDE:
1783 	case CHIP_OLAND:
1784 #endif
1785 	case CHIP_BONAIRE:
1786 	case CHIP_HAWAII:
1787 	case CHIP_KAVERI:
1788 	case CHIP_KABINI:
1789 	case CHIP_MULLINS:
1790 	case CHIP_TONGA:
1791 	case CHIP_FIJI:
1792 	case CHIP_CARRIZO:
1793 	case CHIP_STONEY:
1794 	case CHIP_POLARIS11:
1795 	case CHIP_POLARIS10:
1796 	case CHIP_POLARIS12:
1797 	case CHIP_VEGAM:
1798 	case CHIP_VEGA10:
1799 	case CHIP_VEGA12:
1800 	case CHIP_VEGA20:
1801 		return 0;
1802 	case CHIP_NAVI12:
1803 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1804 		break;
1805 	case CHIP_RAVEN:
1806 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1807 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1808 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1809 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1810 		else
1811 			return 0;
1812 		break;
1813 	default:
1814 		switch (adev->ip_versions[DCE_HWIP][0]) {
1815 		case IP_VERSION(2, 0, 2):
1816 		case IP_VERSION(2, 0, 3):
1817 		case IP_VERSION(2, 0, 0):
1818 		case IP_VERSION(2, 1, 0):
1819 		case IP_VERSION(3, 0, 0):
1820 		case IP_VERSION(3, 0, 2):
1821 		case IP_VERSION(3, 0, 3):
1822 		case IP_VERSION(3, 0, 1):
1823 		case IP_VERSION(3, 1, 2):
1824 		case IP_VERSION(3, 1, 3):
1825 		case IP_VERSION(3, 1, 5):
1826 		case IP_VERSION(3, 1, 6):
1827 			return 0;
1828 		default:
1829 			break;
1830 		}
1831 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1832 		return -EINVAL;
1833 	}
1834 
1835 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1836 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1837 		return 0;
1838 	}
1839 
1840 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1841 	if (r == -ENOENT) {
1842 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1843 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1844 		adev->dm.fw_dmcu = NULL;
1845 		return 0;
1846 	}
1847 	if (r) {
1848 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1849 			fw_name_dmcu);
1850 		return r;
1851 	}
1852 
1853 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1854 	if (r) {
1855 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1856 			fw_name_dmcu);
1857 		release_firmware(adev->dm.fw_dmcu);
1858 		adev->dm.fw_dmcu = NULL;
1859 		return r;
1860 	}
1861 
1862 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1863 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1864 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1865 	adev->firmware.fw_size +=
1866 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1867 
1868 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1869 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1870 	adev->firmware.fw_size +=
1871 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1872 
1873 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1874 
1875 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1876 
1877 	return 0;
1878 }
1879 
1880 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1881 {
1882 	struct amdgpu_device *adev = ctx;
1883 
1884 	return dm_read_reg(adev->dm.dc->ctx, address);
1885 }
1886 
1887 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1888 				     uint32_t value)
1889 {
1890 	struct amdgpu_device *adev = ctx;
1891 
1892 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1893 }
1894 
1895 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1896 {
1897 	struct dmub_srv_create_params create_params;
1898 	struct dmub_srv_region_params region_params;
1899 	struct dmub_srv_region_info region_info;
1900 	struct dmub_srv_fb_params fb_params;
1901 	struct dmub_srv_fb_info *fb_info;
1902 	struct dmub_srv *dmub_srv;
1903 	const struct dmcub_firmware_header_v1_0 *hdr;
1904 	const char *fw_name_dmub;
1905 	enum dmub_asic dmub_asic;
1906 	enum dmub_status status;
1907 	int r;
1908 
1909 	switch (adev->ip_versions[DCE_HWIP][0]) {
1910 	case IP_VERSION(2, 1, 0):
1911 		dmub_asic = DMUB_ASIC_DCN21;
1912 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1913 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1914 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1915 		break;
1916 	case IP_VERSION(3, 0, 0):
1917 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1918 			dmub_asic = DMUB_ASIC_DCN30;
1919 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1920 		} else {
1921 			dmub_asic = DMUB_ASIC_DCN30;
1922 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1923 		}
1924 		break;
1925 	case IP_VERSION(3, 0, 1):
1926 		dmub_asic = DMUB_ASIC_DCN301;
1927 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1928 		break;
1929 	case IP_VERSION(3, 0, 2):
1930 		dmub_asic = DMUB_ASIC_DCN302;
1931 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1932 		break;
1933 	case IP_VERSION(3, 0, 3):
1934 		dmub_asic = DMUB_ASIC_DCN303;
1935 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1936 		break;
1937 	case IP_VERSION(3, 1, 2):
1938 	case IP_VERSION(3, 1, 3):
1939 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1940 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1941 		break;
1942 	case IP_VERSION(3, 1, 5):
1943 		dmub_asic = DMUB_ASIC_DCN315;
1944 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1945 		break;
1946 	case IP_VERSION(3, 1, 6):
1947 		dmub_asic = DMUB_ASIC_DCN316;
1948 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1949 		break;
1950 	default:
1951 		/* ASIC doesn't support DMUB. */
1952 		return 0;
1953 	}
1954 
1955 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1956 	if (r) {
1957 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1958 		return 0;
1959 	}
1960 
1961 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1962 	if (r) {
1963 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1964 		return 0;
1965 	}
1966 
1967 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1968 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1969 
1970 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1971 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1972 			AMDGPU_UCODE_ID_DMCUB;
1973 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1974 			adev->dm.dmub_fw;
1975 		adev->firmware.fw_size +=
1976 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1977 
1978 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1979 			 adev->dm.dmcub_fw_version);
1980 	}
1981 
1982 
1983 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1984 	dmub_srv = adev->dm.dmub_srv;
1985 
1986 	if (!dmub_srv) {
1987 		DRM_ERROR("Failed to allocate DMUB service!\n");
1988 		return -ENOMEM;
1989 	}
1990 
1991 	memset(&create_params, 0, sizeof(create_params));
1992 	create_params.user_ctx = adev;
1993 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1994 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1995 	create_params.asic = dmub_asic;
1996 
1997 	/* Create the DMUB service. */
1998 	status = dmub_srv_create(dmub_srv, &create_params);
1999 	if (status != DMUB_STATUS_OK) {
2000 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2001 		return -EINVAL;
2002 	}
2003 
2004 	/* Calculate the size of all the regions for the DMUB service. */
2005 	memset(&region_params, 0, sizeof(region_params));
2006 
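	/*
	 * fw_inst_const points at the instruction/constant section of the
	 * firmware image (skipping the PSP signature header), and fw_bss_data,
	 * when present, follows immediately after that section in the blob.
	 */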
2007 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2008 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2009 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2010 	region_params.vbios_size = adev->bios_size;
2011 	region_params.fw_bss_data = region_params.bss_data_size ?
2012 		adev->dm.dmub_fw->data +
2013 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2014 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2015 	region_params.fw_inst_const =
2016 		adev->dm.dmub_fw->data +
2017 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2018 		PSP_HEADER_BYTES;
2019 
2020 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2021 					   &region_info);
2022 
2023 	if (status != DMUB_STATUS_OK) {
2024 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2025 		return -EINVAL;
2026 	}
2027 
2028 	/*
2029 	 * Allocate a framebuffer based on the total size of all the regions.
2030 	 * TODO: Move this into GART.
2031 	 */
2032 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2033 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2034 				    &adev->dm.dmub_bo_gpu_addr,
2035 				    &adev->dm.dmub_bo_cpu_addr);
2036 	if (r)
2037 		return r;
2038 
2039 	/* Rebase the regions on the framebuffer address. */
2040 	memset(&fb_params, 0, sizeof(fb_params));
2041 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2042 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2043 	fb_params.region_info = &region_info;
2044 
2045 	adev->dm.dmub_fb_info =
2046 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2047 	fb_info = adev->dm.dmub_fb_info;
2048 
2049 	if (!fb_info) {
2050 		DRM_ERROR(
2051 			"Failed to allocate framebuffer info for DMUB service!\n");
2052 		return -ENOMEM;
2053 	}
2054 
2055 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2056 	if (status != DMUB_STATUS_OK) {
2057 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2058 		return -EINVAL;
2059 	}
2060 
2061 	return 0;
2062 }
2063 
2064 static int dm_sw_init(void *handle)
2065 {
2066 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2067 	int r;
2068 
2069 	r = dm_dmub_sw_init(adev);
2070 	if (r)
2071 		return r;
2072 
2073 	return load_dmcu_fw(adev);
2074 }
2075 
2076 static int dm_sw_fini(void *handle)
2077 {
2078 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2079 
2080 	kfree(adev->dm.dmub_fb_info);
2081 	adev->dm.dmub_fb_info = NULL;
2082 
2083 	if (adev->dm.dmub_srv) {
2084 		dmub_srv_destroy(adev->dm.dmub_srv);
2085 		adev->dm.dmub_srv = NULL;
2086 	}
2087 
2088 	release_firmware(adev->dm.dmub_fw);
2089 	adev->dm.dmub_fw = NULL;
2090 
2091 	release_firmware(adev->dm.fw_dmcu);
2092 	adev->dm.fw_dmcu = NULL;
2093 
2094 	return 0;
2095 }
2096 
2097 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2098 {
2099 	struct amdgpu_dm_connector *aconnector;
2100 	struct drm_connector *connector;
2101 	struct drm_connector_list_iter iter;
2102 	int ret = 0;
2103 
2104 	drm_connector_list_iter_begin(dev, &iter);
2105 	drm_for_each_connector_iter(connector, &iter) {
2106 		aconnector = to_amdgpu_dm_connector(connector);
2107 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2108 		    aconnector->mst_mgr.aux) {
2109 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2110 					 aconnector,
2111 					 aconnector->base.base.id);
2112 
2113 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2114 			if (ret < 0) {
2115 				DRM_ERROR("DM_MST: Failed to start MST\n");
2116 				aconnector->dc_link->type =
2117 					dc_connection_single;
2118 				break;
2119 			}
2120 		}
2121 	}
2122 	drm_connector_list_iter_end(&iter);
2123 
2124 	return ret;
2125 }
2126 
2127 static int dm_late_init(void *handle)
2128 {
2129 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2130 
2131 	struct dmcu_iram_parameters params;
2132 	unsigned int linear_lut[16];
2133 	int i;
2134 	struct dmcu *dmcu = NULL;
2135 
2136 	dmcu = adev->dm.dc->res_pool->dmcu;
2137 
2138 	for (i = 0; i < 16; i++)
2139 		linear_lut[i] = 0xFFFF * i / 15;
2140 
2141 	params.set = 0;
2142 	params.backlight_ramping_override = false;
2143 	params.backlight_ramping_start = 0xCCCC;
2144 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2145 	params.backlight_lut_array_size = 16;
2146 	params.backlight_lut_array = linear_lut;
2147 
2148 	/* Min backlight level after ABM reduction; don't allow below 1%:
2149 	 * 0xFFFF * 0.01 = 0x28F
2150 	 */
2151 	params.min_abm_backlight = 0x28F;
2152 	/* In the case where ABM is implemented on dmcub,
2153 	 * the dmcu object will be NULL.
2154 	 * ABM 2.4 and up are implemented on dmcub.
2155 	 */
2156 	if (dmcu) {
2157 		if (!dmcu_load_iram(dmcu, params))
2158 			return -EINVAL;
2159 	} else if (adev->dm.dc->ctx->dmub_srv) {
2160 		struct dc_link *edp_links[MAX_NUM_EDP];
2161 		int edp_num;
2162 
2163 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2164 		for (i = 0; i < edp_num; i++) {
2165 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2166 				return -EINVAL;
2167 		}
2168 	}
2169 
2170 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2171 }
2172 
2173 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2174 {
2175 	struct amdgpu_dm_connector *aconnector;
2176 	struct drm_connector *connector;
2177 	struct drm_connector_list_iter iter;
2178 	struct drm_dp_mst_topology_mgr *mgr;
2179 	int ret;
2180 	bool need_hotplug = false;
2181 
2182 	drm_connector_list_iter_begin(dev, &iter);
2183 	drm_for_each_connector_iter(connector, &iter) {
2184 		aconnector = to_amdgpu_dm_connector(connector);
2185 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2186 		    aconnector->mst_port)
2187 			continue;
2188 
2189 		mgr = &aconnector->mst_mgr;
2190 
2191 		if (suspend) {
2192 			drm_dp_mst_topology_mgr_suspend(mgr);
2193 		} else {
2194 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2195 			if (ret < 0) {
2196 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2197 				need_hotplug = true;
2198 			}
2199 		}
2200 	}
2201 	drm_connector_list_iter_end(&iter);
2202 
2203 	if (need_hotplug)
2204 		drm_kms_helper_hotplug_event(dev);
2205 }
2206 
2207 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2208 {
2209 	int ret = 0;
2210 
2211 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2212 	 * on the Windows driver dc implementation.
2213 	 * For Navi1x, the clock settings of the dcn watermarks are fixed. The
2214 	 * settings should be passed to smu during boot up and resume from s3.
2215 	 * boot up: dc calculates the dcn watermark clock settings within dc_create,
2216 	 * dcn20_resource_construct
2217 	 * then call pplib functions below to pass the settings to smu:
2218 	 * smu_set_watermarks_for_clock_ranges
2219 	 * smu_set_watermarks_table
2220 	 * navi10_set_watermarks_table
2221 	 * smu_write_watermarks_table
2222 	 *
2223 	 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
2224 	 * dc has implemented a different flow for the Windows driver:
2225 	 * dc_hardware_init / dc_set_power_state
2226 	 * dcn10_init_hw
2227 	 * notify_wm_ranges
2228 	 * set_wm_ranges
2229 	 * -- Linux
2230 	 * smu_set_watermarks_for_clock_ranges
2231 	 * renoir_set_watermarks_table
2232 	 * smu_write_watermarks_table
2233 	 *
2234 	 * For Linux,
2235 	 * dc_hardware_init -> amdgpu_dm_init
2236 	 * dc_set_power_state --> dm_resume
2237 	 *
2238 	 * therefore, this function applies to navi10/12/14 but not Renoir.
2239 	 *
2240 	 */
2241 	switch (adev->ip_versions[DCE_HWIP][0]) {
2242 	case IP_VERSION(2, 0, 2):
2243 	case IP_VERSION(2, 0, 0):
2244 		break;
2245 	default:
2246 		return 0;
2247 	}
2248 
2249 	ret = amdgpu_dpm_write_watermarks_table(adev);
2250 	if (ret) {
2251 		DRM_ERROR("Failed to update WMTABLE!\n");
2252 		return ret;
2253 	}
2254 
2255 	return 0;
2256 }
2257 
2258 /**
2259  * dm_hw_init() - Initialize DC device
2260  * @handle: The base driver device containing the amdgpu_dm device.
2261  *
2262  * Initialize the &struct amdgpu_display_manager device. This involves calling
2263  * the initializers of each DM component, then populating the struct with them.
2264  *
2265  * Although the function implies hardware initialization, both hardware and
2266  * software are initialized here. Splitting them out to their relevant init
2267  * hooks is a future TODO item.
2268  *
2269  * Some notable things that are initialized here:
2270  *
2271  * - Display Core, both software and hardware
2272  * - DC modules that we need (freesync and color management)
2273  * - DRM software states
2274  * - Interrupt sources and handlers
2275  * - Vblank support
2276  * - Debug FS entries, if enabled
2277  */
2278 static int dm_hw_init(void *handle)
2279 {
2280 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2281 	/* Create DAL display manager */
2282 	amdgpu_dm_init(adev);
2283 	amdgpu_dm_hpd_init(adev);
2284 
2285 	return 0;
2286 }
2287 
2288 /**
2289  * dm_hw_fini() - Teardown DC device
2290  * @handle: The base driver device containing the amdgpu_dm device.
2291  *
2292  * Teardown components within &struct amdgpu_display_manager that require
2293  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2294  * were loaded. Also flush IRQ workqueues and disable them.
2295  */
2296 static int dm_hw_fini(void *handle)
2297 {
2298 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2299 
2300 	amdgpu_dm_hpd_fini(adev);
2301 
2302 	amdgpu_dm_irq_fini(adev);
2303 	amdgpu_dm_fini(adev);
2304 	return 0;
2305 }
2306 
2307 
2308 static int dm_enable_vblank(struct drm_crtc *crtc);
2309 static void dm_disable_vblank(struct drm_crtc *crtc);
2310 
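/*
 * Enable or disable the pflip and vblank interrupts for every CRTC that is
 * driving an active stream in @state. Used to quiesce and restore display
 * interrupts around a GPU reset.
 */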
2311 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2312 				 struct dc_state *state, bool enable)
2313 {
2314 	enum dc_irq_source irq_source;
2315 	struct amdgpu_crtc *acrtc;
2316 	int rc = -EBUSY;
2317 	int i = 0;
2318 
2319 	for (i = 0; i < state->stream_count; i++) {
2320 		acrtc = get_crtc_by_otg_inst(
2321 				adev, state->stream_status[i].primary_otg_inst);
2322 
2323 		if (acrtc && state->stream_status[i].plane_count != 0) {
2324 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2325 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2326 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2327 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2328 			if (rc)
2329 				DRM_WARN("Failed to %s pflip interrupts\n",
2330 					 enable ? "enable" : "disable");
2331 
2332 			if (enable) {
2333 				rc = dm_enable_vblank(&acrtc->base);
2334 				if (rc)
2335 					DRM_WARN("Failed to enable vblank interrupts\n");
2336 			} else {
2337 				dm_disable_vblank(&acrtc->base);
2338 			}
2339 
2340 		}
2341 	}
2342 
2343 }
2344 
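/*
 * Commit a state with zero streams: copy the current DC state, strip all
 * planes and streams from the copy, and commit it so the hardware is left
 * with no active streams before suspending during a GPU reset.
 */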
2345 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2346 {
2347 	struct dc_state *context = NULL;
2348 	enum dc_status res = DC_ERROR_UNEXPECTED;
2349 	int i;
2350 	struct dc_stream_state *del_streams[MAX_PIPES];
2351 	int del_streams_count = 0;
2352 
2353 	memset(del_streams, 0, sizeof(del_streams));
2354 
2355 	context = dc_create_state(dc);
2356 	if (context == NULL)
2357 		goto context_alloc_fail;
2358 
2359 	dc_resource_state_copy_construct_current(dc, context);
2360 
2361 	/* First remove from context all streams */
2362 	for (i = 0; i < context->stream_count; i++) {
2363 		struct dc_stream_state *stream = context->streams[i];
2364 
2365 		del_streams[del_streams_count++] = stream;
2366 	}
2367 
2368 	/* Remove all planes for removed streams and then remove the streams */
2369 	for (i = 0; i < del_streams_count; i++) {
2370 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2371 			res = DC_FAIL_DETACH_SURFACES;
2372 			goto fail;
2373 		}
2374 
2375 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2376 		if (res != DC_OK)
2377 			goto fail;
2378 	}
2379 
2380 	res = dc_commit_state(dc, context);
2381 
2382 fail:
2383 	dc_release_state(context);
2384 
2385 context_alloc_fail:
2386 	return res;
2387 }
2388 
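/* Flush any pending HPD RX offload work on every link before suspending. */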
2389 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2390 {
2391 	int i;
2392 
2393 	if (dm->hpd_rx_offload_wq) {
2394 		for (i = 0; i < dm->dc->caps.max_links; i++)
2395 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2396 	}
2397 }
2398 
2399 static int dm_suspend(void *handle)
2400 {
2401 	struct amdgpu_device *adev = handle;
2402 	struct amdgpu_display_manager *dm = &adev->dm;
2403 	int ret = 0;
2404 
2405 	if (amdgpu_in_reset(adev)) {
2406 		mutex_lock(&dm->dc_lock);
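		/*
		 * Note: dc_lock is intentionally held across the GPU reset; it
		 * is only released at the end of the reset path in dm_resume().
		 */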
2407 
2408 #if defined(CONFIG_DRM_AMD_DC_DCN)
2409 		dc_allow_idle_optimizations(adev->dm.dc, false);
2410 #endif
2411 
2412 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2413 
2414 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2415 
2416 		amdgpu_dm_commit_zero_streams(dm->dc);
2417 
2418 		amdgpu_dm_irq_suspend(adev);
2419 
2420 		hpd_rx_irq_work_suspend(dm);
2421 
2422 		return ret;
2423 	}
2424 
2425 	WARN_ON(adev->dm.cached_state);
2426 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2427 
2428 	s3_handle_mst(adev_to_drm(adev), true);
2429 
2430 	amdgpu_dm_irq_suspend(adev);
2431 
2432 	hpd_rx_irq_work_suspend(dm);
2433 
2434 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2435 
2436 	return 0;
2437 }
2438 
2439 struct amdgpu_dm_connector *
2440 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2441 					     struct drm_crtc *crtc)
2442 {
2443 	uint32_t i;
2444 	struct drm_connector_state *new_con_state;
2445 	struct drm_connector *connector;
2446 	struct drm_crtc *crtc_from_state;
2447 
2448 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2449 		crtc_from_state = new_con_state->crtc;
2450 
2451 		if (crtc_from_state == crtc)
2452 			return to_amdgpu_dm_connector(connector);
2453 	}
2454 
2455 	return NULL;
2456 }
2457 
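/*
 * When a connector is forced but no sink was detected, fake the detection:
 * mark the link as disconnected, create a sink for the connector's signal
 * type and try to read the EDID locally.
 */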
2458 static void emulated_link_detect(struct dc_link *link)
2459 {
2460 	struct dc_sink_init_data sink_init_data = { 0 };
2461 	struct display_sink_capability sink_caps = { 0 };
2462 	enum dc_edid_status edid_status;
2463 	struct dc_context *dc_ctx = link->ctx;
2464 	struct dc_sink *sink = NULL;
2465 	struct dc_sink *prev_sink = NULL;
2466 
2467 	link->type = dc_connection_none;
2468 	prev_sink = link->local_sink;
2469 
2470 	if (prev_sink)
2471 		dc_sink_release(prev_sink);
2472 
2473 	switch (link->connector_signal) {
2474 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2475 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2476 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2477 		break;
2478 	}
2479 
2480 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2481 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2482 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2483 		break;
2484 	}
2485 
2486 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2487 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2488 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2489 		break;
2490 	}
2491 
2492 	case SIGNAL_TYPE_LVDS: {
2493 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2494 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2495 		break;
2496 	}
2497 
2498 	case SIGNAL_TYPE_EDP: {
2499 		sink_caps.transaction_type =
2500 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2501 		sink_caps.signal = SIGNAL_TYPE_EDP;
2502 		break;
2503 	}
2504 
2505 	case SIGNAL_TYPE_DISPLAY_PORT: {
2506 		sink_caps.transaction_type =
2507 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2508 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2509 		break;
2510 	}
2511 
2512 	default:
2513 		DC_ERROR("Invalid connector type! signal:%d\n",
2514 			link->connector_signal);
2515 		return;
2516 	}
2517 
2518 	sink_init_data.link = link;
2519 	sink_init_data.sink_signal = sink_caps.signal;
2520 
2521 	sink = dc_sink_create(&sink_init_data);
2522 	if (!sink) {
2523 		DC_ERROR("Failed to create sink!\n");
2524 		return;
2525 	}
2526 
2527 	/* dc_sink_create returns a new reference */
2528 	link->local_sink = sink;
2529 
2530 	edid_status = dm_helpers_read_local_edid(
2531 			link->ctx,
2532 			link,
2533 			sink);
2534 
2535 	if (edid_status != EDID_OK)
2536 		DC_ERROR("Failed to read EDID\n");
2537 
2538 }
2539 
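/*
 * After a GPU reset, re-commit every stream in the cached DC state with a
 * full update forced on each of its surfaces so the hardware state is
 * rebuilt from scratch.
 */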
2540 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2541 				     struct amdgpu_display_manager *dm)
2542 {
2543 	struct {
2544 		struct dc_surface_update surface_updates[MAX_SURFACES];
2545 		struct dc_plane_info plane_infos[MAX_SURFACES];
2546 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2547 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2548 		struct dc_stream_update stream_update;
2549 	} *bundle;
2550 	int k, m;
2551 
2552 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2553 
2554 	if (!bundle) {
2555 		dm_error("Failed to allocate update bundle\n");
2556 		goto cleanup;
2557 	}
2558 
2559 	for (k = 0; k < dc_state->stream_count; k++) {
2560 		bundle->stream_update.stream = dc_state->streams[k];
2561 
2562 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2563 			bundle->surface_updates[m].surface =
2564 				dc_state->stream_status->plane_states[m];
2565 			bundle->surface_updates[m].surface->force_full_update =
2566 				true;
2567 		}
2568 		dc_commit_updates_for_stream(
2569 			dm->dc, bundle->surface_updates,
2570 			dc_state->stream_status->plane_count,
2571 			dc_state->streams[k], &bundle->stream_update, dc_state);
2572 	}
2573 
2574 cleanup:
2575 	kfree(bundle);
2576 
2577 	return;
2578 }
2579 
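/*
 * Find the stream currently driving @link and commit a dpms_off update for
 * it, recording the forced-off state in the CRTC state.
 */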
2580 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2581 {
2582 	struct dc_stream_state *stream_state;
2583 	struct amdgpu_dm_connector *aconnector = link->priv;
2584 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2585 	struct dc_stream_update stream_update;
2586 	bool dpms_off = true;
2587 
2588 	memset(&stream_update, 0, sizeof(stream_update));
2589 	stream_update.dpms_off = &dpms_off;
2590 
2591 	mutex_lock(&adev->dm.dc_lock);
2592 	stream_state = dc_stream_find_from_link(link);
2593 
2594 	if (stream_state == NULL) {
2595 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2596 		mutex_unlock(&adev->dm.dc_lock);
2597 		return;
2598 	}
2599 
2600 	stream_update.stream = stream_state;
2601 	acrtc_state->force_dpms_off = true;
2602 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2603 				     stream_state, &stream_update,
2604 				     stream_state->ctx->dc->current_state);
2605 	mutex_unlock(&adev->dm.dc_lock);
2606 }
2607 
2608 static int dm_resume(void *handle)
2609 {
2610 	struct amdgpu_device *adev = handle;
2611 	struct drm_device *ddev = adev_to_drm(adev);
2612 	struct amdgpu_display_manager *dm = &adev->dm;
2613 	struct amdgpu_dm_connector *aconnector;
2614 	struct drm_connector *connector;
2615 	struct drm_connector_list_iter iter;
2616 	struct drm_crtc *crtc;
2617 	struct drm_crtc_state *new_crtc_state;
2618 	struct dm_crtc_state *dm_new_crtc_state;
2619 	struct drm_plane *plane;
2620 	struct drm_plane_state *new_plane_state;
2621 	struct dm_plane_state *dm_new_plane_state;
2622 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2623 	enum dc_connection_type new_connection_type = dc_connection_none;
2624 	struct dc_state *dc_state;
2625 	int i, r, j;
2626 
2627 	if (amdgpu_in_reset(adev)) {
2628 		dc_state = dm->cached_dc_state;
2629 
2630 		/*
2631 		 * The dc->current_state is backed up into dm->cached_dc_state
2632 		 * before we commit 0 streams.
2633 		 *
2634 		 * DC will clear link encoder assignments on the real state
2635 		 * but the changes won't propagate over to the copy we made
2636 		 * before the 0 streams commit.
2637 		 *
2638 		 * DC expects that link encoder assignments are *not* valid
2639 		 * when committing a state, so as a workaround we can copy
2640 		 * off of the current state.
2641 		 *
2642 		 * We lose the previous assignments, but we had already
2643 		 * commit 0 streams anyway.
2644 		 */
2645 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2646 
2647 		if (dc_enable_dmub_notifications(adev->dm.dc))
2648 			amdgpu_dm_outbox_init(adev);
2649 
2650 		r = dm_dmub_hw_init(adev);
2651 		if (r)
2652 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2653 
2654 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2655 		dc_resume(dm->dc);
2656 
2657 		amdgpu_dm_irq_resume_early(adev);
2658 
2659 		for (i = 0; i < dc_state->stream_count; i++) {
2660 			dc_state->streams[i]->mode_changed = true;
2661 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2662 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2663 					= 0xffffffff;
2664 			}
2665 		}
2666 
2667 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2668 
2669 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2670 
2671 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2672 
2673 		dc_release_state(dm->cached_dc_state);
2674 		dm->cached_dc_state = NULL;
2675 
2676 		amdgpu_dm_irq_resume_late(adev);
2677 
2678 		mutex_unlock(&dm->dc_lock);
2679 
2680 		return 0;
2681 	}
2682 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2683 	dc_release_state(dm_state->context);
2684 	dm_state->context = dc_create_state(dm->dc);
2685 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2686 	dc_resource_state_construct(dm->dc, dm_state->context);
2687 
2688 	/* Re-enable outbox interrupts for DPIA. */
2689 	if (dc_enable_dmub_notifications(adev->dm.dc))
2690 		amdgpu_dm_outbox_init(adev);
2691 
2692 	/* Before powering on DC we need to re-initialize DMUB. */
2693 	dm_dmub_hw_resume(adev);
2694 
2695 	/* power on hardware */
2696 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2697 
2698 	/* program HPD filter */
2699 	dc_resume(dm->dc);
2700 
2701 	/*
2702 	 * early enable HPD Rx IRQ, should be done before set mode as short
2703 	 * pulse interrupts are used for MST
2704 	 */
2705 	amdgpu_dm_irq_resume_early(adev);
2706 
2707 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2708 	s3_handle_mst(ddev, false);
2709 
2710 	/* Do detection */
2711 	drm_connector_list_iter_begin(ddev, &iter);
2712 	drm_for_each_connector_iter(connector, &iter) {
2713 		aconnector = to_amdgpu_dm_connector(connector);
2714 
2715 		/*
2716 		 * this is the case when traversing through already created
2717 		 * MST connectors, should be skipped
2718 		 */
2719 		if (aconnector->dc_link &&
2720 		    aconnector->dc_link->type == dc_connection_mst_branch)
2721 			continue;
2722 
2723 		mutex_lock(&aconnector->hpd_lock);
2724 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2725 			DRM_ERROR("KMS: Failed to detect connector\n");
2726 
2727 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2728 			emulated_link_detect(aconnector->dc_link);
2729 		else
2730 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2731 
2732 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2733 			aconnector->fake_enable = false;
2734 
2735 		if (aconnector->dc_sink)
2736 			dc_sink_release(aconnector->dc_sink);
2737 		aconnector->dc_sink = NULL;
2738 		amdgpu_dm_update_connector_after_detect(aconnector);
2739 		mutex_unlock(&aconnector->hpd_lock);
2740 	}
2741 	drm_connector_list_iter_end(&iter);
2742 
2743 	/* Force mode set in atomic commit */
2744 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2745 		new_crtc_state->active_changed = true;
2746 
2747 	/*
2748 	 * atomic_check is expected to create the dc states. We need to release
2749 	 * them here, since they were duplicated as part of the suspend
2750 	 * procedure.
2751 	 */
2752 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2753 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2754 		if (dm_new_crtc_state->stream) {
2755 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2756 			dc_stream_release(dm_new_crtc_state->stream);
2757 			dm_new_crtc_state->stream = NULL;
2758 		}
2759 	}
2760 
2761 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2762 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2763 		if (dm_new_plane_state->dc_state) {
2764 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2765 			dc_plane_state_release(dm_new_plane_state->dc_state);
2766 			dm_new_plane_state->dc_state = NULL;
2767 		}
2768 	}
2769 
2770 	drm_atomic_helper_resume(ddev, dm->cached_state);
2771 
2772 	dm->cached_state = NULL;
2773 
2774 	amdgpu_dm_irq_resume_late(adev);
2775 
2776 	amdgpu_dm_smu_write_watermarks_table(adev);
2777 
2778 	return 0;
2779 }
2780 
2781 /**
2782  * DOC: DM Lifecycle
2783  *
2784  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2785  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2786  * the base driver's device list to be initialized and torn down accordingly.
2787  *
2788  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2789  */
2790 
2791 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2792 	.name = "dm",
2793 	.early_init = dm_early_init,
2794 	.late_init = dm_late_init,
2795 	.sw_init = dm_sw_init,
2796 	.sw_fini = dm_sw_fini,
2797 	.early_fini = amdgpu_dm_early_fini,
2798 	.hw_init = dm_hw_init,
2799 	.hw_fini = dm_hw_fini,
2800 	.suspend = dm_suspend,
2801 	.resume = dm_resume,
2802 	.is_idle = dm_is_idle,
2803 	.wait_for_idle = dm_wait_for_idle,
2804 	.check_soft_reset = dm_check_soft_reset,
2805 	.soft_reset = dm_soft_reset,
2806 	.set_clockgating_state = dm_set_clockgating_state,
2807 	.set_powergating_state = dm_set_powergating_state,
2808 };
2809 
2810 const struct amdgpu_ip_block_version dm_ip_block =
2811 {
2812 	.type = AMD_IP_BLOCK_TYPE_DCE,
2813 	.major = 1,
2814 	.minor = 0,
2815 	.rev = 0,
2816 	.funcs = &amdgpu_dm_funcs,
2817 };
2818 
2819 
2820 /**
2821  * DOC: atomic
2822  *
2823  * *WIP*
2824  */
2825 
2826 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2827 	.fb_create = amdgpu_display_user_framebuffer_create,
2828 	.get_format_info = amd_get_format_info,
2829 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2830 	.atomic_check = amdgpu_dm_atomic_check,
2831 	.atomic_commit = drm_atomic_helper_commit,
2832 };
2833 
2834 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2835 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2836 };
2837 
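/*
 * Refresh the cached eDP backlight capabilities for @aconnector: whether AUX
 * backlight control is supported, and the min/max input signal derived from
 * the sink's max_cll/min_cll HDR metadata.
 */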
2838 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2839 {
2840 	u32 max_cll, min_cll, max, min, q, r;
2841 	struct amdgpu_dm_backlight_caps *caps;
2842 	struct amdgpu_display_manager *dm;
2843 	struct drm_connector *conn_base;
2844 	struct amdgpu_device *adev;
2845 	struct dc_link *link = NULL;
2846 	static const u8 pre_computed_values[] = {
2847 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2848 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2849 	int i;
2850 
2851 	if (!aconnector || !aconnector->dc_link)
2852 		return;
2853 
2854 	link = aconnector->dc_link;
2855 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2856 		return;
2857 
2858 	conn_base = &aconnector->base;
2859 	adev = drm_to_adev(conn_base->dev);
2860 	dm = &adev->dm;
2861 	for (i = 0; i < dm->num_of_edps; i++) {
2862 		if (link == dm->backlight_link[i])
2863 			break;
2864 	}
2865 	if (i >= dm->num_of_edps)
2866 		return;
2867 	caps = &dm->backlight_caps[i];
2868 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2869 	caps->aux_support = false;
2870 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2871 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2872 
2873 	if (caps->ext_caps->bits.oled == 1 /*||
2874 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2875 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2876 		caps->aux_support = true;
2877 
2878 	if (amdgpu_backlight == 0)
2879 		caps->aux_support = false;
2880 	else if (amdgpu_backlight == 1)
2881 		caps->aux_support = true;
2882 
2883 	/* From the specification (CTA-861-G), for calculating the maximum
2884 	 * luminance we need to use:
2885 	 *	Luminance = 50*2**(CV/32)
2886 	 * Where CV is a one-byte value.
2887 	 * Evaluating this expression would require floating-point precision;
2888 	 * to avoid that complexity, we take advantage of the fact that CV is
2889 	 * divided by a constant. From Euclid's division algorithm, we know that
2890 	 * CV can be written as CV = 32*q + r. Substituting this into the
2891 	 * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need to
2892 	 * pre-compute the values of 2**(r/32). The pre-computed values were
2893 	 * generated with the following Ruby line:
2894 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2895 	 * The results of the above expression can be verified against
2896 	 * pre_computed_values.
2897 	 */
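	/*
	 * Worked example (hypothetical EDID value): max_cll = 100 gives
	 * q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
	 * max = (1 << 3) * pre_computed_values[4] = 8 * 55 = 440,
	 * close to the exact 50*2**(100/32) ~= 436.
	 */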
2898 	q = max_cll >> 5;
2899 	r = max_cll % 32;
2900 	max = (1 << q) * pre_computed_values[r];
2901 
2902 	// min luminance: maxLum * (CV/255)^2 / 100
2903 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2904 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2905 
2906 	caps->aux_max_input_signal = max;
2907 	caps->aux_min_input_signal = min;
2908 }
2909 
2910 void amdgpu_dm_update_connector_after_detect(
2911 		struct amdgpu_dm_connector *aconnector)
2912 {
2913 	struct drm_connector *connector = &aconnector->base;
2914 	struct drm_device *dev = connector->dev;
2915 	struct dc_sink *sink;
2916 
2917 	/* MST handled by drm_mst framework */
2918 	if (aconnector->mst_mgr.mst_state)
2919 		return;
2920 
2921 	sink = aconnector->dc_link->local_sink;
2922 	if (sink)
2923 		dc_sink_retain(sink);
2924 
2925 	/*
2926 	 * An EDID-managed connector gets its first update only in the mode_valid hook;
2927 	 * the connector sink is then set to either a fake or a physical sink depending on link status.
2928 	 * Skip if already done during boot.
2929 	 */
2930 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2931 			&& aconnector->dc_em_sink) {
2932 
2933 		/*
2934 		 * For headless S3 resume, use the emulated sink to fake a stream
2935 		 * because connector->sink is set to NULL on resume
2936 		 */
2937 		mutex_lock(&dev->mode_config.mutex);
2938 
2939 		if (sink) {
2940 			if (aconnector->dc_sink) {
2941 				amdgpu_dm_update_freesync_caps(connector, NULL);
2942 				/*
2943 				 * The retain and release below bump up the refcount for the
2944 				 * sink because the link no longer points to it after a
2945 				 * disconnect; otherwise the next crtc-to-connector reshuffle
2946 				 * by the UMD would trigger an unwanted dc_sink release.
2947 				 */
2948 				dc_sink_release(aconnector->dc_sink);
2949 			}
2950 			aconnector->dc_sink = sink;
2951 			dc_sink_retain(aconnector->dc_sink);
2952 			amdgpu_dm_update_freesync_caps(connector,
2953 					aconnector->edid);
2954 		} else {
2955 			amdgpu_dm_update_freesync_caps(connector, NULL);
2956 			if (!aconnector->dc_sink) {
2957 				aconnector->dc_sink = aconnector->dc_em_sink;
2958 				dc_sink_retain(aconnector->dc_sink);
2959 			}
2960 		}
2961 
2962 		mutex_unlock(&dev->mode_config.mutex);
2963 
2964 		if (sink)
2965 			dc_sink_release(sink);
2966 		return;
2967 	}
2968 
2969 	/*
2970 	 * TODO: temporary guard to look for proper fix
2971 	 * if this sink is MST sink, we should not do anything
2972 	 */
2973 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2974 		dc_sink_release(sink);
2975 		return;
2976 	}
2977 
2978 	if (aconnector->dc_sink == sink) {
2979 		/*
2980 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2981 		 * Do nothing!!
2982 		 */
2983 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2984 				aconnector->connector_id);
2985 		if (sink)
2986 			dc_sink_release(sink);
2987 		return;
2988 	}
2989 
2990 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2991 		aconnector->connector_id, aconnector->dc_sink, sink);
2992 
2993 	mutex_lock(&dev->mode_config.mutex);
2994 
2995 	/*
2996 	 * 1. Update status of the drm connector
2997 	 * 2. Send an event and let userspace tell us what to do
2998 	 */
2999 	if (sink) {
3000 		/*
3001 		 * TODO: check if we still need the S3 mode update workaround.
3002 		 * If yes, put it here.
3003 		 */
3004 		if (aconnector->dc_sink) {
3005 			amdgpu_dm_update_freesync_caps(connector, NULL);
3006 			dc_sink_release(aconnector->dc_sink);
3007 		}
3008 
3009 		aconnector->dc_sink = sink;
3010 		dc_sink_retain(aconnector->dc_sink);
3011 		if (sink->dc_edid.length == 0) {
3012 			aconnector->edid = NULL;
3013 			if (aconnector->dc_link->aux_mode) {
3014 				drm_dp_cec_unset_edid(
3015 					&aconnector->dm_dp_aux.aux);
3016 			}
3017 		} else {
3018 			aconnector->edid =
3019 				(struct edid *)sink->dc_edid.raw_edid;
3020 
3021 			if (aconnector->dc_link->aux_mode)
3022 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3023 						    aconnector->edid);
3024 		}
3025 
3026 		drm_connector_update_edid_property(connector, aconnector->edid);
3027 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3028 		update_connector_ext_caps(aconnector);
3029 	} else {
3030 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3031 		amdgpu_dm_update_freesync_caps(connector, NULL);
3032 		drm_connector_update_edid_property(connector, NULL);
3033 		aconnector->num_modes = 0;
3034 		dc_sink_release(aconnector->dc_sink);
3035 		aconnector->dc_sink = NULL;
3036 		aconnector->edid = NULL;
3037 #ifdef CONFIG_DRM_AMD_DC_HDCP
3038 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3039 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3040 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3041 #endif
3042 	}
3043 
3044 	mutex_unlock(&dev->mode_config.mutex);
3045 
3046 	update_subconnector_property(aconnector);
3047 
3048 	if (sink)
3049 		dc_sink_release(sink);
3050 }
3051 
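/*
 * Handle a long HPD pulse: re-run sink detection on the link (or emulate it
 * for forced connectors), update the connector state and send a hotplug
 * event to userspace.
 */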
3052 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3053 {
3054 	struct drm_connector *connector = &aconnector->base;
3055 	struct drm_device *dev = connector->dev;
3056 	enum dc_connection_type new_connection_type = dc_connection_none;
3057 	struct amdgpu_device *adev = drm_to_adev(dev);
3058 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3059 	struct dm_crtc_state *dm_crtc_state = NULL;
3060 
3061 	if (adev->dm.disable_hpd_irq)
3062 		return;
3063 
3064 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3065 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3066 					dm_con_state->base.state,
3067 					dm_con_state->base.crtc));
3068 	/*
3069 	 * In case of failure or MST there is no need to update the connector status
3070 	 * or notify the OS, since (in the MST case) MST does this in its own context.
3071 	 */
3072 	mutex_lock(&aconnector->hpd_lock);
3073 
3074 #ifdef CONFIG_DRM_AMD_DC_HDCP
3075 	if (adev->dm.hdcp_workqueue) {
3076 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3077 		dm_con_state->update_hdcp = true;
3078 	}
3079 #endif
3080 	if (aconnector->fake_enable)
3081 		aconnector->fake_enable = false;
3082 
3083 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3084 		DRM_ERROR("KMS: Failed to detect connector\n");
3085 
3086 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3087 		emulated_link_detect(aconnector->dc_link);
3088 
3089 		drm_modeset_lock_all(dev);
3090 		dm_restore_drm_connector_state(dev, connector);
3091 		drm_modeset_unlock_all(dev);
3092 
3093 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3094 			drm_kms_helper_connector_hotplug_event(connector);
3095 
3096 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3097 		if (new_connection_type == dc_connection_none &&
3098 		    aconnector->dc_link->type == dc_connection_none &&
3099 		    dm_crtc_state)
3100 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3101 
3102 		amdgpu_dm_update_connector_after_detect(aconnector);
3103 
3104 		drm_modeset_lock_all(dev);
3105 		dm_restore_drm_connector_state(dev, connector);
3106 		drm_modeset_unlock_all(dev);
3107 
3108 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3109 			drm_kms_helper_connector_hotplug_event(connector);
3110 	}
3111 	mutex_unlock(&aconnector->hpd_lock);
3112 
3113 }
3114 
3115 static void handle_hpd_irq(void *param)
3116 {
3117 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3118 
3119 	handle_hpd_irq_helper(aconnector);
3120 
3121 }
3122 
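/*
 * Service MST sideband messages signalled through a DP short pulse: read the
 * ESI (or sink count) DPCD registers, let the MST manager process the IRQ,
 * ACK it back over DPCD, and repeat until no new IRQ is reported or the
 * retry budget is exhausted.
 */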
3123 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3124 {
3125 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3126 	uint8_t dret;
3127 	bool new_irq_handled = false;
3128 	int dpcd_addr;
3129 	int dpcd_bytes_to_read;
3130 
3131 	const int max_process_count = 30;
3132 	int process_count = 0;
3133 
3134 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3135 
3136 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3137 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3138 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3139 		dpcd_addr = DP_SINK_COUNT;
3140 	} else {
3141 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3142 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3143 		dpcd_addr = DP_SINK_COUNT_ESI;
3144 	}
3145 
3146 	dret = drm_dp_dpcd_read(
3147 		&aconnector->dm_dp_aux.aux,
3148 		dpcd_addr,
3149 		esi,
3150 		dpcd_bytes_to_read);
3151 
3152 	while (dret == dpcd_bytes_to_read &&
3153 		process_count < max_process_count) {
3154 		uint8_t retry;
3155 		dret = 0;
3156 
3157 		process_count++;
3158 
3159 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3160 		/* handle HPD short pulse irq */
3161 		if (aconnector->mst_mgr.mst_state)
3162 			drm_dp_mst_hpd_irq(
3163 				&aconnector->mst_mgr,
3164 				esi,
3165 				&new_irq_handled);
3166 
3167 		if (new_irq_handled) {
3168 			/* ACK at DPCD to notify downstream */
3169 			const int ack_dpcd_bytes_to_write =
3170 				dpcd_bytes_to_read - 1;
3171 
3172 			for (retry = 0; retry < 3; retry++) {
3173 				uint8_t wret;
3174 
3175 				wret = drm_dp_dpcd_write(
3176 					&aconnector->dm_dp_aux.aux,
3177 					dpcd_addr + 1,
3178 					&esi[1],
3179 					ack_dpcd_bytes_to_write);
3180 				if (wret == ack_dpcd_bytes_to_write)
3181 					break;
3182 			}
3183 
3184 			/* check if there is new irq to be handled */
3185 			dret = drm_dp_dpcd_read(
3186 				&aconnector->dm_dp_aux.aux,
3187 				dpcd_addr,
3188 				esi,
3189 				dpcd_bytes_to_read);
3190 
3191 			new_irq_handled = false;
3192 		} else {
3193 			break;
3194 		}
3195 	}
3196 
3197 	if (process_count == max_process_count)
3198 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3199 }
3200 
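/*
 * Queue the HPD RX IRQ data on the per-link offload workqueue so the
 * long-running handling happens outside the HPD RX interrupt handler.
 */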
3201 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3202 							union hpd_irq_data hpd_irq_data)
3203 {
3204 	struct hpd_rx_irq_offload_work *offload_work =
3205 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3206 
3207 	if (!offload_work) {
3208 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3209 		return;
3210 	}
3211 
3212 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3213 	offload_work->data = hpd_irq_data;
3214 	offload_work->offload_wq = offload_wq;
3215 
3216 	queue_work(offload_wq->wq, &offload_work->work);
3217 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3218 }
3219 
3220 static void handle_hpd_rx_irq(void *param)
3221 {
3222 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3223 	struct drm_connector *connector = &aconnector->base;
3224 	struct drm_device *dev = connector->dev;
3225 	struct dc_link *dc_link = aconnector->dc_link;
3226 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3227 	bool result = false;
3228 	enum dc_connection_type new_connection_type = dc_connection_none;
3229 	struct amdgpu_device *adev = drm_to_adev(dev);
3230 	union hpd_irq_data hpd_irq_data;
3231 	bool link_loss = false;
3232 	bool has_left_work = false;
3233 	int idx = aconnector->base.index;
3234 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3235 
3236 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3237 
3238 	if (adev->dm.disable_hpd_irq)
3239 		return;
3240 
3241 	/*
3242 	 * TODO: Temporary mutex to protect the hpd interrupt from gpio
3243 	 * conflicts; once an i2c helper is implemented, this mutex should
3244 	 * be retired.
3245 	 */
3246 	mutex_lock(&aconnector->hpd_lock);
3247 
3248 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3249 						&link_loss, true, &has_left_work);
3250 
3251 	if (!has_left_work)
3252 		goto out;
3253 
3254 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3255 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3256 		goto out;
3257 	}
3258 
3259 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3260 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3261 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3262 			dm_handle_mst_sideband_msg(aconnector);
3263 			goto out;
3264 		}
3265 
3266 		if (link_loss) {
3267 			bool skip = false;
3268 
3269 			spin_lock(&offload_wq->offload_lock);
3270 			skip = offload_wq->is_handling_link_loss;
3271 
3272 			if (!skip)
3273 				offload_wq->is_handling_link_loss = true;
3274 
3275 			spin_unlock(&offload_wq->offload_lock);
3276 
3277 			if (!skip)
3278 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3279 
3280 			goto out;
3281 		}
3282 	}
3283 
3284 out:
3285 	if (result && !is_mst_root_connector) {
3286 		/* Downstream Port status changed. */
3287 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3288 			DRM_ERROR("KMS: Failed to detect connector\n");
3289 
3290 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3291 			emulated_link_detect(dc_link);
3292 
3293 			if (aconnector->fake_enable)
3294 				aconnector->fake_enable = false;
3295 
3296 			amdgpu_dm_update_connector_after_detect(aconnector);
3297 
3298 
3299 			drm_modeset_lock_all(dev);
3300 			dm_restore_drm_connector_state(dev, connector);
3301 			drm_modeset_unlock_all(dev);
3302 
3303 			drm_kms_helper_connector_hotplug_event(connector);
3304 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3305 
3306 			if (aconnector->fake_enable)
3307 				aconnector->fake_enable = false;
3308 
3309 			amdgpu_dm_update_connector_after_detect(aconnector);
3310 
3311 
3312 			drm_modeset_lock_all(dev);
3313 			dm_restore_drm_connector_state(dev, connector);
3314 			drm_modeset_unlock_all(dev);
3315 
3316 			drm_kms_helper_connector_hotplug_event(connector);
3317 		}
3318 	}
3319 #ifdef CONFIG_DRM_AMD_DC_HDCP
3320 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3321 		if (adev->dm.hdcp_workqueue)
3322 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3323 	}
3324 #endif
3325 
3326 	if (dc_link->type != dc_connection_mst_branch)
3327 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3328 
3329 	mutex_unlock(&aconnector->hpd_lock);
3330 }
3331 
3332 static void register_hpd_handlers(struct amdgpu_device *adev)
3333 {
3334 	struct drm_device *dev = adev_to_drm(adev);
3335 	struct drm_connector *connector;
3336 	struct amdgpu_dm_connector *aconnector;
3337 	const struct dc_link *dc_link;
3338 	struct dc_interrupt_params int_params = {0};
3339 
3340 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3341 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3342 
3343 	list_for_each_entry(connector,
3344 			&dev->mode_config.connector_list, head)	{
3345 
3346 		aconnector = to_amdgpu_dm_connector(connector);
3347 		dc_link = aconnector->dc_link;
3348 
3349 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3350 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3351 			int_params.irq_source = dc_link->irq_source_hpd;
3352 
3353 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3354 					handle_hpd_irq,
3355 					(void *) aconnector);
3356 		}
3357 
3358 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3359 
3360 			/* Also register for DP short pulse (hpd_rx). */
3361 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3362 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3363 
3364 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3365 					handle_hpd_rx_irq,
3366 					(void *) aconnector);
3367 
3368 			if (adev->dm.hpd_rx_offload_wq)
3369 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3370 					aconnector;
3371 		}
3372 	}
3373 }
3374 
3375 #if defined(CONFIG_DRM_AMD_DC_SI)
3376 /* Register IRQ sources and initialize IRQ callbacks */
3377 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3378 {
3379 	struct dc *dc = adev->dm.dc;
3380 	struct common_irq_params *c_irq_params;
3381 	struct dc_interrupt_params int_params = {0};
3382 	int r;
3383 	int i;
3384 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3385 
3386 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3387 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3388 
3389 	/*
3390 	 * Actions of amdgpu_irq_add_id():
3391 	 * 1. Register a set() function with base driver.
3392 	 *    Base driver will call set() function to enable/disable an
3393 	 *    interrupt in DC hardware.
3394 	 * 2. Register amdgpu_dm_irq_handler().
3395 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3396 	 *    coming from DC hardware.
3397 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3398 	 *    for acknowledging and handling.
	 */
3399 
3400 	/* Use VBLANK interrupt */
3401 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3402 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3403 		if (r) {
3404 			DRM_ERROR("Failed to add crtc irq id!\n");
3405 			return r;
3406 		}
3407 
3408 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3409 		int_params.irq_source =
3410 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3411 
3412 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3413 
3414 		c_irq_params->adev = adev;
3415 		c_irq_params->irq_src = int_params.irq_source;
3416 
3417 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3418 				dm_crtc_high_irq, c_irq_params);
3419 	}
3420 
3421 	/* Use GRPH_PFLIP interrupt */
3422 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3423 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3424 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3425 		if (r) {
3426 			DRM_ERROR("Failed to add page flip irq id!\n");
3427 			return r;
3428 		}
3429 
3430 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3431 		int_params.irq_source =
3432 			dc_interrupt_to_irq_source(dc, i, 0);
3433 
3434 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3435 
3436 		c_irq_params->adev = adev;
3437 		c_irq_params->irq_src = int_params.irq_source;
3438 
3439 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3440 				dm_pflip_high_irq, c_irq_params);
3442 	}
3443 
3444 	/* HPD */
3445 	r = amdgpu_irq_add_id(adev, client_id,
3446 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3447 	if (r) {
3448 		DRM_ERROR("Failed to add hpd irq id!\n");
3449 		return r;
3450 	}
3451 
3452 	register_hpd_handlers(adev);
3453 
3454 	return 0;
3455 }
3456 #endif
3457 
3458 /* Register IRQ sources and initialize IRQ callbacks */
3459 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3460 {
3461 	struct dc *dc = adev->dm.dc;
3462 	struct common_irq_params *c_irq_params;
3463 	struct dc_interrupt_params int_params = {0};
3464 	int r;
3465 	int i;
3466 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3467 
3468 	if (adev->family >= AMDGPU_FAMILY_AI)
3469 		client_id = SOC15_IH_CLIENTID_DCE;
3470 
3471 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3472 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3473 
3474 	/*
3475 	 * Actions of amdgpu_irq_add_id():
3476 	 * 1. Register a set() function with base driver.
3477 	 *    Base driver will call set() function to enable/disable an
3478 	 *    interrupt in DC hardware.
3479 	 * 2. Register amdgpu_dm_irq_handler().
3480 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3481 	 *    coming from DC hardware.
3482 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3483 	 *    for acknowledging and handling.
	 */
3484 
3485 	/* Use VBLANK interrupt */
3486 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3487 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3488 		if (r) {
3489 			DRM_ERROR("Failed to add crtc irq id!\n");
3490 			return r;
3491 		}
3492 
3493 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3494 		int_params.irq_source =
3495 			dc_interrupt_to_irq_source(dc, i, 0);
3496 
3497 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3498 
3499 		c_irq_params->adev = adev;
3500 		c_irq_params->irq_src = int_params.irq_source;
3501 
3502 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3503 				dm_crtc_high_irq, c_irq_params);
3504 	}
3505 
3506 	/* Use VUPDATE interrupt */
3507 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3508 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3509 		if (r) {
3510 			DRM_ERROR("Failed to add vupdate irq id!\n");
3511 			return r;
3512 		}
3513 
3514 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3515 		int_params.irq_source =
3516 			dc_interrupt_to_irq_source(dc, i, 0);
3517 
3518 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3519 
3520 		c_irq_params->adev = adev;
3521 		c_irq_params->irq_src = int_params.irq_source;
3522 
3523 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3524 				dm_vupdate_high_irq, c_irq_params);
3525 	}
3526 
3527 	/* Use GRPH_PFLIP interrupt */
3528 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3529 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3530 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3531 		if (r) {
3532 			DRM_ERROR("Failed to add page flip irq id!\n");
3533 			return r;
3534 		}
3535 
3536 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3537 		int_params.irq_source =
3538 			dc_interrupt_to_irq_source(dc, i, 0);
3539 
3540 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3541 
3542 		c_irq_params->adev = adev;
3543 		c_irq_params->irq_src = int_params.irq_source;
3544 
3545 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3546 				dm_pflip_high_irq, c_irq_params);
3548 	}
3549 
3550 	/* HPD */
3551 	r = amdgpu_irq_add_id(adev, client_id,
3552 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3553 	if (r) {
3554 		DRM_ERROR("Failed to add hpd irq id!\n");
3555 		return r;
3556 	}
3557 
3558 	register_hpd_handlers(adev);
3559 
3560 	return 0;
3561 }
3562 
3563 #if defined(CONFIG_DRM_AMD_DC_DCN)
3564 /* Register IRQ sources and initialize IRQ callbacks */
3565 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3566 {
3567 	struct dc *dc = adev->dm.dc;
3568 	struct common_irq_params *c_irq_params;
3569 	struct dc_interrupt_params int_params = {0};
3570 	int r;
3571 	int i;
3572 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3573 	static const unsigned int vrtl_int_srcid[] = {
3574 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3575 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3576 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3577 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3578 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3579 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3580 	};
3581 #endif
3582 
3583 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3584 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3585 
3586 	/*
3587 	 * Actions of amdgpu_irq_add_id():
3588 	 * 1. Register a set() function with base driver.
3589 	 *    Base driver will call set() function to enable/disable an
3590 	 *    interrupt in DC hardware.
3591 	 * 2. Register amdgpu_dm_irq_handler().
3592 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3593 	 *    coming from DC hardware.
3594 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3595 	 *    for acknowledging and handling.
3596 	 */
3597 
3598 	/* Use VSTARTUP interrupt */
3599 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3600 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3601 			i++) {
3602 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3603 
3604 		if (r) {
3605 			DRM_ERROR("Failed to add crtc irq id!\n");
3606 			return r;
3607 		}
3608 
3609 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3610 		int_params.irq_source =
3611 			dc_interrupt_to_irq_source(dc, i, 0);
3612 
3613 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3614 
3615 		c_irq_params->adev = adev;
3616 		c_irq_params->irq_src = int_params.irq_source;
3617 
3618 		amdgpu_dm_irq_register_interrupt(
3619 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3620 	}
3621 
3622 	/* Use otg vertical line interrupt */
3623 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3624 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3625 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3626 				vrtl_int_srcid[i], &adev->vline0_irq);
3627 
3628 		if (r) {
3629 			DRM_ERROR("Failed to add vline0 irq id!\n");
3630 			return r;
3631 		}
3632 
3633 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3634 		int_params.irq_source =
3635 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3636 
3637 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3638 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3639 			break;
3640 		}
3641 
3642 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3643 					- DC_IRQ_SOURCE_DC1_VLINE0];
3644 
3645 		c_irq_params->adev = adev;
3646 		c_irq_params->irq_src = int_params.irq_source;
3647 
3648 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3649 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3650 	}
3651 #endif
3652 
3653 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3654 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3655 	 * to trigger at end of each vblank, regardless of state of the lock,
3656 	 * matching DCE behaviour.
3657 	 */
3658 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3659 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3660 	     i++) {
3661 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3662 
3663 		if (r) {
3664 			DRM_ERROR("Failed to add vupdate irq id!\n");
3665 			return r;
3666 		}
3667 
3668 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3669 		int_params.irq_source =
3670 			dc_interrupt_to_irq_source(dc, i, 0);
3671 
3672 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3673 
3674 		c_irq_params->adev = adev;
3675 		c_irq_params->irq_src = int_params.irq_source;
3676 
3677 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3678 				dm_vupdate_high_irq, c_irq_params);
3679 	}
3680 
3681 	/* Use GRPH_PFLIP interrupt */
3682 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3683 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3684 			i++) {
3685 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3686 		if (r) {
3687 			DRM_ERROR("Failed to add page flip irq id!\n");
3688 			return r;
3689 		}
3690 
3691 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3692 		int_params.irq_source =
3693 			dc_interrupt_to_irq_source(dc, i, 0);
3694 
3695 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3696 
3697 		c_irq_params->adev = adev;
3698 		c_irq_params->irq_src = int_params.irq_source;
3699 
3700 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3701 				dm_pflip_high_irq, c_irq_params);
3703 	}
3704 
3705 	/* HPD */
3706 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3707 			&adev->hpd_irq);
3708 	if (r) {
3709 		DRM_ERROR("Failed to add hpd irq id!\n");
3710 		return r;
3711 	}
3712 
3713 	register_hpd_handlers(adev);
3714 
3715 	return 0;
3716 }
3717 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3718 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3719 {
3720 	struct dc *dc = adev->dm.dc;
3721 	struct common_irq_params *c_irq_params;
3722 	struct dc_interrupt_params int_params = {0};
3723 	int r, i;
3724 
3725 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3726 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3727 
3728 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3729 			&adev->dmub_outbox_irq);
3730 	if (r) {
3731 		DRM_ERROR("Failed to add outbox irq id!\n");
3732 		return r;
3733 	}
3734 
3735 	if (dc->ctx->dmub_srv) {
3736 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3737 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3738 		int_params.irq_source =
3739 			dc_interrupt_to_irq_source(dc, i, 0);
3740 
3741 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3742 
3743 		c_irq_params->adev = adev;
3744 		c_irq_params->irq_src = int_params.irq_source;
3745 
3746 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3747 				dm_dmub_outbox1_low_irq, c_irq_params);
3748 	}
3749 
3750 	return 0;
3751 }
3752 #endif
3753 
3754 /*
3755  * Acquires the lock for the atomic state object and returns
3756  * the new atomic state.
3757  *
3758  * This should only be called during atomic check.
3759  */
3760 int dm_atomic_get_state(struct drm_atomic_state *state,
3761 			struct dm_atomic_state **dm_state)
3762 {
3763 	struct drm_device *dev = state->dev;
3764 	struct amdgpu_device *adev = drm_to_adev(dev);
3765 	struct amdgpu_display_manager *dm = &adev->dm;
3766 	struct drm_private_state *priv_state;
3767 
3768 	if (*dm_state)
3769 		return 0;
3770 
3771 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3772 	if (IS_ERR(priv_state))
3773 		return PTR_ERR(priv_state);
3774 
3775 	*dm_state = to_dm_atomic_state(priv_state);
3776 
3777 	return 0;
3778 }
3779 
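/*
 * Return the DM private state already added to the atomic state, or NULL
 * if the DM private object is not part of this commit.
 */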
3780 static struct dm_atomic_state *
3781 dm_atomic_get_new_state(struct drm_atomic_state *state)
3782 {
3783 	struct drm_device *dev = state->dev;
3784 	struct amdgpu_device *adev = drm_to_adev(dev);
3785 	struct amdgpu_display_manager *dm = &adev->dm;
3786 	struct drm_private_obj *obj;
3787 	struct drm_private_state *new_obj_state;
3788 	int i;
3789 
3790 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3791 		if (obj->funcs == dm->atomic_obj.funcs)
3792 			return to_dm_atomic_state(new_obj_state);
3793 	}
3794 
3795 	return NULL;
3796 }
3797 
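/*
 * Duplicate the DM private state, including a copy of the current DC
 * state, for use by a new atomic commit.
 */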
3798 static struct drm_private_state *
3799 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3800 {
3801 	struct dm_atomic_state *old_state, *new_state;
3802 
3803 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3804 	if (!new_state)
3805 		return NULL;
3806 
3807 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3808 
3809 	old_state = to_dm_atomic_state(obj->state);
3810 
3811 	if (old_state && old_state->context)
3812 		new_state->context = dc_copy_state(old_state->context);
3813 
3814 	if (!new_state->context) {
3815 		kfree(new_state);
3816 		return NULL;
3817 	}
3818 
3819 	return &new_state->base;
3820 }
3821 
3822 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3823 				    struct drm_private_state *state)
3824 {
3825 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3826 
3827 	if (dm_state && dm_state->context)
3828 		dc_release_state(dm_state->context);
3829 
3830 	kfree(dm_state);
3831 }
3832 
3833 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3834 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3835 	.atomic_destroy_state = dm_atomic_destroy_state,
3836 };
3837 
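/*
 * Initialize DRM mode config limits, create the DM private atomic object
 * seeded with the current DC state, and set up modeset properties and
 * audio support.
 */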
3838 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3839 {
3840 	struct dm_atomic_state *state;
3841 	int r;
3842 
3843 	adev->mode_info.mode_config_initialized = true;
3844 
3845 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3846 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3847 
3848 	adev_to_drm(adev)->mode_config.max_width = 16384;
3849 	adev_to_drm(adev)->mode_config.max_height = 16384;
3850 
3851 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3852 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3853 	/* indicates support for immediate flip */
3854 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3855 
3856 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3857 
3858 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3859 	if (!state)
3860 		return -ENOMEM;
3861 
3862 	state->context = dc_create_state(adev->dm.dc);
3863 	if (!state->context) {
3864 		kfree(state);
3865 		return -ENOMEM;
3866 	}
3867 
3868 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3869 
3870 	drm_atomic_private_obj_init(adev_to_drm(adev),
3871 				    &adev->dm.atomic_obj,
3872 				    &state->base,
3873 				    &dm_atomic_state_funcs);
3874 
3875 	r = amdgpu_display_modeset_create_props(adev);
3876 	if (r) {
3877 		dc_release_state(state->context);
3878 		kfree(state);
3879 		return r;
3880 	}
3881 
3882 	r = amdgpu_dm_audio_init(adev);
3883 	if (r) {
3884 		dc_release_state(state->context);
3885 		kfree(state);
3886 		return r;
3887 	}
3888 
3889 	return 0;
3890 }
3891 
3892 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3893 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3894 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3895 
3896 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3897 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3898 
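/*
 * Populate backlight caps for the given backlight index from ACPI when
 * available, falling back to the default min/max input signal range.
 */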
3899 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3900 					    int bl_idx)
3901 {
3902 #if defined(CONFIG_ACPI)
3903 	struct amdgpu_dm_backlight_caps caps;
3904 
3905 	memset(&caps, 0, sizeof(caps));
3906 
3907 	if (dm->backlight_caps[bl_idx].caps_valid)
3908 		return;
3909 
3910 	amdgpu_acpi_get_backlight_caps(&caps);
3911 	if (caps.caps_valid) {
3912 		dm->backlight_caps[bl_idx].caps_valid = true;
3913 		if (caps.aux_support)
3914 			return;
3915 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3916 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3917 	} else {
3918 		dm->backlight_caps[bl_idx].min_input_signal =
3919 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3920 		dm->backlight_caps[bl_idx].max_input_signal =
3921 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3922 	}
3923 #else
3924 	if (dm->backlight_caps[bl_idx].aux_support)
3925 		return;
3926 
3927 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3928 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3929 #endif
3930 }
3931 
3932 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3933 				unsigned *min, unsigned *max)
3934 {
3935 	if (!caps)
3936 		return 0;
3937 
3938 	if (caps->aux_support) {
3939 		// Firmware limits are in nits, DC API wants millinits.
3940 		*max = 1000 * caps->aux_max_input_signal;
3941 		*min = 1000 * caps->aux_min_input_signal;
3942 	} else {
3943 		// Firmware limits are 8-bit, PWM control is 16-bit.
3944 		*max = 0x101 * caps->max_input_signal;
3945 		*min = 0x101 * caps->min_input_signal;
3946 	}
3947 	return 1;
3948 }
3949 
3950 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3951 					uint32_t brightness)
3952 {
3953 	unsigned min, max;
3954 
3955 	if (!get_brightness_range(caps, &min, &max))
3956 		return brightness;
3957 
3958 	// Rescale 0..255 to min..max
3959 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3960 				       AMDGPU_MAX_BL_LEVEL);
3961 }
3962 
3963 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3964 				      uint32_t brightness)
3965 {
3966 	unsigned min, max;
3967 
3968 	if (!get_brightness_range(caps, &min, &max))
3969 		return brightness;
3970 
3971 	if (brightness < min)
3972 		return 0;
3973 	// Rescale min..max to 0..255
3974 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3975 				 max - min);
3976 }
3977 
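/*
 * Apply a user-requested brightness level to the eDP link at bl_idx,
 * using AUX (nits) when supported and PWM otherwise.
 */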
3978 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3979 					 int bl_idx,
3980 					 u32 user_brightness)
3981 {
3982 	struct amdgpu_dm_backlight_caps caps;
3983 	struct dc_link *link;
3984 	u32 brightness;
3985 	bool rc;
3986 
3987 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3988 	caps = dm->backlight_caps[bl_idx];
3989 
3990 	dm->brightness[bl_idx] = user_brightness;
3991 	/* update scratch register */
3992 	if (bl_idx == 0)
3993 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3994 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3995 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3996 
3997 	/* Change brightness based on AUX property */
3998 	if (caps.aux_support) {
3999 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
4000 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4001 		if (!rc)
4002 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4003 	} else {
4004 		rc = dc_link_set_backlight_level(link, brightness, 0);
4005 		if (!rc)
4006 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4007 	}
4008 
4009 	if (rc)
4010 		dm->actual_brightness[bl_idx] = user_brightness;
4011 }
4012 
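/*
 * backlight_ops callback: map the backlight device back to its eDP index
 * and apply the requested brightness.
 */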
4013 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4014 {
4015 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4016 	int i;
4017 
4018 	for (i = 0; i < dm->num_of_edps; i++) {
4019 		if (bd == dm->backlight_dev[i])
4020 			break;
4021 	}
4022 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4023 		i = 0;
4024 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4025 
4026 	return 0;
4027 }
4028 
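/*
 * Read back the current brightness for the eDP link at bl_idx, converting
 * from the hardware range to the 0..AMDGPU_MAX_BL_LEVEL user range and
 * falling back to the cached value if the link query fails.
 */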
4029 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4030 					 int bl_idx)
4031 {
4032 	struct amdgpu_dm_backlight_caps caps;
4033 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4034 
4035 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4036 	caps = dm->backlight_caps[bl_idx];
4037 
4038 	if (caps.aux_support) {
4039 		u32 avg, peak;
4040 		bool rc;
4041 
4042 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4043 		if (!rc)
4044 			return dm->brightness[bl_idx];
4045 		return convert_brightness_to_user(&caps, avg);
4046 	} else {
4047 		int ret = dc_link_get_backlight_level(link);
4048 
4049 		if (ret == DC_ERROR_UNEXPECTED)
4050 			return dm->brightness[bl_idx];
4051 		return convert_brightness_to_user(&caps, ret);
4052 	}
4053 }
4054 
4055 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4056 {
4057 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4058 	int i;
4059 
4060 	for (i = 0; i < dm->num_of_edps; i++) {
4061 		if (bd == dm->backlight_dev[i])
4062 			break;
4063 	}
4064 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4065 		i = 0;
4066 	return amdgpu_dm_backlight_get_level(dm, i);
4067 }
4068 
4069 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4070 	.options = BL_CORE_SUSPENDRESUME,
4071 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4072 	.update_status	= amdgpu_dm_backlight_update_status,
4073 };
4074 
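/*
 * Register a backlight class device for the next eDP link, named
 * amdgpu_blN, with a 0..AMDGPU_MAX_BL_LEVEL raw brightness range.
 */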
4075 static void
4076 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4077 {
4078 	char bl_name[16];
4079 	struct backlight_properties props = { 0 };
4080 
4081 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4082 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4083 
4084 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4085 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4086 	props.type = BACKLIGHT_RAW;
4087 
4088 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4089 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4090 
4091 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4092 								       adev_to_drm(dm->adev)->dev,
4093 								       dm,
4094 								       &amdgpu_dm_backlight_ops,
4095 								       &props);
4096 
4097 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4098 		DRM_ERROR("DM: Backlight registration failed!\n");
4099 	else
4100 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4101 }
4102 #endif
4103 
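/*
 * Allocate and initialize a single DRM plane of the given type,
 * restricting primary planes to their matching CRTC and storing the
 * result in mode_info when provided.
 */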
4104 static int initialize_plane(struct amdgpu_display_manager *dm,
4105 			    struct amdgpu_mode_info *mode_info, int plane_id,
4106 			    enum drm_plane_type plane_type,
4107 			    const struct dc_plane_cap *plane_cap)
4108 {
4109 	struct drm_plane *plane;
4110 	unsigned long possible_crtcs;
4111 	int ret = 0;
4112 
4113 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4114 	if (!plane) {
4115 		DRM_ERROR("KMS: Failed to allocate plane\n");
4116 		return -ENOMEM;
4117 	}
4118 	plane->type = plane_type;
4119 
4120 	/*
4121 	 * HACK: IGT tests expect that the primary plane for a CRTC
4122 	 * can only have one possible CRTC. Only expose support for
4123 	 * any CRTC if the plane is not going to be used as a primary
4124 	 * plane for a CRTC - like overlay or underlay planes.
4125 	 */
4126 	possible_crtcs = 1 << plane_id;
4127 	if (plane_id >= dm->dc->caps.max_streams)
4128 		possible_crtcs = 0xff;
4129 
4130 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4131 
4132 	if (ret) {
4133 		DRM_ERROR("KMS: Failed to initialize plane\n");
4134 		kfree(plane);
4135 		return ret;
4136 	}
4137 
4138 	if (mode_info)
4139 		mode_info->planes[plane_id] = plane;
4140 
4141 	return ret;
4142 }
4143 
4144 
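/*
 * Register a backlight device for eDP/LVDS links that have a sink
 * attached, and remember the link so brightness requests can be routed
 * to it.
 */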
4145 static void register_backlight_device(struct amdgpu_display_manager *dm,
4146 				      struct dc_link *link)
4147 {
4148 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4149 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4150 
4151 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4152 	    link->type != dc_connection_none) {
4153 		/*
4154 		 * Even if registration fails, we should continue with
4155 		 * DM initialization because not having backlight control
4156 		 * is better than a black screen.
4157 		 */
4158 		if (!dm->backlight_dev[dm->num_of_edps])
4159 			amdgpu_dm_register_backlight_device(dm);
4160 
4161 		if (dm->backlight_dev[dm->num_of_edps]) {
4162 			dm->backlight_link[dm->num_of_edps] = link;
4163 			dm->num_of_edps++;
4164 		}
4165 	}
4166 #endif
4167 }
4168 
4169 
4170 /*
4171  * In this architecture, the association
4172  * connector -> encoder -> crtc
4173  * is not really required. The crtc and connector will hold the
4174  * display_index as an abstraction to use with the DAL component.
4175  *
4176  * Returns 0 on success
4177  */
4178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4179 {
4180 	struct amdgpu_display_manager *dm = &adev->dm;
4181 	int32_t i;
4182 	struct amdgpu_dm_connector *aconnector = NULL;
4183 	struct amdgpu_encoder *aencoder = NULL;
4184 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4185 	uint32_t link_cnt;
4186 	int32_t primary_planes;
4187 	enum dc_connection_type new_connection_type = dc_connection_none;
4188 	const struct dc_plane_cap *plane;
4189 	bool psr_feature_enabled = false;
4190 
4191 	dm->display_indexes_num = dm->dc->caps.max_streams;
4192 	/* Update the actual number of CRTCs in use */
4193 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4194 
4195 	link_cnt = dm->dc->caps.max_links;
4196 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4197 		DRM_ERROR("DM: Failed to initialize mode config\n");
4198 		return -EINVAL;
4199 	}
4200 
4201 	/* There is one primary plane per CRTC */
4202 	primary_planes = dm->dc->caps.max_streams;
4203 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4204 
4205 	/*
4206 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4207 	 * Order is reversed to match iteration order in atomic check.
4208 	 */
4209 	for (i = (primary_planes - 1); i >= 0; i--) {
4210 		plane = &dm->dc->caps.planes[i];
4211 
4212 		if (initialize_plane(dm, mode_info, i,
4213 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4214 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4215 			goto fail;
4216 		}
4217 	}
4218 
4219 	/*
4220 	 * Initialize overlay planes, index starting after primary planes.
4221 	 * These planes have a higher DRM index than the primary planes since
4222 	 * they should be considered as having a higher z-order.
4223 	 * Order is reversed to match iteration order in atomic check.
4224 	 *
4225 	 * Only support DCN for now, and only expose one so we don't encourage
4226 	 * userspace to use up all the pipes.
4227 	 */
4228 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4229 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4230 
4231 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4232 			continue;
4233 
4234 		if (!plane->blends_with_above || !plane->blends_with_below)
4235 			continue;
4236 
4237 		if (!plane->pixel_format_support.argb8888)
4238 			continue;
4239 
4240 		if (initialize_plane(dm, NULL, primary_planes + i,
4241 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4242 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4243 			goto fail;
4244 		}
4245 
4246 		/* Only create one overlay plane. */
4247 		break;
4248 	}
4249 
4250 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4251 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4252 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4253 			goto fail;
4254 		}
4255 
4256 #if defined(CONFIG_DRM_AMD_DC_DCN)
4257 	/* Use Outbox interrupt */
4258 	switch (adev->ip_versions[DCE_HWIP][0]) {
4259 	case IP_VERSION(3, 0, 0):
4260 	case IP_VERSION(3, 1, 2):
4261 	case IP_VERSION(3, 1, 3):
4262 	case IP_VERSION(3, 1, 5):
4263 	case IP_VERSION(3, 1, 6):
4264 	case IP_VERSION(2, 1, 0):
4265 		if (register_outbox_irq_handlers(dm->adev)) {
4266 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4267 			goto fail;
4268 		}
4269 		break;
4270 	default:
4271 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4272 			      adev->ip_versions[DCE_HWIP][0]);
4273 	}
4274 
4275 	/* Determine whether to enable PSR support by default. */
4276 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4277 		switch (adev->ip_versions[DCE_HWIP][0]) {
4278 		case IP_VERSION(3, 1, 2):
4279 		case IP_VERSION(3, 1, 3):
4280 		case IP_VERSION(3, 1, 5):
4281 		case IP_VERSION(3, 1, 6):
4282 			psr_feature_enabled = true;
4283 			break;
4284 		default:
4285 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4286 			break;
4287 		}
4288 	}
4289 #endif
4290 
4291 	/* Disable vblank IRQs aggressively for power-saving. */
4292 	adev_to_drm(adev)->vblank_disable_immediate = true;
4293 
4294 	/* loops over all connectors on the board */
4295 	for (i = 0; i < link_cnt; i++) {
4296 		struct dc_link *link = NULL;
4297 
4298 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4299 			DRM_ERROR(
4300 				"KMS: Cannot support more than %d display indexes\n",
4301 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4302 			continue;
4303 		}
4304 
4305 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4306 		if (!aconnector)
4307 			goto fail;
4308 
4309 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4310 		if (!aencoder)
4311 			goto fail;
4312 
4313 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4314 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4315 			goto fail;
4316 		}
4317 
4318 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4319 			DRM_ERROR("KMS: Failed to initialize connector\n");
4320 			goto fail;
4321 		}
4322 
4323 		link = dc_get_link_at_index(dm->dc, i);
4324 
4325 		if (!dc_link_detect_sink(link, &new_connection_type))
4326 			DRM_ERROR("KMS: Failed to detect connector\n");
4327 
4328 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4329 			emulated_link_detect(link);
4330 			amdgpu_dm_update_connector_after_detect(aconnector);
4331 
4332 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4333 			amdgpu_dm_update_connector_after_detect(aconnector);
4334 			register_backlight_device(dm, link);
4335 			if (dm->num_of_edps)
4336 				update_connector_ext_caps(aconnector);
4337 			if (psr_feature_enabled)
4338 				amdgpu_dm_set_psr_caps(link);
4339 
4340 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4341 			 * PSR is also supported.
4342 			 */
4343 			if (link->psr_settings.psr_feature_enabled)
4344 				adev_to_drm(adev)->vblank_disable_immediate = false;
4345 		}
4348 	}
4349 
4350 	/* Software is initialized. Now we can register interrupt handlers. */
4351 	switch (adev->asic_type) {
4352 #if defined(CONFIG_DRM_AMD_DC_SI)
4353 	case CHIP_TAHITI:
4354 	case CHIP_PITCAIRN:
4355 	case CHIP_VERDE:
4356 	case CHIP_OLAND:
4357 		if (dce60_register_irq_handlers(dm->adev)) {
4358 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4359 			goto fail;
4360 		}
4361 		break;
4362 #endif
4363 	case CHIP_BONAIRE:
4364 	case CHIP_HAWAII:
4365 	case CHIP_KAVERI:
4366 	case CHIP_KABINI:
4367 	case CHIP_MULLINS:
4368 	case CHIP_TONGA:
4369 	case CHIP_FIJI:
4370 	case CHIP_CARRIZO:
4371 	case CHIP_STONEY:
4372 	case CHIP_POLARIS11:
4373 	case CHIP_POLARIS10:
4374 	case CHIP_POLARIS12:
4375 	case CHIP_VEGAM:
4376 	case CHIP_VEGA10:
4377 	case CHIP_VEGA12:
4378 	case CHIP_VEGA20:
4379 		if (dce110_register_irq_handlers(dm->adev)) {
4380 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4381 			goto fail;
4382 		}
4383 		break;
4384 	default:
4385 #if defined(CONFIG_DRM_AMD_DC_DCN)
4386 		switch (adev->ip_versions[DCE_HWIP][0]) {
4387 		case IP_VERSION(1, 0, 0):
4388 		case IP_VERSION(1, 0, 1):
4389 		case IP_VERSION(2, 0, 2):
4390 		case IP_VERSION(2, 0, 3):
4391 		case IP_VERSION(2, 0, 0):
4392 		case IP_VERSION(2, 1, 0):
4393 		case IP_VERSION(3, 0, 0):
4394 		case IP_VERSION(3, 0, 2):
4395 		case IP_VERSION(3, 0, 3):
4396 		case IP_VERSION(3, 0, 1):
4397 		case IP_VERSION(3, 1, 2):
4398 		case IP_VERSION(3, 1, 3):
4399 		case IP_VERSION(3, 1, 5):
4400 		case IP_VERSION(3, 1, 6):
4401 			if (dcn10_register_irq_handlers(dm->adev)) {
4402 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4403 				goto fail;
4404 			}
4405 			break;
4406 		default:
4407 			DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4408 					adev->ip_versions[DCE_HWIP][0]);
4409 			goto fail;
4410 		}
4411 #endif
4412 		break;
4413 	}
4414 
4415 	return 0;
4416 fail:
4417 	kfree(aencoder);
4418 	kfree(aconnector);
4419 
4420 	return -EINVAL;
4421 }
4422 
4423 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4424 {
4425 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4427 }
4428 
4429 /******************************************************************************
4430  * amdgpu_display_funcs functions
4431  *****************************************************************************/
4432 
4433 /*
4434  * dm_bandwidth_update - program display watermarks
4435  *
4436  * @adev: amdgpu_device pointer
4437  *
4438  * Calculate and program the display watermarks and line buffer allocation.
4439  */
4440 static void dm_bandwidth_update(struct amdgpu_device *adev)
4441 {
4442 	/* TODO: implement later */
4443 }
4444 
4445 static const struct amdgpu_display_funcs dm_display_funcs = {
4446 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4447 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4448 	.backlight_set_level = NULL, /* never called for DC */
4449 	.backlight_get_level = NULL, /* never called for DC */
4450 	.hpd_sense = NULL,/* called unconditionally */
4451 	.hpd_set_polarity = NULL, /* called unconditionally */
4452 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4453 	.page_flip_get_scanoutpos =
4454 		dm_crtc_get_scanoutpos,/* called unconditionally */
4455 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4456 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4457 };
4458 
4459 #if defined(CONFIG_DEBUG_KERNEL_DC)
4460 
4461 static ssize_t s3_debug_store(struct device *device,
4462 			      struct device_attribute *attr,
4463 			      const char *buf,
4464 			      size_t count)
4465 {
4466 	int ret;
4467 	int s3_state;
4468 	struct drm_device *drm_dev = dev_get_drvdata(device);
4469 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4470 
4471 	ret = kstrtoint(buf, 0, &s3_state);
4472 
4473 	if (ret == 0) {
4474 		if (s3_state) {
4475 			dm_resume(adev);
4476 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4477 		} else
4478 			dm_suspend(adev);
4479 	}
4480 
4481 	return ret == 0 ? count : 0;
4482 }
4483 
4484 DEVICE_ATTR_WO(s3_debug);
4485 
4486 #endif
4487 
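/*
 * Early init: set the per-ASIC CRTC/HPD/DIG counts and install the DM
 * display and IRQ function tables.
 */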
4488 static int dm_early_init(void *handle)
4489 {
4490 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4491 
4492 	switch (adev->asic_type) {
4493 #if defined(CONFIG_DRM_AMD_DC_SI)
4494 	case CHIP_TAHITI:
4495 	case CHIP_PITCAIRN:
4496 	case CHIP_VERDE:
4497 		adev->mode_info.num_crtc = 6;
4498 		adev->mode_info.num_hpd = 6;
4499 		adev->mode_info.num_dig = 6;
4500 		break;
4501 	case CHIP_OLAND:
4502 		adev->mode_info.num_crtc = 2;
4503 		adev->mode_info.num_hpd = 2;
4504 		adev->mode_info.num_dig = 2;
4505 		break;
4506 #endif
4507 	case CHIP_BONAIRE:
4508 	case CHIP_HAWAII:
4509 		adev->mode_info.num_crtc = 6;
4510 		adev->mode_info.num_hpd = 6;
4511 		adev->mode_info.num_dig = 6;
4512 		break;
4513 	case CHIP_KAVERI:
4514 		adev->mode_info.num_crtc = 4;
4515 		adev->mode_info.num_hpd = 6;
4516 		adev->mode_info.num_dig = 7;
4517 		break;
4518 	case CHIP_KABINI:
4519 	case CHIP_MULLINS:
4520 		adev->mode_info.num_crtc = 2;
4521 		adev->mode_info.num_hpd = 6;
4522 		adev->mode_info.num_dig = 6;
4523 		break;
4524 	case CHIP_FIJI:
4525 	case CHIP_TONGA:
4526 		adev->mode_info.num_crtc = 6;
4527 		adev->mode_info.num_hpd = 6;
4528 		adev->mode_info.num_dig = 7;
4529 		break;
4530 	case CHIP_CARRIZO:
4531 		adev->mode_info.num_crtc = 3;
4532 		adev->mode_info.num_hpd = 6;
4533 		adev->mode_info.num_dig = 9;
4534 		break;
4535 	case CHIP_STONEY:
4536 		adev->mode_info.num_crtc = 2;
4537 		adev->mode_info.num_hpd = 6;
4538 		adev->mode_info.num_dig = 9;
4539 		break;
4540 	case CHIP_POLARIS11:
4541 	case CHIP_POLARIS12:
4542 		adev->mode_info.num_crtc = 5;
4543 		adev->mode_info.num_hpd = 5;
4544 		adev->mode_info.num_dig = 5;
4545 		break;
4546 	case CHIP_POLARIS10:
4547 	case CHIP_VEGAM:
4548 		adev->mode_info.num_crtc = 6;
4549 		adev->mode_info.num_hpd = 6;
4550 		adev->mode_info.num_dig = 6;
4551 		break;
4552 	case CHIP_VEGA10:
4553 	case CHIP_VEGA12:
4554 	case CHIP_VEGA20:
4555 		adev->mode_info.num_crtc = 6;
4556 		adev->mode_info.num_hpd = 6;
4557 		adev->mode_info.num_dig = 6;
4558 		break;
4559 	default:
4560 #if defined(CONFIG_DRM_AMD_DC_DCN)
4561 		switch (adev->ip_versions[DCE_HWIP][0]) {
4562 		case IP_VERSION(2, 0, 2):
4563 		case IP_VERSION(3, 0, 0):
4564 			adev->mode_info.num_crtc = 6;
4565 			adev->mode_info.num_hpd = 6;
4566 			adev->mode_info.num_dig = 6;
4567 			break;
4568 		case IP_VERSION(2, 0, 0):
4569 		case IP_VERSION(3, 0, 2):
4570 			adev->mode_info.num_crtc = 5;
4571 			adev->mode_info.num_hpd = 5;
4572 			adev->mode_info.num_dig = 5;
4573 			break;
4574 		case IP_VERSION(2, 0, 3):
4575 		case IP_VERSION(3, 0, 3):
4576 			adev->mode_info.num_crtc = 2;
4577 			adev->mode_info.num_hpd = 2;
4578 			adev->mode_info.num_dig = 2;
4579 			break;
4580 		case IP_VERSION(1, 0, 0):
4581 		case IP_VERSION(1, 0, 1):
4582 		case IP_VERSION(3, 0, 1):
4583 		case IP_VERSION(2, 1, 0):
4584 		case IP_VERSION(3, 1, 2):
4585 		case IP_VERSION(3, 1, 3):
4586 		case IP_VERSION(3, 1, 5):
4587 		case IP_VERSION(3, 1, 6):
4588 			adev->mode_info.num_crtc = 4;
4589 			adev->mode_info.num_hpd = 4;
4590 			adev->mode_info.num_dig = 4;
4591 			break;
4592 		default:
4593 			DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4594 					adev->ip_versions[DCE_HWIP][0]);
4595 			return -EINVAL;
4596 		}
4597 #endif
4598 		break;
4599 	}
4600 
4601 	amdgpu_dm_set_irq_funcs(adev);
4602 
4603 	if (adev->mode_info.funcs == NULL)
4604 		adev->mode_info.funcs = &dm_display_funcs;
4605 
4606 	/*
4607 	 * Note: Do NOT change adev->audio_endpt_rreg and
4608 	 * adev->audio_endpt_wreg because they are initialised in
4609 	 * amdgpu_device_init()
4610 	 */
4611 #if defined(CONFIG_DEBUG_KERNEL_DC)
4612 	device_create_file(
4613 		adev_to_drm(adev)->dev,
4614 		&dev_attr_s3_debug);
4615 #endif
4616 
4617 	return 0;
4618 }
4619 
4620 static bool modeset_required(struct drm_crtc_state *crtc_state,
4621 			     struct dc_stream_state *new_stream,
4622 			     struct dc_stream_state *old_stream)
4623 {
4624 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4625 }
4626 
4627 static bool modereset_required(struct drm_crtc_state *crtc_state)
4628 {
4629 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4630 }
4631 
4632 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4633 {
4634 	drm_encoder_cleanup(encoder);
4635 	kfree(encoder);
4636 }
4637 
4638 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4639 	.destroy = amdgpu_dm_encoder_destroy,
4640 };
4641 
4642 
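/*
 * Look up the DC plane scaling limits for the framebuffer's pixel format,
 * normalizing a "no scaling" cap of 1 to a 1.0 (1000 units) factor.
 */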
4643 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4644 					 struct drm_framebuffer *fb,
4645 					 int *min_downscale, int *max_upscale)
4646 {
4647 	struct amdgpu_device *adev = drm_to_adev(dev);
4648 	struct dc *dc = adev->dm.dc;
4649 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4650 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4651 
4652 	switch (fb->format->format) {
4653 	case DRM_FORMAT_P010:
4654 	case DRM_FORMAT_NV12:
4655 	case DRM_FORMAT_NV21:
4656 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4657 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4658 		break;
4659 
4660 	case DRM_FORMAT_XRGB16161616F:
4661 	case DRM_FORMAT_ARGB16161616F:
4662 	case DRM_FORMAT_XBGR16161616F:
4663 	case DRM_FORMAT_ABGR16161616F:
4664 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4665 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4666 		break;
4667 
4668 	default:
4669 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4670 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4671 		break;
4672 	}
4673 
4674 	/*
4675 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4676 	 * scaling factor of 1.0 == 1000 units.
4677 	 */
4678 	if (*max_upscale == 1)
4679 		*max_upscale = 1000;
4680 
4681 	if (*min_downscale == 1)
4682 		*min_downscale = 1000;
4683 }
4684 
4685 
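/*
 * Translate DRM plane src/dst rectangles into a DC scaling_info and reject
 * configurations that exceed the per-format scaling limits.
 */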
4686 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4687 				const struct drm_plane_state *state,
4688 				struct dc_scaling_info *scaling_info)
4689 {
4690 	int scale_w, scale_h, min_downscale, max_upscale;
4691 
4692 	memset(scaling_info, 0, sizeof(*scaling_info));
4693 
4694 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4695 	scaling_info->src_rect.x = state->src_x >> 16;
4696 	scaling_info->src_rect.y = state->src_y >> 16;
4697 
4698 	/*
4699 	 * For reasons we don't (yet) fully understand, a non-zero
4700 	 * src_y coordinate into an NV12 buffer can cause a
4701 	 * system hang on DCN1x.
4702 	 * To avoid hangs (and maybe be overly cautious),
4703 	 * let's reject both non-zero src_x and src_y.
4704 	 *
4705 	 * We currently know of only one use-case to reproduce a
4706 	 * scenario with non-zero src_x and src_y for NV12, which
4707 	 * is to gesture the YouTube Android app into full screen
4708 	 * on ChromeOS.
4709 	 */
4710 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4711 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4712 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4713 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4714 		return -EINVAL;
4715 
4716 	scaling_info->src_rect.width = state->src_w >> 16;
4717 	if (scaling_info->src_rect.width == 0)
4718 		return -EINVAL;
4719 
4720 	scaling_info->src_rect.height = state->src_h >> 16;
4721 	if (scaling_info->src_rect.height == 0)
4722 		return -EINVAL;
4723 
4724 	scaling_info->dst_rect.x = state->crtc_x;
4725 	scaling_info->dst_rect.y = state->crtc_y;
4726 
4727 	if (state->crtc_w == 0)
4728 		return -EINVAL;
4729 
4730 	scaling_info->dst_rect.width = state->crtc_w;
4731 
4732 	if (state->crtc_h == 0)
4733 		return -EINVAL;
4734 
4735 	scaling_info->dst_rect.height = state->crtc_h;
4736 
4737 	/* DRM doesn't specify clipping on destination output. */
4738 	scaling_info->clip_rect = scaling_info->dst_rect;
4739 
4740 	/* Validate scaling per-format with DC plane caps */
4741 	if (state->plane && state->plane->dev && state->fb) {
4742 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4743 					     &min_downscale, &max_upscale);
4744 	} else {
4745 		min_downscale = 250;
4746 		max_upscale = 16000;
4747 	}
4748 
4749 	scale_w = scaling_info->dst_rect.width * 1000 /
4750 		  scaling_info->src_rect.width;
4751 
4752 	if (scale_w < min_downscale || scale_w > max_upscale)
4753 		return -EINVAL;
4754 
4755 	scale_h = scaling_info->dst_rect.height * 1000 /
4756 		  scaling_info->src_rect.height;
4757 
4758 	if (scale_h < min_downscale || scale_h > max_upscale)
4759 		return -EINVAL;
4760 
4761 	/*
4762 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4763 	 * assume reasonable defaults based on the format.
4764 	 */
4765 
4766 	return 0;
4767 }
4768 
4769 static void
4770 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4771 				 uint64_t tiling_flags)
4772 {
4773 	/* Fill GFX8 params */
4774 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4775 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4776 
4777 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4778 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4779 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4780 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4781 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4782 
4783 		/* XXX fix me for VI */
4784 		tiling_info->gfx8.num_banks = num_banks;
4785 		tiling_info->gfx8.array_mode =
4786 				DC_ARRAY_2D_TILED_THIN1;
4787 		tiling_info->gfx8.tile_split = tile_split;
4788 		tiling_info->gfx8.bank_width = bankw;
4789 		tiling_info->gfx8.bank_height = bankh;
4790 		tiling_info->gfx8.tile_aspect = mtaspect;
4791 		tiling_info->gfx8.tile_mode =
4792 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4793 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4794 			== DC_ARRAY_1D_TILED_THIN1) {
4795 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4796 	}
4797 
4798 	tiling_info->gfx8.pipe_config =
4799 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4800 }
4801 
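/*
 * Fill GFX9+ tiling parameters from the GPU's gb_addr_config so DC knows
 * the pipe/bank/packer topology of the surface.
 */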
4802 static void
4803 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4804 				  union dc_tiling_info *tiling_info)
4805 {
4806 	tiling_info->gfx9.num_pipes =
4807 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4808 	tiling_info->gfx9.num_banks =
4809 		adev->gfx.config.gb_addr_config_fields.num_banks;
4810 	tiling_info->gfx9.pipe_interleave =
4811 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4812 	tiling_info->gfx9.num_shader_engines =
4813 		adev->gfx.config.gb_addr_config_fields.num_se;
4814 	tiling_info->gfx9.max_compressed_frags =
4815 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4816 	tiling_info->gfx9.num_rb_per_se =
4817 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4818 	tiling_info->gfx9.shaderEnable = 1;
4819 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4820 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4821 }
4822 
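/*
 * Check a requested DCC configuration against the DC compression caps for
 * the given format, swizzle and scan direction; returns -EINVAL if DCC
 * cannot be supported as requested.
 */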
4823 static int
4824 validate_dcc(struct amdgpu_device *adev,
4825 	     const enum surface_pixel_format format,
4826 	     const enum dc_rotation_angle rotation,
4827 	     const union dc_tiling_info *tiling_info,
4828 	     const struct dc_plane_dcc_param *dcc,
4829 	     const struct dc_plane_address *address,
4830 	     const struct plane_size *plane_size)
4831 {
4832 	struct dc *dc = adev->dm.dc;
4833 	struct dc_dcc_surface_param input;
4834 	struct dc_surface_dcc_cap output;
4835 
4836 	memset(&input, 0, sizeof(input));
4837 	memset(&output, 0, sizeof(output));
4838 
4839 	if (!dcc->enable)
4840 		return 0;
4841 
4842 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4843 	    !dc->cap_funcs.get_dcc_compression_cap)
4844 		return -EINVAL;
4845 
4846 	input.format = format;
4847 	input.surface_size.width = plane_size->surface_size.width;
4848 	input.surface_size.height = plane_size->surface_size.height;
4849 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4850 
4851 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4852 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4853 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4854 		input.scan = SCAN_DIRECTION_VERTICAL;
4855 
4856 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4857 		return -EINVAL;
4858 
4859 	if (!output.capable)
4860 		return -EINVAL;
4861 
4862 	if (dcc->independent_64b_blks == 0 &&
4863 	    output.grph.rgb.independent_64b_blks != 0)
4864 		return -EINVAL;
4865 
4866 	return 0;
4867 }
4868 
4869 static bool
4870 modifier_has_dcc(uint64_t modifier)
4871 {
4872 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4873 }
4874 
4875 static unsigned
4876 modifier_gfx9_swizzle_mode(uint64_t modifier)
4877 {
4878 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4879 		return 0;
4880 
4881 	return AMD_FMT_MOD_GET(TILE, modifier);
4882 }
4883 
4884 static const struct drm_format_info *
4885 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4886 {
4887 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4888 }
4889 
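/*
 * Derive GFX9+ tiling parameters from an AMD format modifier, overriding
 * the pipe/bank/packer counts with the values encoded in the modifier bits.
 */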
4890 static void
4891 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4892 				    union dc_tiling_info *tiling_info,
4893 				    uint64_t modifier)
4894 {
4895 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4896 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4897 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4898 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4899 
4900 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4901 
4902 	if (!IS_AMD_FMT_MOD(modifier))
4903 		return;
4904 
4905 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4906 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4907 
4908 	if (adev->family >= AMDGPU_FAMILY_NV) {
4909 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4910 	} else {
4911 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4912 
4913 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4914 	}
4915 }
4916 
4917 enum dm_micro_swizzle {
4918 	MICRO_SWIZZLE_Z = 0,
4919 	MICRO_SWIZZLE_S = 1,
4920 	MICRO_SWIZZLE_D = 2,
4921 	MICRO_SWIZZLE_R = 3
4922 };
4923 
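/*
 * Check whether a format/modifier combination can be scanned out by this
 * plane, taking micro-tile swizzle and DCC restrictions into account.
 */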
4924 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4925 					  uint32_t format,
4926 					  uint64_t modifier)
4927 {
4928 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4929 	const struct drm_format_info *info = drm_format_info(format);
4930 	int i;
4931 
4932 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4933 
4934 	if (!info)
4935 		return false;
4936 
4937 	/*
4938 	 * We always have to allow these modifiers:
4939 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4940 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4941 	 */
4942 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4943 	    modifier == DRM_FORMAT_MOD_INVALID) {
4944 		return true;
4945 	}
4946 
4947 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4948 	for (i = 0; i < plane->modifier_count; i++) {
4949 		if (modifier == plane->modifiers[i])
4950 			break;
4951 	}
4952 	if (i == plane->modifier_count)
4953 		return false;
4954 
4955 	/*
4956 	 * For D swizzle the canonical modifier depends on the bpp, so check
4957 	 * it here.
4958 	 */
4959 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4960 	    adev->family >= AMDGPU_FAMILY_NV) {
4961 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4962 			return false;
4963 	}
4964 
4965 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4966 	    info->cpp[0] < 8)
4967 		return false;
4968 
4969 	if (modifier_has_dcc(modifier)) {
4970 		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4971 		if (info->cpp[0] != 4)
4972 			return false;
4973 		/* We support multi-planar formats, but not when combined with
4974 		 * additional DCC metadata planes.
		 */
4975 		if (info->num_planes > 1)
4976 			return false;
4977 	}
4978 
4979 	return true;
4980 }
4981 
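/*
 * Append a modifier to a dynamically grown array, doubling the capacity
 * when needed; on allocation failure the array is freed and set to NULL.
 */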
4982 static void
4983 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4984 {
4985 	if (!*mods)
4986 		return;
4987 
4988 	if (*cap - *size < 1) {
4989 		uint64_t new_cap = *cap * 2;
4990 		uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4991 
4992 		if (!new_mods) {
4993 			kfree(*mods);
4994 			*mods = NULL;
4995 			return;
4996 		}
4997 
4998 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4999 		kfree(*mods);
5000 		*mods = new_mods;
5001 		*cap = new_cap;
5002 	}
5003 
5004 	(*mods)[*size] = mod;
5005 	*size += 1;
5006 }
5007 
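/*
 * Build the list of format modifiers advertised for GFX9 (Vega/Raven)
 * planes, including DCC variants where the ASIC supports them.
 */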
5008 static void
5009 add_gfx9_modifiers(const struct amdgpu_device *adev,
5010 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5011 {
5012 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5013 	int pipe_xor_bits = min(8, pipes +
5014 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5015 	int bank_xor_bits = min(8 - pipe_xor_bits,
5016 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5017 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5018 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5019 
5020 
5021 	if (adev->family == AMDGPU_FAMILY_RV) {
5022 		/* Raven2 and later */
5023 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5024 
5025 		/*
5026 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5027 		 * doesn't support _D on DCN
5028 		 */
5029 
5030 		if (has_constant_encode) {
5031 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5032 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5033 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5034 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5035 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5036 				    AMD_FMT_MOD_SET(DCC, 1) |
5037 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5038 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5039 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5040 		}
5041 
5042 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5043 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5044 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5045 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5046 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5047 			    AMD_FMT_MOD_SET(DCC, 1) |
5048 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5049 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5050 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5051 
5052 		if (has_constant_encode) {
5053 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5055 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5056 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5057 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5058 				    AMD_FMT_MOD_SET(DCC, 1) |
5059 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5060 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5061 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5063 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5064 				    AMD_FMT_MOD_SET(RB, rb) |
5065 				    AMD_FMT_MOD_SET(PIPE, pipes));
5066 		}
5067 
5068 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5069 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5070 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5071 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5072 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5073 			    AMD_FMT_MOD_SET(DCC, 1) |
5074 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5075 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5076 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5077 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5078 			    AMD_FMT_MOD_SET(RB, rb) |
5079 			    AMD_FMT_MOD_SET(PIPE, pipes));
5080 	}
5081 
5082 	/*
5083 	 * Only supported for 64bpp on Raven, will be filtered on format in
5084 	 * dm_plane_format_mod_supported.
5085 	 */
5086 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5088 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5089 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5090 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5091 
5092 	if (adev->family == AMDGPU_FAMILY_RV) {
5093 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5094 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5095 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5096 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5097 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5098 	}
5099 
5100 	/*
5101 	 * Only supported for 64bpp on Raven, will be filtered on format in
5102 	 * dm_plane_format_mod_supported.
5103 	 */
5104 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5105 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5106 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5107 
5108 	if (adev->family == AMDGPU_FAMILY_RV) {
5109 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5111 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5112 	}
5113 }
5114 
5115 static void
5116 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5117 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5118 {
5119 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5120 
5121 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5122 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5123 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5124 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5125 		    AMD_FMT_MOD_SET(DCC, 1) |
5126 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5127 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5128 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5129 
5130 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5131 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5132 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5133 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5134 		    AMD_FMT_MOD_SET(DCC, 1) |
5135 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5136 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5137 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5138 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5139 
5140 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5141 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5142 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5143 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5144 
5145 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5147 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5148 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5149 
5150 
5151 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5152 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5154 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5155 
5156 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5157 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5158 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5159 }
5160 
5161 static void
5162 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5163 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5164 {
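	/*
	 * The modifier encodes log2 of the pipe and packer counts, which
	 * together determine the GFX10.3 swizzle layout.
	 */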
5165 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5166 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5167 
5168 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5169 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5170 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5171 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5172 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5173 		    AMD_FMT_MOD_SET(DCC, 1) |
5174 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5178 
5179 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5180 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5181 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5182 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5183 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5184 		    AMD_FMT_MOD_SET(DCC, 1) |
5185 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5186 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5187 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5188 
5189 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5190 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5191 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5192 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5193 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5194 		    AMD_FMT_MOD_SET(DCC, 1) |
5195 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5196 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5197 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5198 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5199 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5200 
5201 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5202 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5203 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5204 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5205 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5206 		    AMD_FMT_MOD_SET(DCC, 1) |
5207 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5208 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5209 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5210 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5211 
5212 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5213 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5214 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5215 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5216 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5217 
5218 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5219 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5220 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5221 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5222 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5223 
5224 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5225 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5226 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5227 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5228 
5229 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5230 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5231 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5232 }
5233 
5234 static int
5235 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5236 {
5237 	uint64_t size = 0, capacity = 128;
5238 	*mods = NULL;
5239 
5240 	/* We have not hooked up any pre-GFX9 modifiers. */
5241 	if (adev->family < AMDGPU_FAMILY_AI)
5242 		return 0;
5243 
5244 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
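	/* add_modifier() appends entries and is expected to grow the array beyond the initial capacity if needed. */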
5245 
5246 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5247 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5248 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5249 		return *mods ? 0 : -ENOMEM;
5250 	}
5251 
5252 	switch (adev->family) {
5253 	case AMDGPU_FAMILY_AI:
5254 	case AMDGPU_FAMILY_RV:
5255 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5256 		break;
5257 	case AMDGPU_FAMILY_NV:
5258 	case AMDGPU_FAMILY_VGH:
5259 	case AMDGPU_FAMILY_YC:
5260 	case AMDGPU_FAMILY_GC_10_3_6:
5261 	case AMDGPU_FAMILY_GC_10_3_7:
5262 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5263 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5264 		else
5265 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5266 		break;
5267 	}
5268 
5269 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5270 
5271 	/* INVALID marks the end of the list. */
5272 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5273 
5274 	if (!*mods)
5275 		return -ENOMEM;
5276 
5277 	return 0;
5278 }
5279 
5280 static int
5281 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5282 					  const struct amdgpu_framebuffer *afb,
5283 					  const enum surface_pixel_format format,
5284 					  const enum dc_rotation_angle rotation,
5285 					  const struct plane_size *plane_size,
5286 					  union dc_tiling_info *tiling_info,
5287 					  struct dc_plane_dcc_param *dcc,
5288 					  struct dc_plane_address *address,
5289 					  const bool force_disable_dcc)
5290 {
5291 	const uint64_t modifier = afb->base.modifier;
5292 	int ret = 0;
5293 
5294 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5295 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5296 
5297 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5298 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5299 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5300 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5301 
5302 		dcc->enable = 1;
5303 		dcc->meta_pitch = afb->base.pitches[1];
5304 		dcc->independent_64b_blks = independent_64b_blks;
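		/*
		 * Map the modifier's independent 64B/128B block flags onto the
		 * HUBP DCC independent-block setting; GFX10.3 (RB+) also has a
		 * dedicated setting for the combined 64B + 128B case.
		 */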
5305 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5306 			if (independent_64b_blks && independent_128b_blks)
5307 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5308 			else if (independent_128b_blks)
5309 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5310 			else if (independent_64b_blks && !independent_128b_blks)
5311 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5312 			else
5313 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5314 		} else {
5315 			if (independent_64b_blks)
5316 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5317 			else
5318 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5319 		}
5320 
5321 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5322 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5323 	}
5324 
5325 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5326 	if (ret)
5327 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5328 
5329 	return ret;
5330 }
5331 
5332 static int
5333 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5334 			     const struct amdgpu_framebuffer *afb,
5335 			     const enum surface_pixel_format format,
5336 			     const enum dc_rotation_angle rotation,
5337 			     const uint64_t tiling_flags,
5338 			     union dc_tiling_info *tiling_info,
5339 			     struct plane_size *plane_size,
5340 			     struct dc_plane_dcc_param *dcc,
5341 			     struct dc_plane_address *address,
5342 			     bool tmz_surface,
5343 			     bool force_disable_dcc)
5344 {
5345 	const struct drm_framebuffer *fb = &afb->base;
5346 	int ret;
5347 
5348 	memset(tiling_info, 0, sizeof(*tiling_info));
5349 	memset(plane_size, 0, sizeof(*plane_size));
5350 	memset(dcc, 0, sizeof(*dcc));
5351 	memset(address, 0, sizeof(*address));
5352 
5353 	address->tmz_surface = tmz_surface;
5354 
5355 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5356 		uint64_t addr = afb->address + fb->offsets[0];
5357 
5358 		plane_size->surface_size.x = 0;
5359 		plane_size->surface_size.y = 0;
5360 		plane_size->surface_size.width = fb->width;
5361 		plane_size->surface_size.height = fb->height;
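		/* DRM pitches are in bytes; DC expects the pitch in pixels. */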
5362 		plane_size->surface_pitch =
5363 			fb->pitches[0] / fb->format->cpp[0];
5364 
5365 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5366 		address->grph.addr.low_part = lower_32_bits(addr);
5367 		address->grph.addr.high_part = upper_32_bits(addr);
5368 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5369 		uint64_t luma_addr = afb->address + fb->offsets[0];
5370 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5371 
5372 		plane_size->surface_size.x = 0;
5373 		plane_size->surface_size.y = 0;
5374 		plane_size->surface_size.width = fb->width;
5375 		plane_size->surface_size.height = fb->height;
5376 		plane_size->surface_pitch =
5377 			fb->pitches[0] / fb->format->cpp[0];
5378 
5379 		plane_size->chroma_size.x = 0;
5380 		plane_size->chroma_size.y = 0;
		/* TODO: set these based on the surface format; assumes 4:2:0 (half-size chroma) for now */
5382 		plane_size->chroma_size.width = fb->width / 2;
5383 		plane_size->chroma_size.height = fb->height / 2;
5384 
5385 		plane_size->chroma_pitch =
5386 			fb->pitches[1] / fb->format->cpp[1];
5387 
5388 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5389 		address->video_progressive.luma_addr.low_part =
5390 			lower_32_bits(luma_addr);
5391 		address->video_progressive.luma_addr.high_part =
5392 			upper_32_bits(luma_addr);
5393 		address->video_progressive.chroma_addr.low_part =
5394 			lower_32_bits(chroma_addr);
5395 		address->video_progressive.chroma_addr.high_part =
5396 			upper_32_bits(chroma_addr);
5397 	}
5398 
5399 	if (adev->family >= AMDGPU_FAMILY_AI) {
5400 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5401 								rotation, plane_size,
5402 								tiling_info, dcc,
5403 								address,
5404 								force_disable_dcc);
5405 		if (ret)
5406 			return ret;
5407 	} else {
5408 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5409 	}
5410 
5411 	return 0;
5412 }
5413 
5414 static void
5415 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5416 			       bool *per_pixel_alpha, bool *global_alpha,
5417 			       int *global_alpha_value)
5418 {
5419 	*per_pixel_alpha = false;
5420 	*global_alpha = false;
5421 	*global_alpha_value = 0xff;
5422 
5423 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5424 		return;
5425 
5426 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5427 		static const uint32_t alpha_formats[] = {
5428 			DRM_FORMAT_ARGB8888,
5429 			DRM_FORMAT_RGBA8888,
5430 			DRM_FORMAT_ABGR8888,
5431 		};
5432 		uint32_t format = plane_state->fb->format->format;
5433 		unsigned int i;
5434 
5435 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5436 			if (format == alpha_formats[i]) {
5437 				*per_pixel_alpha = true;
5438 				break;
5439 			}
5440 		}
5441 	}
5442 
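	/* DRM plane alpha is 16 bit (0xffff == opaque); DC takes an 8 bit global alpha. */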
5443 	if (plane_state->alpha < 0xffff) {
5444 		*global_alpha = true;
5445 		*global_alpha_value = plane_state->alpha >> 8;
5446 	}
5447 }
5448 
5449 static int
5450 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5451 			    const enum surface_pixel_format format,
5452 			    enum dc_color_space *color_space)
5453 {
5454 	bool full_range;
5455 
5456 	*color_space = COLOR_SPACE_SRGB;
5457 
5458 	/* DRM color properties only affect non-RGB formats. */
5459 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5460 		return 0;
5461 
5462 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5463 
5464 	switch (plane_state->color_encoding) {
5465 	case DRM_COLOR_YCBCR_BT601:
5466 		if (full_range)
5467 			*color_space = COLOR_SPACE_YCBCR601;
5468 		else
5469 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5470 		break;
5471 
5472 	case DRM_COLOR_YCBCR_BT709:
5473 		if (full_range)
5474 			*color_space = COLOR_SPACE_YCBCR709;
5475 		else
5476 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5477 		break;
5478 
5479 	case DRM_COLOR_YCBCR_BT2020:
5480 		if (full_range)
5481 			*color_space = COLOR_SPACE_2020_YCBCR;
5482 		else
5483 			return -EINVAL;
5484 		break;
5485 
5486 	default:
5487 		return -EINVAL;
5488 	}
5489 
5490 	return 0;
5491 }
5492 
5493 static int
5494 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5495 			    const struct drm_plane_state *plane_state,
5496 			    const uint64_t tiling_flags,
5497 			    struct dc_plane_info *plane_info,
5498 			    struct dc_plane_address *address,
5499 			    bool tmz_surface,
5500 			    bool force_disable_dcc)
5501 {
5502 	const struct drm_framebuffer *fb = plane_state->fb;
5503 	const struct amdgpu_framebuffer *afb =
5504 		to_amdgpu_framebuffer(plane_state->fb);
5505 	int ret;
5506 
5507 	memset(plane_info, 0, sizeof(*plane_info));
5508 
5509 	switch (fb->format->format) {
5510 	case DRM_FORMAT_C8:
5511 		plane_info->format =
5512 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5513 		break;
5514 	case DRM_FORMAT_RGB565:
5515 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5516 		break;
5517 	case DRM_FORMAT_XRGB8888:
5518 	case DRM_FORMAT_ARGB8888:
5519 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5520 		break;
5521 	case DRM_FORMAT_XRGB2101010:
5522 	case DRM_FORMAT_ARGB2101010:
5523 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5524 		break;
5525 	case DRM_FORMAT_XBGR2101010:
5526 	case DRM_FORMAT_ABGR2101010:
5527 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5528 		break;
5529 	case DRM_FORMAT_XBGR8888:
5530 	case DRM_FORMAT_ABGR8888:
5531 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5532 		break;
5533 	case DRM_FORMAT_NV21:
5534 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5535 		break;
5536 	case DRM_FORMAT_NV12:
5537 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5538 		break;
5539 	case DRM_FORMAT_P010:
5540 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5541 		break;
5542 	case DRM_FORMAT_XRGB16161616F:
5543 	case DRM_FORMAT_ARGB16161616F:
5544 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5545 		break;
5546 	case DRM_FORMAT_XBGR16161616F:
5547 	case DRM_FORMAT_ABGR16161616F:
5548 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5549 		break;
5550 	case DRM_FORMAT_XRGB16161616:
5551 	case DRM_FORMAT_ARGB16161616:
5552 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5553 		break;
5554 	case DRM_FORMAT_XBGR16161616:
5555 	case DRM_FORMAT_ABGR16161616:
5556 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5557 		break;
5558 	default:
5559 		DRM_ERROR(
5560 			"Unsupported screen format %p4cc\n",
5561 			&fb->format->format);
5562 		return -EINVAL;
5563 	}
5564 
5565 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5566 	case DRM_MODE_ROTATE_0:
5567 		plane_info->rotation = ROTATION_ANGLE_0;
5568 		break;
5569 	case DRM_MODE_ROTATE_90:
5570 		plane_info->rotation = ROTATION_ANGLE_90;
5571 		break;
5572 	case DRM_MODE_ROTATE_180:
5573 		plane_info->rotation = ROTATION_ANGLE_180;
5574 		break;
5575 	case DRM_MODE_ROTATE_270:
5576 		plane_info->rotation = ROTATION_ANGLE_270;
5577 		break;
5578 	default:
5579 		plane_info->rotation = ROTATION_ANGLE_0;
5580 		break;
5581 	}
5582 
5583 	plane_info->visible = true;
5584 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5585 
5586 	plane_info->layer_index = 0;
5587 
5588 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5589 					  &plane_info->color_space);
5590 	if (ret)
5591 		return ret;
5592 
5593 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5594 					   plane_info->rotation, tiling_flags,
5595 					   &plane_info->tiling_info,
5596 					   &plane_info->plane_size,
5597 					   &plane_info->dcc, address, tmz_surface,
5598 					   force_disable_dcc);
5599 	if (ret)
5600 		return ret;
5601 
5602 	fill_blending_from_plane_state(
5603 		plane_state, &plane_info->per_pixel_alpha,
5604 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5605 
5606 	return 0;
5607 }
5608 
5609 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5610 				    struct dc_plane_state *dc_plane_state,
5611 				    struct drm_plane_state *plane_state,
5612 				    struct drm_crtc_state *crtc_state)
5613 {
5614 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5615 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5616 	struct dc_scaling_info scaling_info;
5617 	struct dc_plane_info plane_info;
5618 	int ret;
5619 	bool force_disable_dcc = false;
5620 
5621 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5622 	if (ret)
5623 		return ret;
5624 
5625 	dc_plane_state->src_rect = scaling_info.src_rect;
5626 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5627 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5628 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5629 
5630 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5631 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5632 					  afb->tiling_flags,
5633 					  &plane_info,
5634 					  &dc_plane_state->address,
5635 					  afb->tmz_surface,
5636 					  force_disable_dcc);
5637 	if (ret)
5638 		return ret;
5639 
5640 	dc_plane_state->format = plane_info.format;
5641 	dc_plane_state->color_space = plane_info.color_space;
5643 	dc_plane_state->plane_size = plane_info.plane_size;
5644 	dc_plane_state->rotation = plane_info.rotation;
5645 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5646 	dc_plane_state->stereo_format = plane_info.stereo_format;
5647 	dc_plane_state->tiling_info = plane_info.tiling_info;
5648 	dc_plane_state->visible = plane_info.visible;
5649 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5650 	dc_plane_state->global_alpha = plane_info.global_alpha;
5651 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5652 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 */
5654 	dc_plane_state->flip_int_enabled = true;
5655 
5656 	/*
5657 	 * Always set input transfer function, since plane state is refreshed
5658 	 * every time.
5659 	 */
5660 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5661 	if (ret)
5662 		return ret;
5663 
5664 	return 0;
5665 }
5666 
5667 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5668 					   const struct dm_connector_state *dm_state,
5669 					   struct dc_stream_state *stream)
5670 {
5671 	enum amdgpu_rmx_type rmx_type;
5672 
5673 	struct rect src = { 0 }; /* viewport in composition space*/
5674 	struct rect dst = { 0 }; /* stream addressable area */
5675 
5676 	/* no mode. nothing to be done */
5677 	if (!mode)
5678 		return;
5679 
5680 	/* Full screen scaling by default */
5681 	src.width = mode->hdisplay;
5682 	src.height = mode->vdisplay;
5683 	dst.width = stream->timing.h_addressable;
5684 	dst.height = stream->timing.v_addressable;
5685 
5686 	if (dm_state) {
5687 		rmx_type = dm_state->scaling;
5688 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5689 			if (src.width * dst.height <
5690 					src.height * dst.width) {
5691 				/* height needs less upscaling/more downscaling */
5692 				dst.width = src.width *
5693 						dst.height / src.height;
5694 			} else {
5695 				/* width needs less upscaling/more downscaling */
5696 				dst.height = src.height *
5697 						dst.width / src.width;
5698 			}
5699 		} else if (rmx_type == RMX_CENTER) {
5700 			dst = src;
5701 		}
5702 
5703 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5704 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5705 
5706 		if (dm_state->underscan_enable) {
5707 			dst.x += dm_state->underscan_hborder / 2;
5708 			dst.y += dm_state->underscan_vborder / 2;
5709 			dst.width -= dm_state->underscan_hborder;
5710 			dst.height -= dm_state->underscan_vborder;
5711 		}
5712 	}
5713 
5714 	stream->src = src;
5715 	stream->dst = dst;
5716 
5717 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5718 		      dst.x, dst.y, dst.width, dst.height);
5719 
5720 }
5721 
5722 static enum dc_color_depth
5723 convert_color_depth_from_display_info(const struct drm_connector *connector,
5724 				      bool is_y420, int requested_bpc)
5725 {
5726 	uint8_t bpc;
5727 
5728 	if (is_y420) {
5729 		bpc = 8;
5730 
5731 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5732 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5733 			bpc = 16;
5734 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5735 			bpc = 12;
5736 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5737 			bpc = 10;
5738 	} else {
5739 		bpc = (uint8_t)connector->display_info.bpc;
5740 		/* Assume 8 bpc by default if no bpc is specified. */
5741 		bpc = bpc ? bpc : 8;
5742 	}
5743 
5744 	if (requested_bpc > 0) {
5745 		/*
5746 		 * Cap display bpc based on the user requested value.
5747 		 *
		 * The value for state->max_bpc may not be correctly updated
5749 		 * depending on when the connector gets added to the state
5750 		 * or if this was called outside of atomic check, so it
5751 		 * can't be used directly.
5752 		 */
5753 		bpc = min_t(u8, bpc, requested_bpc);
5754 
5755 		/* Round down to the nearest even number. */
5756 		bpc = bpc - (bpc & 1);
5757 	}
5758 
5759 	switch (bpc) {
5760 	case 0:
5761 		/*
5762 		 * Temporary Work around, DRM doesn't parse color depth for
5763 		 * EDID revision before 1.4
5764 		 * TODO: Fix edid parsing
5765 		 */
5766 		return COLOR_DEPTH_888;
5767 	case 6:
5768 		return COLOR_DEPTH_666;
5769 	case 8:
5770 		return COLOR_DEPTH_888;
5771 	case 10:
5772 		return COLOR_DEPTH_101010;
5773 	case 12:
5774 		return COLOR_DEPTH_121212;
5775 	case 14:
5776 		return COLOR_DEPTH_141414;
5777 	case 16:
5778 		return COLOR_DEPTH_161616;
5779 	default:
5780 		return COLOR_DEPTH_UNDEFINED;
5781 	}
5782 }
5783 
5784 static enum dc_aspect_ratio
5785 get_aspect_ratio(const struct drm_display_mode *mode_in)
5786 {
5787 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5788 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5789 }
5790 
5791 static enum dc_color_space
5792 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5793 {
5794 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5795 
5796 	switch (dc_crtc_timing->pixel_encoding)	{
5797 	case PIXEL_ENCODING_YCBCR422:
5798 	case PIXEL_ENCODING_YCBCR444:
5799 	case PIXEL_ENCODING_YCBCR420:
5800 	{
		/*
		 * 27.03 MHz (270300 in 100 Hz units) is the separation point
		 * between HDTV and SDTV according to the HDMI spec; use
		 * YCbCr709 above it and YCbCr601 below it.
		 */
5806 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5807 			if (dc_crtc_timing->flags.Y_ONLY)
5808 				color_space =
5809 					COLOR_SPACE_YCBCR709_LIMITED;
5810 			else
5811 				color_space = COLOR_SPACE_YCBCR709;
5812 		} else {
5813 			if (dc_crtc_timing->flags.Y_ONLY)
5814 				color_space =
5815 					COLOR_SPACE_YCBCR601_LIMITED;
5816 			else
5817 				color_space = COLOR_SPACE_YCBCR601;
5818 		}
5819 
5820 	}
5821 	break;
5822 	case PIXEL_ENCODING_RGB:
5823 		color_space = COLOR_SPACE_SRGB;
5824 		break;
5825 
5826 	default:
5827 		WARN_ON(1);
5828 		break;
5829 	}
5830 
5831 	return color_space;
5832 }
5833 
5834 static bool adjust_colour_depth_from_display_info(
5835 	struct dc_crtc_timing *timing_out,
5836 	const struct drm_display_info *info)
5837 {
5838 	enum dc_color_depth depth = timing_out->display_color_depth;
5839 	int normalized_clk;
5840 	do {
5841 		normalized_clk = timing_out->pix_clk_100hz / 10;
5842 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5843 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5844 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec for the given colour depth. */
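		/* The ratios are the target bpc over the 8 bpc (24 bpp) baseline: 30/24, 36/24, 48/24. */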
5846 		switch (depth) {
5847 		case COLOR_DEPTH_888:
5848 			break;
5849 		case COLOR_DEPTH_101010:
5850 			normalized_clk = (normalized_clk * 30) / 24;
5851 			break;
5852 		case COLOR_DEPTH_121212:
5853 			normalized_clk = (normalized_clk * 36) / 24;
5854 			break;
5855 		case COLOR_DEPTH_161616:
5856 			normalized_clk = (normalized_clk * 48) / 24;
5857 			break;
5858 		default:
5859 			/* The above depths are the only ones valid for HDMI. */
5860 			return false;
5861 		}
5862 		if (normalized_clk <= info->max_tmds_clock) {
5863 			timing_out->display_color_depth = depth;
5864 			return true;
5865 		}
5866 	} while (--depth > COLOR_DEPTH_666);
5867 	return false;
5868 }
5869 
5870 static void fill_stream_properties_from_drm_display_mode(
5871 	struct dc_stream_state *stream,
5872 	const struct drm_display_mode *mode_in,
5873 	const struct drm_connector *connector,
5874 	const struct drm_connector_state *connector_state,
5875 	const struct dc_stream_state *old_stream,
5876 	int requested_bpc)
5877 {
5878 	struct dc_crtc_timing *timing_out = &stream->timing;
5879 	const struct drm_display_info *info = &connector->display_info;
5880 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5881 	struct hdmi_vendor_infoframe hv_frame;
5882 	struct hdmi_avi_infoframe avi_frame;
5883 
5884 	memset(&hv_frame, 0, sizeof(hv_frame));
5885 	memset(&avi_frame, 0, sizeof(avi_frame));
5886 
5887 	timing_out->h_border_left = 0;
5888 	timing_out->h_border_right = 0;
5889 	timing_out->v_border_top = 0;
5890 	timing_out->v_border_bottom = 0;
5891 	/* TODO: un-hardcode */
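	/*
	 * Pick the pixel encoding: use YCbCr 4:2:0 when the HDMI mode is
	 * 4:2:0-only or when 4:2:0 output is forced and the mode supports it,
	 * prefer YCbCr 4:4:4 on HDMI when the sink advertises it, otherwise
	 * fall back to RGB.
	 */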
5892 	if (drm_mode_is_420_only(info, mode_in)
5893 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5894 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5895 	else if (drm_mode_is_420_also(info, mode_in)
5896 			&& aconnector->force_yuv420_output)
5897 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5898 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5899 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5900 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5901 	else
5902 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5903 
5904 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5905 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5906 		connector,
5907 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5908 		requested_bpc);
5909 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5910 	timing_out->hdmi_vic = 0;
5911 
	if (old_stream) {
5913 		timing_out->vic = old_stream->timing.vic;
5914 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5915 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5916 	} else {
5917 		timing_out->vic = drm_match_cea_mode(mode_in);
5918 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5919 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5920 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5921 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5922 	}
5923 
5924 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5925 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5926 		timing_out->vic = avi_frame.video_code;
5927 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5928 		timing_out->hdmi_vic = hv_frame.vic;
5929 	}
5930 
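	/*
	 * FreeSync video modes only stretch the front porch of the base mode,
	 * so take the raw mode timings here; other modes use the crtc_-adjusted
	 * timings.
	 */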
5931 	if (is_freesync_video_mode(mode_in, aconnector)) {
5932 		timing_out->h_addressable = mode_in->hdisplay;
5933 		timing_out->h_total = mode_in->htotal;
5934 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5935 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5936 		timing_out->v_total = mode_in->vtotal;
5937 		timing_out->v_addressable = mode_in->vdisplay;
5938 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5939 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5940 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5941 	} else {
5942 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5943 		timing_out->h_total = mode_in->crtc_htotal;
5944 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5945 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5946 		timing_out->v_total = mode_in->crtc_vtotal;
5947 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5948 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5949 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5950 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5951 	}
5952 
5953 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5954 
5955 	stream->output_color_space = get_output_color_space(timing_out);
5956 
5957 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5958 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5959 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5960 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5961 		    drm_mode_is_420_also(info, mode_in) &&
5962 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5963 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5964 			adjust_colour_depth_from_display_info(timing_out, info);
5965 		}
5966 	}
5967 }
5968 
5969 static void fill_audio_info(struct audio_info *audio_info,
5970 			    const struct drm_connector *drm_connector,
5971 			    const struct dc_sink *dc_sink)
5972 {
5973 	int i = 0;
5974 	int cea_revision = 0;
5975 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5976 
5977 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5978 	audio_info->product_id = edid_caps->product_id;
5979 
5980 	cea_revision = drm_connector->display_info.cea_rev;
5981 
5982 	strscpy(audio_info->display_name,
5983 		edid_caps->display_name,
5984 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5985 
5986 	if (cea_revision >= 3) {
5987 		audio_info->mode_count = edid_caps->audio_mode_count;
5988 
5989 		for (i = 0; i < audio_info->mode_count; ++i) {
5990 			audio_info->modes[i].format_code =
5991 					(enum audio_format_code)
5992 					(edid_caps->audio_modes[i].format_code);
5993 			audio_info->modes[i].channel_count =
5994 					edid_caps->audio_modes[i].channel_count;
5995 			audio_info->modes[i].sample_rates.all =
5996 					edid_caps->audio_modes[i].sample_rate;
5997 			audio_info->modes[i].sample_size =
5998 					edid_caps->audio_modes[i].sample_size;
5999 		}
6000 	}
6001 
6002 	audio_info->flags.all = edid_caps->speaker_flags;
6003 
6004 	/* TODO: We only check for the progressive mode, check for interlace mode too */
6005 	if (drm_connector->latency_present[0]) {
6006 		audio_info->video_latency = drm_connector->video_latency[0];
6007 		audio_info->audio_latency = drm_connector->audio_latency[0];
6008 	}
6009 
6010 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6011 
6012 }
6013 
6014 static void
6015 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6016 				      struct drm_display_mode *dst_mode)
6017 {
6018 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6019 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6020 	dst_mode->crtc_clock = src_mode->crtc_clock;
6021 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6022 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6023 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6024 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6025 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6026 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6027 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6028 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6029 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6030 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6031 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6032 }
6033 
6034 static void
6035 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6036 					const struct drm_display_mode *native_mode,
6037 					bool scale_enabled)
6038 {
6039 	if (scale_enabled) {
6040 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6041 	} else if (native_mode->clock == drm_mode->clock &&
6042 			native_mode->htotal == drm_mode->htotal &&
6043 			native_mode->vtotal == drm_mode->vtotal) {
6044 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6045 	} else {
		/* neither scaling nor an amdgpu-inserted mode; nothing to patch */
6047 	}
6048 }
6049 
6050 static struct dc_sink *
6051 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6052 {
6053 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
6056 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6057 
6058 	sink = dc_sink_create(&sink_init_data);
6059 	if (!sink) {
6060 		DRM_ERROR("Failed to create sink!\n");
6061 		return NULL;
6062 	}
6063 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6064 
6065 	return sink;
6066 }
6067 
6068 static void set_multisync_trigger_params(
6069 		struct dc_stream_state *stream)
6070 {
6071 	struct dc_stream_state *master = NULL;
6072 
6073 	if (stream->triggered_crtc_reset.enabled) {
6074 		master = stream->triggered_crtc_reset.event_source;
6075 		stream->triggered_crtc_reset.event =
6076 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6077 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6078 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6079 	}
6080 }
6081 
6082 static void set_master_stream(struct dc_stream_state *stream_set[],
6083 			      int stream_count)
6084 {
6085 	int j, highest_rfr = 0, master_stream = 0;
6086 
6087 	for (j = 0;  j < stream_count; j++) {
6088 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6089 			int refresh_rate = 0;
6090 
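			/* Refresh rate in Hz: pixel clock (pix_clk_100hz * 100) over pixels per frame. */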
6091 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6092 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6093 			if (refresh_rate > highest_rfr) {
6094 				highest_rfr = refresh_rate;
6095 				master_stream = j;
6096 			}
6097 		}
6098 	}
6099 	for (j = 0;  j < stream_count; j++) {
6100 		if (stream_set[j])
6101 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6102 	}
6103 }
6104 
6105 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6106 {
6107 	int i = 0;
6108 	struct dc_stream_state *stream;
6109 
6110 	if (context->stream_count < 2)
6111 		return;
6112 	for (i = 0; i < context->stream_count ; i++) {
6113 		if (!context->streams[i])
6114 			continue;
6115 		/*
6116 		 * TODO: add a function to read AMD VSDB bits and set
6117 		 * crtc_sync_master.multi_sync_enabled flag
6118 		 * For now it's set to false
6119 		 */
6120 	}
6121 
6122 	set_master_stream(context->streams, context->stream_count);
6123 
6124 	for (i = 0; i < context->stream_count ; i++) {
6125 		stream = context->streams[i];
6126 
6127 		if (!stream)
6128 			continue;
6129 
6130 		set_multisync_trigger_params(stream);
6131 	}
6132 }
6133 
6134 #if defined(CONFIG_DRM_AMD_DC_DCN)
6135 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6136 							struct dc_sink *sink, struct dc_stream_state *stream,
6137 							struct dsc_dec_dpcd_caps *dsc_caps)
6138 {
6139 	stream->timing.flags.DSC = 0;
6140 	dsc_caps->is_dsc_supported = false;
6141 
6142 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6143 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6144 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6145 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6146 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6147 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6148 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6149 				dsc_caps);
6150 	}
6151 }
6152 
6153 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6154 				    struct dc_sink *sink, struct dc_stream_state *stream,
6155 				    struct dsc_dec_dpcd_caps *dsc_caps,
6156 				    uint32_t max_dsc_target_bpp_limit_override)
6157 {
6158 	const struct dc_link_settings *verified_link_cap = NULL;
6159 	uint32_t link_bw_in_kbps;
6160 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6161 	struct dc *dc = sink->ctx->dc;
6162 	struct dc_dsc_bw_range bw_range = {0};
6163 	struct dc_dsc_config dsc_cfg = {0};
6164 
6165 	verified_link_cap = dc_link_get_link_cap(stream->link);
6166 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
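	/* DSC target bpp is expressed in 1/16 bpp units; both bounds start at 8.0 bpp. */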
6167 	edp_min_bpp_x16 = 8 * 16;
6168 	edp_max_bpp_x16 = 8 * 16;
6169 
6170 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6171 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6172 
6173 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6174 		edp_min_bpp_x16 = edp_max_bpp_x16;
6175 
6176 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6177 				dc->debug.dsc_min_slice_height_override,
6178 				edp_min_bpp_x16, edp_max_bpp_x16,
6179 				dsc_caps,
6180 				&stream->timing,
6181 				&bw_range)) {
6182 
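		/*
		 * If the link can carry the stream even at the maximum DSC
		 * bpp, enable DSC at that bpp; otherwise fall through to a
		 * config computed against the link bandwidth below.
		 */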
6183 		if (bw_range.max_kbps < link_bw_in_kbps) {
6184 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6185 					dsc_caps,
6186 					dc->debug.dsc_min_slice_height_override,
6187 					max_dsc_target_bpp_limit_override,
6188 					0,
6189 					&stream->timing,
6190 					&dsc_cfg)) {
6191 				stream->timing.dsc_cfg = dsc_cfg;
6192 				stream->timing.flags.DSC = 1;
6193 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6194 			}
6195 			return;
6196 		}
6197 	}
6198 
6199 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6200 				dsc_caps,
6201 				dc->debug.dsc_min_slice_height_override,
6202 				max_dsc_target_bpp_limit_override,
6203 				link_bw_in_kbps,
6204 				&stream->timing,
6205 				&dsc_cfg)) {
6206 		stream->timing.dsc_cfg = dsc_cfg;
6207 		stream->timing.flags.DSC = 1;
6208 	}
6209 }
6210 
6211 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6212 										struct dc_sink *sink, struct dc_stream_state *stream,
6213 										struct dsc_dec_dpcd_caps *dsc_caps)
6214 {
6215 	struct drm_connector *drm_connector = &aconnector->base;
6216 	uint32_t link_bandwidth_kbps;
6217 	uint32_t max_dsc_target_bpp_limit_override = 0;
6218 	struct dc *dc = sink->ctx->dc;
6219 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6220 	uint32_t dsc_max_supported_bw_in_kbps;
6221 
6222 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6223 							dc_link_get_link_cap(aconnector->dc_link));
6224 
6225 	if (stream->link && stream->link->local_sink)
6226 		max_dsc_target_bpp_limit_override =
6227 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6228 
6229 	/* Set DSC policy according to dsc_clock_en */
6230 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6231 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6232 
6233 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6234 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6235 
6236 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6237 
6238 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6239 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6240 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6241 						dsc_caps,
6242 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6243 						max_dsc_target_bpp_limit_override,
6244 						link_bandwidth_kbps,
6245 						&stream->timing,
6246 						&stream->timing.dsc_cfg)) {
6247 				stream->timing.flags.DSC = 1;
6248 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6249 								 __func__, drm_connector->name);
6250 			}
6251 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6252 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6253 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6254 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6255 
6256 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6257 					max_supported_bw_in_kbps > 0 &&
6258 					dsc_max_supported_bw_in_kbps > 0)
6259 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6260 						dsc_caps,
6261 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6262 						max_dsc_target_bpp_limit_override,
6263 						dsc_max_supported_bw_in_kbps,
6264 						&stream->timing,
6265 						&stream->timing.dsc_cfg)) {
6266 					stream->timing.flags.DSC = 1;
6267 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6268 									 __func__, drm_connector->name);
6269 				}
6270 		}
6271 	}
6272 
6273 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6274 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6275 		stream->timing.flags.DSC = 1;
6276 
6277 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6278 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6279 
6280 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6281 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6282 
6283 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6284 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6285 }
6286 #endif /* CONFIG_DRM_AMD_DC_DCN */
6287 
6288 /**
6289  * DOC: FreeSync Video
6290  *
6291  * When a userspace application wants to play a video, the content follows a
6292  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some common video formats and their expected
 * FPS:
6295  *
6296  * - TV/NTSC (23.976 FPS)
6297  * - Cinema (24 FPS)
6298  * - TV/PAL (25 FPS)
6299  * - TV/NTSC (29.97 FPS)
6300  * - TV/NTSC (30 FPS)
6301  * - Cinema HFR (48 FPS)
6302  * - TV/PAL (50 FPS)
6303  * - Commonly used (60 FPS)
6304  * - Multiples of 24 (48,72,96,120 FPS)
6305  *
 * The list of standard video formats is not long and can be added to the
 * connector's mode list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
6309  * rate. Such a switch will happen seamlessly, without screen blanking or
6310  * reprogramming of the output in any other way. If the userspace requests a
6311  * modesetting change compatible with FreeSync modes that only differ in the
6312  * refresh rate, DC will skip the full update and avoid blink during the
6313  * transition. For example, the video player can change the modesetting from
6314  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6315  * causing any display blink. This same concept can be applied to a mode
6316  * setting change.
6317  */
6318 static struct drm_display_mode *
6319 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6320 			  bool use_probed_modes)
6321 {
6322 	struct drm_display_mode *m, *m_pref = NULL;
6323 	u16 current_refresh, highest_refresh;
6324 	struct list_head *list_head = use_probed_modes ?
6325 						    &aconnector->base.probed_modes :
6326 						    &aconnector->base.modes;
6327 
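	/* Return the cached base mode if it was already computed on a previous call. */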
6328 	if (aconnector->freesync_vid_base.clock != 0)
6329 		return &aconnector->freesync_vid_base;
6330 
6331 	/* Find the preferred mode */
6332 	list_for_each_entry (m, list_head, head) {
6333 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6334 			m_pref = m;
6335 			break;
6336 		}
6337 	}
6338 
6339 	if (!m_pref) {
		/* Probably an EDID with no preferred mode: fall back to the first entry. */
6341 		m_pref = list_first_entry_or_null(
6342 			&aconnector->base.modes, struct drm_display_mode, head);
6343 		if (!m_pref) {
6344 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6345 			return NULL;
6346 		}
6347 	}
6348 
6349 	highest_refresh = drm_mode_vrefresh(m_pref);
6350 
6351 	/*
6352 	 * Find the mode with highest refresh rate with same resolution.
6353 	 * For some monitors, preferred mode is not the mode with highest
6354 	 * supported refresh rate.
6355 	 */
6356 	list_for_each_entry (m, list_head, head) {
6357 		current_refresh  = drm_mode_vrefresh(m);
6358 
6359 		if (m->hdisplay == m_pref->hdisplay &&
6360 		    m->vdisplay == m_pref->vdisplay &&
6361 		    highest_refresh < current_refresh) {
6362 			highest_refresh = current_refresh;
6363 			m_pref = m;
6364 		}
6365 	}
6366 
6367 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6368 	return m_pref;
6369 }
6370 
6371 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6372 				   struct amdgpu_dm_connector *aconnector)
6373 {
6374 	struct drm_display_mode *high_mode;
6375 	int timing_diff;
6376 
6377 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6378 	if (!high_mode || !mode)
6379 		return false;
6380 
6381 	timing_diff = high_mode->vtotal - mode->vtotal;
6382 
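	/*
	 * A FreeSync video mode may differ from the base mode only in the
	 * vertical front porch: vsync_start/end must be shifted by exactly
	 * the vtotal delta and every other timing field must match.
	 */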
6383 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6384 	    high_mode->hdisplay != mode->hdisplay ||
6385 	    high_mode->vdisplay != mode->vdisplay ||
6386 	    high_mode->hsync_start != mode->hsync_start ||
6387 	    high_mode->hsync_end != mode->hsync_end ||
6388 	    high_mode->htotal != mode->htotal ||
6389 	    high_mode->hskew != mode->hskew ||
6390 	    high_mode->vscan != mode->vscan ||
6391 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6392 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6393 		return false;
6394 	else
6395 		return true;
6396 }
6397 
6398 static struct dc_stream_state *
6399 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6400 		       const struct drm_display_mode *drm_mode,
6401 		       const struct dm_connector_state *dm_state,
6402 		       const struct dc_stream_state *old_stream,
6403 		       int requested_bpc)
6404 {
6405 	struct drm_display_mode *preferred_mode = NULL;
6406 	struct drm_connector *drm_connector;
6407 	const struct drm_connector_state *con_state =
6408 		dm_state ? &dm_state->base : NULL;
6409 	struct dc_stream_state *stream = NULL;
6410 	struct drm_display_mode mode = *drm_mode;
6411 	struct drm_display_mode saved_mode;
6412 	struct drm_display_mode *freesync_mode = NULL;
6413 	bool native_mode_found = false;
6414 	bool recalculate_timing = false;
6415 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6416 	int mode_refresh;
6417 	int preferred_refresh = 0;
6418 #if defined(CONFIG_DRM_AMD_DC_DCN)
6419 	struct dsc_dec_dpcd_caps dsc_caps;
6420 #endif
6421 	struct dc_sink *sink = NULL;
6422 
6423 	memset(&saved_mode, 0, sizeof(saved_mode));
6424 
6425 	if (aconnector == NULL) {
6426 		DRM_ERROR("aconnector is NULL!\n");
6427 		return stream;
6428 	}
6429 
6430 	drm_connector = &aconnector->base;
6431 
6432 	if (!aconnector->dc_sink) {
6433 		sink = create_fake_sink(aconnector);
6434 		if (!sink)
6435 			return stream;
6436 	} else {
6437 		sink = aconnector->dc_sink;
6438 		dc_sink_retain(sink);
6439 	}
6440 
6441 	stream = dc_create_stream_for_sink(sink);
6442 
6443 	if (stream == NULL) {
6444 		DRM_ERROR("Failed to create stream for sink!\n");
6445 		goto finish;
6446 	}
6447 
6448 	stream->dm_stream_context = aconnector;
6449 
6450 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6451 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6452 
6453 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6454 		/* Search for preferred mode */
6455 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6456 			native_mode_found = true;
6457 			break;
6458 		}
6459 	}
6460 	if (!native_mode_found)
6461 		preferred_mode = list_first_entry_or_null(
6462 				&aconnector->base.modes,
6463 				struct drm_display_mode,
6464 				head);
6465 
6466 	mode_refresh = drm_mode_vrefresh(&mode);
6467 
6468 	if (preferred_mode == NULL) {
6469 		/*
6470 		 * This may not be an error, the use case is when we have no
6471 		 * usermode calls to reset and set mode upon hotplug. In this
6472 		 * case, we call set mode ourselves to restore the previous mode
6473 		 * and the modelist may not be filled in in time.
6474 		 */
6475 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6476 	} else {
6477 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6478 		if (recalculate_timing) {
6479 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6480 			drm_mode_copy(&saved_mode, &mode);
6481 			drm_mode_copy(&mode, freesync_mode);
6482 		} else {
6483 			decide_crtc_timing_for_drm_display_mode(
6484 				&mode, preferred_mode, scale);
6485 
6486 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6487 		}
6488 	}
6489 
6490 	if (recalculate_timing)
6491 		drm_mode_set_crtcinfo(&saved_mode, 0);
6492 	else if (!dm_state)
6493 		drm_mode_set_crtcinfo(&mode, 0);
6494 
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * copy the VIC and sync polarities from the old timings.
	 */
6499 	if (!scale || mode_refresh != preferred_refresh)
6500 		fill_stream_properties_from_drm_display_mode(
6501 			stream, &mode, &aconnector->base, con_state, NULL,
6502 			requested_bpc);
6503 	else
6504 		fill_stream_properties_from_drm_display_mode(
6505 			stream, &mode, &aconnector->base, con_state, old_stream,
6506 			requested_bpc);
6507 
6508 #if defined(CONFIG_DRM_AMD_DC_DCN)
6509 	/* SST DSC determination policy */
6510 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6511 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6512 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6513 #endif
6514 
6515 	update_stream_scaling_settings(&mode, dm_state, stream);
6516 
6517 	fill_audio_info(
6518 		&stream->audio_info,
6519 		drm_connector,
6520 		sink);
6521 
6522 	update_stream_signal(stream, sink);
6523 
6524 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6525 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6526 
6527 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
6532 		stream->use_vsc_sdp_for_colorimetry = false;
6533 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6534 			stream->use_vsc_sdp_for_colorimetry =
6535 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6536 		} else {
6537 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6538 				stream->use_vsc_sdp_for_colorimetry = true;
6539 		}
6540 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6541 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6542 
6543 	}
6544 finish:
6545 	dc_sink_release(sink);
6546 
6547 	return stream;
6548 }
6549 
6550 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6551 {
6552 	drm_crtc_cleanup(crtc);
6553 	kfree(crtc);
6554 }
6555 
6556 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6557 				  struct drm_crtc_state *state)
6558 {
6559 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6560 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6562 	if (cur->stream)
6563 		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

6569 	kfree(state);
6570 }
6571 
6572 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6573 {
6574 	struct dm_crtc_state *state;
6575 
6576 	if (crtc->state)
6577 		dm_crtc_destroy_state(crtc, crtc->state);
6578 
6579 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6580 	if (WARN_ON(!state))
6581 		return;
6582 
6583 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6584 }
6585 
6586 static struct drm_crtc_state *
6587 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6588 {
6589 	struct dm_crtc_state *state, *cur;
6590 
6591 	cur = to_dm_crtc_state(crtc->state);
6592 
6593 	if (WARN_ON(!crtc->state))
6594 		return NULL;
6595 
6596 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6597 	if (!state)
6598 		return NULL;
6599 
6600 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6601 
6602 	if (cur->stream) {
6603 		state->stream = cur->stream;
6604 		dc_stream_retain(state->stream);
6605 	}
6606 
6607 	state->active_planes = cur->active_planes;
6608 	state->vrr_infopacket = cur->vrr_infopacket;
6609 	state->abm_level = cur->abm_level;
6610 	state->vrr_supported = cur->vrr_supported;
6611 	state->freesync_config = cur->freesync_config;
6612 	state->cm_has_degamma = cur->cm_has_degamma;
6613 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6614 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6616 
6617 	return &state->base;
6618 }
6619 
6620 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6621 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6622 {
6623 	crtc_debugfs_init(crtc);
6624 
6625 	return 0;
6626 }
6627 #endif
6628 
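/*
 * Enable or disable the VUPDATE interrupt for the OTG instance backing this
 * CRTC. DM only needs VUPDATE while variable refresh rate is active, so it is
 * toggled together with the vblank interrupt in dm_set_vblank() below.
 */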
6629 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6630 {
6631 	enum dc_irq_source irq_source;
6632 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6633 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6634 	int rc;
6635 
6636 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6637 
6638 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6639 
6640 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6641 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6642 	return rc;
6643 }
6644 
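/*
 * Toggle the CRTC's VBLANK (and, in VRR mode, VUPDATE) interrupt in DC. On
 * DCN, when the device is not in reset, the request is also queued to
 * dm->vblank_control_workqueue so the remaining per-stream handling runs
 * outside of this context.
 */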
6645 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6646 {
6647 	enum dc_irq_source irq_source;
6648 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6649 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6650 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6651 #if defined(CONFIG_DRM_AMD_DC_DCN)
6652 	struct amdgpu_display_manager *dm = &adev->dm;
6653 	struct vblank_control_work *work;
6654 #endif
6655 	int rc = 0;
6656 
6657 	if (enable) {
6658 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6659 		if (amdgpu_dm_vrr_active(acrtc_state))
6660 			rc = dm_set_vupdate_irq(crtc, true);
6661 	} else {
6662 		/* vblank irq off -> vupdate irq off */
6663 		rc = dm_set_vupdate_irq(crtc, false);
6664 	}
6665 
6666 	if (rc)
6667 		return rc;
6668 
6669 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6670 
6671 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6672 		return -EBUSY;
6673 
6674 	if (amdgpu_in_reset(adev))
6675 		return 0;
6676 
6677 #if defined(CONFIG_DRM_AMD_DC_DCN)
6678 	if (dm->vblank_control_workqueue) {
6679 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6680 		if (!work)
6681 			return -ENOMEM;
6682 
6683 		INIT_WORK(&work->work, vblank_control_worker);
6684 		work->dm = dm;
6685 		work->acrtc = acrtc;
6686 		work->enable = enable;
6687 
6688 		if (acrtc_state->stream) {
6689 			dc_stream_retain(acrtc_state->stream);
6690 			work->stream = acrtc_state->stream;
6691 		}
6692 
6693 		queue_work(dm->vblank_control_workqueue, &work->work);
6694 	}
6695 #endif
6696 
6697 	return 0;
6698 }
6699 
6700 static int dm_enable_vblank(struct drm_crtc *crtc)
6701 {
6702 	return dm_set_vblank(crtc, true);
6703 }
6704 
6705 static void dm_disable_vblank(struct drm_crtc *crtc)
6706 {
6707 	dm_set_vblank(crtc, false);
6708 }
6709 
/* Only the options currently available to the driver are implemented */
6711 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6712 	.reset = dm_crtc_reset_state,
6713 	.destroy = amdgpu_dm_crtc_destroy,
6714 	.set_config = drm_atomic_helper_set_config,
6715 	.page_flip = drm_atomic_helper_page_flip,
6716 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6717 	.atomic_destroy_state = dm_crtc_destroy_state,
6718 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6719 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6720 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6721 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6722 	.enable_vblank = dm_enable_vblank,
6723 	.disable_vblank = dm_disable_vblank,
6724 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6725 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6726 	.late_register = amdgpu_dm_crtc_late_register,
6727 #endif
6728 };
6729 
6730 static enum drm_connector_status
6731 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6732 {
6733 	bool connected;
6734 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6735 
6736 	/*
6737 	 * Notes:
	 * 1. This interface is NOT called in the context of an HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
6741 	 */
6742 
6743 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6744 	    !aconnector->fake_enable)
6745 		connected = (aconnector->dc_sink != NULL);
6746 	else
6747 		connected = (aconnector->base.force == DRM_FORCE_ON);
6748 
6749 	update_subconnector_property(aconnector);
6750 
6751 	return (connected ? connector_status_connected :
6752 			connector_status_disconnected);
6753 }
6754 
6755 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6756 					    struct drm_connector_state *connector_state,
6757 					    struct drm_property *property,
6758 					    uint64_t val)
6759 {
6760 	struct drm_device *dev = connector->dev;
6761 	struct amdgpu_device *adev = drm_to_adev(dev);
6762 	struct dm_connector_state *dm_old_state =
6763 		to_dm_connector_state(connector->state);
6764 	struct dm_connector_state *dm_new_state =
6765 		to_dm_connector_state(connector_state);
6766 
6767 	int ret = -EINVAL;
6768 
6769 	if (property == dev->mode_config.scaling_mode_property) {
6770 		enum amdgpu_rmx_type rmx_type;
6771 
6772 		switch (val) {
6773 		case DRM_MODE_SCALE_CENTER:
6774 			rmx_type = RMX_CENTER;
6775 			break;
6776 		case DRM_MODE_SCALE_ASPECT:
6777 			rmx_type = RMX_ASPECT;
6778 			break;
6779 		case DRM_MODE_SCALE_FULLSCREEN:
6780 			rmx_type = RMX_FULL;
6781 			break;
6782 		case DRM_MODE_SCALE_NONE:
6783 		default:
6784 			rmx_type = RMX_OFF;
6785 			break;
6786 		}
6787 
6788 		if (dm_old_state->scaling == rmx_type)
6789 			return 0;
6790 
6791 		dm_new_state->scaling = rmx_type;
6792 		ret = 0;
6793 	} else if (property == adev->mode_info.underscan_hborder_property) {
6794 		dm_new_state->underscan_hborder = val;
6795 		ret = 0;
6796 	} else if (property == adev->mode_info.underscan_vborder_property) {
6797 		dm_new_state->underscan_vborder = val;
6798 		ret = 0;
6799 	} else if (property == adev->mode_info.underscan_property) {
6800 		dm_new_state->underscan_enable = val;
6801 		ret = 0;
6802 	} else if (property == adev->mode_info.abm_level_property) {
6803 		dm_new_state->abm_level = val;
6804 		ret = 0;
6805 	}
6806 
6807 	return ret;
6808 }
6809 
6810 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6811 					    const struct drm_connector_state *state,
6812 					    struct drm_property *property,
6813 					    uint64_t *val)
6814 {
6815 	struct drm_device *dev = connector->dev;
6816 	struct amdgpu_device *adev = drm_to_adev(dev);
6817 	struct dm_connector_state *dm_state =
6818 		to_dm_connector_state(state);
6819 	int ret = -EINVAL;
6820 
6821 	if (property == dev->mode_config.scaling_mode_property) {
6822 		switch (dm_state->scaling) {
6823 		case RMX_CENTER:
6824 			*val = DRM_MODE_SCALE_CENTER;
6825 			break;
6826 		case RMX_ASPECT:
6827 			*val = DRM_MODE_SCALE_ASPECT;
6828 			break;
6829 		case RMX_FULL:
6830 			*val = DRM_MODE_SCALE_FULLSCREEN;
6831 			break;
6832 		case RMX_OFF:
6833 		default:
6834 			*val = DRM_MODE_SCALE_NONE;
6835 			break;
6836 		}
6837 		ret = 0;
6838 	} else if (property == adev->mode_info.underscan_hborder_property) {
6839 		*val = dm_state->underscan_hborder;
6840 		ret = 0;
6841 	} else if (property == adev->mode_info.underscan_vborder_property) {
6842 		*val = dm_state->underscan_vborder;
6843 		ret = 0;
6844 	} else if (property == adev->mode_info.underscan_property) {
6845 		*val = dm_state->underscan_enable;
6846 		ret = 0;
6847 	} else if (property == adev->mode_info.abm_level_property) {
6848 		*val = dm_state->abm_level;
6849 		ret = 0;
6850 	}
6851 
6852 	return ret;
6853 }
6854 
6855 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6856 {
6857 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6858 
6859 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6860 }
6861 
6862 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6863 {
6864 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6865 	const struct dc_link *link = aconnector->dc_link;
6866 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6867 	struct amdgpu_display_manager *dm = &adev->dm;
6868 	int i;
6869 
6870 	/*
	 * Call only if mst_mgr was initialized earlier, since it's not done
	 * for all connector types.
6873 	 */
6874 	if (aconnector->mst_mgr.dev)
6875 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6876 
6877 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6878 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6879 	for (i = 0; i < dm->num_of_edps; i++) {
6880 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6881 			backlight_device_unregister(dm->backlight_dev[i]);
6882 			dm->backlight_dev[i] = NULL;
6883 		}
6884 	}
6885 #endif
6886 
6887 	if (aconnector->dc_em_sink)
6888 		dc_sink_release(aconnector->dc_em_sink);
6889 	aconnector->dc_em_sink = NULL;
6890 	if (aconnector->dc_sink)
6891 		dc_sink_release(aconnector->dc_sink);
6892 	aconnector->dc_sink = NULL;
6893 
6894 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6895 	drm_connector_unregister(connector);
6896 	drm_connector_cleanup(connector);
6897 	if (aconnector->i2c) {
6898 		i2c_del_adapter(&aconnector->i2c->base);
6899 		kfree(aconnector->i2c);
6900 	}
6901 	kfree(aconnector->dm_dp_aux.aux.name);
6902 
6903 	kfree(connector);
6904 }
6905 
6906 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6907 {
6908 	struct dm_connector_state *state =
6909 		to_dm_connector_state(connector->state);
6910 
6911 	if (connector->state)
6912 		__drm_atomic_helper_connector_destroy_state(connector->state);
6913 
6914 	kfree(state);
6915 
6916 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6917 
6918 	if (state) {
6919 		state->scaling = RMX_OFF;
6920 		state->underscan_enable = false;
6921 		state->underscan_hborder = 0;
6922 		state->underscan_vborder = 0;
6923 		state->base.max_requested_bpc = 8;
6924 		state->vcpi_slots = 0;
6925 		state->pbn = 0;
6926 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6927 			state->abm_level = amdgpu_dm_abm_level;
6928 
6929 		__drm_atomic_helper_connector_reset(connector, &state->base);
6930 	}
6931 }
6932 
6933 struct drm_connector_state *
6934 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6935 {
6936 	struct dm_connector_state *state =
6937 		to_dm_connector_state(connector->state);
6938 
6939 	struct dm_connector_state *new_state =
6940 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6941 
6942 	if (!new_state)
6943 		return NULL;
6944 
6945 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6946 
6947 	new_state->freesync_capable = state->freesync_capable;
6948 	new_state->abm_level = state->abm_level;
6949 	new_state->scaling = state->scaling;
6950 	new_state->underscan_enable = state->underscan_enable;
6951 	new_state->underscan_hborder = state->underscan_hborder;
6952 	new_state->underscan_vborder = state->underscan_vborder;
6953 	new_state->vcpi_slots = state->vcpi_slots;
6954 	new_state->pbn = state->pbn;
6955 	return &new_state->base;
6956 }
6957 
6958 static int
6959 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6960 {
6961 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6962 		to_amdgpu_dm_connector(connector);
6963 	int r;
6964 
6965 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6966 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6967 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6968 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6969 		if (r)
6970 			return r;
6971 	}
6972 
6973 #if defined(CONFIG_DEBUG_FS)
6974 	connector_debugfs_init(amdgpu_dm_connector);
6975 #endif
6976 
6977 	return 0;
6978 }
6979 
6980 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6981 	.reset = amdgpu_dm_connector_funcs_reset,
6982 	.detect = amdgpu_dm_connector_detect,
6983 	.fill_modes = drm_helper_probe_single_connector_modes,
6984 	.destroy = amdgpu_dm_connector_destroy,
6985 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6986 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6987 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6988 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6989 	.late_register = amdgpu_dm_connector_late_register,
6990 	.early_unregister = amdgpu_dm_connector_unregister
6991 };
6992 
6993 static int get_modes(struct drm_connector *connector)
6994 {
6995 	return amdgpu_dm_connector_get_modes(connector);
6996 }
6997 
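/*
 * Create an emulated DC sink from the EDID blob forced on the connector so
 * that a connector forced ON can be driven without a physically attached
 * display. Without an EDID blob, the connector is forced OFF instead.
 */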
6998 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6999 {
7000 	struct dc_sink_init_data init_params = {
7001 			.link = aconnector->dc_link,
7002 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7003 	};
7004 	struct edid *edid;
7005 
7006 	if (!aconnector->base.edid_blob_ptr) {
7007 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
7008 				aconnector->base.name);
7009 
7010 		aconnector->base.force = DRM_FORCE_OFF;
7011 		aconnector->base.override_edid = false;
7012 		return;
7013 	}
7014 
7015 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7016 
7017 	aconnector->edid = edid;
7018 
7019 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7020 		aconnector->dc_link,
7021 		(uint8_t *)edid,
7022 		(edid->extensions + 1) * EDID_LENGTH,
7023 		&init_params);
7024 
7025 	if (aconnector->base.force == DRM_FORCE_ON) {
7026 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7027 		aconnector->dc_link->local_sink :
7028 		aconnector->dc_em_sink;
7029 		dc_sink_retain(aconnector->dc_sink);
7030 	}
7031 }
7032 
7033 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7034 {
7035 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7036 
7037 	/*
	 * In case of a headless boot with force-on for a DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
7040 	 */
7041 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7042 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7043 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7044 	}
7045 
7046 
7047 	aconnector->base.override_edid = true;
7048 	create_eml_sink(aconnector);
7049 }
7050 
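/*
 * Create a stream for the sink and validate it with DC. If validation fails,
 * retry with progressively lower bpc (down to 6), and for encoder validation
 * failures retry once more with YCbCr420 output forced.
 */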
7051 struct dc_stream_state *
7052 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7053 				const struct drm_display_mode *drm_mode,
7054 				const struct dm_connector_state *dm_state,
7055 				const struct dc_stream_state *old_stream)
7056 {
7057 	struct drm_connector *connector = &aconnector->base;
7058 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7059 	struct dc_stream_state *stream;
7060 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7061 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7062 	enum dc_status dc_result = DC_OK;
7063 
7064 	do {
7065 		stream = create_stream_for_sink(aconnector, drm_mode,
7066 						dm_state, old_stream,
7067 						requested_bpc);
7068 		if (stream == NULL) {
7069 			DRM_ERROR("Failed to create stream for sink!\n");
7070 			break;
7071 		}
7072 
7073 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7074 
7075 		if (dc_result != DC_OK) {
7076 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7077 				      drm_mode->hdisplay,
7078 				      drm_mode->vdisplay,
7079 				      drm_mode->clock,
7080 				      dc_result,
7081 				      dc_status_to_str(dc_result));
7082 
7083 			dc_stream_release(stream);
7084 			stream = NULL;
7085 			requested_bpc -= 2; /* lower bpc to retry validation */
7086 		}
7087 
7088 	} while (stream == NULL && requested_bpc >= 6);
7089 
7090 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7091 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7092 
7093 		aconnector->force_yuv420_output = true;
7094 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7095 						dm_state, old_stream);
7096 		aconnector->force_yuv420_output = false;
7097 	}
7098 
7099 	return stream;
7100 }
7101 
7102 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7103 				   struct drm_display_mode *mode)
7104 {
7105 	int result = MODE_ERROR;
7106 	struct dc_sink *dc_sink;
7107 	/* TODO: Unhardcode stream count */
7108 	struct dc_stream_state *stream;
7109 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7110 
7111 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7112 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7113 		return result;
7114 
7115 	/*
	 * Only run this the first time mode_valid is called, to initialize
	 * EDID management.
7118 	 */
7119 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7120 		!aconnector->dc_em_sink)
7121 		handle_edid_mgmt(aconnector);
7122 
7123 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7124 
7125 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7126 				aconnector->base.force != DRM_FORCE_ON) {
7127 		DRM_ERROR("dc_sink is NULL!\n");
7128 		goto fail;
7129 	}
7130 
7131 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7132 	if (stream) {
7133 		dc_stream_release(stream);
7134 		result = MODE_OK;
7135 	}
7136 
7137 fail:
	/* TODO: error handling */
7139 	return result;
7140 }
7141 
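/*
 * Pack the connector's HDR static metadata into a DC info packet. HDMI sinks
 * get the HDR DRM infoframe header, while DP/eDP sinks get the same payload
 * wrapped in an SDP header.
 */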
7142 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7143 				struct dc_info_packet *out)
7144 {
7145 	struct hdmi_drm_infoframe frame;
7146 	unsigned char buf[30]; /* 26 + 4 */
7147 	ssize_t len;
7148 	int ret, i;
7149 
7150 	memset(out, 0, sizeof(*out));
7151 
7152 	if (!state->hdr_output_metadata)
7153 		return 0;
7154 
7155 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7156 	if (ret)
7157 		return ret;
7158 
7159 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7160 	if (len < 0)
7161 		return (int)len;
7162 
7163 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7164 	if (len != 30)
7165 		return -EINVAL;
7166 
7167 	/* Prepare the infopacket for DC. */
7168 	switch (state->connector->connector_type) {
7169 	case DRM_MODE_CONNECTOR_HDMIA:
7170 		out->hb0 = 0x87; /* type */
7171 		out->hb1 = 0x01; /* version */
7172 		out->hb2 = 0x1A; /* length */
7173 		out->sb[0] = buf[3]; /* checksum */
7174 		i = 1;
7175 		break;
7176 
7177 	case DRM_MODE_CONNECTOR_DisplayPort:
7178 	case DRM_MODE_CONNECTOR_eDP:
7179 		out->hb0 = 0x00; /* sdp id, zero */
7180 		out->hb1 = 0x87; /* type */
7181 		out->hb2 = 0x1D; /* payload len - 1 */
7182 		out->hb3 = (0x13 << 2); /* sdp version */
7183 		out->sb[0] = 0x01; /* version */
7184 		out->sb[1] = 0x1A; /* length */
7185 		i = 2;
7186 		break;
7187 
7188 	default:
7189 		return -EINVAL;
7190 	}
7191 
7192 	memcpy(&out->sb[i], &buf[4], 26);
7193 	out->valid = true;
7194 
7195 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7196 		       sizeof(out->sb), false);
7197 
7198 	return 0;
7199 }
7200 
7201 static int
7202 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7203 				 struct drm_atomic_state *state)
7204 {
7205 	struct drm_connector_state *new_con_state =
7206 		drm_atomic_get_new_connector_state(state, conn);
7207 	struct drm_connector_state *old_con_state =
7208 		drm_atomic_get_old_connector_state(state, conn);
7209 	struct drm_crtc *crtc = new_con_state->crtc;
7210 	struct drm_crtc_state *new_crtc_state;
7211 	int ret;
7212 
7213 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7214 
7215 	if (!crtc)
7216 		return 0;
7217 
7218 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7219 		struct dc_info_packet hdr_infopacket;
7220 
7221 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7222 		if (ret)
7223 			return ret;
7224 
7225 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7226 		if (IS_ERR(new_crtc_state))
7227 			return PTR_ERR(new_crtc_state);
7228 
7229 		/*
7230 		 * DC considers the stream backends changed if the
7231 		 * static metadata changes. Forcing the modeset also
7232 		 * gives a simple way for userspace to switch from
7233 		 * 8bpc to 10bpc when setting the metadata to enter
7234 		 * or exit HDR.
7235 		 *
7236 		 * Changing the static metadata after it's been
7237 		 * set is permissible, however. So only force a
7238 		 * modeset if we're entering or exiting HDR.
7239 		 */
7240 		new_crtc_state->mode_changed =
7241 			!old_con_state->hdr_output_metadata ||
7242 			!new_con_state->hdr_output_metadata;
7243 	}
7244 
7245 	return 0;
7246 }
7247 
7248 static const struct drm_connector_helper_funcs
7249 amdgpu_dm_connector_helper_funcs = {
7250 	/*
7251 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7252 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7253 	 * are missing after user start lightdm. So we need to renew modes list.
7254 	 * in get_modes call back, not just return the modes count
7255 	 */
7256 	.get_modes = get_modes,
7257 	.mode_valid = amdgpu_dm_connector_mode_valid,
7258 	.atomic_check = amdgpu_dm_connector_atomic_check,
7259 };
7260 
7261 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7262 {
7263 }
7264 
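/*
 * Count the non-cursor planes that will be enabled on the CRTC. Planes that
 * are not part of this atomic state keep their previously validated
 * configuration and are counted as enabled.
 */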
7265 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7266 {
7267 	struct drm_atomic_state *state = new_crtc_state->state;
7268 	struct drm_plane *plane;
7269 	int num_active = 0;
7270 
7271 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7272 		struct drm_plane_state *new_plane_state;
7273 
7274 		/* Cursor planes are "fake". */
7275 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7276 			continue;
7277 
7278 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7279 
7280 		if (!new_plane_state) {
7281 			/*
			 * The plane is enabled on the CRTC and hasn't changed
7283 			 * state. This means that it previously passed
7284 			 * validation and is therefore enabled.
7285 			 */
7286 			num_active += 1;
7287 			continue;
7288 		}
7289 
7290 		/* We need a framebuffer to be considered enabled. */
7291 		num_active += (new_plane_state->fb != NULL);
7292 	}
7293 
7294 	return num_active;
7295 }
7296 
7297 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7298 					 struct drm_crtc_state *new_crtc_state)
7299 {
7300 	struct dm_crtc_state *dm_new_crtc_state =
7301 		to_dm_crtc_state(new_crtc_state);
7302 
7303 	dm_new_crtc_state->active_planes = 0;
7304 
7305 	if (!dm_new_crtc_state->stream)
7306 		return;
7307 
7308 	dm_new_crtc_state->active_planes =
7309 		count_crtc_active_planes(new_crtc_state);
7310 }
7311 
7312 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7313 				       struct drm_atomic_state *state)
7314 {
7315 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7316 									  crtc);
7317 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7318 	struct dc *dc = adev->dm.dc;
7319 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7320 	int ret = -EINVAL;
7321 
7322 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7323 
7324 	dm_update_crtc_active_planes(crtc, crtc_state);
7325 
7326 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7327 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7328 		return ret;
7329 	}
7330 
7331 	/*
7332 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7333 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7334 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7335 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7336 	 */
7337 	if (crtc_state->enable &&
7338 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7339 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7340 		return -EINVAL;
7341 	}
7342 
7343 	/* In some use cases, like reset, no stream is attached */
7344 	if (!dm_crtc_state->stream)
7345 		return 0;
7346 
7347 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7348 		return 0;
7349 
7350 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7351 	return ret;
7352 }
7353 
7354 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7355 				      const struct drm_display_mode *mode,
7356 				      struct drm_display_mode *adjusted_mode)
7357 {
7358 	return true;
7359 }
7360 
7361 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7362 	.disable = dm_crtc_helper_disable,
7363 	.atomic_check = dm_crtc_helper_atomic_check,
7364 	.mode_fixup = dm_crtc_helper_mode_fixup,
7365 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7366 };
7367 
7368 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7369 {
7370 
7371 }
7372 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7393 
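/*
 * For MST connectors, compute the PBN required by the adjusted mode (skipped
 * for duplicated states) and reserve VCPI slots on the MST topology manager.
 */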
7394 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7395 					  struct drm_crtc_state *crtc_state,
7396 					  struct drm_connector_state *conn_state)
7397 {
7398 	struct drm_atomic_state *state = crtc_state->state;
7399 	struct drm_connector *connector = conn_state->connector;
7400 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7401 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7402 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7403 	struct drm_dp_mst_topology_mgr *mst_mgr;
7404 	struct drm_dp_mst_port *mst_port;
7405 	enum dc_color_depth color_depth;
7406 	int clock, bpp = 0;
7407 	bool is_y420 = false;
7408 
7409 	if (!aconnector->port || !aconnector->dc_sink)
7410 		return 0;
7411 
7412 	mst_port = aconnector->port;
7413 	mst_mgr = &aconnector->mst_port->mst_mgr;
7414 
7415 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7416 		return 0;
7417 
7418 	if (!state->duplicated) {
7419 		int max_bpc = conn_state->max_requested_bpc;
7420 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7421 				aconnector->force_yuv420_output;
7422 		color_depth = convert_color_depth_from_display_info(connector,
7423 								    is_y420,
7424 								    max_bpc);
7425 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7426 		clock = adjusted_mode->clock;
7427 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7428 	}
7429 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7430 									   mst_mgr,
7431 									   mst_port,
7432 									   dm_new_connector_state->pbn,
7433 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7434 	if (dm_new_connector_state->vcpi_slots < 0) {
7435 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7436 		return dm_new_connector_state->vcpi_slots;
7437 	}
7438 	return 0;
7439 }
7440 
7441 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7442 	.disable = dm_encoder_helper_disable,
7443 	.atomic_check = dm_encoder_helper_atomic_check
7444 };
7445 
7446 #if defined(CONFIG_DRM_AMD_DC_DCN)
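/*
 * Apply the PBN values computed by compute_mst_dsc_configs_for_state() to the
 * connector states and enable or disable DSC on each MST port accordingly.
 */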
7447 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7448 					    struct dc_state *dc_state,
7449 					    struct dsc_mst_fairness_vars *vars)
7450 {
7451 	struct dc_stream_state *stream = NULL;
7452 	struct drm_connector *connector;
7453 	struct drm_connector_state *new_con_state;
7454 	struct amdgpu_dm_connector *aconnector;
7455 	struct dm_connector_state *dm_conn_state;
7456 	int i, j;
7457 	int vcpi, pbn_div, pbn, slot_num = 0;
7458 
7459 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7460 
7461 		aconnector = to_amdgpu_dm_connector(connector);
7462 
7463 		if (!aconnector->port)
7464 			continue;
7465 
7466 		if (!new_con_state || !new_con_state->crtc)
7467 			continue;
7468 
7469 		dm_conn_state = to_dm_connector_state(new_con_state);
7470 
7471 		for (j = 0; j < dc_state->stream_count; j++) {
7472 			stream = dc_state->streams[j];
7473 			if (!stream)
7474 				continue;
7475 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7477 				break;
7478 
7479 			stream = NULL;
7480 		}
7481 
7482 		if (!stream)
7483 			continue;
7484 
7485 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7487 		for (j = 0; j < dc_state->stream_count; j++) {
7488 			if (vars[j].aconnector == aconnector) {
7489 				pbn = vars[j].pbn;
7490 				break;
7491 			}
7492 		}
7493 
7494 		if (j == dc_state->stream_count)
7495 			continue;
7496 
7497 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7498 
7499 		if (stream->timing.flags.DSC != 1) {
7500 			dm_conn_state->pbn = pbn;
7501 			dm_conn_state->vcpi_slots = slot_num;
7502 
7503 			drm_dp_mst_atomic_enable_dsc(state,
7504 						     aconnector->port,
7505 						     dm_conn_state->pbn,
7506 						     0,
7507 						     false);
7508 			continue;
7509 		}
7510 
7511 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7512 						    aconnector->port,
7513 						    pbn, pbn_div,
7514 						    true);
7515 		if (vcpi < 0)
7516 			return vcpi;
7517 
7518 		dm_conn_state->pbn = pbn;
7519 		dm_conn_state->vcpi_slots = vcpi;
7520 	}
7521 	return 0;
7522 }
7523 #endif
7524 
7525 static void dm_drm_plane_reset(struct drm_plane *plane)
7526 {
7527 	struct dm_plane_state *amdgpu_state = NULL;
7528 
7529 	if (plane->state)
7530 		plane->funcs->atomic_destroy_state(plane, plane->state);
7531 
7532 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7533 	WARN_ON(amdgpu_state == NULL);
7534 
7535 	if (amdgpu_state)
7536 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7537 }
7538 
7539 static struct drm_plane_state *
7540 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7541 {
7542 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7543 
7544 	old_dm_plane_state = to_dm_plane_state(plane->state);
7545 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7546 	if (!dm_plane_state)
7547 		return NULL;
7548 
7549 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7550 
7551 	if (old_dm_plane_state->dc_state) {
7552 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7553 		dc_plane_state_retain(dm_plane_state->dc_state);
7554 	}
7555 
7556 	return &dm_plane_state->base;
7557 }
7558 
7559 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7560 				struct drm_plane_state *state)
7561 {
7562 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7563 
7564 	if (dm_plane_state->dc_state)
7565 		dc_plane_state_release(dm_plane_state->dc_state);
7566 
7567 	drm_atomic_helper_plane_destroy_state(plane, state);
7568 }
7569 
7570 static const struct drm_plane_funcs dm_plane_funcs = {
7571 	.update_plane	= drm_atomic_helper_update_plane,
7572 	.disable_plane	= drm_atomic_helper_disable_plane,
7573 	.destroy	= drm_primary_helper_destroy,
7574 	.reset = dm_drm_plane_reset,
7575 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7576 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7577 	.format_mod_supported = dm_plane_format_mod_supported,
7578 };
7579 
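/*
 * Pin the new framebuffer's BO in a scanout-capable domain, bind it in GART
 * and record its GPU address in the amdgpu_framebuffer. For freshly created
 * planes, the DC plane buffer attributes (tiling, DCC, address) are filled in
 * here as well, since they were not known during atomic check.
 */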
7580 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7581 				      struct drm_plane_state *new_state)
7582 {
7583 	struct amdgpu_framebuffer *afb;
7584 	struct drm_gem_object *obj;
7585 	struct amdgpu_device *adev;
7586 	struct amdgpu_bo *rbo;
7587 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7588 	uint32_t domain;
7589 	int r;
7590 
7591 	if (!new_state->fb) {
7592 		DRM_DEBUG_KMS("No FB bound\n");
7593 		return 0;
7594 	}
7595 
7596 	afb = to_amdgpu_framebuffer(new_state->fb);
7597 	obj = new_state->fb->obj[0];
7598 	rbo = gem_to_amdgpu_bo(obj);
7599 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7600 
7601 	r = amdgpu_bo_reserve(rbo, true);
7602 	if (r) {
7603 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7604 		return r;
7605 	}
7606 
7607 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7608 	if (r) {
7609 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7610 		goto error_unlock;
7611 	}
7612 
7613 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7614 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7615 	else
7616 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7617 
7618 	r = amdgpu_bo_pin(rbo, domain);
7619 	if (unlikely(r != 0)) {
7620 		if (r != -ERESTARTSYS)
7621 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7622 		goto error_unlock;
7623 	}
7624 
7625 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7626 	if (unlikely(r != 0)) {
7627 		DRM_ERROR("%p bind failed\n", rbo);
7628 		goto error_unpin;
7629 	}
7630 
7631 	r = drm_gem_plane_helper_prepare_fb(plane, new_state);
7632 	if (unlikely(r != 0))
7633 		goto error_unpin;
7634 
7635 	amdgpu_bo_unreserve(rbo);
7636 
7637 	afb->address = amdgpu_bo_gpu_offset(rbo);
7638 
7639 	amdgpu_bo_ref(rbo);
7640 
7641 	/**
7642 	 * We don't do surface updates on planes that have been newly created,
7643 	 * but we also don't have the afb->address during atomic check.
7644 	 *
7645 	 * Fill in buffer attributes depending on the address here, but only on
7646 	 * newly created planes since they're not being used by DC yet and this
7647 	 * won't modify global state.
7648 	 */
7649 	dm_plane_state_old = to_dm_plane_state(plane->state);
7650 	dm_plane_state_new = to_dm_plane_state(new_state);
7651 
7652 	if (dm_plane_state_new->dc_state &&
7653 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7654 		struct dc_plane_state *plane_state =
7655 			dm_plane_state_new->dc_state;
7656 		bool force_disable_dcc = !plane_state->dcc.enable;
7657 
7658 		fill_plane_buffer_attributes(
7659 			adev, afb, plane_state->format, plane_state->rotation,
7660 			afb->tiling_flags,
7661 			&plane_state->tiling_info, &plane_state->plane_size,
7662 			&plane_state->dcc, &plane_state->address,
7663 			afb->tmz_surface, force_disable_dcc);
7664 	}
7665 
7666 	return 0;
7667 
7668 error_unpin:
7669 	amdgpu_bo_unpin(rbo);
7670 
7671 error_unlock:
7672 	amdgpu_bo_unreserve(rbo);
7673 	return r;
7674 }
7675 
7676 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7677 				       struct drm_plane_state *old_state)
7678 {
7679 	struct amdgpu_bo *rbo;
7680 	int r;
7681 
7682 	if (!old_state->fb)
7683 		return;
7684 
7685 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7686 	r = amdgpu_bo_reserve(rbo, false);
7687 	if (unlikely(r)) {
7688 		DRM_ERROR("failed to reserve rbo before unpin\n");
7689 		return;
7690 	}
7691 
7692 	amdgpu_bo_unpin(rbo);
7693 	amdgpu_bo_unreserve(rbo);
7694 	amdgpu_bo_unref(&rbo);
7695 }
7696 
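/*
 * Validate the plane's viewport against the CRTC (rejecting viewports that
 * are too small for the hardware, with the width requirement doubled for
 * pipe split) and convert DC's scaling limits into DRM's 16.16 fixed-point
 * src/dst ratios before running the common plane state check.
 */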
7697 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7698 				       struct drm_crtc_state *new_crtc_state)
7699 {
7700 	struct drm_framebuffer *fb = state->fb;
7701 	int min_downscale, max_upscale;
7702 	int min_scale = 0;
7703 	int max_scale = INT_MAX;
7704 
7705 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7706 	if (fb && state->crtc) {
7707 		/* Validate viewport to cover the case when only the position changes */
7708 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7709 			int viewport_width = state->crtc_w;
7710 			int viewport_height = state->crtc_h;
7711 
7712 			if (state->crtc_x < 0)
7713 				viewport_width += state->crtc_x;
7714 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7715 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7716 
7717 			if (state->crtc_y < 0)
7718 				viewport_height += state->crtc_y;
7719 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7720 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7721 
7722 			if (viewport_width < 0 || viewport_height < 0) {
7723 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7724 				return -EINVAL;
7725 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7726 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7727 				return -EINVAL;
7728 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7729 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7730 				return -EINVAL;
7731 			}
7732 
7733 		}
7734 
7735 		/* Get min/max allowed scaling factors from plane caps. */
7736 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7737 					     &min_downscale, &max_upscale);
7738 		/*
7739 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7740 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7741 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7742 		 */
7743 		min_scale = (1000 << 16) / max_upscale;
7744 		max_scale = (1000 << 16) / min_downscale;
7745 	}
7746 
7747 	return drm_atomic_helper_check_plane_state(
7748 		state, new_crtc_state, min_scale, max_scale, true, true);
7749 }
7750 
7751 static int dm_plane_atomic_check(struct drm_plane *plane,
7752 				 struct drm_atomic_state *state)
7753 {
7754 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7755 										 plane);
7756 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7757 	struct dc *dc = adev->dm.dc;
7758 	struct dm_plane_state *dm_plane_state;
7759 	struct dc_scaling_info scaling_info;
7760 	struct drm_crtc_state *new_crtc_state;
7761 	int ret;
7762 
7763 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7764 
7765 	dm_plane_state = to_dm_plane_state(new_plane_state);
7766 
7767 	if (!dm_plane_state->dc_state)
7768 		return 0;
7769 
7770 	new_crtc_state =
7771 		drm_atomic_get_new_crtc_state(state,
7772 					      new_plane_state->crtc);
7773 	if (!new_crtc_state)
7774 		return -EINVAL;
7775 
7776 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7777 	if (ret)
7778 		return ret;
7779 
7780 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7781 	if (ret)
7782 		return ret;
7783 
7784 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7785 		return 0;
7786 
7787 	return -EINVAL;
7788 }
7789 
7790 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7791 				       struct drm_atomic_state *state)
7792 {
7793 	/* Only support async updates on cursor planes. */
7794 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7795 		return -EINVAL;
7796 
7797 	return 0;
7798 }
7799 
7800 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7801 					 struct drm_atomic_state *state)
7802 {
7803 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7804 									   plane);
7805 	struct drm_plane_state *old_state =
7806 		drm_atomic_get_old_plane_state(state, plane);
7807 
7808 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7809 
7810 	swap(plane->state->fb, new_state->fb);
7811 
7812 	plane->state->src_x = new_state->src_x;
7813 	plane->state->src_y = new_state->src_y;
7814 	plane->state->src_w = new_state->src_w;
7815 	plane->state->src_h = new_state->src_h;
7816 	plane->state->crtc_x = new_state->crtc_x;
7817 	plane->state->crtc_y = new_state->crtc_y;
7818 	plane->state->crtc_w = new_state->crtc_w;
7819 	plane->state->crtc_h = new_state->crtc_h;
7820 
7821 	handle_cursor_update(plane, old_state);
7822 }
7823 
7824 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7825 	.prepare_fb = dm_plane_helper_prepare_fb,
7826 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7827 	.atomic_check = dm_plane_atomic_check,
7828 	.atomic_async_check = dm_plane_atomic_async_check,
7829 	.atomic_async_update = dm_plane_atomic_async_update
7830 };
7831 
7832 /*
7833  * TODO: these are currently initialized to rgb formats only.
7834  * For future use cases we should either initialize them dynamically based on
7835  * plane capabilities, or initialize this array to all formats, so internal drm
7836  * check will succeed, and let DC implement proper check
7837  */
7838 static const uint32_t rgb_formats[] = {
7839 	DRM_FORMAT_XRGB8888,
7840 	DRM_FORMAT_ARGB8888,
7841 	DRM_FORMAT_RGBA8888,
7842 	DRM_FORMAT_XRGB2101010,
7843 	DRM_FORMAT_XBGR2101010,
7844 	DRM_FORMAT_ARGB2101010,
7845 	DRM_FORMAT_ABGR2101010,
7846 	DRM_FORMAT_XRGB16161616,
7847 	DRM_FORMAT_XBGR16161616,
7848 	DRM_FORMAT_ARGB16161616,
7849 	DRM_FORMAT_ABGR16161616,
7850 	DRM_FORMAT_XBGR8888,
7851 	DRM_FORMAT_ABGR8888,
7852 	DRM_FORMAT_RGB565,
7853 };
7854 
7855 static const uint32_t overlay_formats[] = {
7856 	DRM_FORMAT_XRGB8888,
7857 	DRM_FORMAT_ARGB8888,
7858 	DRM_FORMAT_RGBA8888,
7859 	DRM_FORMAT_XBGR8888,
7860 	DRM_FORMAT_ABGR8888,
7861 	DRM_FORMAT_RGB565
7862 };
7863 
7864 static const u32 cursor_formats[] = {
7865 	DRM_FORMAT_ARGB8888
7866 };
7867 
7868 static int get_plane_formats(const struct drm_plane *plane,
7869 			     const struct dc_plane_cap *plane_cap,
7870 			     uint32_t *formats, int max_formats)
7871 {
7872 	int i, num_formats = 0;
7873 
7874 	/*
7875 	 * TODO: Query support for each group of formats directly from
7876 	 * DC plane caps. This will require adding more formats to the
7877 	 * caps list.
7878 	 */
7879 
7880 	switch (plane->type) {
7881 	case DRM_PLANE_TYPE_PRIMARY:
7882 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7883 			if (num_formats >= max_formats)
7884 				break;
7885 
7886 			formats[num_formats++] = rgb_formats[i];
7887 		}
7888 
7889 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7890 			formats[num_formats++] = DRM_FORMAT_NV12;
7891 		if (plane_cap && plane_cap->pixel_format_support.p010)
7892 			formats[num_formats++] = DRM_FORMAT_P010;
7893 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7894 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7895 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7896 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7897 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7898 		}
7899 		break;
7900 
7901 	case DRM_PLANE_TYPE_OVERLAY:
7902 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7903 			if (num_formats >= max_formats)
7904 				break;
7905 
7906 			formats[num_formats++] = overlay_formats[i];
7907 		}
7908 		break;
7909 
7910 	case DRM_PLANE_TYPE_CURSOR:
7911 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7912 			if (num_formats >= max_formats)
7913 				break;
7914 
7915 			formats[num_formats++] = cursor_formats[i];
7916 		}
7917 		break;
7918 	}
7919 
7920 	return num_formats;
7921 }
7922 
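/*
 * Register a DRM plane with the formats and modifiers supported for its
 * type, then attach blending, YCbCr color-encoding and rotation properties
 * based on the DC plane caps and the ASIC generation.
 */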
7923 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7924 				struct drm_plane *plane,
7925 				unsigned long possible_crtcs,
7926 				const struct dc_plane_cap *plane_cap)
7927 {
7928 	uint32_t formats[32];
7929 	int num_formats;
7930 	int res = -EPERM;
7931 	unsigned int supported_rotations;
7932 	uint64_t *modifiers = NULL;
7933 
7934 	num_formats = get_plane_formats(plane, plane_cap, formats,
7935 					ARRAY_SIZE(formats));
7936 
7937 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7938 	if (res)
7939 		return res;
7940 
7941 	if (modifiers == NULL)
7942 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7943 
7944 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7945 				       &dm_plane_funcs, formats, num_formats,
7946 				       modifiers, plane->type, NULL);
7947 	kfree(modifiers);
7948 	if (res)
7949 		return res;
7950 
7951 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7952 	    plane_cap && plane_cap->per_pixel_alpha) {
7953 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7954 					  BIT(DRM_MODE_BLEND_PREMULTI);
7955 
7956 		drm_plane_create_alpha_property(plane);
7957 		drm_plane_create_blend_mode_property(plane, blend_caps);
7958 	}
7959 
7960 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7961 	    plane_cap &&
7962 	    (plane_cap->pixel_format_support.nv12 ||
7963 	     plane_cap->pixel_format_support.p010)) {
7964 		/* This only affects YUV formats. */
7965 		drm_plane_create_color_properties(
7966 			plane,
7967 			BIT(DRM_COLOR_YCBCR_BT601) |
7968 			BIT(DRM_COLOR_YCBCR_BT709) |
7969 			BIT(DRM_COLOR_YCBCR_BT2020),
7970 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7971 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7972 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7973 	}
7974 
7975 	supported_rotations =
7976 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7977 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7978 
7979 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7980 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7981 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7982 						   supported_rotations);
7983 
7984 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7985 
7986 	/* Create (reset) the plane state */
7987 	if (plane->funcs->reset)
7988 		plane->funcs->reset(plane);
7989 
7990 	return 0;
7991 }
7992 
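/*
 * Allocate a cursor plane and an amdgpu_crtc, initialize the CRTC with the
 * given primary plane, and enable DRM color management sized to DC's LUT
 * capabilities.
 */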
7993 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7994 			       struct drm_plane *plane,
7995 			       uint32_t crtc_index)
7996 {
7997 	struct amdgpu_crtc *acrtc = NULL;
7998 	struct drm_plane *cursor_plane;
7999 
8000 	int res = -ENOMEM;
8001 
8002 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8003 	if (!cursor_plane)
8004 		goto fail;
8005 
8006 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
8008 
8009 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8010 	if (!acrtc)
8011 		goto fail;
8012 
8013 	res = drm_crtc_init_with_planes(
8014 			dm->ddev,
8015 			&acrtc->base,
8016 			plane,
8017 			cursor_plane,
8018 			&amdgpu_dm_crtc_funcs, NULL);
8019 
8020 	if (res)
8021 		goto fail;
8022 
8023 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8024 
	/* Create (reset) the CRTC state */
8026 	if (acrtc->base.funcs->reset)
8027 		acrtc->base.funcs->reset(&acrtc->base);
8028 
8029 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8030 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8031 
8032 	acrtc->crtc_id = crtc_index;
8033 	acrtc->base.enabled = false;
8034 	acrtc->otg_inst = -1;
8035 
8036 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8037 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8038 				   true, MAX_COLOR_LUT_ENTRIES);
8039 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8040 
8041 	return 0;
8042 
8043 fail:
8044 	kfree(acrtc);
8045 	kfree(cursor_plane);
8046 	return res;
8047 }
8048 
8049 
8050 static int to_drm_connector_type(enum signal_type st)
8051 {
8052 	switch (st) {
8053 	case SIGNAL_TYPE_HDMI_TYPE_A:
8054 		return DRM_MODE_CONNECTOR_HDMIA;
8055 	case SIGNAL_TYPE_EDP:
8056 		return DRM_MODE_CONNECTOR_eDP;
8057 	case SIGNAL_TYPE_LVDS:
8058 		return DRM_MODE_CONNECTOR_LVDS;
8059 	case SIGNAL_TYPE_RGB:
8060 		return DRM_MODE_CONNECTOR_VGA;
8061 	case SIGNAL_TYPE_DISPLAY_PORT:
8062 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8063 		return DRM_MODE_CONNECTOR_DisplayPort;
8064 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8065 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8066 		return DRM_MODE_CONNECTOR_DVID;
8067 	case SIGNAL_TYPE_VIRTUAL:
8068 		return DRM_MODE_CONNECTOR_VIRTUAL;
8069 
8070 	default:
8071 		return DRM_MODE_CONNECTOR_Unknown;
8072 	}
8073 }
8074 
8075 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8076 {
8077 	struct drm_encoder *encoder;
8078 
8079 	/* There is only one encoder per connector */
8080 	drm_connector_for_each_possible_encoder(connector, encoder)
8081 		return encoder;
8082 
8083 	return NULL;
8084 }
8085 
8086 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8087 {
8088 	struct drm_encoder *encoder;
8089 	struct amdgpu_encoder *amdgpu_encoder;
8090 
8091 	encoder = amdgpu_dm_connector_to_encoder(connector);
8092 
8093 	if (encoder == NULL)
8094 		return;
8095 
8096 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8097 
8098 	amdgpu_encoder->native_mode.clock = 0;
8099 
8100 	if (!list_empty(&connector->probed_modes)) {
8101 		struct drm_display_mode *preferred_mode = NULL;
8102 
8103 		list_for_each_entry(preferred_mode,
8104 				    &connector->probed_modes,
8105 				    head) {
8106 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8107 				amdgpu_encoder->native_mode = *preferred_mode;
8108 
8109 			break;
8110 		}
8111 
8112 	}
8113 }
8114 
8115 static struct drm_display_mode *
8116 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8117 			     char *name,
8118 			     int hdisplay, int vdisplay)
8119 {
8120 	struct drm_device *dev = encoder->dev;
8121 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8122 	struct drm_display_mode *mode = NULL;
8123 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8124 
8125 	mode = drm_mode_duplicate(dev, native_mode);
8126 
8127 	if (mode == NULL)
8128 		return NULL;
8129 
8130 	mode->hdisplay = hdisplay;
8131 	mode->vdisplay = vdisplay;
8132 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8133 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8134 
	return mode;
}
8138 
8139 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8140 						 struct drm_connector *connector)
8141 {
8142 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8143 	struct drm_display_mode *mode = NULL;
8144 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8145 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8146 				to_amdgpu_dm_connector(connector);
8147 	int i;
8148 	int n;
8149 	struct mode_size {
8150 		char name[DRM_DISPLAY_MODE_LEN];
8151 		int w;
8152 		int h;
8153 	} common_modes[] = {
8154 		{  "640x480",  640,  480},
8155 		{  "800x600",  800,  600},
8156 		{ "1024x768", 1024,  768},
8157 		{ "1280x720", 1280,  720},
8158 		{ "1280x800", 1280,  800},
8159 		{"1280x1024", 1280, 1024},
8160 		{ "1440x900", 1440,  900},
8161 		{"1680x1050", 1680, 1050},
8162 		{"1600x1200", 1600, 1200},
8163 		{"1920x1080", 1920, 1080},
8164 		{"1920x1200", 1920, 1200}
8165 	};
8166 
8167 	n = ARRAY_SIZE(common_modes);
8168 
8169 	for (i = 0; i < n; i++) {
8170 		struct drm_display_mode *curmode = NULL;
8171 		bool mode_existed = false;
8172 
8173 		if (common_modes[i].w > native_mode->hdisplay ||
8174 		    common_modes[i].h > native_mode->vdisplay ||
8175 		   (common_modes[i].w == native_mode->hdisplay &&
8176 		    common_modes[i].h == native_mode->vdisplay))
8177 			continue;
8178 
8179 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8180 			if (common_modes[i].w == curmode->hdisplay &&
8181 			    common_modes[i].h == curmode->vdisplay) {
8182 				mode_existed = true;
8183 				break;
8184 			}
8185 		}
8186 
8187 		if (mode_existed)
8188 			continue;
8189 
8190 		mode = amdgpu_dm_create_common_mode(encoder,
8191 				common_modes[i].name, common_modes[i].w,
8192 				common_modes[i].h);
8193 		if (!mode)
8194 			continue;
8195 
8196 		drm_mode_probed_add(connector, mode);
8197 		amdgpu_dm_connector->num_modes++;
8198 	}
8199 }
8200 
8201 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8202 {
8203 	struct drm_encoder *encoder;
8204 	struct amdgpu_encoder *amdgpu_encoder;
8205 	const struct drm_display_mode *native_mode;
8206 
8207 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8208 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8209 		return;
8210 
8211 	encoder = amdgpu_dm_connector_to_encoder(connector);
8212 	if (!encoder)
8213 		return;
8214 
8215 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8216 
8217 	native_mode = &amdgpu_encoder->native_mode;
8218 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8219 		return;
8220 
8221 	drm_connector_set_panel_orientation_with_quirk(connector,
8222 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8223 						       native_mode->hdisplay,
8224 						       native_mode->vdisplay);
8225 }
8226 
8227 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8228 					      struct edid *edid)
8229 {
8230 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8231 			to_amdgpu_dm_connector(connector);
8232 
8233 	if (edid) {
8234 		/* empty probed_modes */
8235 		INIT_LIST_HEAD(&connector->probed_modes);
8236 		amdgpu_dm_connector->num_modes =
8237 				drm_add_edid_modes(connector, edid);
8238 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode. Modes later in the probed mode
		 * list could have a higher, preferred resolution. For
		 * example, 3840x2160 in the base EDID preferred timing and
		 * 4096x2160 preferred resolution in a DID extension block
		 * later.
		 */
8247 		drm_mode_sort(&connector->probed_modes);
8248 		amdgpu_dm_get_native_mode(connector);
8249 
8250 		/* Freesync capabilities are reset by calling
8251 		 * drm_add_edid_modes() and need to be
8252 		 * restored here.
8253 		 */
8254 		amdgpu_dm_update_freesync_caps(connector, edid);
8255 
8256 		amdgpu_set_panel_orientation(connector);
8257 	} else {
8258 		amdgpu_dm_connector->num_modes = 0;
8259 	}
8260 }
8261 
8262 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8263 			      struct drm_display_mode *mode)
8264 {
8265 	struct drm_display_mode *m;
8266 
8267 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8268 		if (drm_mode_equal(m, mode))
8269 			return true;
8270 	}
8271 
8272 	return false;
8273 }
8274 
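/*
 * Synthesize additional fixed-refresh modes within the panel's FreeSync range
 * by stretching the vertical total of the highest-refresh probed mode to hit
 * a set of common frame rates.
 */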
8275 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8276 {
8277 	const struct drm_display_mode *m;
8278 	struct drm_display_mode *new_mode;
8279 	uint i;
8280 	uint32_t new_modes_count = 0;
8281 
8282 	/* Standard FPS values
8283 	 *
8284 	 * 23.976       - TV/NTSC
8285 	 * 24 	        - Cinema
8286 	 * 25 	        - TV/PAL
8287 	 * 29.97        - TV/NTSC
8288 	 * 30 	        - TV/NTSC
8289 	 * 48 	        - Cinema HFR
8290 	 * 50 	        - TV/PAL
8291 	 * 60 	        - Commonly used
8292 	 * 48,72,96,120 - Multiples of 24
8293 	 */
8294 	static const uint32_t common_rates[] = {
8295 		23976, 24000, 25000, 29970, 30000,
8296 		48000, 50000, 60000, 72000, 96000, 120000
8297 	};
8298 
8299 	/*
8300 	 * Find mode with highest refresh rate with the same resolution
8301 	 * as the preferred mode. Some monitors report a preferred mode
8302 	 * with lower resolution than the highest refresh rate supported.
8303 	 */
8304 
8305 	m = get_highest_refresh_rate_mode(aconnector, true);
8306 	if (!m)
8307 		return 0;
8308 
8309 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8310 		uint64_t target_vtotal, target_vtotal_diff;
8311 		uint64_t num, den;
8312 
8313 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8314 			continue;
8315 
8316 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8317 		    common_rates[i] > aconnector->max_vfreq * 1000)
8318 			continue;
8319 
8320 		num = (unsigned long long)m->clock * 1000 * 1000;
8321 		den = common_rates[i] * (unsigned long long)m->htotal;
8322 		target_vtotal = div_u64(num, den);
8323 		target_vtotal_diff = target_vtotal - m->vtotal;
8324 
8325 		/* Check for illegal modes */
8326 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8327 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8328 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8329 			continue;
8330 
8331 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8332 		if (!new_mode)
8333 			goto out;
8334 
8335 		new_mode->vtotal += (u16)target_vtotal_diff;
8336 		new_mode->vsync_start += (u16)target_vtotal_diff;
8337 		new_mode->vsync_end += (u16)target_vtotal_diff;
8338 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8339 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8340 
8341 		if (!is_duplicate_mode(aconnector, new_mode)) {
8342 			drm_mode_probed_add(&aconnector->base, new_mode);
8343 			new_modes_count += 1;
8344 		} else
8345 			drm_mode_destroy(aconnector->base.dev, new_mode);
8346 	}
8347  out:
8348 	return new_modes_count;
8349 }
8350 
8351 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8352 						   struct edid *edid)
8353 {
8354 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8355 		to_amdgpu_dm_connector(connector);
8356 
8357 	if (!edid)
8358 		return;
8359 
8360 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8361 		amdgpu_dm_connector->num_modes +=
8362 			add_fs_modes(amdgpu_dm_connector);
8363 }
8364 
8365 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8366 {
8367 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8368 			to_amdgpu_dm_connector(connector);
8369 	struct drm_encoder *encoder;
8370 	struct edid *edid = amdgpu_dm_connector->edid;
8371 
8372 	encoder = amdgpu_dm_connector_to_encoder(connector);
8373 
8374 	if (!drm_edid_is_valid(edid)) {
8375 		amdgpu_dm_connector->num_modes =
8376 				drm_add_modes_noedid(connector, 640, 480);
8377 	} else {
8378 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8379 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8380 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8381 	}
8382 	amdgpu_dm_fbc_init(connector);
8383 
8384 	return amdgpu_dm_connector->num_modes;
8385 }
8386 
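/*
 * Common initialization for amdgpu_dm connectors: allocate default connector
 * state, configure HPD polling and YCbCr 4:2:0 support per connector type,
 * and attach the DRM properties (scaling, underscan, max bpc, ABM, HDR
 * metadata, VRR capable, content protection) that apply to this connector.
 */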
8387 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8388 				     struct amdgpu_dm_connector *aconnector,
8389 				     int connector_type,
8390 				     struct dc_link *link,
8391 				     int link_index)
8392 {
8393 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8394 
8395 	/*
8396 	 * Some of the properties below require access to state, like bpc.
8397 	 * Allocate some default initial connector state with our reset helper.
8398 	 */
8399 	if (aconnector->base.funcs->reset)
8400 		aconnector->base.funcs->reset(&aconnector->base);
8401 
8402 	aconnector->connector_id = link_index;
8403 	aconnector->dc_link = link;
8404 	aconnector->base.interlace_allowed = false;
8405 	aconnector->base.doublescan_allowed = false;
8406 	aconnector->base.stereo_allowed = false;
8407 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8408 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8409 	aconnector->audio_inst = -1;
8410 	mutex_init(&aconnector->hpd_lock);
8411 
8412 	/*
8413 	 * Configure HPD hot plug support. connector->polled defaults to 0,
8414 	 * which means HPD hot plug is not supported.
8415 	 */
8416 	switch (connector_type) {
8417 	case DRM_MODE_CONNECTOR_HDMIA:
8418 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8419 		aconnector->base.ycbcr_420_allowed =
8420 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8421 		break;
8422 	case DRM_MODE_CONNECTOR_DisplayPort:
8423 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8424 		link->link_enc = link_enc_cfg_get_link_enc(link);
8425 		ASSERT(link->link_enc);
8426 		if (link->link_enc)
8427 			aconnector->base.ycbcr_420_allowed =
8428 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8429 		break;
8430 	case DRM_MODE_CONNECTOR_DVID:
8431 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8432 		break;
8433 	default:
8434 		break;
8435 	}
8436 
8437 	drm_object_attach_property(&aconnector->base.base,
8438 				dm->ddev->mode_config.scaling_mode_property,
8439 				DRM_MODE_SCALE_NONE);
8440 
8441 	drm_object_attach_property(&aconnector->base.base,
8442 				adev->mode_info.underscan_property,
8443 				UNDERSCAN_OFF);
8444 	drm_object_attach_property(&aconnector->base.base,
8445 				adev->mode_info.underscan_hborder_property,
8446 				0);
8447 	drm_object_attach_property(&aconnector->base.base,
8448 				adev->mode_info.underscan_vborder_property,
8449 				0);
8450 
8451 	if (!aconnector->mst_port)
8452 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8453 
8454 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8455 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8456 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8457 
8458 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8459 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8460 		drm_object_attach_property(&aconnector->base.base,
8461 				adev->mode_info.abm_level_property, 0);
8462 	}
8463 
8464 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8465 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8466 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8467 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8468 
8469 		if (!aconnector->mst_port)
8470 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8471 
8472 #ifdef CONFIG_DRM_AMD_DC_HDCP
8473 		if (adev->dm.hdcp_workqueue)
8474 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8475 #endif
8476 	}
8477 }
8478 
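/*
 * i2c_algorithm.master_xfer implementation: translate the i2c_msg array into
 * a DC i2c_command and submit it through dc_submit_i2c(). Returns the number
 * of messages transferred on success, -EIO otherwise.
 */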
8479 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8480 			      struct i2c_msg *msgs, int num)
8481 {
8482 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8483 	struct ddc_service *ddc_service = i2c->ddc_service;
8484 	struct i2c_command cmd;
8485 	int i;
8486 	int result = -EIO;
8487 
8488 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8489 
8490 	if (!cmd.payloads)
8491 		return result;
8492 
8493 	cmd.number_of_payloads = num;
8494 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8495 	cmd.speed = 100;
8496 
8497 	for (i = 0; i < num; i++) {
8498 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8499 		cmd.payloads[i].address = msgs[i].addr;
8500 		cmd.payloads[i].length = msgs[i].len;
8501 		cmd.payloads[i].data = msgs[i].buf;
8502 	}
8503 
8504 	if (dc_submit_i2c(
8505 			ddc_service->ctx->dc,
8506 			ddc_service->ddc_pin->hw_info.ddc_channel,
8507 			&cmd))
8508 		result = num;
8509 
8510 	kfree(cmd.payloads);
8511 	return result;
8512 }
8513 
8514 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8515 {
8516 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8517 }
8518 
8519 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8520 	.master_xfer = amdgpu_dm_i2c_xfer,
8521 	.functionality = amdgpu_dm_i2c_func,
8522 };
8523 
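/*
 * Allocate and initialize an amdgpu_i2c_adapter wrapping the given DC DDC
 * service. The caller registers it with i2c_add_adapter() and frees it on
 * failure (see amdgpu_dm_connector_init()).
 */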
8524 static struct amdgpu_i2c_adapter *
8525 create_i2c(struct ddc_service *ddc_service,
8526 	   int link_index,
8527 	   int *res)
8528 {
8529 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8530 	struct amdgpu_i2c_adapter *i2c;
8531 
8532 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8533 	if (!i2c)
8534 		return NULL;
8535 	i2c->base.owner = THIS_MODULE;
8536 	i2c->base.class = I2C_CLASS_DDC;
8537 	i2c->base.dev.parent = &adev->pdev->dev;
8538 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8539 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8540 	i2c_set_adapdata(&i2c->base, i2c);
8541 	i2c->ddc_service = ddc_service;
8542 	if (i2c->ddc_service->ddc_pin)
8543 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8544 
8545 	return i2c;
8546 }
8547 
8548 
8549 /*
8550  * Note: this function assumes that dc_link_detect() was called for the
8551  * dc_link which will be represented by this aconnector.
8552  */
8553 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8554 				    struct amdgpu_dm_connector *aconnector,
8555 				    uint32_t link_index,
8556 				    struct amdgpu_encoder *aencoder)
8557 {
8558 	int res = 0;
8559 	int connector_type;
8560 	struct dc *dc = dm->dc;
8561 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8562 	struct amdgpu_i2c_adapter *i2c;
8563 
8564 	link->priv = aconnector;
8565 
8566 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8567 
8568 	i2c = create_i2c(link->ddc, link->link_index, &res);
8569 	if (!i2c) {
8570 		DRM_ERROR("Failed to create i2c adapter data\n");
8571 		return -ENOMEM;
8572 	}
8573 
8574 	aconnector->i2c = i2c;
8575 	res = i2c_add_adapter(&i2c->base);
8576 
8577 	if (res) {
8578 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8579 		goto out_free;
8580 	}
8581 
8582 	connector_type = to_drm_connector_type(link->connector_signal);
8583 
8584 	res = drm_connector_init_with_ddc(
8585 			dm->ddev,
8586 			&aconnector->base,
8587 			&amdgpu_dm_connector_funcs,
8588 			connector_type,
8589 			&i2c->base);
8590 
8591 	if (res) {
8592 		DRM_ERROR("connector_init failed\n");
8593 		aconnector->connector_id = -1;
8594 		goto out_free;
8595 	}
8596 
8597 	drm_connector_helper_add(
8598 			&aconnector->base,
8599 			&amdgpu_dm_connector_helper_funcs);
8600 
8601 	amdgpu_dm_connector_init_helper(
8602 		dm,
8603 		aconnector,
8604 		connector_type,
8605 		link,
8606 		link_index);
8607 
8608 	drm_connector_attach_encoder(
8609 		&aconnector->base, &aencoder->base);
8610 
8611 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8612 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8613 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8614 
8615 out_free:
8616 	if (res) {
8617 		kfree(i2c);
8618 		aconnector->i2c = NULL;
8619 	}
8620 	return res;
8621 }
8622 
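/*
 * Build the possible_crtcs bitmask for the DM encoder: one bit per CRTC,
 * capped at six CRTCs (0x3f).
 */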
8623 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8624 {
8625 	switch (adev->mode_info.num_crtc) {
8626 	case 1:
8627 		return 0x1;
8628 	case 2:
8629 		return 0x3;
8630 	case 3:
8631 		return 0x7;
8632 	case 4:
8633 		return 0xf;
8634 	case 5:
8635 		return 0x1f;
8636 	case 6:
8637 	default:
8638 		return 0x3f;
8639 	}
8640 }
8641 
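/*
 * Create the single TMDS encoder used for this link and allow it to drive
 * any of the device's CRTCs.
 */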
8642 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8643 				  struct amdgpu_encoder *aencoder,
8644 				  uint32_t link_index)
8645 {
8646 	struct amdgpu_device *adev = drm_to_adev(dev);
8647 
8648 	int res = drm_encoder_init(dev,
8649 				   &aencoder->base,
8650 				   &amdgpu_dm_encoder_funcs,
8651 				   DRM_MODE_ENCODER_TMDS,
8652 				   NULL);
8653 
8654 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8655 
8656 	if (!res)
8657 		aencoder->encoder_id = link_index;
8658 	else
8659 		aencoder->encoder_id = -1;
8660 
8661 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8662 
8663 	return res;
8664 }
8665 
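/*
 * Enable or disable the per-CRTC interrupts (pageflip and, when secure
 * display is configured, vline0) together with the DRM vblank machinery.
 */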
8666 static void manage_dm_interrupts(struct amdgpu_device *adev,
8667 				 struct amdgpu_crtc *acrtc,
8668 				 bool enable)
8669 {
8670 	/*
8671 	 * We have no guarantee that the frontend index maps to the same
8672 	 * backend index - some even map to more than one.
8673 	 *
8674 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8675 	 */
8676 	int irq_type =
8677 		amdgpu_display_crtc_idx_to_irq_type(
8678 			adev,
8679 			acrtc->crtc_id);
8680 
8681 	if (enable) {
8682 		drm_crtc_vblank_on(&acrtc->base);
8683 		amdgpu_irq_get(
8684 			adev,
8685 			&adev->pageflip_irq,
8686 			irq_type);
8687 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8688 		amdgpu_irq_get(
8689 			adev,
8690 			&adev->vline0_irq,
8691 			irq_type);
8692 #endif
8693 	} else {
8694 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8695 		amdgpu_irq_put(
8696 			adev,
8697 			&adev->vline0_irq,
8698 			irq_type);
8699 #endif
8700 		amdgpu_irq_put(
8701 			adev,
8702 			&adev->pageflip_irq,
8703 			irq_type);
8704 		drm_crtc_vblank_off(&acrtc->base);
8705 	}
8706 }
8707 
8708 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8709 				      struct amdgpu_crtc *acrtc)
8710 {
8711 	int irq_type =
8712 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8713 
8714 	/*
8715 	 * This reads the current state for the IRQ and forces a reapply of
8716 	 * the setting to hardware.
8717 	 */
8718 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8719 }
8720 
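/*
 * Compare old and new connector state to decide whether the scaling or
 * underscan settings changed in a way that requires a stream update.
 */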
8721 static bool
8722 is_scaling_state_different(const struct dm_connector_state *dm_state,
8723 			   const struct dm_connector_state *old_dm_state)
8724 {
8725 	if (dm_state->scaling != old_dm_state->scaling)
8726 		return true;
8727 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8728 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8729 			return true;
8730 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8731 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8732 			return true;
8733 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8734 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8735 		return true;
8736 	return false;
8737 }
8738 
8739 #ifdef CONFIG_DRM_AMD_DC_HDCP
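/*
 * Decide whether HDCP needs to be (re)enabled for this connector based on
 * the old and new content protection state. The requested state may also be
 * adjusted here, e.g. an ENABLED state restored across S3 is demoted back to
 * DESIRED so HDCP gets renegotiated.
 */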
8740 static bool is_content_protection_different(struct drm_connector_state *state,
8741 					    const struct drm_connector_state *old_state,
8742 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8743 {
8744 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8745 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8746 
8747 	/* Handle: Type0/1 change */
8748 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8749 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8750 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8751 		return true;
8752 	}
8753 
8754 	/* CP is being re-enabled, ignore this.
8755 	 *
8756 	 * Handles:	ENABLED -> DESIRED
8757 	 */
8758 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8759 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8760 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8761 		return false;
8762 	}
8763 
8764 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8765 	 *
8766 	 * Handles:	UNDESIRED -> ENABLED
8767 	 */
8768 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8769 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8770 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8771 
8772 	/* Stream removed and re-enabled
8773 	 *
8774 	 * Can sometimes overlap with the HPD case,
8775 	 * thus set update_hdcp to false to avoid
8776 	 * setting HDCP multiple times.
8777 	 *
8778 	 * Handles:	DESIRED -> DESIRED (Special case)
8779 	 */
8780 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8781 		state->crtc && state->crtc->enabled &&
8782 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8783 		dm_con_state->update_hdcp = false;
8784 		return true;
8785 	}
8786 
8787 	/* Hot-plug, headless s3, dpms
8788 	 *
8789 	 * Only start HDCP if the display is connected/enabled.
8790 	 * update_hdcp flag will be set to false until the next
8791 	 * HPD comes in.
8792 	 *
8793 	 * Handles:	DESIRED -> DESIRED (Special case)
8794 	 */
8795 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8796 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8797 		dm_con_state->update_hdcp = false;
8798 		return true;
8799 	}
8800 
8801 	/*
8802 	 * Handles:	UNDESIRED -> UNDESIRED
8803 	 *		DESIRED -> DESIRED
8804 	 *		ENABLED -> ENABLED
8805 	 */
8806 	if (old_state->content_protection == state->content_protection)
8807 		return false;
8808 
8809 	/*
8810 	 * Handles:	UNDESIRED -> DESIRED
8811 	 *		DESIRED -> UNDESIRED
8812 	 *		ENABLED -> UNDESIRED
8813 	 */
8814 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8815 		return true;
8816 
8817 	/*
8818 	 * Handles:	DESIRED -> ENABLED
8819 	 */
8820 	return false;
8821 }
8822 
8823 #endif
8824 static void remove_stream(struct amdgpu_device *adev,
8825 			  struct amdgpu_crtc *acrtc,
8826 			  struct dc_stream_state *stream)
8827 {
8828 	/* this is the update mode case */
8829 
8830 	acrtc->otg_inst = -1;
8831 	acrtc->enabled = false;
8832 }
8833 
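/*
 * Translate the DRM cursor plane position into a dc_cursor_position,
 * folding negative coordinates into the hotspot so the cursor can be
 * partially scrolled off the top/left edge of the screen.
 */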
8834 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8835 			       struct dc_cursor_position *position)
8836 {
8837 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8838 	int x, y;
8839 	int xorigin = 0, yorigin = 0;
8840 
8841 	if (!crtc || !plane->state->fb)
8842 		return 0;
8843 
8844 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8845 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8846 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8847 			  __func__,
8848 			  plane->state->crtc_w,
8849 			  plane->state->crtc_h);
8850 		return -EINVAL;
8851 	}
8852 
8853 	x = plane->state->crtc_x;
8854 	y = plane->state->crtc_y;
8855 
8856 	if (x <= -amdgpu_crtc->max_cursor_width ||
8857 	    y <= -amdgpu_crtc->max_cursor_height)
8858 		return 0;
8859 
8860 	if (x < 0) {
8861 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8862 		x = 0;
8863 	}
8864 	if (y < 0) {
8865 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8866 		y = 0;
8867 	}
8868 	position->enable = true;
8869 	position->translate_by_source = true;
8870 	position->x = x;
8871 	position->y = y;
8872 	position->x_hotspot = xorigin;
8873 	position->y_hotspot = yorigin;
8874 
8875 	return 0;
8876 }
8877 
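/*
 * Program cursor attributes and position for a cursor plane update, or
 * disable the cursor when the computed position is not enabled.
 */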
8878 static void handle_cursor_update(struct drm_plane *plane,
8879 				 struct drm_plane_state *old_plane_state)
8880 {
8881 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8882 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8883 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8884 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8885 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8886 	uint64_t address = afb ? afb->address : 0;
8887 	struct dc_cursor_position position = {0};
8888 	struct dc_cursor_attributes attributes;
8889 	int ret;
8890 
8891 	if (!plane->state->fb && !old_plane_state->fb)
8892 		return;
8893 
8894 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8895 		      __func__,
8896 		      amdgpu_crtc->crtc_id,
8897 		      plane->state->crtc_w,
8898 		      plane->state->crtc_h);
8899 
8900 	ret = get_cursor_position(plane, crtc, &position);
8901 	if (ret)
8902 		return;
8903 
8904 	if (!position.enable) {
8905 		/* turn off cursor */
8906 		if (crtc_state && crtc_state->stream) {
8907 			mutex_lock(&adev->dm.dc_lock);
8908 			dc_stream_set_cursor_position(crtc_state->stream,
8909 						      &position);
8910 			mutex_unlock(&adev->dm.dc_lock);
8911 		}
8912 		return;
8913 	}
8914 
8915 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8916 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8917 
8918 	memset(&attributes, 0, sizeof(attributes));
8919 	attributes.address.high_part = upper_32_bits(address);
8920 	attributes.address.low_part  = lower_32_bits(address);
8921 	attributes.width             = plane->state->crtc_w;
8922 	attributes.height            = plane->state->crtc_h;
8923 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8924 	attributes.rotation_angle    = 0;
8925 	attributes.attribute_flags.value = 0;
8926 
8927 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8928 
8929 	if (crtc_state->stream) {
8930 		mutex_lock(&adev->dm.dc_lock);
8931 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8932 							 &attributes))
8933 			DRM_ERROR("DC failed to set cursor attributes\n");
8934 
8935 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8936 						   &position))
8937 			DRM_ERROR("DC failed to set cursor position\n");
8938 		mutex_unlock(&adev->dm.dc_lock);
8939 	}
8940 }
8941 
8942 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8943 {
8944 
8945 	assert_spin_locked(&acrtc->base.dev->event_lock);
8946 	WARN_ON(acrtc->event);
8947 
8948 	acrtc->event = acrtc->base.state->event;
8949 
8950 	/* Set the flip status */
8951 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8952 
8953 	/* Mark this event as consumed */
8954 	acrtc->base.state->event = NULL;
8955 
8956 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8957 		     acrtc->crtc_id);
8958 }
8959 
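/*
 * Rebuild the VRR infopacket and timing adjustment for a flip and push them
 * onto the stream, tracking whether either changed so the commit path knows
 * a stream update is needed.
 */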
8960 static void update_freesync_state_on_stream(
8961 	struct amdgpu_display_manager *dm,
8962 	struct dm_crtc_state *new_crtc_state,
8963 	struct dc_stream_state *new_stream,
8964 	struct dc_plane_state *surface,
8965 	u32 flip_timestamp_in_us)
8966 {
8967 	struct mod_vrr_params vrr_params;
8968 	struct dc_info_packet vrr_infopacket = {0};
8969 	struct amdgpu_device *adev = dm->adev;
8970 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8971 	unsigned long flags;
8972 	bool pack_sdp_v1_3 = false;
8973 
8974 	if (!new_stream)
8975 		return;
8976 
8977 	/*
8978 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8979 	 * For now it's sufficient to just guard against these conditions.
8980 	 */
8981 
8982 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8983 		return;
8984 
8985 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8986 	vrr_params = acrtc->dm_irq_params.vrr_params;
8987 
8988 	if (surface) {
8989 		mod_freesync_handle_preflip(
8990 			dm->freesync_module,
8991 			surface,
8992 			new_stream,
8993 			flip_timestamp_in_us,
8994 			&vrr_params);
8995 
8996 		if (adev->family < AMDGPU_FAMILY_AI &&
8997 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8998 			mod_freesync_handle_v_update(dm->freesync_module,
8999 						     new_stream, &vrr_params);
9000 
9001 			/* Need to call this before the frame ends. */
9002 			dc_stream_adjust_vmin_vmax(dm->dc,
9003 						   new_crtc_state->stream,
9004 						   &vrr_params.adjust);
9005 		}
9006 	}
9007 
9008 	mod_freesync_build_vrr_infopacket(
9009 		dm->freesync_module,
9010 		new_stream,
9011 		&vrr_params,
9012 		PACKET_TYPE_VRR,
9013 		TRANSFER_FUNC_UNKNOWN,
9014 		&vrr_infopacket,
9015 		pack_sdp_v1_3);
9016 
9017 	new_crtc_state->freesync_timing_changed |=
9018 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9019 			&vrr_params.adjust,
9020 			sizeof(vrr_params.adjust)) != 0);
9021 
9022 	new_crtc_state->freesync_vrr_info_changed |=
9023 		(memcmp(&new_crtc_state->vrr_infopacket,
9024 			&vrr_infopacket,
9025 			sizeof(vrr_infopacket)) != 0);
9026 
9027 	acrtc->dm_irq_params.vrr_params = vrr_params;
9028 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9029 
9030 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9031 	new_stream->vrr_infopacket = vrr_infopacket;
9032 
9033 	if (new_crtc_state->freesync_vrr_info_changed)
9034 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9035 			      new_crtc_state->base.crtc->base.id,
9036 			      (int)new_crtc_state->base.vrr_enabled,
9037 			      (int)vrr_params.state);
9038 
9039 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9040 }
9041 
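/*
 * Recompute the freesync/VRR parameters for a CRTC and mirror them into
 * dm_irq_params so the vblank/vupdate IRQ handlers see a consistent copy.
 */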
9042 static void update_stream_irq_parameters(
9043 	struct amdgpu_display_manager *dm,
9044 	struct dm_crtc_state *new_crtc_state)
9045 {
9046 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9047 	struct mod_vrr_params vrr_params;
9048 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9049 	struct amdgpu_device *adev = dm->adev;
9050 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9051 	unsigned long flags;
9052 
9053 	if (!new_stream)
9054 		return;
9055 
9056 	/*
9057 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9058 	 * For now it's sufficient to just guard against these conditions.
9059 	 */
9060 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9061 		return;
9062 
9063 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9064 	vrr_params = acrtc->dm_irq_params.vrr_params;
9065 
9066 	if (new_crtc_state->vrr_supported &&
9067 	    config.min_refresh_in_uhz &&
9068 	    config.max_refresh_in_uhz) {
9069 		/*
9070 		 * if freesync compatible mode was set, config.state will be set
9071 		 * in atomic check
9072 		 */
9073 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9074 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9075 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9076 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9077 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9078 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9079 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9080 		} else {
9081 			config.state = new_crtc_state->base.vrr_enabled ?
9082 						     VRR_STATE_ACTIVE_VARIABLE :
9083 						     VRR_STATE_INACTIVE;
9084 		}
9085 	} else {
9086 		config.state = VRR_STATE_UNSUPPORTED;
9087 	}
9088 
9089 	mod_freesync_build_vrr_params(dm->freesync_module,
9090 				      new_stream,
9091 				      &config, &vrr_params);
9092 
9093 	new_crtc_state->freesync_timing_changed |=
9094 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9095 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9096 
9097 	new_crtc_state->freesync_config = config;
9098 	/* Copy state for access from DM IRQ handler */
9099 	acrtc->dm_irq_params.freesync_config = config;
9100 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9101 	acrtc->dm_irq_params.vrr_params = vrr_params;
9102 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9103 }
9104 
9105 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9106 					    struct dm_crtc_state *new_state)
9107 {
9108 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9109 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9110 
9111 	if (!old_vrr_active && new_vrr_active) {
9112 		/* Transition VRR inactive -> active:
9113 		 * While VRR is active, we must not disable vblank irq, as a
9114 		 * reenable after disable would compute bogus vblank/pflip
9115 		 * timestamps if it likely happened inside display front-porch.
9116 		 *
9117 		 * We also need vupdate irq for the actual core vblank handling
9118 		 * at end of vblank.
9119 		 */
9120 		dm_set_vupdate_irq(new_state->base.crtc, true);
9121 		drm_crtc_vblank_get(new_state->base.crtc);
9122 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9123 				 __func__, new_state->base.crtc->base.id);
9124 	} else if (old_vrr_active && !new_vrr_active) {
9125 		/* Transition VRR active -> inactive:
9126 		 * Allow vblank irq disable again for fixed refresh rate.
9127 		 */
9128 		dm_set_vupdate_irq(new_state->base.crtc, false);
9129 		drm_crtc_vblank_put(new_state->base.crtc);
9130 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9131 				 __func__, new_state->base.crtc->base.id);
9132 	}
9133 }
9134 
9135 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9136 {
9137 	struct drm_plane *plane;
9138 	struct drm_plane_state *old_plane_state;
9139 	int i;
9140 
9141 	/*
9142 	 * TODO: Make this per-stream so we don't issue redundant updates for
9143 	 * commits with multiple streams.
9144 	 */
9145 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9146 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9147 			handle_cursor_update(plane, old_plane_state);
9148 }
9149 
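/*
 * Build a dc_surface_update bundle for all planes on @pcrtc, throttle the
 * flip against the target vblank, arm the pageflip event and hand the
 * bundle to DC via dc_commit_updates_for_stream().
 */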
9150 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9151 				    struct dc_state *dc_state,
9152 				    struct drm_device *dev,
9153 				    struct amdgpu_display_manager *dm,
9154 				    struct drm_crtc *pcrtc,
9155 				    bool wait_for_vblank)
9156 {
9157 	uint32_t i;
9158 	uint64_t timestamp_ns;
9159 	struct drm_plane *plane;
9160 	struct drm_plane_state *old_plane_state, *new_plane_state;
9161 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9162 	struct drm_crtc_state *new_pcrtc_state =
9163 			drm_atomic_get_new_crtc_state(state, pcrtc);
9164 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9165 	struct dm_crtc_state *dm_old_crtc_state =
9166 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9167 	int planes_count = 0, vpos, hpos;
9168 	unsigned long flags;
9169 	struct amdgpu_bo *abo;
9170 	uint32_t target_vblank, last_flip_vblank;
9171 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9172 	bool pflip_present = false;
9173 	struct {
9174 		struct dc_surface_update surface_updates[MAX_SURFACES];
9175 		struct dc_plane_info plane_infos[MAX_SURFACES];
9176 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9177 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9178 		struct dc_stream_update stream_update;
9179 	} *bundle;
9180 
9181 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9182 
9183 	if (!bundle) {
9184 		dm_error("Failed to allocate update bundle\n");
9185 		goto cleanup;
9186 	}
9187 
9188 	/*
9189 	 * Disable the cursor first if we're disabling all the planes.
9190 	 * It'll remain on the screen after the planes are re-enabled
9191 	 * if we don't.
9192 	 */
9193 	if (acrtc_state->active_planes == 0)
9194 		amdgpu_dm_commit_cursors(state);
9195 
9196 	/* update planes when needed */
9197 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9198 		struct drm_crtc *crtc = new_plane_state->crtc;
9199 		struct drm_crtc_state *new_crtc_state;
9200 		struct drm_framebuffer *fb = new_plane_state->fb;
9201 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9202 		bool plane_needs_flip;
9203 		struct dc_plane_state *dc_plane;
9204 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9205 
9206 		/* Cursor plane is handled after stream updates */
9207 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9208 			continue;
9209 
9210 		if (!fb || !crtc || pcrtc != crtc)
9211 			continue;
9212 
9213 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9214 		if (!new_crtc_state->active)
9215 			continue;
9216 
9217 		dc_plane = dm_new_plane_state->dc_state;
9218 
9219 		bundle->surface_updates[planes_count].surface = dc_plane;
9220 		if (new_pcrtc_state->color_mgmt_changed) {
9221 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9222 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9223 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9224 		}
9225 
9226 		fill_dc_scaling_info(dm->adev, new_plane_state,
9227 				     &bundle->scaling_infos[planes_count]);
9228 
9229 		bundle->surface_updates[planes_count].scaling_info =
9230 			&bundle->scaling_infos[planes_count];
9231 
9232 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9233 
9234 		pflip_present = pflip_present || plane_needs_flip;
9235 
9236 		if (!plane_needs_flip) {
9237 			planes_count += 1;
9238 			continue;
9239 		}
9240 
9241 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9242 		fill_dc_plane_info_and_addr(
9243 			dm->adev, new_plane_state,
9244 			afb->tiling_flags,
9245 			&bundle->plane_infos[planes_count],
9246 			&bundle->flip_addrs[planes_count].address,
9247 			afb->tmz_surface, false);
9248 
9249 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9250 				 new_plane_state->plane->index,
9251 				 bundle->plane_infos[planes_count].dcc.enable);
9252 
9253 		bundle->surface_updates[planes_count].plane_info =
9254 			&bundle->plane_infos[planes_count];
9255 
9256 		/*
9257 		 * Only allow immediate flips for fast updates that don't
9258 		 * change FB pitch, DCC state, rotation or mirroring.
9259 		 */
9260 		bundle->flip_addrs[planes_count].flip_immediate =
9261 			crtc->state->async_flip &&
9262 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9263 
9264 		timestamp_ns = ktime_get_ns();
9265 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9266 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9267 		bundle->surface_updates[planes_count].surface = dc_plane;
9268 
9269 		if (!bundle->surface_updates[planes_count].surface) {
9270 			DRM_ERROR("No surface for CRTC: id=%d\n",
9271 					acrtc_attach->crtc_id);
9272 			continue;
9273 		}
9274 
9275 		if (plane == pcrtc->primary)
9276 			update_freesync_state_on_stream(
9277 				dm,
9278 				acrtc_state,
9279 				acrtc_state->stream,
9280 				dc_plane,
9281 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9282 
9283 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9284 				 __func__,
9285 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9286 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9287 
9288 		planes_count += 1;
9289 
9290 	}
9291 
9292 	if (pflip_present) {
9293 		if (!vrr_active) {
9294 			/* Use old throttling in non-vrr fixed refresh rate mode
9295 			 * to keep flip scheduling based on target vblank counts
9296 			 * working in a backwards compatible way, e.g., for
9297 			 * clients using the GLX_OML_sync_control extension or
9298 			 * DRI3/Present extension with defined target_msc.
9299 			 */
9300 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9301 		} else {
9303 			/* For variable refresh rate mode only:
9304 			 * Get vblank of last completed flip to avoid > 1 vrr
9305 			 * flips per video frame by use of throttling, but allow
9306 			 * flip programming anywhere in the possibly large
9307 			 * variable vrr vblank interval for fine-grained flip
9308 			 * timing control and more opportunity to avoid stutter
9309 			 * on late submission of flips.
9310 			 */
9311 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9312 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9313 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9314 		}
9315 
9316 		target_vblank = last_flip_vblank + wait_for_vblank;
9317 
9318 		/*
9319 		 * Wait until we're out of the vertical blank period before the one
9320 		 * targeted by the flip
9321 		 */
9322 		while ((acrtc_attach->enabled &&
9323 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9324 							    0, &vpos, &hpos, NULL,
9325 							    NULL, &pcrtc->hwmode)
9326 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9327 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9328 			(int)(target_vblank -
9329 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9330 			usleep_range(1000, 1100);
9331 		}
9332 
9333 		/**
9334 		 * Prepare the flip event for the pageflip interrupt to handle.
9335 		 *
9336 		 * This only works in the case where we've already turned on the
9337 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9338 		 * from 0 -> n planes we have to skip a hardware generated event
9339 		 * and rely on sending it from software.
9340 		 */
9341 		if (acrtc_attach->base.state->event &&
9342 		    acrtc_state->active_planes > 0 &&
9343 		    !acrtc_state->force_dpms_off) {
9344 			drm_crtc_vblank_get(pcrtc);
9345 
9346 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9347 
9348 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9349 			prepare_flip_isr(acrtc_attach);
9350 
9351 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9352 		}
9353 
9354 		if (acrtc_state->stream) {
9355 			if (acrtc_state->freesync_vrr_info_changed)
9356 				bundle->stream_update.vrr_infopacket =
9357 					&acrtc_state->stream->vrr_infopacket;
9358 		}
9359 	}
9360 
9361 	/* Update the planes if changed or disable if we don't have any. */
9362 	if ((planes_count || acrtc_state->active_planes == 0) &&
9363 		acrtc_state->stream) {
9364 #if defined(CONFIG_DRM_AMD_DC_DCN)
9365 		/*
9366 		 * If PSR or idle optimizations are enabled then flush out
9367 		 * any pending work before hardware programming.
9368 		 */
9369 		if (dm->vblank_control_workqueue)
9370 			flush_workqueue(dm->vblank_control_workqueue);
9371 #endif
9372 
9373 		bundle->stream_update.stream = acrtc_state->stream;
9374 		if (new_pcrtc_state->mode_changed) {
9375 			bundle->stream_update.src = acrtc_state->stream->src;
9376 			bundle->stream_update.dst = acrtc_state->stream->dst;
9377 		}
9378 
9379 		if (new_pcrtc_state->color_mgmt_changed) {
9380 			/*
9381 			 * TODO: This isn't fully correct since we've actually
9382 			 * already modified the stream in place.
9383 			 */
9384 			bundle->stream_update.gamut_remap =
9385 				&acrtc_state->stream->gamut_remap_matrix;
9386 			bundle->stream_update.output_csc_transform =
9387 				&acrtc_state->stream->csc_color_matrix;
9388 			bundle->stream_update.out_transfer_func =
9389 				acrtc_state->stream->out_transfer_func;
9390 		}
9391 
9392 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9393 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9394 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9395 
9396 		/*
9397 		 * If FreeSync state on the stream has changed then we need to
9398 		 * re-adjust the min/max bounds now that DC doesn't handle this
9399 		 * as part of commit.
9400 		 */
9401 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9402 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9403 			dc_stream_adjust_vmin_vmax(
9404 				dm->dc, acrtc_state->stream,
9405 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9406 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9407 		}
9408 		mutex_lock(&dm->dc_lock);
9409 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9410 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9411 			amdgpu_dm_psr_disable(acrtc_state->stream);
9412 
9413 		dc_commit_updates_for_stream(dm->dc,
9414 						     bundle->surface_updates,
9415 						     planes_count,
9416 						     acrtc_state->stream,
9417 						     &bundle->stream_update,
9418 						     dc_state);
9419 
9420 		/**
9421 		 * Enable or disable the interrupts on the backend.
9422 		 *
9423 		 * Most pipes are put into power gating when unused.
9424 		 *
9425 		 * When power gating is enabled on a pipe we lose the
9426 		 * interrupt enablement state when power gating is disabled.
9427 		 *
9428 		 * So we need to update the IRQ control state in hardware
9429 		 * whenever the pipe turns on (since it could be previously
9430 		 * power gated) or off (since some pipes can't be power gated
9431 		 * on some ASICs).
9432 		 */
9433 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9434 			dm_update_pflip_irq_state(drm_to_adev(dev),
9435 						  acrtc_attach);
9436 
9437 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9438 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9439 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9440 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9441 
9442 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9443 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9444 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9445 			struct amdgpu_dm_connector *aconn =
9446 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9447 
9448 			if (aconn->psr_skip_count > 0)
9449 				aconn->psr_skip_count--;
9450 
9451 			/* Allow PSR when skip count is 0. */
9452 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9453 		} else {
9454 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9455 		}
9456 
9457 		mutex_unlock(&dm->dc_lock);
9458 	}
9459 
9460 	/*
9461 	 * Update cursor state *after* programming all the planes.
9462 	 * This avoids redundant programming in the case where we're going
9463 	 * to be disabling a single plane - those pipes are being disabled.
9464 	 */
9465 	if (acrtc_state->active_planes)
9466 		amdgpu_dm_commit_cursors(state);
9467 
9468 cleanup:
9469 	kfree(bundle);
9470 }
9471 
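/*
 * Notify the audio component about ELD changes: signal removal for
 * connectors that lost or changed their CRTC, and (re)announce the audio
 * instance for connectors that went through a modeset.
 */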
9472 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9473 				   struct drm_atomic_state *state)
9474 {
9475 	struct amdgpu_device *adev = drm_to_adev(dev);
9476 	struct amdgpu_dm_connector *aconnector;
9477 	struct drm_connector *connector;
9478 	struct drm_connector_state *old_con_state, *new_con_state;
9479 	struct drm_crtc_state *new_crtc_state;
9480 	struct dm_crtc_state *new_dm_crtc_state;
9481 	const struct dc_stream_status *status;
9482 	int i, inst;
9483 
9484 	/* Notify device removals. */
9485 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9486 		if (old_con_state->crtc != new_con_state->crtc) {
9487 			/* CRTC changes require notification. */
9488 			goto notify;
9489 		}
9490 
9491 		if (!new_con_state->crtc)
9492 			continue;
9493 
9494 		new_crtc_state = drm_atomic_get_new_crtc_state(
9495 			state, new_con_state->crtc);
9496 
9497 		if (!new_crtc_state)
9498 			continue;
9499 
9500 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9501 			continue;
9502 
9503 	notify:
9504 		aconnector = to_amdgpu_dm_connector(connector);
9505 
9506 		mutex_lock(&adev->dm.audio_lock);
9507 		inst = aconnector->audio_inst;
9508 		aconnector->audio_inst = -1;
9509 		mutex_unlock(&adev->dm.audio_lock);
9510 
9511 		amdgpu_dm_audio_eld_notify(adev, inst);
9512 	}
9513 
9514 	/* Notify audio device additions. */
9515 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9516 		if (!new_con_state->crtc)
9517 			continue;
9518 
9519 		new_crtc_state = drm_atomic_get_new_crtc_state(
9520 			state, new_con_state->crtc);
9521 
9522 		if (!new_crtc_state)
9523 			continue;
9524 
9525 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9526 			continue;
9527 
9528 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9529 		if (!new_dm_crtc_state->stream)
9530 			continue;
9531 
9532 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9533 		if (!status)
9534 			continue;
9535 
9536 		aconnector = to_amdgpu_dm_connector(connector);
9537 
9538 		mutex_lock(&adev->dm.audio_lock);
9539 		inst = status->audio_inst;
9540 		aconnector->audio_inst = inst;
9541 		mutex_unlock(&adev->dm.audio_lock);
9542 
9543 		amdgpu_dm_audio_eld_notify(adev, inst);
9544 	}
9545 }
9546 
9547 /**
9548  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9549  * @crtc_state: the DRM CRTC state
9550  * @stream_state: the DC stream state
9551  *
9552  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9553  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9554  */
9555 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9556 						struct dc_stream_state *stream_state)
9557 {
9558 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9559 }
9560 
9561 /**
9562  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9563  * @state: The atomic state to commit
9564  *
9565  * This will tell DC to commit the constructed DC state from atomic_check,
9566  * programming the hardware. Any failure here implies a hardware failure, since
9567  * atomic check should have filtered anything non-kosher.
9568  */
9569 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9570 {
9571 	struct drm_device *dev = state->dev;
9572 	struct amdgpu_device *adev = drm_to_adev(dev);
9573 	struct amdgpu_display_manager *dm = &adev->dm;
9574 	struct dm_atomic_state *dm_state;
9575 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9576 	uint32_t i, j;
9577 	struct drm_crtc *crtc;
9578 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9579 	unsigned long flags;
9580 	bool wait_for_vblank = true;
9581 	struct drm_connector *connector;
9582 	struct drm_connector_state *old_con_state, *new_con_state;
9583 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9584 	int crtc_disable_count = 0;
9585 	bool mode_set_reset_required = false;
9586 	int r;
9587 
9588 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9589 
9590 	r = drm_atomic_helper_wait_for_fences(dev, state, false);
9591 	if (unlikely(r))
9592 		DRM_ERROR("Waiting for fences timed out!");
9593 
9594 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9595 
9596 	dm_state = dm_atomic_get_new_state(state);
9597 	if (dm_state && dm_state->context) {
9598 		dc_state = dm_state->context;
9599 	} else {
9600 		/* No state changes, retain current state. */
9601 		dc_state_temp = dc_create_state(dm->dc);
9602 		ASSERT(dc_state_temp);
9603 		dc_state = dc_state_temp;
9604 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9605 	}
9606 
9607 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9608 				       new_crtc_state, i) {
9609 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9610 
9611 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9612 
9613 		if (old_crtc_state->active &&
9614 		    (!new_crtc_state->active ||
9615 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9616 			manage_dm_interrupts(adev, acrtc, false);
9617 			dc_stream_release(dm_old_crtc_state->stream);
9618 		}
9619 	}
9620 
9621 	drm_atomic_helper_calc_timestamping_constants(state);
9622 
9623 	/* update changed items */
9624 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9625 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9626 
9627 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9628 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9629 
9630 		drm_dbg_state(state->dev,
9631 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9632 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9633 			"connectors_changed:%d\n",
9634 			acrtc->crtc_id,
9635 			new_crtc_state->enable,
9636 			new_crtc_state->active,
9637 			new_crtc_state->planes_changed,
9638 			new_crtc_state->mode_changed,
9639 			new_crtc_state->active_changed,
9640 			new_crtc_state->connectors_changed);
9641 
9642 		/* Disable cursor if disabling crtc */
9643 		if (old_crtc_state->active && !new_crtc_state->active) {
9644 			struct dc_cursor_position position;
9645 
9646 			memset(&position, 0, sizeof(position));
9647 			mutex_lock(&dm->dc_lock);
9648 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9649 			mutex_unlock(&dm->dc_lock);
9650 		}
9651 
9652 		/* Copy all transient state flags into dc state */
9653 		if (dm_new_crtc_state->stream) {
9654 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9655 							    dm_new_crtc_state->stream);
9656 		}
9657 
9658 		/* handles headless hotplug case, updating new_state and
9659 		 * aconnector as needed
9660 		 */
9661 
9662 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9663 
9664 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9665 
9666 			if (!dm_new_crtc_state->stream) {
9667 				/*
9668 				 * This could happen because of issues with
9669 				 * userspace notification delivery.
9670 				 * In this case userspace tries to set a mode
9671 				 * on a display which is in fact disconnected.
9672 				 * dc_sink is NULL on the aconnector in this
9673 				 * case and we expect a mode reset to come soon.
9674 				 *
9675 				 * This can also happen when an unplug occurs
9676 				 * during the resume sequence.
9677 				 *
9678 				 * In this case, we want to pretend we still
9679 				 * have a sink to keep the pipe running so that
9680 				 * hw state is consistent with the sw state.
9681 				 */
9682 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9683 						__func__, acrtc->base.base.id);
9684 				continue;
9685 			}
9686 
9687 			if (dm_old_crtc_state->stream)
9688 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9689 
9690 			pm_runtime_get_noresume(dev->dev);
9691 
9692 			acrtc->enabled = true;
9693 			acrtc->hw_mode = new_crtc_state->mode;
9694 			crtc->hwmode = new_crtc_state->mode;
9695 			mode_set_reset_required = true;
9696 		} else if (modereset_required(new_crtc_state)) {
9697 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9698 			/* i.e. reset mode */
9699 			if (dm_old_crtc_state->stream)
9700 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9701 
9702 			mode_set_reset_required = true;
9703 		}
9704 	} /* for_each_crtc_in_state() */
9705 
9706 	if (dc_state) {
9707 		/* if there is a mode set or reset, disable eDP PSR */
9708 		if (mode_set_reset_required) {
9709 #if defined(CONFIG_DRM_AMD_DC_DCN)
9710 			if (dm->vblank_control_workqueue)
9711 				flush_workqueue(dm->vblank_control_workqueue);
9712 #endif
9713 			amdgpu_dm_psr_disable_all(dm);
9714 		}
9715 
9716 		dm_enable_per_frame_crtc_master_sync(dc_state);
9717 		mutex_lock(&dm->dc_lock);
9718 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9719 #if defined(CONFIG_DRM_AMD_DC_DCN)
9720 		/* Allow idle optimization when vblank count is 0 for display off */
9721 		if (dm->active_vblank_irq_count == 0)
9722 			dc_allow_idle_optimizations(dm->dc, true);
9723 #endif
9724 		mutex_unlock(&dm->dc_lock);
9725 	}
9726 
9727 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9728 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9729 
9730 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9731 
9732 		if (dm_new_crtc_state->stream != NULL) {
9733 			const struct dc_stream_status *status =
9734 					dc_stream_get_status(dm_new_crtc_state->stream);
9735 
9736 			if (!status)
9737 				status = dc_stream_get_status_from_state(dc_state,
9738 									 dm_new_crtc_state->stream);
9739 			if (!status)
9740 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9741 			else
9742 				acrtc->otg_inst = status->primary_otg_inst;
9743 		}
9744 	}
9745 #ifdef CONFIG_DRM_AMD_DC_HDCP
9746 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9747 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9748 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9749 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9750 
9751 		new_crtc_state = NULL;
9752 
9753 		if (acrtc)
9754 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9755 
9756 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9757 
9758 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9759 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9760 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9761 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9762 			dm_new_con_state->update_hdcp = true;
9763 			continue;
9764 		}
9765 
9766 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9767 			hdcp_update_display(
9768 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9769 				new_con_state->hdcp_content_type,
9770 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9771 	}
9772 #endif
9773 
9774 	/* Handle connector state changes */
9775 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9776 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9777 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9778 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9779 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9780 		struct dc_stream_update stream_update;
9781 		struct dc_info_packet hdr_packet;
9782 		struct dc_stream_status *status = NULL;
9783 		bool abm_changed, hdr_changed, scaling_changed;
9784 
9785 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9786 		memset(&stream_update, 0, sizeof(stream_update));
9787 
9788 		if (acrtc) {
9789 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9790 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9791 		}
9792 
9793 		/* Skip any modesets/resets */
9794 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9795 			continue;
9796 
9797 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9798 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9799 
9800 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9801 							     dm_old_con_state);
9802 
9803 		abm_changed = dm_new_crtc_state->abm_level !=
9804 			      dm_old_crtc_state->abm_level;
9805 
9806 		hdr_changed =
9807 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9808 
9809 		if (!scaling_changed && !abm_changed && !hdr_changed)
9810 			continue;
9811 
9812 		stream_update.stream = dm_new_crtc_state->stream;
9813 		if (scaling_changed) {
9814 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9815 					dm_new_con_state, dm_new_crtc_state->stream);
9816 
9817 			stream_update.src = dm_new_crtc_state->stream->src;
9818 			stream_update.dst = dm_new_crtc_state->stream->dst;
9819 		}
9820 
9821 		if (abm_changed) {
9822 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9823 
9824 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9825 		}
9826 
9827 		if (hdr_changed) {
9828 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9829 			stream_update.hdr_static_metadata = &hdr_packet;
9830 		}
9831 
9832 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9833 
9834 		if (WARN_ON(!status))
9835 			continue;
9836 
9837 		WARN_ON(!status->plane_count);
9838 
9839 		/*
9840 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9841 		 * Here we create an empty update on each plane.
9842 		 * To fix this, DC should permit updating only stream properties.
9843 		 */
9844 		for (j = 0; j < status->plane_count; j++)
9845 			dummy_updates[j].surface = status->plane_states[0];
9846 
9847 
9848 		mutex_lock(&dm->dc_lock);
9849 		dc_commit_updates_for_stream(dm->dc,
9850 						     dummy_updates,
9851 						     status->plane_count,
9852 						     dm_new_crtc_state->stream,
9853 						     &stream_update,
9854 						     dc_state);
9855 		mutex_unlock(&dm->dc_lock);
9856 	}
9857 
9858 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9859 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9860 				      new_crtc_state, i) {
9861 		if (old_crtc_state->active && !new_crtc_state->active)
9862 			crtc_disable_count++;
9863 
9864 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9865 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9866 
9867 		/* For freesync config update on crtc state and params for irq */
9868 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9869 
9870 		/* Handle vrr on->off / off->on transitions */
9871 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9872 						dm_new_crtc_state);
9873 	}
9874 
9875 	/**
9876 	 * Enable interrupts for CRTCs that are newly enabled or went through
9877 	 * a modeset. It was intentionally deferred until after the front end
9878 	 * state was modified to wait until the OTG was on and so the IRQ
9879 	 * handlers didn't access stale or invalid state.
9880 	 */
9881 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9882 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9883 #ifdef CONFIG_DEBUG_FS
9884 		bool configure_crc = false;
9885 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9886 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9887 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9888 #endif
9889 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9890 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9891 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9892 #endif
9893 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9894 
9895 		if (new_crtc_state->active &&
9896 		    (!old_crtc_state->active ||
9897 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9898 			dc_stream_retain(dm_new_crtc_state->stream);
9899 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9900 			manage_dm_interrupts(adev, acrtc, true);
9901 
9902 #ifdef CONFIG_DEBUG_FS
9903 			/**
9904 			 * Frontend may have changed so reapply the CRC capture
9905 			 * settings for the stream.
9906 			 */
9907 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9908 
9909 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9910 				configure_crc = true;
9911 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9912 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9913 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9914 					acrtc->dm_irq_params.crc_window.update_win = true;
9915 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9916 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9917 					crc_rd_wrk->crtc = crtc;
9918 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9919 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9920 				}
9921 #endif
9922 			}
9923 
9924 			if (configure_crc)
9925 				if (amdgpu_dm_crtc_configure_crc_source(
9926 					crtc, dm_new_crtc_state, cur_crc_src))
9927 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9928 #endif
9929 		}
9930 	}
9931 
9932 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9933 		if (new_crtc_state->async_flip)
9934 			wait_for_vblank = false;
9935 
9936 	/* update planes when needed per crtc*/
9937 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9938 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9939 
9940 		if (dm_new_crtc_state->stream)
9941 			amdgpu_dm_commit_planes(state, dc_state, dev,
9942 						dm, crtc, wait_for_vblank);
9943 	}
9944 
9945 	/* Update audio instances for each connector. */
9946 	amdgpu_dm_commit_audio(dev, state);
9947 
9948 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9949 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9950 	/* restore the backlight level */
9951 	for (i = 0; i < dm->num_of_edps; i++) {
9952 		if (dm->backlight_dev[i] &&
9953 		    (dm->actual_brightness[i] != dm->brightness[i]))
9954 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9955 	}
9956 #endif
9957 	/*
9958 	 * Send a vblank event for each CRTC event not handled in the flip path
9959 	 * and clear it so drm_atomic_helper_commit_hw_done() sees it consumed.
9960 	 */
9961 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9962 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9963 
9964 		if (new_crtc_state->event)
9965 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9966 
9967 		new_crtc_state->event = NULL;
9968 	}
9969 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9970 
9971 	/* Signal HW programming completion */
9972 	drm_atomic_helper_commit_hw_done(state);
9973 
9974 	if (wait_for_vblank)
9975 		drm_atomic_helper_wait_for_flip_done(dev, state);
9976 
9977 	drm_atomic_helper_cleanup_planes(dev, state);
9978 
9979 	/* return the stolen vga memory back to VRAM */
9980 	if (!adev->mman.keep_stolen_vga_memory)
9981 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9982 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9983 
9984 	/*
9985 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9986 	 * so we can put the GPU into runtime suspend if we're not driving any
9987 	 * displays anymore
9988 	 */
9989 	for (i = 0; i < crtc_disable_count; i++)
9990 		pm_runtime_put_autosuspend(dev->dev);
9991 	pm_runtime_mark_last_busy(dev->dev);
9992 
9993 	if (dc_state_temp)
9994 		dc_release_state(dc_state_temp);
9995 }
9996 
9997 
9998 static int dm_force_atomic_commit(struct drm_connector *connector)
9999 {
10000 	int ret = 0;
10001 	struct drm_device *ddev = connector->dev;
10002 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10003 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10004 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10005 	struct drm_connector_state *conn_state;
10006 	struct drm_crtc_state *crtc_state;
10007 	struct drm_plane_state *plane_state;
10008 
10009 	if (!state)
10010 		return -ENOMEM;
10011 
10012 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10013 
10014 	/* Construct an atomic state to restore previous display settings */
10015 
10016 	/* Attach the connector to drm_atomic_state */
10019 	conn_state = drm_atomic_get_connector_state(state, connector);
10020 
10021 	ret = PTR_ERR_OR_ZERO(conn_state);
10022 	if (ret)
10023 		goto out;
10024 
10025 	/* Attach CRTC to drm_atomic_state */
10026 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10027 
10028 	ret = PTR_ERR_OR_ZERO(crtc_state);
10029 	if (ret)
10030 		goto out;
10031 
10032 	/* force a restore */
10033 	crtc_state->mode_changed = true;
10034 
10035 	/* Attach plane to drm_atomic_state */
10036 	plane_state = drm_atomic_get_plane_state(state, plane);
10037 
10038 	ret = PTR_ERR_OR_ZERO(plane_state);
10039 	if (ret)
10040 		goto out;
10041 
10042 	/* Call commit internally with the state we just constructed */
10043 	ret = drm_atomic_commit(state);
10044 
10045 out:
10046 	drm_atomic_state_put(state);
10047 	if (ret)
10048 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10049 
10050 	return ret;
10051 }
10052 
10053 /*
10054  * This function handles all cases when a set mode does not come upon hotplug.
10055  * This includes when a display is unplugged and then plugged back into the
10056  * same port, and when running without usermode desktop manager support.
10057  */
10058 void dm_restore_drm_connector_state(struct drm_device *dev,
10059 				    struct drm_connector *connector)
10060 {
10061 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10062 	struct amdgpu_crtc *disconnected_acrtc;
10063 	struct dm_crtc_state *acrtc_state;
10064 
10065 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10066 		return;
10067 
10068 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10069 	if (!disconnected_acrtc)
10070 		return;
10071 
10072 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10073 	if (!acrtc_state->stream)
10074 		return;
10075 
10076 	/*
10077 	 * If the previous sink is not released and differs from the current one,
10078 	 * we deduce we are in a state where we cannot rely on a usermode call
10079 	 * to turn on the display, so we do it here.
10080 	 */
10081 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10082 		dm_force_atomic_commit(&aconnector->base);
10083 }
10084 
10085 /*
10086  * Grabs all modesetting locks to serialize against any blocking commits and
10087  * waits for completion of all non-blocking commits.
10088  */
10089 static int do_aquire_global_lock(struct drm_device *dev,
10090 				 struct drm_atomic_state *state)
10091 {
10092 	struct drm_crtc *crtc;
10093 	struct drm_crtc_commit *commit;
10094 	long ret;
10095 
10096 	/*
10097 	 * Adding all modeset locks to acquire_ctx ensures that when
10098 	 * the framework releases it, the extra locks we are taking
10099 	 * here will get released too.
10100 	 */
10101 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10102 	if (ret)
10103 		return ret;
10104 
10105 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10106 		spin_lock(&crtc->commit_lock);
10107 		commit = list_first_entry_or_null(&crtc->commit_list,
10108 				struct drm_crtc_commit, commit_entry);
10109 		if (commit)
10110 			drm_crtc_commit_get(commit);
10111 		spin_unlock(&crtc->commit_lock);
10112 
10113 		if (!commit)
10114 			continue;
10115 
10116 		/*
10117 		 * Make sure all pending HW programming completed and
10118 		 * page flips done
10119 		 */
10120 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10121 
10122 		if (ret > 0)
10123 			ret = wait_for_completion_interruptible_timeout(
10124 					&commit->flip_done, 10*HZ);
10125 
10126 		if (ret == 0)
10127 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10128 				  "timed out\n", crtc->base.id, crtc->name);
10129 
10130 		drm_crtc_commit_put(commit);
10131 	}
10132 
10133 	return ret < 0 ? ret : 0;
10134 }
10135 
10136 static void get_freesync_config_for_crtc(
10137 	struct dm_crtc_state *new_crtc_state,
10138 	struct dm_connector_state *new_con_state)
10139 {
10140 	struct mod_freesync_config config = {0};
10141 	struct amdgpu_dm_connector *aconnector =
10142 			to_amdgpu_dm_connector(new_con_state->base.connector);
10143 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10144 	int vrefresh = drm_mode_vrefresh(mode);
10145 	bool fs_vid_mode = false;
10146 
10147 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10148 					vrefresh >= aconnector->min_vfreq &&
10149 					vrefresh <= aconnector->max_vfreq;
10150 
10151 	if (new_crtc_state->vrr_supported) {
10152 		new_crtc_state->stream->ignore_msa_timing_param = true;
10153 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10154 
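		/*
		 * The connector reports its VRR range in Hz; mod_freesync
		 * expects micro-Hz, hence the * 1000000 conversion below.
		 */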
10155 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10156 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10157 		config.vsif_supported = true;
10158 		config.btr = true;
10159 
10160 		if (fs_vid_mode) {
10161 			config.state = VRR_STATE_ACTIVE_FIXED;
10162 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10163 			goto out;
10164 		} else if (new_crtc_state->base.vrr_enabled) {
10165 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10166 		} else {
10167 			config.state = VRR_STATE_INACTIVE;
10168 		}
10169 	}
10170 out:
10171 	new_crtc_state->freesync_config = config;
10172 }
10173 
10174 static void reset_freesync_config_for_crtc(
10175 	struct dm_crtc_state *new_crtc_state)
10176 {
10177 	new_crtc_state->vrr_supported = false;
10178 
10179 	memset(&new_crtc_state->vrr_infopacket, 0,
10180 	       sizeof(new_crtc_state->vrr_infopacket));
10181 }
10182 
10183 static bool
10184 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10185 				 struct drm_crtc_state *new_crtc_state)
10186 {
10187 	const struct drm_display_mode *old_mode, *new_mode;
10188 
10189 	if (!old_crtc_state || !new_crtc_state)
10190 		return false;
10191 
10192 	old_mode = &old_crtc_state->mode;
10193 	new_mode = &new_crtc_state->mode;
10194 
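	/*
	 * Treat the timing as unchanged for freesync purposes when only the
	 * vertical blanking (vtotal/vsync position) differs while the sync
	 * width and all horizontal timing stay the same, i.e. a front porch
	 * only change as used by freesync video modes.
	 */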
10195 	if (old_mode->clock       == new_mode->clock &&
10196 	    old_mode->hdisplay    == new_mode->hdisplay &&
10197 	    old_mode->vdisplay    == new_mode->vdisplay &&
10198 	    old_mode->htotal      == new_mode->htotal &&
10199 	    old_mode->vtotal      != new_mode->vtotal &&
10200 	    old_mode->hsync_start == new_mode->hsync_start &&
10201 	    old_mode->vsync_start != new_mode->vsync_start &&
10202 	    old_mode->hsync_end   == new_mode->hsync_end &&
10203 	    old_mode->vsync_end   != new_mode->vsync_end &&
10204 	    old_mode->hskew       == new_mode->hskew &&
10205 	    old_mode->vscan       == new_mode->vscan &&
10206 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10207 	    (new_mode->vsync_end - new_mode->vsync_start))
10208 		return true;
10209 
10210 	return false;
10211 }
10212 
10213 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10214 	uint64_t num, den, res;
10215 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10216 
10217 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10218 
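	/*
	 * Fixed refresh rate in micro-Hz: pixel clock (kHz) scaled to Hz and
	 * then to micro-Hz, divided by the total pixels per frame.
	 */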
10219 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10220 	den = (unsigned long long)new_crtc_state->mode.htotal *
10221 	      (unsigned long long)new_crtc_state->mode.vtotal;
10222 
10223 	res = div_u64(num, den);
10224 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10225 }
10226 
10227 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10228 			 struct drm_atomic_state *state,
10229 			 struct drm_crtc *crtc,
10230 			 struct drm_crtc_state *old_crtc_state,
10231 			 struct drm_crtc_state *new_crtc_state,
10232 			 bool enable,
10233 			 bool *lock_and_validation_needed)
10234 {
10235 	struct dm_atomic_state *dm_state = NULL;
10236 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10237 	struct dc_stream_state *new_stream;
10238 	int ret = 0;
10239 
10240 	/*
10241 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10242 	 * update changed items
10243 	 */
10244 	struct amdgpu_crtc *acrtc = NULL;
10245 	struct amdgpu_dm_connector *aconnector = NULL;
10246 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10247 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10248 
10249 	new_stream = NULL;
10250 
10251 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10252 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10253 	acrtc = to_amdgpu_crtc(crtc);
10254 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10255 
10256 	/* TODO This hack should go away */
10257 	if (aconnector && enable) {
10258 		/* Make sure fake sink is created in plug-in scenario */
10259 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10260 							    &aconnector->base);
10261 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10262 							    &aconnector->base);
10263 
10264 		if (IS_ERR(drm_new_conn_state)) {
10265 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10266 			goto fail;
10267 		}
10268 
10269 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10270 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10271 
10272 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10273 			goto skip_modeset;
10274 
10275 		new_stream = create_validate_stream_for_sink(aconnector,
10276 							     &new_crtc_state->mode,
10277 							     dm_new_conn_state,
10278 							     dm_old_crtc_state->stream);
10279 
10280 		/*
10281 		 * we can have no stream on ACTION_SET if a display
10282 		 * was disconnected during S3, in this case it is not an
10283 		 * error, the OS will be updated after detection, and
10284 		 * will do the right thing on next atomic commit
10285 		 */
10286 
10287 		if (!new_stream) {
10288 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10289 					__func__, acrtc->base.base.id);
10290 			ret = -ENOMEM;
10291 			goto fail;
10292 		}
10293 
10294 		/*
10295 		 * TODO: Check VSDB bits to decide whether this should
10296 		 * be enabled or not.
10297 		 */
10298 		new_stream->triggered_crtc_reset.enabled =
10299 			dm->force_timing_sync;
10300 
10301 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10302 
10303 		ret = fill_hdr_info_packet(drm_new_conn_state,
10304 					   &new_stream->hdr_static_metadata);
10305 		if (ret)
10306 			goto fail;
10307 
10308 		/*
10309 		 * If we already removed the old stream from the context
10310 		 * (and set the new stream to NULL) then we can't reuse
10311 		 * the old stream even if the stream and scaling are unchanged.
10312 		 * We'll hit the BUG_ON and black screen.
10313 		 *
10314 		 * TODO: Refactor this function to allow this check to work
10315 		 * in all conditions.
10316 		 */
10317 		if (dm_new_crtc_state->stream &&
10318 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10319 			goto skip_modeset;
10320 
10321 		if (dm_new_crtc_state->stream &&
10322 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10323 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10324 			new_crtc_state->mode_changed = false;
10325 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10326 					 new_crtc_state->mode_changed);
10327 		}
10328 	}
10329 
10330 	/* mode_changed flag may get updated above, need to check again */
10331 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10332 		goto skip_modeset;
10333 
10334 	drm_dbg_state(state->dev,
10335 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10336 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10337 		"connectors_changed:%d\n",
10338 		acrtc->crtc_id,
10339 		new_crtc_state->enable,
10340 		new_crtc_state->active,
10341 		new_crtc_state->planes_changed,
10342 		new_crtc_state->mode_changed,
10343 		new_crtc_state->active_changed,
10344 		new_crtc_state->connectors_changed);
10345 
10346 	/* Remove stream for any changed/disabled CRTC */
10347 	if (!enable) {
10348 
10349 		if (!dm_old_crtc_state->stream)
10350 			goto skip_modeset;
10351 
10352 		if (dm_new_crtc_state->stream &&
10353 		    is_timing_unchanged_for_freesync(new_crtc_state,
10354 						     old_crtc_state)) {
10355 			new_crtc_state->mode_changed = false;
10356 			DRM_DEBUG_DRIVER(
10357 				"Mode change not required for front porch change, "
10358 				"setting mode_changed to %d",
10359 				new_crtc_state->mode_changed);
10360 
10361 			set_freesync_fixed_config(dm_new_crtc_state);
10362 
10363 			goto skip_modeset;
10364 		} else if (aconnector &&
10365 			   is_freesync_video_mode(&new_crtc_state->mode,
10366 						  aconnector)) {
10367 			struct drm_display_mode *high_mode;
10368 
10369 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10370 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10371 				set_freesync_fixed_config(dm_new_crtc_state);
10372 			}
10373 		}
10374 
10375 		ret = dm_atomic_get_state(state, &dm_state);
10376 		if (ret)
10377 			goto fail;
10378 
10379 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10380 				crtc->base.id);
10381 
10382 		/* i.e. reset mode */
10383 		if (dc_remove_stream_from_ctx(
10384 				dm->dc,
10385 				dm_state->context,
10386 				dm_old_crtc_state->stream) != DC_OK) {
10387 			ret = -EINVAL;
10388 			goto fail;
10389 		}
10390 
10391 		dc_stream_release(dm_old_crtc_state->stream);
10392 		dm_new_crtc_state->stream = NULL;
10393 
10394 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10395 
10396 		*lock_and_validation_needed = true;
10397 
10398 	} else {/* Add stream for any updated/enabled CRTC */
10399 		/*
10400 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
10401 		 * added MST connectors are not found in the existing crtc_state in chained mode.
10402 		 * TODO: need to dig out the root cause of that.
10403 		 */
10404 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10405 			goto skip_modeset;
10406 
10407 		if (modereset_required(new_crtc_state))
10408 			goto skip_modeset;
10409 
10410 		if (modeset_required(new_crtc_state, new_stream,
10411 				     dm_old_crtc_state->stream)) {
10412 
10413 			WARN_ON(dm_new_crtc_state->stream);
10414 
10415 			ret = dm_atomic_get_state(state, &dm_state);
10416 			if (ret)
10417 				goto fail;
10418 
10419 			dm_new_crtc_state->stream = new_stream;
10420 
10421 			dc_stream_retain(new_stream);
10422 
10423 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10424 					 crtc->base.id);
10425 
10426 			if (dc_add_stream_to_ctx(
10427 					dm->dc,
10428 					dm_state->context,
10429 					dm_new_crtc_state->stream) != DC_OK) {
10430 				ret = -EINVAL;
10431 				goto fail;
10432 			}
10433 
10434 			*lock_and_validation_needed = true;
10435 		}
10436 	}
10437 
10438 skip_modeset:
10439 	/* Release extra reference */
10440 	if (new_stream)
10441 		dc_stream_release(new_stream);
10442 
10443 	/*
10444 	 * We want to do dc stream updates that do not require a
10445 	 * full modeset below.
10446 	 */
10447 	if (!(enable && aconnector && new_crtc_state->active))
10448 		return 0;
10449 	/*
10450 	 * Given above conditions, the dc state cannot be NULL because:
10451 	 * 1. We're in the process of enabling CRTCs (just been added
10452 	 *    to the dc context, or already is on the context)
10453 	 * 2. Has a valid connector attached, and
10454 	 * 3. Is currently active and enabled.
10455 	 * => The dc stream state currently exists.
10456 	 */
10457 	BUG_ON(dm_new_crtc_state->stream == NULL);
10458 
10459 	/* Scaling or underscan settings */
10460 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10461 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10462 		update_stream_scaling_settings(
10463 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10464 
10465 	/* ABM settings */
10466 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10467 
10468 	/*
10469 	 * Color management settings. We also update color properties
10470 	 * when a modeset is needed, to ensure it gets reprogrammed.
10471 	 */
10472 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10473 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10474 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10475 		if (ret)
10476 			goto fail;
10477 	}
10478 
10479 	/* Update Freesync settings. */
10480 	get_freesync_config_for_crtc(dm_new_crtc_state,
10481 				     dm_new_conn_state);
10482 
10483 	return ret;
10484 
10485 fail:
10486 	if (new_stream)
10487 		dc_stream_release(new_stream);
10488 	return ret;
10489 }
10490 
10491 static bool should_reset_plane(struct drm_atomic_state *state,
10492 			       struct drm_plane *plane,
10493 			       struct drm_plane_state *old_plane_state,
10494 			       struct drm_plane_state *new_plane_state)
10495 {
10496 	struct drm_plane *other;
10497 	struct drm_plane_state *old_other_state, *new_other_state;
10498 	struct drm_crtc_state *new_crtc_state;
10499 	int i;
10500 
10501 	/*
10502 	 * TODO: Remove this hack once the checks below are sufficient
10503 	 * to determine when we need to reset all the planes on
10504 	 * the stream.
10505 	 */
10506 	if (state->allow_modeset)
10507 		return true;
10508 
10509 	/* Exit early if we know that we're adding or removing the plane. */
10510 	if (old_plane_state->crtc != new_plane_state->crtc)
10511 		return true;
10512 
10513 	/* old crtc == new_crtc == NULL, plane not in context. */
10514 	if (!new_plane_state->crtc)
10515 		return false;
10516 
10517 	new_crtc_state =
10518 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10519 
10520 	if (!new_crtc_state)
10521 		return true;
10522 
10523 	/* CRTC Degamma changes currently require us to recreate planes. */
10524 	if (new_crtc_state->color_mgmt_changed)
10525 		return true;
10526 
10527 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10528 		return true;
10529 
10530 	/*
10531 	 * If there are any new primary or overlay planes being added or
10532 	 * removed then the z-order can potentially change. To ensure
10533 	 * correct z-order and pipe acquisition the current DC architecture
10534 	 * requires us to remove and recreate all existing planes.
10535 	 *
10536 	 * TODO: Come up with a more elegant solution for this.
10537 	 */
10538 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10539 		struct amdgpu_framebuffer *old_afb, *new_afb;
10540 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10541 			continue;
10542 
10543 		if (old_other_state->crtc != new_plane_state->crtc &&
10544 		    new_other_state->crtc != new_plane_state->crtc)
10545 			continue;
10546 
10547 		if (old_other_state->crtc != new_other_state->crtc)
10548 			return true;
10549 
10550 		/* Src/dst size and scaling updates. */
10551 		if (old_other_state->src_w != new_other_state->src_w ||
10552 		    old_other_state->src_h != new_other_state->src_h ||
10553 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10554 		    old_other_state->crtc_h != new_other_state->crtc_h)
10555 			return true;
10556 
10557 		/* Rotation / mirroring updates. */
10558 		if (old_other_state->rotation != new_other_state->rotation)
10559 			return true;
10560 
10561 		/* Blending updates. */
10562 		if (old_other_state->pixel_blend_mode !=
10563 		    new_other_state->pixel_blend_mode)
10564 			return true;
10565 
10566 		/* Alpha updates. */
10567 		if (old_other_state->alpha != new_other_state->alpha)
10568 			return true;
10569 
10570 		/* Colorspace changes. */
10571 		if (old_other_state->color_range != new_other_state->color_range ||
10572 		    old_other_state->color_encoding != new_other_state->color_encoding)
10573 			return true;
10574 
10575 		/* Framebuffer checks fall at the end. */
10576 		if (!old_other_state->fb || !new_other_state->fb)
10577 			continue;
10578 
10579 		/* Pixel format changes can require bandwidth updates. */
10580 		if (old_other_state->fb->format != new_other_state->fb->format)
10581 			return true;
10582 
10583 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10584 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10585 
10586 		/* Tiling and DCC changes also require bandwidth updates. */
10587 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10588 		    old_afb->base.modifier != new_afb->base.modifier)
10589 			return true;
10590 	}
10591 
10592 	return false;
10593 }
10594 
10595 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10596 			      struct drm_plane_state *new_plane_state,
10597 			      struct drm_framebuffer *fb)
10598 {
10599 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10600 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10601 	unsigned int pitch;
10602 	bool linear;
10603 
10604 	if (fb->width > new_acrtc->max_cursor_width ||
10605 	    fb->height > new_acrtc->max_cursor_height) {
10606 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10607 				 new_plane_state->fb->width,
10608 				 new_plane_state->fb->height);
10609 		return -EINVAL;
10610 	}
10611 	if (new_plane_state->src_w != fb->width << 16 ||
10612 	    new_plane_state->src_h != fb->height << 16) {
10613 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10614 		return -EINVAL;
10615 	}
10616 
10617 	/* Pitch in pixels */
10618 	pitch = fb->pitches[0] / fb->format->cpp[0];
10619 
10620 	if (fb->width != pitch) {
10621 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10622 				 fb->width, pitch);
10623 		return -EINVAL;
10624 	}
10625 
10626 	switch (pitch) {
10627 	case 64:
10628 	case 128:
10629 	case 256:
10630 		/* FB pitch is supported by cursor plane */
10631 		break;
10632 	default:
10633 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10634 		return -EINVAL;
10635 	}
10636 
10637 	/* Core DRM takes care of checking FB modifiers, so we only need to
10638 	 * check tiling flags when the FB doesn't have a modifier. */
10639 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10640 		if (adev->family < AMDGPU_FAMILY_AI) {
10641 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10642 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10643 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10644 		} else {
10645 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10646 		}
10647 		if (!linear) {
10648 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10649 			return -EINVAL;
10650 		}
10651 	}
10652 
10653 	return 0;
10654 }
10655 
10656 static int dm_update_plane_state(struct dc *dc,
10657 				 struct drm_atomic_state *state,
10658 				 struct drm_plane *plane,
10659 				 struct drm_plane_state *old_plane_state,
10660 				 struct drm_plane_state *new_plane_state,
10661 				 bool enable,
10662 				 bool *lock_and_validation_needed)
10663 {
10664 
10665 	struct dm_atomic_state *dm_state = NULL;
10666 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10667 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10668 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10669 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10670 	struct amdgpu_crtc *new_acrtc;
10671 	bool needs_reset;
10672 	int ret = 0;
10673 
10674 
10675 	new_plane_crtc = new_plane_state->crtc;
10676 	old_plane_crtc = old_plane_state->crtc;
10677 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10678 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10679 
10680 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10681 		if (!enable || !new_plane_crtc ||
10682 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10683 			return 0;
10684 
10685 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10686 
10687 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10688 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10689 			return -EINVAL;
10690 		}
10691 
10692 		if (new_plane_state->fb) {
10693 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10694 						 new_plane_state->fb);
10695 			if (ret)
10696 				return ret;
10697 		}
10698 
10699 		return 0;
10700 	}
10701 
10702 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10703 					 new_plane_state);
10704 
10705 	/* Remove any changed/removed planes */
10706 	if (!enable) {
10707 		if (!needs_reset)
10708 			return 0;
10709 
10710 		if (!old_plane_crtc)
10711 			return 0;
10712 
10713 		old_crtc_state = drm_atomic_get_old_crtc_state(
10714 				state, old_plane_crtc);
10715 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10716 
10717 		if (!dm_old_crtc_state->stream)
10718 			return 0;
10719 
10720 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10721 				plane->base.id, old_plane_crtc->base.id);
10722 
10723 		ret = dm_atomic_get_state(state, &dm_state);
10724 		if (ret)
10725 			return ret;
10726 
10727 		if (!dc_remove_plane_from_context(
10728 				dc,
10729 				dm_old_crtc_state->stream,
10730 				dm_old_plane_state->dc_state,
10731 				dm_state->context)) {
10732 
10733 			return -EINVAL;
10734 		}
10735 
10736 
10737 		dc_plane_state_release(dm_old_plane_state->dc_state);
10738 		dm_new_plane_state->dc_state = NULL;
10739 
10740 		*lock_and_validation_needed = true;
10741 
10742 	} else { /* Add new planes */
10743 		struct dc_plane_state *dc_new_plane_state;
10744 
10745 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10746 			return 0;
10747 
10748 		if (!new_plane_crtc)
10749 			return 0;
10750 
10751 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10752 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10753 
10754 		if (!dm_new_crtc_state->stream)
10755 			return 0;
10756 
10757 		if (!needs_reset)
10758 			return 0;
10759 
10760 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10761 		if (ret)
10762 			return ret;
10763 
10764 		WARN_ON(dm_new_plane_state->dc_state);
10765 
10766 		dc_new_plane_state = dc_create_plane_state(dc);
10767 		if (!dc_new_plane_state)
10768 			return -ENOMEM;
10769 
10770 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10771 				 plane->base.id, new_plane_crtc->base.id);
10772 
10773 		ret = fill_dc_plane_attributes(
10774 			drm_to_adev(new_plane_crtc->dev),
10775 			dc_new_plane_state,
10776 			new_plane_state,
10777 			new_crtc_state);
10778 		if (ret) {
10779 			dc_plane_state_release(dc_new_plane_state);
10780 			return ret;
10781 		}
10782 
10783 		ret = dm_atomic_get_state(state, &dm_state);
10784 		if (ret) {
10785 			dc_plane_state_release(dc_new_plane_state);
10786 			return ret;
10787 		}
10788 
10789 		/*
10790 		 * Any atomic check errors that occur after this will
10791 		 * not need a release. The plane state will be attached
10792 		 * to the stream, and therefore part of the atomic
10793 		 * state. It'll be released when the atomic state is
10794 		 * cleaned.
10795 		 */
10796 		if (!dc_add_plane_to_context(
10797 				dc,
10798 				dm_new_crtc_state->stream,
10799 				dc_new_plane_state,
10800 				dm_state->context)) {
10801 
10802 			dc_plane_state_release(dc_new_plane_state);
10803 			return -EINVAL;
10804 		}
10805 
10806 		dm_new_plane_state->dc_state = dc_new_plane_state;
10807 
10808 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10809 
10810 		/* Tell DC to do a full surface update every time there
10811 		 * is a plane change. Inefficient, but works for now.
10812 		 */
10813 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10814 
10815 		*lock_and_validation_needed = true;
10816 	}
10817 
10818 
10819 	return ret;
10820 }
10821 
10822 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10823 				       int *src_w, int *src_h)
10824 {
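	/* src_w/src_h are 16.16 fixed point; convert to whole pixels */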
10825 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10826 	case DRM_MODE_ROTATE_90:
10827 	case DRM_MODE_ROTATE_270:
10828 		*src_w = plane_state->src_h >> 16;
10829 		*src_h = plane_state->src_w >> 16;
10830 		break;
10831 	case DRM_MODE_ROTATE_0:
10832 	case DRM_MODE_ROTATE_180:
10833 	default:
10834 		*src_w = plane_state->src_w >> 16;
10835 		*src_h = plane_state->src_h >> 16;
10836 		break;
10837 	}
10838 }
10839 
10840 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10841 				struct drm_crtc *crtc,
10842 				struct drm_crtc_state *new_crtc_state)
10843 {
10844 	struct drm_plane *cursor = crtc->cursor, *underlying;
10845 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10846 	int i;
10847 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10848 	int cursor_src_w, cursor_src_h;
10849 	int underlying_src_w, underlying_src_h;
10850 
10851 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10852 	 * cursor per pipe but it's going to inherit the scaling and
10853 	 * positioning from the underlying pipe. Check the cursor plane's
10854 	 * blending properties match the underlying planes'. */
10855 
10856 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10857 	if (!new_cursor_state || !new_cursor_state->fb)
10858 		return 0;
10860 
10861 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
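	/* Integer scale factors in units of 1/1000 for the comparison below */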
10862 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10863 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10864 
10865 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10866 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10867 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10868 			continue;
10869 
10870 		/* Ignore disabled planes */
10871 		if (!new_underlying_state->fb)
10872 			continue;
10873 
10874 		dm_get_oriented_plane_size(new_underlying_state,
10875 					   &underlying_src_w, &underlying_src_h);
10876 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10877 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10878 
10879 		if (cursor_scale_w != underlying_scale_w ||
10880 		    cursor_scale_h != underlying_scale_h) {
10881 			drm_dbg_atomic(crtc->dev,
10882 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10883 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10884 			return -EINVAL;
10885 		}
10886 
10887 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10888 		if (new_underlying_state->crtc_x <= 0 &&
10889 		    new_underlying_state->crtc_y <= 0 &&
10890 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10891 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10892 			break;
10893 	}
10894 
10895 	return 0;
10896 }
10897 
10898 #if defined(CONFIG_DRM_AMD_DC_DCN)
10899 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10900 {
10901 	struct drm_connector *connector;
10902 	struct drm_connector_state *conn_state, *old_conn_state;
10903 	struct amdgpu_dm_connector *aconnector = NULL;
10904 	int i;
10905 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10906 		if (!conn_state->crtc)
10907 			conn_state = old_conn_state;
10908 
10909 		if (conn_state->crtc != crtc)
10910 			continue;
10911 
10912 		aconnector = to_amdgpu_dm_connector(connector);
10913 		if (!aconnector->port || !aconnector->mst_port)
10914 			aconnector = NULL;
10915 		else
10916 			break;
10917 	}
10918 
10919 	if (!aconnector)
10920 		return 0;
10921 
10922 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10923 }
10924 #endif
10925 
10926 /**
10927  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10928  * @dev: The DRM device
10929  * @state: The atomic state to validate
10930  *
10931  * Validate that the given atomic state is programmable by DC into hardware.
10932  * This involves constructing a &struct dc_state reflecting the new hardware
10933  * state we wish to commit, then querying DC to see if it is programmable. It's
10934  * important not to modify the existing DC state. Otherwise, atomic_check
10935  * may unexpectedly commit hardware changes.
10936  *
10937  * When validating the DC state, it's important that the right locks are
10938  * acquired. For full updates case which removes/adds/updates streams on one
10939  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10940  * that any such full update commit will wait for completion of any outstanding
10941  * flip using DRMs synchronization events.
10942  *
10943  * Note that DM adds the affected connectors for all CRTCs in state, when that
10944  * might not seem necessary. This is because DC stream creation requires the
10945  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10946  * be possible but non-trivial - a possible TODO item.
10947  *
10948  * Return: 0 on success, or a negative error code if validation failed.
10949  */
10950 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10951 				  struct drm_atomic_state *state)
10952 {
10953 	struct amdgpu_device *adev = drm_to_adev(dev);
10954 	struct dm_atomic_state *dm_state = NULL;
10955 	struct dc *dc = adev->dm.dc;
10956 	struct drm_connector *connector;
10957 	struct drm_connector_state *old_con_state, *new_con_state;
10958 	struct drm_crtc *crtc;
10959 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10960 	struct drm_plane *plane;
10961 	struct drm_plane_state *old_plane_state, *new_plane_state;
10962 	enum dc_status status;
10963 	int ret, i;
10964 	bool lock_and_validation_needed = false;
10965 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10966 #if defined(CONFIG_DRM_AMD_DC_DCN)
10967 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10968 	struct drm_dp_mst_topology_state *mst_state;
10969 	struct drm_dp_mst_topology_mgr *mgr;
10970 #endif
10971 
10972 	trace_amdgpu_dm_atomic_check_begin(state);
10973 
10974 	ret = drm_atomic_helper_check_modeset(dev, state);
10975 	if (ret) {
10976 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10977 		goto fail;
10978 	}
10979 
10980 	/* Check connector changes */
10981 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10982 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10983 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10984 
10985 		/* Skip connectors that are disabled or part of modeset already. */
10986 		if (!old_con_state->crtc && !new_con_state->crtc)
10987 			continue;
10988 
10989 		if (!new_con_state->crtc)
10990 			continue;
10991 
10992 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10993 		if (IS_ERR(new_crtc_state)) {
10994 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10995 			ret = PTR_ERR(new_crtc_state);
10996 			goto fail;
10997 		}
10998 
10999 		if (dm_old_con_state->abm_level !=
11000 		    dm_new_con_state->abm_level)
11001 			new_crtc_state->connectors_changed = true;
11002 	}
11003 
11004 #if defined(CONFIG_DRM_AMD_DC_DCN)
11005 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11006 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11007 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11008 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11009 				if (ret) {
11010 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11011 					goto fail;
11012 				}
11013 			}
11014 		}
11015 		pre_validate_dsc(state, &dm_state, vars);
11016 	}
11017 #endif
11018 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11019 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11020 
11021 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11022 		    !new_crtc_state->color_mgmt_changed &&
11023 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11024 			dm_old_crtc_state->dsc_force_changed == false)
11025 			continue;
11026 
11027 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11028 		if (ret) {
11029 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11030 			goto fail;
11031 		}
11032 
11033 		if (!new_crtc_state->enable)
11034 			continue;
11035 
11036 		ret = drm_atomic_add_affected_connectors(state, crtc);
11037 		if (ret) {
11038 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11039 			goto fail;
11040 		}
11041 
11042 		ret = drm_atomic_add_affected_planes(state, crtc);
11043 		if (ret) {
11044 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11045 			goto fail;
11046 		}
11047 
11048 		if (dm_old_crtc_state->dsc_force_changed)
11049 			new_crtc_state->mode_changed = true;
11050 	}
11051 
11052 	/*
11053 	 * Add all primary and overlay planes on the CRTC to the state
11054 	 * whenever a plane is enabled to maintain correct z-ordering
11055 	 * and to enable fast surface updates.
11056 	 */
11057 	drm_for_each_crtc(crtc, dev) {
11058 		bool modified = false;
11059 
11060 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11061 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11062 				continue;
11063 
11064 			if (new_plane_state->crtc == crtc ||
11065 			    old_plane_state->crtc == crtc) {
11066 				modified = true;
11067 				break;
11068 			}
11069 		}
11070 
11071 		if (!modified)
11072 			continue;
11073 
11074 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11075 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11076 				continue;
11077 
11078 			new_plane_state =
11079 				drm_atomic_get_plane_state(state, plane);
11080 
11081 			if (IS_ERR(new_plane_state)) {
11082 				ret = PTR_ERR(new_plane_state);
11083 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11084 				goto fail;
11085 			}
11086 		}
11087 	}
11088 
11089 	/* Remove existing planes if they are modified */
11090 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11091 		ret = dm_update_plane_state(dc, state, plane,
11092 					    old_plane_state,
11093 					    new_plane_state,
11094 					    false,
11095 					    &lock_and_validation_needed);
11096 		if (ret) {
11097 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11098 			goto fail;
11099 		}
11100 	}
11101 
11102 	/* Disable all crtcs which require disable */
11103 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11104 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11105 					   old_crtc_state,
11106 					   new_crtc_state,
11107 					   false,
11108 					   &lock_and_validation_needed);
11109 		if (ret) {
11110 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11111 			goto fail;
11112 		}
11113 	}
11114 
11115 	/* Enable all crtcs which require enable */
11116 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11117 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11118 					   old_crtc_state,
11119 					   new_crtc_state,
11120 					   true,
11121 					   &lock_and_validation_needed);
11122 		if (ret) {
11123 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11124 			goto fail;
11125 		}
11126 	}
11127 
11128 	/* Add new/modified planes */
11129 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11130 		ret = dm_update_plane_state(dc, state, plane,
11131 					    old_plane_state,
11132 					    new_plane_state,
11133 					    true,
11134 					    &lock_and_validation_needed);
11135 		if (ret) {
11136 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11137 			goto fail;
11138 		}
11139 	}
11140 
11141 	/* Run this here since we want to validate the streams we created */
11142 	ret = drm_atomic_helper_check_planes(dev, state);
11143 	if (ret) {
11144 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11145 		goto fail;
11146 	}
11147 
11148 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11149 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11150 		if (dm_new_crtc_state->mpo_requested)
11151 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11152 	}
11153 
11154 	/* Check cursor planes scaling */
11155 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11156 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11157 		if (ret) {
11158 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11159 			goto fail;
11160 		}
11161 	}
11162 
11163 	if (state->legacy_cursor_update) {
11164 		/*
11165 		 * This is a fast cursor update coming from the plane update
11166 		 * helper, check if it can be done asynchronously for better
11167 		 * performance.
11168 		 */
11169 		state->async_update =
11170 			!drm_atomic_helper_async_check(dev, state);
11171 
11172 		/*
11173 		 * Skip the remaining global validation if this is an async
11174 		 * update. Cursor updates can be done without affecting
11175 		 * state or bandwidth calcs and this avoids the performance
11176 		 * penalty of locking the private state object and
11177 		 * allocating a new dc_state.
11178 		 */
11179 		if (state->async_update)
11180 			return 0;
11181 	}
11182 
11183 	/* Check scaling and underscan changes */
11184 	/* TODO: Removed scaling changes validation due to inability to commit
11185 	 * new stream into context without causing a full reset. Need to
11186 	 * decide how to handle.
11187 	 */
11188 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11189 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11190 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11191 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11192 
11193 		/* Skip any modesets/resets */
11194 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11195 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11196 			continue;
11197 
11198 		/* Skip anything that is not a scaling or underscan change */
11199 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11200 			continue;
11201 
11202 		lock_and_validation_needed = true;
11203 	}
11204 
11205 #if defined(CONFIG_DRM_AMD_DC_DCN)
11206 	/* set the slot info for each mst_state based on the link encoding format */
11207 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11208 		struct amdgpu_dm_connector *aconnector;
11209 		struct drm_connector *connector;
11210 		struct drm_connector_list_iter iter;
11211 		u8 link_coding_cap;
11212 
11213 		if (!mgr->mst_state)
11214 			continue;
11215 
11216 		drm_connector_list_iter_begin(dev, &iter);
11217 		drm_for_each_connector_iter(connector, &iter) {
11218 			int id = connector->index;
11219 
11220 			if (id == mst_state->mgr->conn_base_id) {
11221 				aconnector = to_amdgpu_dm_connector(connector);
11222 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11223 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11224 
11225 				break;
11226 			}
11227 		}
11228 		drm_connector_list_iter_end(&iter);
11229 
11230 	}
11231 #endif
11232 	/*
11233 	 * Streams and planes are reset when there are changes that affect
11234 	 * bandwidth. Anything that affects bandwidth needs to go through
11235 	 * DC global validation to ensure that the configuration can be applied
11236 	 * to hardware.
11237 	 *
11238 	 * We have to currently stall out here in atomic_check for outstanding
11239 	 * commits to finish in this case because our IRQ handlers reference
11240 	 * DRM state directly - we can end up disabling interrupts too early
11241 	 * if we don't.
11242 	 *
11243 	 * TODO: Remove this stall and drop DM state private objects.
11244 	 */
11245 	if (lock_and_validation_needed) {
11246 		ret = dm_atomic_get_state(state, &dm_state);
11247 		if (ret) {
11248 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11249 			goto fail;
11250 		}
11251 
11252 		ret = do_aquire_global_lock(dev, state);
11253 		if (ret) {
11254 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11255 			goto fail;
11256 		}
11257 
11258 #if defined(CONFIG_DRM_AMD_DC_DCN)
11259 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11260 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
11261 			goto fail;
11262 		}
11263 
11264 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11265 		if (ret) {
11266 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11267 			goto fail;
11268 		}
11269 #endif
11270 
11271 		/*
11272 		 * Perform validation of MST topology in the state:
11273 		 * We need to perform MST atomic check before calling
11274 		 * dc_validate_global_state(), or there is a chance
11275 		 * to get stuck in an infinite loop and hang eventually.
11276 		 */
11277 		ret = drm_dp_mst_atomic_check(state);
11278 		if (ret) {
11279 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11280 			goto fail;
11281 		}
11282 		status = dc_validate_global_state(dc, dm_state->context, true);
11283 		if (status != DC_OK) {
11284 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11285 				       dc_status_to_str(status), status);
11286 			ret = -EINVAL;
11287 			goto fail;
11288 		}
11289 	} else {
11290 		/*
11291 		 * The commit is a fast update. Fast updates shouldn't change
11292 		 * the DC context, affect global validation, and can have their
11293 		 * commit work done in parallel with other commits not touching
11294 		 * the same resource. If we have a new DC context as part of
11295 		 * the DM atomic state from validation we need to free it and
11296 		 * retain the existing one instead.
11297 		 *
11298 		 * Furthermore, since the DM atomic state only contains the DC
11299 		 * context and can safely be annulled, we can free the state
11300 		 * and clear the associated private object now to free
11301 		 * some memory and avoid a possible use-after-free later.
11302 		 */
11303 
11304 		for (i = 0; i < state->num_private_objs; i++) {
11305 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11306 
11307 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11308 				int j = state->num_private_objs-1;
11309 
11310 				dm_atomic_destroy_state(obj,
11311 						state->private_objs[i].state);
11312 
11313 				/* If i is not at the end of the array then the
11314 				 * last element needs to be moved to where i was
11315 				 * before the array can safely be truncated.
11316 				 */
11317 				if (i != j)
11318 					state->private_objs[i] =
11319 						state->private_objs[j];
11320 
11321 				state->private_objs[j].ptr = NULL;
11322 				state->private_objs[j].state = NULL;
11323 				state->private_objs[j].old_state = NULL;
11324 				state->private_objs[j].new_state = NULL;
11325 
11326 				state->num_private_objs = j;
11327 				break;
11328 			}
11329 		}
11330 	}
11331 
11332 	/* Store the overall update type for use later in atomic check. */
11333 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11334 		struct dm_crtc_state *dm_new_crtc_state =
11335 			to_dm_crtc_state(new_crtc_state);
11336 
11337 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11338 							 UPDATE_TYPE_FULL :
11339 							 UPDATE_TYPE_FAST;
11340 	}
11341 
11342 	/* Must be success */
11343 	WARN_ON(ret);
11344 
11345 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11346 
11347 	return ret;
11348 
11349 fail:
11350 	if (ret == -EDEADLK)
11351 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11352 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11353 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11354 	else
11355 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11356 
11357 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11358 
11359 	return ret;
11360 }
11361 
11362 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11363 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11364 {
11365 	uint8_t dpcd_data;
11366 	bool capable = false;
11367 
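	/*
	 * Read DP_DOWN_STREAM_PORT_COUNT and check the MSA_TIMING_PAR_IGNORED
	 * bit: a sink that can ignore MSA timing parameters is a prerequisite
	 * for driving it with a variable refresh rate.
	 */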
11368 	if (amdgpu_dm_connector->dc_link &&
11369 		dm_helpers_dp_read_dpcd(
11370 				NULL,
11371 				amdgpu_dm_connector->dc_link,
11372 				DP_DOWN_STREAM_PORT_COUNT,
11373 				&dpcd_data,
11374 				sizeof(dpcd_data))) {
11375 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11376 	}
11377 
11378 	return capable;
11379 }
11380 
11381 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11382 		unsigned int offset,
11383 		unsigned int total_length,
11384 		uint8_t *data,
11385 		unsigned int length,
11386 		struct amdgpu_hdmi_vsdb_info *vsdb)
11387 {
11388 	bool res;
11389 	union dmub_rb_cmd cmd;
11390 	struct dmub_cmd_send_edid_cea *input;
11391 	struct dmub_cmd_edid_cea_output *output;
11392 
11393 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11394 		return false;
11395 
11396 	memset(&cmd, 0, sizeof(cmd));
11397 
11398 	input = &cmd.edid_cea.data.input;
11399 
11400 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11401 	cmd.edid_cea.header.sub_type = 0;
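	/* Payload size excludes the DMUB command header itself */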
11402 	cmd.edid_cea.header.payload_bytes =
11403 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11404 	input->offset = offset;
11405 	input->length = length;
11406 	input->cea_total_length = total_length;
11407 	memcpy(input->payload, data, length);
11408 
11409 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11410 	if (!res) {
11411 		DRM_ERROR("EDID CEA parser failed\n");
11412 		return false;
11413 	}
11414 
11415 	output = &cmd.edid_cea.data.output;
11416 
11417 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11418 		if (!output->ack.success) {
11419 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11420 					output->ack.offset);
11421 		}
11422 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11423 		if (!output->amd_vsdb.vsdb_found)
11424 			return false;
11425 
11426 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11427 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11428 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11429 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11430 	} else {
11431 		DRM_WARN("Unknown EDID CEA parser results\n");
11432 		return false;
11433 	}
11434 
11435 	return true;
11436 }
11437 
11438 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11439 		uint8_t *edid_ext, int len,
11440 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11441 {
11442 	int i;
11443 
11444 	/* send extension block to DMCU for parsing */
11445 	for (i = 0; i < len; i += 8) {
11446 		bool res;
11447 		int offset;
11448 
11449 		/* send 8 bytes at a time */
11450 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11451 			return false;
11452 
11453 		if (i+8 == len) {
11454 			/* EDID block fully sent, expect result */
11455 			int version, min_rate, max_rate;
11456 
11457 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11458 			if (res) {
11459 				/* amd vsdb found */
11460 				vsdb_info->freesync_supported = 1;
11461 				vsdb_info->amd_vsdb_version = version;
11462 				vsdb_info->min_refresh_rate_hz = min_rate;
11463 				vsdb_info->max_refresh_rate_hz = max_rate;
11464 				return true;
11465 			}
11466 			/* not amd vsdb */
11467 			return false;
11468 		}
11469 
11470 		/* check for ack*/
11471 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11472 		if (!res)
11473 			return false;
11474 	}
11475 
11476 	return false;
11477 }
11478 
11479 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11480 		uint8_t *edid_ext, int len,
11481 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11482 {
11483 	int i;
11484 
11485 	/* send extension block to DMUB for parsing */
11486 	for (i = 0; i < len; i += 8) {
11487 		/* send 8 bytes at a time */
11488 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11489 			return false;
11490 	}
11491 
11492 	return vsdb_info->freesync_supported;
11493 }
11494 
11495 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11496 		uint8_t *edid_ext, int len,
11497 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11498 {
11499 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11500 
11501 	if (adev->dm.dmub_srv)
11502 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11503 	else
11504 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11505 }
11506 
11507 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11508 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11509 {
11510 	uint8_t *edid_ext = NULL;
11511 	int i;
11512 	bool valid_vsdb_found = false;
11513 
11514 	/*----- drm_find_cea_extension() -----*/
11515 	/* No EDID or EDID extensions */
11516 	if (edid == NULL || edid->extensions == 0)
11517 		return -ENODEV;
11518 
11519 	/* Find CEA extension */
11520 	for (i = 0; i < edid->extensions; i++) {
11521 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11522 		if (edid_ext[0] == CEA_EXT)
11523 			break;
11524 	}
11525 
11526 	if (i == edid->extensions)
11527 		return -ENODEV;
11528 
11529 	/*----- cea_db_offsets() -----*/
11530 	if (edid_ext[0] != CEA_EXT)
11531 		return -ENODEV;
11532 
11533 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11534 
11535 	return valid_vsdb_found ? i : -ENODEV;
11536 }
11537 
11538 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11539 					struct edid *edid)
11540 {
11541 	int i = 0;
11542 	struct detailed_timing *timing;
11543 	struct detailed_non_pixel *data;
11544 	struct detailed_data_monitor_range *range;
11545 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11546 			to_amdgpu_dm_connector(connector);
11547 	struct dm_connector_state *dm_con_state = NULL;
11548 	struct dc_sink *sink;
11549 
11550 	struct drm_device *dev = connector->dev;
11551 	struct amdgpu_device *adev = drm_to_adev(dev);
11552 	bool freesync_capable = false;
11553 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11554 
11555 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
11557 		goto update;
11558 	}
11559 
11560 	sink = amdgpu_dm_connector->dc_sink ?
11561 		amdgpu_dm_connector->dc_sink :
11562 		amdgpu_dm_connector->dc_em_sink;
11563 
11564 	if (!edid || !sink) {
11565 		dm_con_state = to_dm_connector_state(connector->state);
11566 
11567 		amdgpu_dm_connector->min_vfreq = 0;
11568 		amdgpu_dm_connector->max_vfreq = 0;
11569 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11570 		connector->display_info.monitor_range.min_vfreq = 0;
11571 		connector->display_info.monitor_range.max_vfreq = 0;
11572 		freesync_capable = false;
11573 
11574 		goto update;
11575 	}
11576 
11577 	dm_con_state = to_dm_connector_state(connector->state);
11578 
11579 	if (!adev->dm.freesync_module)
		goto update;

11583 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11584 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11585 		bool edid_check_required = false;
11586 
11587 		if (edid) {
11588 			edid_check_required = is_dp_capable_without_timing_msa(
11589 						adev->dm.dc,
11590 						amdgpu_dm_connector);
11591 		}
11592 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
11595 			for (i = 0; i < 4; i++) {
11596 
11597 				timing	= &edid->detailed_timings[i];
11598 				data	= &timing->data.other_data;
11599 				range	= &data->data.range;
11600 				/*
11601 				 * Check if monitor has continuous frequency mode
11602 				 */
11603 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11604 					continue;
				/*
				 * Check for range limits only; if flags == 1
				 * then no additional timing formula is
				 * provided. Default GTF, GTF secondary curve
				 * and CVT are not supported.
				 */
11611 				if (range->flags != 1)
11612 					continue;
11613 
11614 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11615 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11616 				amdgpu_dm_connector->pixel_clock_mhz =
11617 					range->pixel_clock_mhz * 10;
11618 
11619 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11620 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11621 
11622 				break;
11623 			}
11624 
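			/*
			 * Only report FreeSync when the advertised range
			 * spans more than 10 Hz.
			 */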
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11630 		}
11631 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11632 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11633 		if (i >= 0 && vsdb_info.freesync_supported) {
11637 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11638 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11639 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11640 				freesync_capable = true;
11641 
11642 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11643 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11644 		}
11645 	}
11646 
11647 update:
11648 	if (dm_con_state)
11649 		dm_con_state->freesync_capable = freesync_capable;
11650 
11651 	if (connector->vrr_capable_property)
11652 		drm_connector_set_vrr_capable_property(connector,
11653 						       freesync_capable);
11654 }
11655 
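/*
 * Apply the force_timing_sync setting to every stream in the current DC
 * state and trigger a resynchronization of the CRTC timing generators.
 */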
11656 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11657 {
11658 	struct amdgpu_device *adev = drm_to_adev(dev);
11659 	struct dc *dc = adev->dm.dc;
11660 	int i;
11661 
11662 	mutex_lock(&adev->dm.dc_lock);
11663 	if (dc->current_state) {
11664 		for (i = 0; i < dc->current_state->stream_count; ++i)
11665 			dc->current_state->streams[i]
11666 				->triggered_crtc_reset.enabled =
11667 				adev->dm.force_timing_sync;
11668 
11669 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11670 		dc_trigger_sync(dc, dc->current_state);
11671 	}
11672 	mutex_unlock(&adev->dm.dc_lock);
11673 }
11674 
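/*
 * Register write helper backing DC's register accessors; emits an
 * amdgpu_dc_wreg trace event for every write.
 */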
11675 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11676 		       uint32_t value, const char *func_name)
11677 {
11678 #ifdef DM_CHECK_ADDR_0
11679 	if (address == 0) {
		DC_ERR("invalid register write; address = 0\n");
11681 		return;
11682 	}
11683 #endif
11684 	cgs_write_register(ctx->cgs_device, address, value);
11685 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11686 }
11687 
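/*
 * Register read helper backing DC's register accessors; emits an
 * amdgpu_dc_rreg trace event for every read.
 */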
11688 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11689 			  const char *func_name)
11690 {
11691 	uint32_t value;
11692 #ifdef DM_CHECK_ADDR_0
11693 	if (address == 0) {
11694 		DC_ERR("invalid register read; address = 0\n");
11695 		return 0;
11696 	}
11697 #endif
11698 
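	/*
	 * Reads cannot be serviced while a DMUB register-offload gather is in
	 * progress and the sequence is not going to be burst-written; complain
	 * and return 0 instead.
	 */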
11699 	if (ctx->dmub_srv &&
11700 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11701 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11702 		ASSERT(false);
11703 		return 0;
11704 	}
11705 
11706 	value = cgs_read_register(ctx->cgs_device, address);
11707 
11708 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11709 
11710 	return value;
11711 }
11712 
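/*
 * Translate the DMUB notification for an async AUX or SET_CONFIG request
 * into a synchronous result: the AUX reply length (or 0 for SET_CONFIG) on
 * success, -1 otherwise, with the detailed status placed in
 * @operation_result.
 */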
11713 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11714 						struct dc_context *ctx,
11715 						uint8_t status_type,
11716 						uint32_t *operation_result)
11717 {
11718 	struct amdgpu_device *adev = ctx->driver_context;
11719 	int return_status = -1;
11720 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11721 
11722 	if (is_cmd_aux) {
11723 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11724 			return_status = p_notify->aux_reply.length;
11725 			*operation_result = p_notify->result;
11726 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11727 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11728 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11729 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11730 		} else {
11731 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11732 		}
11733 	} else {
11734 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11735 			return_status = 0;
11736 			*operation_result = p_notify->sc_status;
11737 		} else {
11738 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11739 		}
11740 	}
11741 
11742 	return return_status;
11743 }
11744 
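/*
 * Issue an AUX or SET_CONFIG request through DMUB and wait (up to 10
 * seconds) for the dmub_aux_transfer_done completion before converting the
 * notification into a synchronous result for the caller.
 */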
11745 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11746 	unsigned int link_index, void *cmd_payload, void *operation_result)
11747 {
11748 	struct amdgpu_device *adev = ctx->driver_context;
11749 	int ret = 0;
11750 
11751 	if (is_cmd_aux) {
11752 		dc_process_dmub_aux_transfer_async(ctx->dc,
11753 			link_index, (struct aux_payload *)cmd_payload);
11754 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11755 					(struct set_config_cmd_payload *)cmd_payload,
11756 					adev->dm.dmub_notify)) {
11757 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11758 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11759 					(uint32_t *)operation_result);
11760 	}
11761 
11762 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11763 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
11765 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11766 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11767 				(uint32_t *)operation_result);
11768 	}
11769 
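	/*
	 * For a successful AUX transaction, hand the reply command back to the
	 * caller and, for an ACKed read, copy the reply data as well.
	 */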
11770 	if (is_cmd_aux) {
11771 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11772 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11773 
11774 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11775 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11776 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11777 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11778 				       adev->dm.dmub_notify->aux_reply.length);
11779 			}
11780 		}
11781 	}
11782 
11783 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11784 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11785 			(uint32_t *)operation_result);
11786 }
11787 
11788 /*
11789  * Check whether seamless boot is supported.
11790  *
11791  * So far we only support seamless boot on CHIP_VANGOGH.
11792  * If everything goes well, we may consider expanding
11793  * seamless boot to other ASICs.
11794  */
11795 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11796 {
11797 	switch (adev->asic_type) {
11798 	case CHIP_VANGOGH:
11799 		if (!adev->mman.keep_stolen_vga_memory)
11800 			return true;
11801 		break;
11802 	default:
11803 		break;
11804 	}
11805 
11806 	return false;
11807 }
11808