1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/dp/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
119 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
121 
122 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
123 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
124 
125 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
126 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
127 
128 /* Number of bytes in PSP header for firmware. */
129 #define PSP_HEADER_BYTES 0x100
130 
131 /* Number of bytes in PSP footer for firmware. */
132 #define PSP_FOOTER_BYTES 0x100
133 
134 /**
135  * DOC: overview
136  *
137  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
138  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
139  * requests into DC requests, and DC responses into DRM responses.
140  *
141  * The root control structure is &struct amdgpu_display_manager.
142  */
143 
144 /* basic init/fini API */
145 static int amdgpu_dm_init(struct amdgpu_device *adev);
146 static void amdgpu_dm_fini(struct amdgpu_device *adev);
147 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
148 
149 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
150 {
151 	switch (link->dpcd_caps.dongle_type) {
152 	case DISPLAY_DONGLE_NONE:
153 		return DRM_MODE_SUBCONNECTOR_Native;
154 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
155 		return DRM_MODE_SUBCONNECTOR_VGA;
156 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
157 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
158 		return DRM_MODE_SUBCONNECTOR_DVID;
159 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
160 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
161 		return DRM_MODE_SUBCONNECTOR_HDMIA;
162 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
163 	default:
164 		return DRM_MODE_SUBCONNECTOR_Unknown;
165 	}
166 }
167 
168 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
169 {
170 	struct dc_link *link = aconnector->dc_link;
171 	struct drm_connector *connector = &aconnector->base;
172 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
173 
174 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
175 		return;
176 
177 	if (aconnector->dc_sink)
178 		subconnector = get_subconnector_type(link);
179 
180 	drm_object_property_set_value(&connector->base,
181 			connector->dev->mode_config.dp_subconnector_property,
182 			subconnector);
183 }
184 
/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
192 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
193 /* removes and deallocates the drm structures, created by the above function */
194 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
195 
196 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
197 				struct drm_plane *plane,
198 				unsigned long possible_crtcs,
199 				const struct dc_plane_cap *plane_cap);
200 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
201 			       struct drm_plane *plane,
202 			       uint32_t link_index);
203 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
204 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
205 				    uint32_t link_index,
206 				    struct amdgpu_encoder *amdgpu_encoder);
207 static int amdgpu_dm_encoder_init(struct drm_device *dev,
208 				  struct amdgpu_encoder *aencoder,
209 				  uint32_t link_index);
210 
211 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
212 
213 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
214 
215 static int amdgpu_dm_atomic_check(struct drm_device *dev,
216 				  struct drm_atomic_state *state);
217 
218 static void handle_cursor_update(struct drm_plane *plane,
219 				 struct drm_plane_state *old_plane_state);
220 
221 static const struct drm_format_info *
222 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
223 
224 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
225 static void handle_hpd_rx_irq(void *param);
226 
227 static bool
228 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
229 				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: Counter for vertical blanks
 */
243 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
244 {
245 	if (crtc >= adev->mode_info.num_crtc)
246 		return 0;
247 	else {
248 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
249 
250 		if (acrtc->dm_irq_params.stream == NULL) {
251 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
252 				  crtc);
253 			return 0;
254 		}
255 
256 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
257 	}
258 }
259 
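/*
 * Report the current scanout position for a CRTC by querying DC and packing
 * the horizontal/vertical position and the vblank start/end into the
 * register-style format expected by the caller.
 */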
260 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
261 				  u32 *vbl, u32 *position)
262 {
263 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
264 
265 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
266 		return -EINVAL;
267 	else {
268 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
269 
270 		if (acrtc->dm_irq_params.stream ==  NULL) {
271 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
272 				  crtc);
273 			return 0;
274 		}
275 
276 		/*
277 		 * TODO rework base driver to use values directly.
278 		 * for now parse it back into reg-format
279 		 */
280 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
281 					 &v_blank_start,
282 					 &v_blank_end,
283 					 &h_position,
284 					 &v_position);
285 
286 		*position = v_position | (h_position << 16);
287 		*vbl = v_blank_start | (v_blank_end << 16);
288 	}
289 
290 	return 0;
291 }
292 
293 static bool dm_is_idle(void *handle)
294 {
295 	/* XXX todo */
296 	return true;
297 }
298 
299 static int dm_wait_for_idle(void *handle)
300 {
301 	/* XXX todo */
302 	return 0;
303 }
304 
305 static bool dm_check_soft_reset(void *handle)
306 {
307 	return false;
308 }
309 
310 static int dm_soft_reset(void *handle)
311 {
312 	/* XXX todo */
313 	return 0;
314 }
315 
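/*
 * Look up the amdgpu_crtc whose OTG instance matches otg_inst. Falls back to
 * the first CRTC (with a warning) when otg_inst is -1, and returns NULL if no
 * CRTC uses that OTG instance.
 */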
316 static struct amdgpu_crtc *
317 get_crtc_by_otg_inst(struct amdgpu_device *adev,
318 		     int otg_inst)
319 {
320 	struct drm_device *dev = adev_to_drm(adev);
321 	struct drm_crtc *crtc;
322 	struct amdgpu_crtc *amdgpu_crtc;
323 
324 	if (WARN_ON(otg_inst == -1))
325 		return adev->mode_info.crtcs[0];
326 
327 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
328 		amdgpu_crtc = to_amdgpu_crtc(crtc);
329 
330 		if (amdgpu_crtc->otg_inst == otg_inst)
331 			return amdgpu_crtc;
332 	}
333 
334 	return NULL;
335 }
336 
337 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
338 {
339 	return acrtc->dm_irq_params.freesync_config.state ==
340 		       VRR_STATE_ACTIVE_VARIABLE ||
341 	       acrtc->dm_irq_params.freesync_config.state ==
342 		       VRR_STATE_ACTIVE_FIXED;
343 }
344 
345 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
346 {
347 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
348 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
349 }
350 
351 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
352 					      struct dm_crtc_state *new_state)
353 {
354 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
355 		return true;
356 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
357 		return true;
358 	else
359 		return false;
360 }
361 
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
369 static void dm_pflip_high_irq(void *interrupt_params)
370 {
371 	struct amdgpu_crtc *amdgpu_crtc;
372 	struct common_irq_params *irq_params = interrupt_params;
373 	struct amdgpu_device *adev = irq_params->adev;
374 	unsigned long flags;
375 	struct drm_pending_vblank_event *e;
376 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
377 	bool vrr_active;
378 
379 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
380 
381 	/* IRQ could occur when in initial stage */
382 	/* TODO work and BO cleanup */
383 	if (amdgpu_crtc == NULL) {
384 		DC_LOG_PFLIP("CRTC is null, returning.\n");
385 		return;
386 	}
387 
388 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
389 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
392 						 amdgpu_crtc->pflip_status,
393 						 AMDGPU_FLIP_SUBMITTED,
394 						 amdgpu_crtc->crtc_id,
395 						 amdgpu_crtc);
396 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
397 		return;
398 	}
399 
400 	/* page flip completed. */
401 	e = amdgpu_crtc->event;
402 	amdgpu_crtc->event = NULL;
403 
404 	WARN_ON(!e);
405 
406 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
407 
408 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
409 	if (!vrr_active ||
410 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
411 				      &v_blank_end, &hpos, &vpos) ||
412 	    (vpos < v_blank_start)) {
413 		/* Update to correct count and vblank timestamp if racing with
414 		 * vblank irq. This also updates to the correct vblank timestamp
415 		 * even in VRR mode, as scanout is past the front-porch atm.
416 		 */
417 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
418 
419 		/* Wake up userspace by sending the pageflip event with proper
420 		 * count and timestamp of vblank of flip completion.
421 		 */
422 		if (e) {
423 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
424 
425 			/* Event sent, so done with vblank for this flip */
426 			drm_crtc_vblank_put(&amdgpu_crtc->base);
427 		}
428 	} else if (e) {
429 		/* VRR active and inside front-porch: vblank count and
430 		 * timestamp for pageflip event will only be up to date after
431 		 * drm_crtc_handle_vblank() has been executed from late vblank
432 		 * irq handler after start of back-porch (vline 0). We queue the
433 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
434 		 * updated timestamp and count, once it runs after us.
435 		 *
436 		 * We need to open-code this instead of using the helper
437 		 * drm_crtc_arm_vblank_event(), as that helper would
438 		 * call drm_crtc_accurate_vblank_count(), which we must
439 		 * not call in VRR mode while we are in front-porch!
440 		 */
441 
442 		/* sequence will be replaced by real count during send-out. */
443 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
444 		e->pipe = amdgpu_crtc->crtc_id;
445 
446 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
447 		e = NULL;
448 	}
449 
	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as it is incremented at the start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
455 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
456 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
457 
458 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
459 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
460 
461 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
462 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
463 		     vrr_active, (int) !e);
464 }
465 
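/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, in VRR mode, performs core vblank
 * handling after the end of the front-porch, including BTR processing on
 * pre-DCE12 ASICs.
 */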
466 static void dm_vupdate_high_irq(void *interrupt_params)
467 {
468 	struct common_irq_params *irq_params = interrupt_params;
469 	struct amdgpu_device *adev = irq_params->adev;
470 	struct amdgpu_crtc *acrtc;
471 	struct drm_device *drm_dev;
472 	struct drm_vblank_crtc *vblank;
473 	ktime_t frame_duration_ns, previous_timestamp;
474 	unsigned long flags;
475 	int vrr_active;
476 
477 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
478 
479 	if (acrtc) {
480 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
481 		drm_dev = acrtc->base.dev;
482 		vblank = &drm_dev->vblank[acrtc->base.index];
483 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
484 		frame_duration_ns = vblank->time - previous_timestamp;
485 
486 		if (frame_duration_ns > 0) {
487 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
488 						frame_duration_ns,
489 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
490 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
491 		}
492 
493 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
494 			      acrtc->crtc_id,
495 			      vrr_active);
496 
		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping only gives valid results
		 * once scanout has passed the front-porch. This will also
		 * deliver page-flip completion events that have been queued to
		 * us if a pageflip happened inside front-porch.
		 */
503 		if (vrr_active) {
504 			drm_crtc_handle_vblank(&acrtc->base);
505 
506 			/* BTR processing for pre-DCE12 ASICs */
507 			if (acrtc->dm_irq_params.stream &&
508 			    adev->family < AMDGPU_FAMILY_AI) {
509 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
510 				mod_freesync_handle_v_update(
511 				    adev->dm.freesync_module,
512 				    acrtc->dm_irq_params.stream,
513 				    &acrtc->dm_irq_params.vrr_params);
514 
515 				dc_stream_adjust_vmin_vmax(
516 				    adev->dm.dc,
517 				    acrtc->dm_irq_params.stream,
518 				    &acrtc->dm_irq_params.vrr_params.adjust);
519 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
520 			}
521 		}
522 	}
523 }
524 
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
532 static void dm_crtc_high_irq(void *interrupt_params)
533 {
534 	struct common_irq_params *irq_params = interrupt_params;
535 	struct amdgpu_device *adev = irq_params->adev;
536 	struct amdgpu_crtc *acrtc;
537 	unsigned long flags;
538 	int vrr_active;
539 
540 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
541 	if (!acrtc)
542 		return;
543 
544 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
545 
546 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
547 		      vrr_active, acrtc->dm_irq_params.active_planes);
548 
549 	/**
550 	 * Core vblank handling at start of front-porch is only possible
551 	 * in non-vrr mode, as only there vblank timestamping will give
552 	 * valid results while done in front-porch. Otherwise defer it
553 	 * to dm_vupdate_high_irq after end of front-porch.
554 	 */
555 	if (!vrr_active)
556 		drm_crtc_handle_vblank(&acrtc->base);
557 
558 	/**
559 	 * Following stuff must happen at start of vblank, for crc
560 	 * computation and below-the-range btr support in vrr mode.
561 	 */
562 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
563 
564 	/* BTR updates need to happen before VUPDATE on Vega and above. */
565 	if (adev->family < AMDGPU_FAMILY_AI)
566 		return;
567 
568 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
569 
570 	if (acrtc->dm_irq_params.stream &&
571 	    acrtc->dm_irq_params.vrr_params.supported &&
572 	    acrtc->dm_irq_params.freesync_config.state ==
573 		    VRR_STATE_ACTIVE_VARIABLE) {
574 		mod_freesync_handle_v_update(adev->dm.freesync_module,
575 					     acrtc->dm_irq_params.stream,
576 					     &acrtc->dm_irq_params.vrr_params);
577 
578 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
579 					   &acrtc->dm_irq_params.vrr_params.adjust);
580 	}
581 
582 	/*
583 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
584 	 * In that case, pageflip completion interrupts won't fire and pageflip
585 	 * completion events won't get delivered. Prevent this by sending
586 	 * pending pageflip events from here if a flip is still pending.
587 	 *
588 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
589 	 * avoid race conditions between flip programming and completion,
590 	 * which could cause too early flip completion events.
591 	 */
592 	if (adev->family >= AMDGPU_FAMILY_RV &&
593 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
594 	    acrtc->dm_irq_params.active_planes == 0) {
595 		if (acrtc->event) {
596 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
597 			acrtc->event = NULL;
598 			drm_crtc_vblank_put(&acrtc->base);
599 		}
600 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
601 	}
602 
603 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
604 }
605 
606 #if defined(CONFIG_DRM_AMD_DC_DCN)
607 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
608 /**
609  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
610  * DCN generation ASICs
611  * @interrupt_params: interrupt parameters
612  *
613  * Used to set crc window/read out crc value at vertical line 0 position
614  */
615 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
616 {
617 	struct common_irq_params *irq_params = interrupt_params;
618 	struct amdgpu_device *adev = irq_params->adev;
619 	struct amdgpu_crtc *acrtc;
620 
621 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
622 
623 	if (!acrtc)
624 		return;
625 
626 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
627 }
628 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
629 
/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
639 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
640 					struct dmub_notification *notify)
641 {
642 	if (adev->dm.dmub_notify)
643 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
644 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
645 		complete(&adev->dm.dmub_aux_transfer_done);
646 }
647 
/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through the
 * link index and calls the helper to do the processing.
 */
656 static void dmub_hpd_callback(struct amdgpu_device *adev,
657 			      struct dmub_notification *notify)
658 {
659 	struct amdgpu_dm_connector *aconnector;
660 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
661 	struct drm_connector *connector;
662 	struct drm_connector_list_iter iter;
663 	struct dc_link *link;
664 	uint8_t link_index = 0;
665 	struct drm_device *dev;
666 
667 	if (adev == NULL)
668 		return;
669 
670 	if (notify == NULL) {
671 		DRM_ERROR("DMUB HPD callback notification was NULL");
672 		return;
673 	}
674 
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
677 		return;
678 	}
679 
680 	link_index = notify->link_index;
681 	link = adev->dm.dc->links[link_index];
682 	dev = adev->dm.ddev;
683 
684 	drm_connector_list_iter_begin(dev, &iter);
685 	drm_for_each_connector_iter(connector, &iter) {
686 		aconnector = to_amdgpu_dm_connector(connector);
687 		if (link && aconnector->dc_link == link) {
688 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
689 			hpd_aconnector = aconnector;
690 			break;
691 		}
692 	}
693 	drm_connector_list_iter_end(&iter);
694 
695 	if (hpd_aconnector) {
696 		if (notify->type == DMUB_NOTIFICATION_HPD)
697 			handle_hpd_irq_helper(hpd_aconnector);
698 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
699 			handle_hpd_rx_irq(hpd_aconnector);
700 	}
701 }
702 
/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if the callback is NULL
 * or the notification type is invalid
 */
715 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
716 					  enum dmub_notification_type type,
717 					  dmub_notify_interrupt_callback_t callback,
718 					  bool dmub_int_thread_offload)
719 {
720 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
721 		adev->dm.dmub_callback[type] = callback;
722 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
723 	} else
724 		return false;
725 
726 	return true;
727 }
728 
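/*
 * Deferred work handler for DMUB HPD notifications: dispatches the queued
 * notification to the registered callback and frees the work item.
 */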
729 static void dm_handle_hpd_work(struct work_struct *work)
730 {
731 	struct dmub_hpd_work *dmub_hpd_wrk;
732 
733 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
734 
735 	if (!dmub_hpd_wrk->dmub_notify) {
736 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
737 		return;
738 	}
739 
740 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
741 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
742 		dmub_hpd_wrk->dmub_notify);
743 	}
744 
745 	kfree(dmub_hpd_wrk->dmub_notify);
746 	kfree(dmub_hpd_wrk);
747 
748 }
749 
750 #define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by reading DMUB notifications and trace
 * buffer entries and dispatching them to the registered callbacks.
 */
758 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
759 {
760 	struct dmub_notification notify;
761 	struct common_irq_params *irq_params = interrupt_params;
762 	struct amdgpu_device *adev = irq_params->adev;
763 	struct amdgpu_display_manager *dm = &adev->dm;
764 	struct dmcub_trace_buf_entry entry = { 0 };
765 	uint32_t count = 0;
766 	struct dmub_hpd_work *dmub_hpd_wrk;
767 	struct dc_link *plink = NULL;
768 
769 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
770 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
771 
772 		do {
773 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
775 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
776 				continue;
777 			}
778 			if (!dm->dmub_callback[notify.type]) {
779 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
780 				continue;
781 			}
782 			if (dm->dmub_thread_offload[notify.type] == true) {
783 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
784 				if (!dmub_hpd_wrk) {
785 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
786 					return;
787 				}
788 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
789 				if (!dmub_hpd_wrk->dmub_notify) {
790 					kfree(dmub_hpd_wrk);
791 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
792 					return;
793 				}
794 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
795 				if (dmub_hpd_wrk->dmub_notify)
796 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
797 				dmub_hpd_wrk->adev = adev;
798 				if (notify.type == DMUB_NOTIFICATION_HPD) {
799 					plink = adev->dm.dc->links[notify.link_index];
800 					if (plink) {
801 						plink->hpd_status =
802 							notify.hpd_status == DP_HPD_PLUG;
803 					}
804 				}
805 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
806 			} else {
807 				dm->dmub_callback[notify.type](adev, &notify);
808 			}
809 		} while (notify.pending_notification);
810 	}
811 
812 
813 	do {
814 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
815 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
816 							entry.param0, entry.param1);
817 
818 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
819 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
820 		} else
821 			break;
822 
823 		count++;
824 
825 	} while (count <= DMUB_TRACE_MAX_READ);
826 
827 	if (count > DMUB_TRACE_MAX_READ)
828 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
829 }
830 #endif /* CONFIG_DRM_AMD_DC_DCN */
831 
832 static int dm_set_clockgating_state(void *handle,
833 		  enum amd_clockgating_state state)
834 {
835 	return 0;
836 }
837 
838 static int dm_set_powergating_state(void *handle,
839 		  enum amd_powergating_state state)
840 {
841 	return 0;
842 }
843 
844 /* Prototypes of private functions */
845 static int dm_early_init(void* handle);
846 
847 /* Allocate memory for FBC compressed data  */
848 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
849 {
850 	struct drm_device *dev = connector->dev;
851 	struct amdgpu_device *adev = drm_to_adev(dev);
852 	struct dm_compressor_info *compressor = &adev->dm.compressor;
853 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
854 	struct drm_display_mode *mode;
855 	unsigned long max_size = 0;
856 
857 	if (adev->dm.dc->fbc_compressor == NULL)
858 		return;
859 
860 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
861 		return;
862 
863 	if (compressor->bo_ptr)
864 		return;
865 
866 
867 	list_for_each_entry(mode, &connector->modes, head) {
868 		if (max_size < mode->htotal * mode->vtotal)
869 			max_size = mode->htotal * mode->vtotal;
870 	}
871 
872 	if (max_size) {
873 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
874 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
875 			    &compressor->gpu_addr, &compressor->cpu_addr);
876 
877 		if (r)
878 			DRM_ERROR("DM: Failed to initialize FBC\n");
879 		else {
880 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
881 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
882 		}
883 
884 	}
885 
886 }
887 
888 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
889 					  int pipe, bool *enabled,
890 					  unsigned char *buf, int max_bytes)
891 {
892 	struct drm_device *dev = dev_get_drvdata(kdev);
893 	struct amdgpu_device *adev = drm_to_adev(dev);
894 	struct drm_connector *connector;
895 	struct drm_connector_list_iter conn_iter;
896 	struct amdgpu_dm_connector *aconnector;
897 	int ret = 0;
898 
899 	*enabled = false;
900 
901 	mutex_lock(&adev->dm.audio_lock);
902 
903 	drm_connector_list_iter_begin(dev, &conn_iter);
904 	drm_for_each_connector_iter(connector, &conn_iter) {
905 		aconnector = to_amdgpu_dm_connector(connector);
906 		if (aconnector->audio_inst != port)
907 			continue;
908 
909 		*enabled = true;
910 		ret = drm_eld_size(connector->eld);
911 		memcpy(buf, connector->eld, min(max_bytes, ret));
912 
913 		break;
914 	}
915 	drm_connector_list_iter_end(&conn_iter);
916 
917 	mutex_unlock(&adev->dm.audio_lock);
918 
919 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
920 
921 	return ret;
922 }
923 
924 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
925 	.get_eld = amdgpu_dm_audio_component_get_eld,
926 };
927 
928 static int amdgpu_dm_audio_component_bind(struct device *kdev,
929 				       struct device *hda_kdev, void *data)
930 {
931 	struct drm_device *dev = dev_get_drvdata(kdev);
932 	struct amdgpu_device *adev = drm_to_adev(dev);
933 	struct drm_audio_component *acomp = data;
934 
935 	acomp->ops = &amdgpu_dm_audio_component_ops;
936 	acomp->dev = kdev;
937 	adev->dm.audio_component = acomp;
938 
939 	return 0;
940 }
941 
942 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
943 					  struct device *hda_kdev, void *data)
944 {
945 	struct drm_device *dev = dev_get_drvdata(kdev);
946 	struct amdgpu_device *adev = drm_to_adev(dev);
947 	struct drm_audio_component *acomp = data;
948 
949 	acomp->ops = NULL;
950 	acomp->dev = NULL;
951 	adev->dm.audio_component = NULL;
952 }
953 
954 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
955 	.bind	= amdgpu_dm_audio_component_bind,
956 	.unbind	= amdgpu_dm_audio_component_unbind,
957 };
958 
959 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
960 {
961 	int i, ret;
962 
963 	if (!amdgpu_audio)
964 		return 0;
965 
966 	adev->mode_info.audio.enabled = true;
967 
968 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
969 
970 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
971 		adev->mode_info.audio.pin[i].channels = -1;
972 		adev->mode_info.audio.pin[i].rate = -1;
973 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
974 		adev->mode_info.audio.pin[i].status_bits = 0;
975 		adev->mode_info.audio.pin[i].category_code = 0;
976 		adev->mode_info.audio.pin[i].connected = false;
977 		adev->mode_info.audio.pin[i].id =
978 			adev->dm.dc->res_pool->audios[i]->inst;
979 		adev->mode_info.audio.pin[i].offset = 0;
980 	}
981 
982 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
983 	if (ret < 0)
984 		return ret;
985 
986 	adev->dm.audio_registered = true;
987 
988 	return 0;
989 }
990 
991 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
992 {
993 	if (!amdgpu_audio)
994 		return;
995 
996 	if (!adev->mode_info.audio.enabled)
997 		return;
998 
999 	if (adev->dm.audio_registered) {
1000 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1001 		adev->dm.audio_registered = false;
1002 	}
1003 
1004 	/* TODO: Disable audio? */
1005 
1006 	adev->mode_info.audio.enabled = false;
1007 }
1008 
1009 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1010 {
1011 	struct drm_audio_component *acomp = adev->dm.audio_component;
1012 
1013 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1014 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1015 
1016 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1017 						 pin, -1);
1018 	}
1019 }
1020 
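/*
 * Initialize the DMUB hardware: copy the firmware and VBIOS into framebuffer
 * memory, program the hardware through the DMUB service, wait for the
 * firmware auto-load to finish and set up the DC DMUB server. Returns 0 when
 * DMUB is not supported on the ASIC.
 */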
1021 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1022 {
1023 	const struct dmcub_firmware_header_v1_0 *hdr;
1024 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1025 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1026 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1027 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1028 	struct abm *abm = adev->dm.dc->res_pool->abm;
1029 	struct dmub_srv_hw_params hw_params;
1030 	enum dmub_status status;
1031 	const unsigned char *fw_inst_const, *fw_bss_data;
1032 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1033 	bool has_hw_support;
1034 
1035 	if (!dmub_srv)
1036 		/* DMUB isn't supported on the ASIC. */
1037 		return 0;
1038 
1039 	if (!fb_info) {
1040 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1041 		return -EINVAL;
1042 	}
1043 
1044 	if (!dmub_fw) {
1045 		/* Firmware required for DMUB support. */
1046 		DRM_ERROR("No firmware provided for DMUB.\n");
1047 		return -EINVAL;
1048 	}
1049 
1050 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1051 	if (status != DMUB_STATUS_OK) {
1052 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1053 		return -EINVAL;
1054 	}
1055 
1056 	if (!has_hw_support) {
1057 		DRM_INFO("DMUB unsupported on ASIC\n");
1058 		return 0;
1059 	}
1060 
1061 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1062 	status = dmub_srv_hw_reset(dmub_srv);
1063 	if (status != DMUB_STATUS_OK)
1064 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1065 
1066 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1067 
1068 	fw_inst_const = dmub_fw->data +
1069 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1070 			PSP_HEADER_BYTES;
1071 
1072 	fw_bss_data = dmub_fw->data +
1073 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1074 		      le32_to_cpu(hdr->inst_const_bytes);
1075 
1076 	/* Copy firmware and bios info into FB memory. */
1077 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1078 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1079 
1080 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1081 
1082 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1083 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1084 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1085 	 * will be done by dm_dmub_hw_init
1086 	 */
1087 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1088 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1089 				fw_inst_const_size);
1090 	}
1091 
1092 	if (fw_bss_data_size)
1093 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1094 		       fw_bss_data, fw_bss_data_size);
1095 
1096 	/* Copy firmware bios info into FB memory. */
1097 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1098 	       adev->bios_size);
1099 
1100 	/* Reset regions that need to be reset. */
1101 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1102 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1103 
1104 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1105 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1106 
1107 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1108 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1109 
1110 	/* Initialize hardware. */
1111 	memset(&hw_params, 0, sizeof(hw_params));
1112 	hw_params.fb_base = adev->gmc.fb_start;
1113 	hw_params.fb_offset = adev->gmc.aper_base;
1114 
1115 	/* backdoor load firmware and trigger dmub running */
1116 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1117 		hw_params.load_inst_const = true;
1118 
1119 	if (dmcu)
1120 		hw_params.psp_version = dmcu->psp_version;
1121 
1122 	for (i = 0; i < fb_info->num_fb; ++i)
1123 		hw_params.fb[i] = &fb_info->fb[i];
1124 
1125 	switch (adev->ip_versions[DCE_HWIP][0]) {
1126 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1127 		hw_params.dpia_supported = true;
1128 #if defined(CONFIG_DRM_AMD_DC_DCN)
1129 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1130 #endif
1131 		break;
1132 	default:
1133 		break;
1134 	}
1135 
1136 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1137 	if (status != DMUB_STATUS_OK) {
1138 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1139 		return -EINVAL;
1140 	}
1141 
1142 	/* Wait for firmware load to finish. */
1143 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1144 	if (status != DMUB_STATUS_OK)
1145 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1146 
1147 	/* Init DMCU and ABM if available. */
1148 	if (dmcu && abm) {
1149 		dmcu->funcs->dmcu_init(dmcu);
1150 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1151 	}
1152 
1153 	if (!adev->dm.dc->ctx->dmub_srv)
1154 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1155 	if (!adev->dm.dc->ctx->dmub_srv) {
1156 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1157 		return -ENOMEM;
1158 	}
1159 
1160 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1161 		 adev->dm.dmcub_fw_version);
1162 
1163 	return 0;
1164 }
1165 
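/*
 * Restore DMUB after suspend: if the firmware is already initialized, only
 * wait for auto-load to finish; otherwise perform the full hardware
 * initialization again.
 */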
1166 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1167 {
1168 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1169 	enum dmub_status status;
1170 	bool init;
1171 
1172 	if (!dmub_srv) {
1173 		/* DMUB isn't supported on the ASIC. */
1174 		return;
1175 	}
1176 
1177 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1178 	if (status != DMUB_STATUS_OK)
1179 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1180 
1181 	if (status == DMUB_STATUS_OK && init) {
1182 		/* Wait for firmware load to finish. */
1183 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1184 		if (status != DMUB_STATUS_OK)
1185 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1186 	} else {
1187 		/* Perform the full hardware initialization. */
1188 		dm_dmub_hw_init(adev);
1189 	}
1190 }
1191 
1192 #if defined(CONFIG_DRM_AMD_DC_DCN)
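/*
 * Translate the GMC framebuffer/AGP apertures and GART page table addresses
 * into the dc_phy_addr_space_config that DC uses to program the system
 * context.
 */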
1193 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1194 {
1195 	uint64_t pt_base;
1196 	uint32_t logical_addr_low;
1197 	uint32_t logical_addr_high;
1198 	uint32_t agp_base, agp_bot, agp_top;
1199 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1200 
1201 	memset(pa_config, 0, sizeof(*pa_config));
1202 
1203 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1204 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1205 
1206 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
		 * increase the system aperture high address (add 1) to get rid
		 * of the VM fault and hardware hang.
		 */
1213 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1214 	else
1215 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1216 
1217 	agp_base = 0;
1218 	agp_bot = adev->gmc.agp_start >> 24;
1219 	agp_top = adev->gmc.agp_end >> 24;
1220 
1221 
1222 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1223 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1224 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1225 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1226 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1227 	page_table_base.low_part = lower_32_bits(pt_base);
1228 
1229 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1230 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1231 
1232 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1233 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1234 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1235 
1236 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1237 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1238 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1239 
1240 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1241 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1242 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1243 
1244 	pa_config->is_hvm_enabled = 0;
1245 
1246 }
1247 #endif
1248 #if defined(CONFIG_DRM_AMD_DC_DCN)
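/*
 * Deferred vblank work: track how many CRTCs have vblank interrupts enabled,
 * allow DC idle optimizations (MALL) only when none do, and enable or disable
 * PSR based on the vblank requirements from the OS.
 */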
1249 static void vblank_control_worker(struct work_struct *work)
1250 {
1251 	struct vblank_control_work *vblank_work =
1252 		container_of(work, struct vblank_control_work, work);
1253 	struct amdgpu_display_manager *dm = vblank_work->dm;
1254 
1255 	mutex_lock(&dm->dc_lock);
1256 
1257 	if (vblank_work->enable)
1258 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1260 		dm->active_vblank_irq_count--;
1261 
1262 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1263 
1264 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1265 
1266 	/* Control PSR based on vblank requirements from OS */
1267 	if (vblank_work->stream && vblank_work->stream->link) {
1268 		if (vblank_work->enable) {
1269 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1270 				amdgpu_dm_psr_disable(vblank_work->stream);
1271 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1272 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1273 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1274 			amdgpu_dm_psr_enable(vblank_work->stream);
1275 		}
1276 	}
1277 
1278 	mutex_unlock(&dm->dc_lock);
1279 
1280 	dc_stream_release(vblank_work->stream);
1281 
1282 	kfree(vblank_work);
1283 }
1284 
1285 #endif
1286 
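/*
 * Offloaded HPD RX IRQ handler: re-detect the sink and, unless a GPU reset is
 * in progress, handle DP automated test requests or link-loss recovery for
 * the link that raised the interrupt.
 */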
1287 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1288 {
1289 	struct hpd_rx_irq_offload_work *offload_work;
1290 	struct amdgpu_dm_connector *aconnector;
1291 	struct dc_link *dc_link;
1292 	struct amdgpu_device *adev;
1293 	enum dc_connection_type new_connection_type = dc_connection_none;
1294 	unsigned long flags;
1295 
1296 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1297 	aconnector = offload_work->offload_wq->aconnector;
1298 
1299 	if (!aconnector) {
1300 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1301 		goto skip;
1302 	}
1303 
1304 	adev = drm_to_adev(aconnector->base.dev);
1305 	dc_link = aconnector->dc_link;
1306 
1307 	mutex_lock(&aconnector->hpd_lock);
1308 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1309 		DRM_ERROR("KMS: Failed to detect connector\n");
1310 	mutex_unlock(&aconnector->hpd_lock);
1311 
1312 	if (new_connection_type == dc_connection_none)
1313 		goto skip;
1314 
1315 	if (amdgpu_in_reset(adev))
1316 		goto skip;
1317 
1318 	mutex_lock(&adev->dm.dc_lock);
1319 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1320 		dc_link_dp_handle_automated_test(dc_link);
1321 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1322 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1323 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1324 		dc_link_dp_handle_link_loss(dc_link);
1325 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1326 		offload_work->offload_wq->is_handling_link_loss = false;
1327 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1328 	}
1329 	mutex_unlock(&adev->dm.dc_lock);
1330 
1331 skip:
1332 	kfree(offload_work);
1333 
1334 }
1335 
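/*
 * Allocate one single-threaded offload workqueue (plus its spinlock) per DC
 * link so that HPD RX IRQ handling can be deferred out of interrupt context.
 */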
1336 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1337 {
1338 	int max_caps = dc->caps.max_links;
1339 	int i = 0;
1340 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1341 
1342 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1343 
1344 	if (!hpd_rx_offload_wq)
1345 		return NULL;
1346 
1347 
1348 	for (i = 0; i < max_caps; i++) {
1349 		hpd_rx_offload_wq[i].wq =
1350 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1351 
1352 		if (hpd_rx_offload_wq[i].wq == NULL) {
1353 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1354 			return NULL;
1355 		}
1356 
1357 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1358 	}
1359 
1360 	return hpd_rx_offload_wq;
1361 }
1362 
1363 struct amdgpu_stutter_quirk {
1364 	u16 chip_vendor;
1365 	u16 chip_device;
1366 	u16 subsys_vendor;
1367 	u16 subsys_device;
1368 	u8 revision;
1369 };
1370 
1371 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1372 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1373 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1374 	{ 0, 0, 0, 0, 0 },
1375 };
1376 
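/*
 * Check whether the PCI device matches an entry in the stutter quirk list
 * above; on such systems memory stutter mode is disabled.
 */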
1377 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1378 {
1379 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1380 
1381 	while (p && p->chip_device != 0) {
1382 		if (pdev->vendor == p->chip_vendor &&
1383 		    pdev->device == p->chip_device &&
1384 		    pdev->subsystem_vendor == p->subsys_vendor &&
1385 		    pdev->subsystem_device == p->subsys_device &&
1386 		    pdev->revision == p->revision) {
1387 			return true;
1388 		}
1389 		++p;
1390 	}
1391 	return false;
1392 }
1393 
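/*
 * Initialize the display manager: create the DC instance, bring up DMUB,
 * freesync and (optionally) HDCP support, register DMUB notification
 * callbacks and create the DRM display structures.
 */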
1394 static int amdgpu_dm_init(struct amdgpu_device *adev)
1395 {
1396 	struct dc_init_data init_data;
1397 #ifdef CONFIG_DRM_AMD_DC_HDCP
1398 	struct dc_callback_init init_params;
1399 #endif
1400 	int r;
1401 
1402 	adev->dm.ddev = adev_to_drm(adev);
1403 	adev->dm.adev = adev;
1404 
1405 	/* Zero all the fields */
1406 	memset(&init_data, 0, sizeof(init_data));
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408 	memset(&init_params, 0, sizeof(init_params));
1409 #endif
1410 
1411 	mutex_init(&adev->dm.dc_lock);
1412 	mutex_init(&adev->dm.audio_lock);
1413 #if defined(CONFIG_DRM_AMD_DC_DCN)
1414 	spin_lock_init(&adev->dm.vblank_lock);
1415 #endif
1416 
	if (amdgpu_dm_irq_init(adev)) {
1418 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1419 		goto error;
1420 	}
1421 
1422 	init_data.asic_id.chip_family = adev->family;
1423 
1424 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1425 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1426 	init_data.asic_id.chip_id = adev->pdev->device;
1427 
1428 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1429 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1430 	init_data.asic_id.atombios_base_address =
1431 		adev->mode_info.atom_context->bios;
1432 
1433 	init_data.driver = adev;
1434 
1435 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1436 
1437 	if (!adev->dm.cgs_device) {
1438 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1439 		goto error;
1440 	}
1441 
1442 	init_data.cgs_device = adev->dm.cgs_device;
1443 
1444 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1445 
1446 	switch (adev->ip_versions[DCE_HWIP][0]) {
1447 	case IP_VERSION(2, 1, 0):
1448 		switch (adev->dm.dmcub_fw_version) {
1449 		case 0: /* development */
1450 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1451 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1452 			init_data.flags.disable_dmcu = false;
1453 			break;
1454 		default:
1455 			init_data.flags.disable_dmcu = true;
1456 		}
1457 		break;
1458 	case IP_VERSION(2, 0, 3):
1459 		init_data.flags.disable_dmcu = true;
1460 		break;
1461 	default:
1462 		break;
1463 	}
1464 
1465 	switch (adev->asic_type) {
1466 	case CHIP_CARRIZO:
1467 	case CHIP_STONEY:
1468 		init_data.flags.gpu_vm_support = true;
1469 		break;
1470 	default:
1471 		switch (adev->ip_versions[DCE_HWIP][0]) {
1472 		case IP_VERSION(1, 0, 0):
1473 		case IP_VERSION(1, 0, 1):
1474 			/* enable S/G on PCO and RV2 */
1475 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1476 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1477 				init_data.flags.gpu_vm_support = true;
1478 			break;
1479 		case IP_VERSION(2, 1, 0):
1480 		case IP_VERSION(3, 0, 1):
1481 		case IP_VERSION(3, 1, 2):
1482 		case IP_VERSION(3, 1, 3):
1483 		case IP_VERSION(3, 1, 5):
1484 			init_data.flags.gpu_vm_support = true;
1485 			break;
1486 		default:
1487 			break;
1488 		}
1489 		break;
1490 	}
1491 
1492 	if (init_data.flags.gpu_vm_support)
1493 		adev->mode_info.gpu_vm_support = true;
1494 
1495 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1496 		init_data.flags.fbc_support = true;
1497 
1498 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1499 		init_data.flags.multi_mon_pp_mclk_switch = true;
1500 
1501 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1502 		init_data.flags.disable_fractional_pwm = true;
1503 
1504 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1505 		init_data.flags.edp_no_power_sequencing = true;
1506 
1507 #ifdef CONFIG_DRM_AMD_DC_DCN
1508 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1509 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1510 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1511 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1512 #endif
1513 
1514 	init_data.flags.seamless_boot_edp_requested = false;
1515 
1516 	if (check_seamless_boot_capability(adev)) {
1517 		init_data.flags.seamless_boot_edp_requested = true;
1518 		init_data.flags.allow_seamless_boot_optimization = true;
1519 		DRM_INFO("Seamless boot condition check passed\n");
1520 	}
1521 
1522 	INIT_LIST_HEAD(&adev->dm.da_list);
1523 	/* Display Core create. */
1524 	adev->dm.dc = dc_create(&init_data);
1525 
1526 	if (adev->dm.dc) {
1527 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1528 	} else {
1529 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1530 		goto error;
1531 	}
1532 
1533 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1534 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1535 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1536 	}
1537 
1538 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1539 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1540 	if (dm_should_disable_stutter(adev->pdev))
1541 		adev->dm.dc->debug.disable_stutter = true;
1542 
1543 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1544 		adev->dm.dc->debug.disable_stutter = true;
1545 
1546 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1547 		adev->dm.dc->debug.disable_dsc = true;
1548 		adev->dm.dc->debug.disable_dsc_edp = true;
1549 	}
1550 
1551 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1552 		adev->dm.dc->debug.disable_clock_gate = true;
1553 
1554 	r = dm_dmub_hw_init(adev);
1555 	if (r) {
1556 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1557 		goto error;
1558 	}
1559 
1560 	dc_hardware_init(adev->dm.dc);
1561 
1562 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1563 	if (!adev->dm.hpd_rx_offload_wq) {
1564 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1565 		goto error;
1566 	}
1567 
1568 #if defined(CONFIG_DRM_AMD_DC_DCN)
1569 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1570 		struct dc_phy_addr_space_config pa_config;
1571 
1572 		mmhub_read_system_context(adev, &pa_config);
1573 
1574 		// Call the DC init_memory func
1575 		dc_setup_system_context(adev->dm.dc, &pa_config);
1576 	}
1577 #endif
1578 
1579 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1580 	if (!adev->dm.freesync_module) {
1581 		DRM_ERROR(
1582 		"amdgpu: failed to initialize freesync_module.\n");
1583 	} else
1584 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1585 				adev->dm.freesync_module);
1586 
1587 	amdgpu_dm_init_color_mod();
1588 
1589 #if defined(CONFIG_DRM_AMD_DC_DCN)
1590 	if (adev->dm.dc->caps.max_links > 0) {
1591 		adev->dm.vblank_control_workqueue =
1592 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1593 		if (!adev->dm.vblank_control_workqueue)
1594 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1595 	}
1596 #endif
1597 
1598 #ifdef CONFIG_DRM_AMD_DC_HDCP
1599 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1600 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1601 
1602 		if (!adev->dm.hdcp_workqueue)
1603 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1604 		else
1605 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1606 
1607 		dc_init_callbacks(adev->dm.dc, &init_params);
1608 	}
1609 #endif
1610 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1611 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1612 #endif
1613 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1614 		init_completion(&adev->dm.dmub_aux_transfer_done);
1615 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1616 		if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1618 			goto error;
1619 		}
1620 
1621 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1622 		if (!adev->dm.delayed_hpd_wq) {
1623 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1624 			goto error;
1625 		}
1626 
1627 		amdgpu_dm_outbox_init(adev);
1628 #if defined(CONFIG_DRM_AMD_DC_DCN)
1629 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1630 			dmub_aux_setconfig_callback, false)) {
1631 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1632 			goto error;
1633 		}
1634 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1635 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1636 			goto error;
1637 		}
1638 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1640 			goto error;
1641 		}
1642 #endif /* CONFIG_DRM_AMD_DC_DCN */
1643 	}
1644 
1645 	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
1648 		goto error;
1649 	}
1650 
1651 	/* create fake encoders for MST */
1652 	dm_dp_create_fake_mst_encoders(adev);
1653 
1654 	/* TODO: Add_display_info? */
1655 
1656 	/* TODO use dynamic cursor width */
1657 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1658 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1659 
1660 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
1663 		goto error;
1664 	}
1665 
1667 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1668 
1669 	return 0;
1670 error:
1671 	amdgpu_dm_fini(adev);
1672 
1673 	return -EINVAL;
1674 }
1675 
1676 static int amdgpu_dm_early_fini(void *handle)
1677 {
1678 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1679 
1680 	amdgpu_dm_audio_fini(adev);
1681 
1682 	return 0;
1683 }
1684 
1685 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1686 {
1687 	int i;
1688 
1689 #if defined(CONFIG_DRM_AMD_DC_DCN)
1690 	if (adev->dm.vblank_control_workqueue) {
1691 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1692 		adev->dm.vblank_control_workqueue = NULL;
1693 	}
1694 #endif
1695 
1696 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1697 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1698 	}
1699 
1700 	amdgpu_dm_destroy_drm_device(&adev->dm);
1701 
1702 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1703 	if (adev->dm.crc_rd_wrk) {
1704 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1705 		kfree(adev->dm.crc_rd_wrk);
1706 		adev->dm.crc_rd_wrk = NULL;
1707 	}
1708 #endif
1709 #ifdef CONFIG_DRM_AMD_DC_HDCP
1710 	if (adev->dm.hdcp_workqueue) {
1711 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1712 		adev->dm.hdcp_workqueue = NULL;
1713 	}
1714 
1715 	if (adev->dm.dc)
1716 		dc_deinit_callbacks(adev->dm.dc);
1717 #endif
1718 
1719 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1720 
1721 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1722 		kfree(adev->dm.dmub_notify);
1723 		adev->dm.dmub_notify = NULL;
1724 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1725 		adev->dm.delayed_hpd_wq = NULL;
1726 	}
1727 
1728 	if (adev->dm.dmub_bo)
1729 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1730 				      &adev->dm.dmub_bo_gpu_addr,
1731 				      &adev->dm.dmub_bo_cpu_addr);
1732 
1733 	if (adev->dm.hpd_rx_offload_wq) {
1734 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1735 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1736 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1737 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1738 			}
1739 		}
1740 
1741 		kfree(adev->dm.hpd_rx_offload_wq);
1742 		adev->dm.hpd_rx_offload_wq = NULL;
1743 	}
1744 
1745 	/* DC Destroy TODO: Replace destroy DAL */
1746 	if (adev->dm.dc)
1747 		dc_destroy(&adev->dm.dc);
1748 	/*
1749 	 * TODO: pageflip, vlank interrupt
1750 	 *
1751 	 * amdgpu_dm_irq_fini(adev);
1752 	 */
1753 
1754 	if (adev->dm.cgs_device) {
1755 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1756 		adev->dm.cgs_device = NULL;
1757 	}
1758 	if (adev->dm.freesync_module) {
1759 		mod_freesync_destroy(adev->dm.freesync_module);
1760 		adev->dm.freesync_module = NULL;
1761 	}
1762 
1763 	mutex_destroy(&adev->dm.audio_lock);
1764 	mutex_destroy(&adev->dm.dc_lock);
1765 
1766 	return;
1767 }
1768 
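
/*
 * Select and load the DMCU firmware for ASICs that need it. ASICs without a
 * DMCU (or whose DMCU is handled elsewhere) return 0 without requesting any
 * firmware; PSP-loaded firmware is registered in adev->firmware.ucode[].
 */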
1769 static int load_dmcu_fw(struct amdgpu_device *adev)
1770 {
1771 	const char *fw_name_dmcu = NULL;
1772 	int r;
1773 	const struct dmcu_firmware_header_v1_0 *hdr;
1774 
	switch (adev->asic_type) {
1776 #if defined(CONFIG_DRM_AMD_DC_SI)
1777 	case CHIP_TAHITI:
1778 	case CHIP_PITCAIRN:
1779 	case CHIP_VERDE:
1780 	case CHIP_OLAND:
1781 #endif
1782 	case CHIP_BONAIRE:
1783 	case CHIP_HAWAII:
1784 	case CHIP_KAVERI:
1785 	case CHIP_KABINI:
1786 	case CHIP_MULLINS:
1787 	case CHIP_TONGA:
1788 	case CHIP_FIJI:
1789 	case CHIP_CARRIZO:
1790 	case CHIP_STONEY:
1791 	case CHIP_POLARIS11:
1792 	case CHIP_POLARIS10:
1793 	case CHIP_POLARIS12:
1794 	case CHIP_VEGAM:
1795 	case CHIP_VEGA10:
1796 	case CHIP_VEGA12:
1797 	case CHIP_VEGA20:
1798 		return 0;
1799 	case CHIP_NAVI12:
1800 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1801 		break;
1802 	case CHIP_RAVEN:
1803 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1804 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1805 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1806 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1807 		else
1808 			return 0;
1809 		break;
1810 	default:
1811 		switch (adev->ip_versions[DCE_HWIP][0]) {
1812 		case IP_VERSION(2, 0, 2):
1813 		case IP_VERSION(2, 0, 3):
1814 		case IP_VERSION(2, 0, 0):
1815 		case IP_VERSION(2, 1, 0):
1816 		case IP_VERSION(3, 0, 0):
1817 		case IP_VERSION(3, 0, 2):
1818 		case IP_VERSION(3, 0, 3):
1819 		case IP_VERSION(3, 0, 1):
1820 		case IP_VERSION(3, 1, 2):
1821 		case IP_VERSION(3, 1, 3):
1822 		case IP_VERSION(3, 1, 5):
1823 		case IP_VERSION(3, 1, 6):
1824 			return 0;
1825 		default:
1826 			break;
1827 		}
1828 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1829 		return -EINVAL;
1830 	}
1831 
1832 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1833 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1834 		return 0;
1835 	}
1836 
1837 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1838 	if (r == -ENOENT) {
1839 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1840 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1841 		adev->dm.fw_dmcu = NULL;
1842 		return 0;
1843 	}
1844 	if (r) {
1845 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1846 			fw_name_dmcu);
1847 		return r;
1848 	}
1849 
1850 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1851 	if (r) {
1852 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1853 			fw_name_dmcu);
1854 		release_firmware(adev->dm.fw_dmcu);
1855 		adev->dm.fw_dmcu = NULL;
1856 		return r;
1857 	}
1858 
1859 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1860 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1861 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1862 	adev->firmware.fw_size +=
1863 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1864 
1865 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1866 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1867 	adev->firmware.fw_size +=
1868 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1869 
1870 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1871 
1872 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1873 
1874 	return 0;
1875 }
1876 
1877 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1878 {
1879 	struct amdgpu_device *adev = ctx;
1880 
1881 	return dm_read_reg(adev->dm.dc->ctx, address);
1882 }
1883 
1884 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1885 				     uint32_t value)
1886 {
1887 	struct amdgpu_device *adev = ctx;
1888 
1889 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1890 }
1891 
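
/*
 * Software init for the DMUB service: pick the per-ASIC DMUB firmware,
 * validate it, create the dmub_srv instance, compute the region layout,
 * back the regions with a VRAM buffer, and fill in the DMUB framebuffer
 * info.
 */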
1892 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1893 {
1894 	struct dmub_srv_create_params create_params;
1895 	struct dmub_srv_region_params region_params;
1896 	struct dmub_srv_region_info region_info;
1897 	struct dmub_srv_fb_params fb_params;
1898 	struct dmub_srv_fb_info *fb_info;
1899 	struct dmub_srv *dmub_srv;
1900 	const struct dmcub_firmware_header_v1_0 *hdr;
1901 	const char *fw_name_dmub;
1902 	enum dmub_asic dmub_asic;
1903 	enum dmub_status status;
1904 	int r;
1905 
1906 	switch (adev->ip_versions[DCE_HWIP][0]) {
1907 	case IP_VERSION(2, 1, 0):
1908 		dmub_asic = DMUB_ASIC_DCN21;
1909 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1910 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1911 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1912 		break;
1913 	case IP_VERSION(3, 0, 0):
1914 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1915 			dmub_asic = DMUB_ASIC_DCN30;
1916 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1917 		} else {
1918 			dmub_asic = DMUB_ASIC_DCN30;
1919 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1920 		}
1921 		break;
1922 	case IP_VERSION(3, 0, 1):
1923 		dmub_asic = DMUB_ASIC_DCN301;
1924 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1925 		break;
1926 	case IP_VERSION(3, 0, 2):
1927 		dmub_asic = DMUB_ASIC_DCN302;
1928 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1929 		break;
1930 	case IP_VERSION(3, 0, 3):
1931 		dmub_asic = DMUB_ASIC_DCN303;
1932 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1933 		break;
1934 	case IP_VERSION(3, 1, 2):
1935 	case IP_VERSION(3, 1, 3):
1936 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1937 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1938 		break;
1939 	case IP_VERSION(3, 1, 5):
1940 		dmub_asic = DMUB_ASIC_DCN315;
1941 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1942 		break;
1943 	case IP_VERSION(3, 1, 6):
1944 		dmub_asic = DMUB_ASIC_DCN316;
1945 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1946 		break;
1947 	default:
1948 		/* ASIC doesn't support DMUB. */
1949 		return 0;
1950 	}
1951 
1952 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1953 	if (r) {
1954 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1955 		return 0;
1956 	}
1957 
1958 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1959 	if (r) {
1960 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1961 		return 0;
1962 	}
1963 
1964 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1965 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1966 
1967 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1968 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1969 			AMDGPU_UCODE_ID_DMCUB;
1970 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1971 			adev->dm.dmub_fw;
1972 		adev->firmware.fw_size +=
1973 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1974 
1975 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1976 			 adev->dm.dmcub_fw_version);
1977 	}
1978 
1980 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1981 	dmub_srv = adev->dm.dmub_srv;
1982 
1983 	if (!dmub_srv) {
1984 		DRM_ERROR("Failed to allocate DMUB service!\n");
1985 		return -ENOMEM;
1986 	}
1987 
1988 	memset(&create_params, 0, sizeof(create_params));
1989 	create_params.user_ctx = adev;
1990 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1991 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1992 	create_params.asic = dmub_asic;
1993 
1994 	/* Create the DMUB service. */
1995 	status = dmub_srv_create(dmub_srv, &create_params);
1996 	if (status != DMUB_STATUS_OK) {
1997 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1998 		return -EINVAL;
1999 	}
2000 
2001 	/* Calculate the size of all the regions for the DMUB service. */
2002 	memset(&region_params, 0, sizeof(region_params));
2003 
2004 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2005 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2006 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2007 	region_params.vbios_size = adev->bios_size;
2008 	region_params.fw_bss_data = region_params.bss_data_size ?
2009 		adev->dm.dmub_fw->data +
2010 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2011 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2012 	region_params.fw_inst_const =
2013 		adev->dm.dmub_fw->data +
2014 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2015 		PSP_HEADER_BYTES;
2016 
2017 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2018 					   &region_info);
2019 
2020 	if (status != DMUB_STATUS_OK) {
2021 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2022 		return -EINVAL;
2023 	}
2024 
2025 	/*
2026 	 * Allocate a framebuffer based on the total size of all the regions.
2027 	 * TODO: Move this into GART.
2028 	 */
2029 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2030 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2031 				    &adev->dm.dmub_bo_gpu_addr,
2032 				    &adev->dm.dmub_bo_cpu_addr);
2033 	if (r)
2034 		return r;
2035 
2036 	/* Rebase the regions on the framebuffer address. */
2037 	memset(&fb_params, 0, sizeof(fb_params));
2038 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2039 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2040 	fb_params.region_info = &region_info;
2041 
2042 	adev->dm.dmub_fb_info =
2043 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2044 	fb_info = adev->dm.dmub_fb_info;
2045 
2046 	if (!fb_info) {
2047 		DRM_ERROR(
2048 			"Failed to allocate framebuffer info for DMUB service!\n");
2049 		return -ENOMEM;
2050 	}
2051 
2052 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2053 	if (status != DMUB_STATUS_OK) {
2054 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2055 		return -EINVAL;
2056 	}
2057 
2058 	return 0;
2059 }
2060 
2061 static int dm_sw_init(void *handle)
2062 {
2063 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2064 	int r;
2065 
2066 	r = dm_dmub_sw_init(adev);
2067 	if (r)
2068 		return r;
2069 
2070 	return load_dmcu_fw(adev);
2071 }
2072 
2073 static int dm_sw_fini(void *handle)
2074 {
2075 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2076 
2077 	kfree(adev->dm.dmub_fb_info);
2078 	adev->dm.dmub_fb_info = NULL;
2079 
2080 	if (adev->dm.dmub_srv) {
2081 		dmub_srv_destroy(adev->dm.dmub_srv);
2082 		adev->dm.dmub_srv = NULL;
2083 	}
2084 
2085 	release_firmware(adev->dm.dmub_fw);
2086 	adev->dm.dmub_fw = NULL;
2087 
2088 	release_firmware(adev->dm.fw_dmcu);
2089 	adev->dm.fw_dmcu = NULL;
2090 
2091 	return 0;
2092 }
2093 
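
/*
 * Start MST topology management on every connector whose DC link was
 * detected as an MST branch.
 */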
2094 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2095 {
2096 	struct amdgpu_dm_connector *aconnector;
2097 	struct drm_connector *connector;
2098 	struct drm_connector_list_iter iter;
2099 	int ret = 0;
2100 
2101 	drm_connector_list_iter_begin(dev, &iter);
2102 	drm_for_each_connector_iter(connector, &iter) {
2103 		aconnector = to_amdgpu_dm_connector(connector);
2104 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2105 		    aconnector->mst_mgr.aux) {
2106 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2107 					 aconnector,
2108 					 aconnector->base.base.id);
2109 
2110 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2111 			if (ret < 0) {
2112 				DRM_ERROR("DM_MST: Failed to start MST\n");
2113 				aconnector->dc_link->type =
2114 					dc_connection_single;
2115 				break;
2116 			}
2117 		}
2118 	}
2119 	drm_connector_list_iter_end(&iter);
2120 
2121 	return ret;
2122 }
2123 
2124 static int dm_late_init(void *handle)
2125 {
2126 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2127 
2128 	struct dmcu_iram_parameters params;
2129 	unsigned int linear_lut[16];
2130 	int i;
2131 	struct dmcu *dmcu = NULL;
2132 
2133 	dmcu = adev->dm.dc->res_pool->dmcu;
2134 
2135 	for (i = 0; i < 16; i++)
2136 		linear_lut[i] = 0xFFFF * i / 15;
2137 
2138 	params.set = 0;
2139 	params.backlight_ramping_override = false;
2140 	params.backlight_ramping_start = 0xCCCC;
2141 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2142 	params.backlight_lut_array_size = 16;
2143 	params.backlight_lut_array = linear_lut;
2144 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
2148 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on dmcub,
	 * the dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
2153 	if (dmcu) {
2154 		if (!dmcu_load_iram(dmcu, params))
2155 			return -EINVAL;
2156 	} else if (adev->dm.dc->ctx->dmub_srv) {
2157 		struct dc_link *edp_links[MAX_NUM_EDP];
2158 		int edp_num;
2159 
2160 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2161 		for (i = 0; i < edp_num; i++) {
2162 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2163 				return -EINVAL;
2164 		}
2165 	}
2166 
2167 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2168 }
2169 
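
/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, MST is torn down on it and a hotplug event
 * is sent so userspace can re-probe.
 */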
2170 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2171 {
2172 	struct amdgpu_dm_connector *aconnector;
2173 	struct drm_connector *connector;
2174 	struct drm_connector_list_iter iter;
2175 	struct drm_dp_mst_topology_mgr *mgr;
2176 	int ret;
2177 	bool need_hotplug = false;
2178 
2179 	drm_connector_list_iter_begin(dev, &iter);
2180 	drm_for_each_connector_iter(connector, &iter) {
2181 		aconnector = to_amdgpu_dm_connector(connector);
2182 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2183 		    aconnector->mst_port)
2184 			continue;
2185 
2186 		mgr = &aconnector->mst_mgr;
2187 
2188 		if (suspend) {
2189 			drm_dp_mst_topology_mgr_suspend(mgr);
2190 		} else {
2191 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2192 			if (ret < 0) {
2193 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2194 				need_hotplug = true;
2195 			}
2196 		}
2197 	}
2198 	drm_connector_list_iter_end(&iter);
2199 
2200 	if (need_hotplug)
2201 		drm_kms_helper_hotplug_event(dev);
2202 }
2203 
2204 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2205 {
2206 	int ret = 0;
2207 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to Navi10/12/14 but not Renoir.
	 */
2238 	switch (adev->ip_versions[DCE_HWIP][0]) {
2239 	case IP_VERSION(2, 0, 2):
2240 	case IP_VERSION(2, 0, 0):
2241 		break;
2242 	default:
2243 		return 0;
2244 	}
2245 
2246 	ret = amdgpu_dpm_write_watermarks_table(adev);
2247 	if (ret) {
2248 		DRM_ERROR("Failed to update WMTABLE!\n");
2249 		return ret;
2250 	}
2251 
2252 	return 0;
2253 }
2254 
2255 /**
2256  * dm_hw_init() - Initialize DC device
2257  * @handle: The base driver device containing the amdgpu_dm device.
2258  *
2259  * Initialize the &struct amdgpu_display_manager device. This involves calling
2260  * the initializers of each DM component, then populating the struct with them.
2261  *
2262  * Although the function implies hardware initialization, both hardware and
2263  * software are initialized here. Splitting them out to their relevant init
2264  * hooks is a future TODO item.
2265  *
2266  * Some notable things that are initialized here:
2267  *
2268  * - Display Core, both software and hardware
2269  * - DC modules that we need (freesync and color management)
2270  * - DRM software states
2271  * - Interrupt sources and handlers
2272  * - Vblank support
2273  * - Debug FS entries, if enabled
2274  */
2275 static int dm_hw_init(void *handle)
2276 {
2277 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2278 	/* Create DAL display manager */
2279 	amdgpu_dm_init(adev);
2280 	amdgpu_dm_hpd_init(adev);
2281 
2282 	return 0;
2283 }
2284 
2285 /**
2286  * dm_hw_fini() - Teardown DC device
2287  * @handle: The base driver device containing the amdgpu_dm device.
2288  *
2289  * Teardown components within &struct amdgpu_display_manager that require
2290  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2291  * were loaded. Also flush IRQ workqueues and disable them.
2292  */
2293 static int dm_hw_fini(void *handle)
2294 {
2295 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2296 
2297 	amdgpu_dm_hpd_fini(adev);
2298 
2299 	amdgpu_dm_irq_fini(adev);
2300 	amdgpu_dm_fini(adev);
2301 	return 0;
2302 }
2303 
2304 
2305 static int dm_enable_vblank(struct drm_crtc *crtc);
2306 static void dm_disable_vblank(struct drm_crtc *crtc);
2307 
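/*
 * Enable or disable pageflip and vblank interrupts for every CRTC that has
 * an active stream in the given DC state. Used around GPU reset to quiesce
 * and then restore display interrupts.
 */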
2308 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2309 				 struct dc_state *state, bool enable)
2310 {
2311 	enum dc_irq_source irq_source;
2312 	struct amdgpu_crtc *acrtc;
2313 	int rc = -EBUSY;
2314 	int i = 0;
2315 
2316 	for (i = 0; i < state->stream_count; i++) {
2317 		acrtc = get_crtc_by_otg_inst(
2318 				adev, state->stream_status[i].primary_otg_inst);
2319 
2320 		if (acrtc && state->stream_status[i].plane_count != 0) {
2321 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2322 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2324 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2325 			if (rc)
2326 				DRM_WARN("Failed to %s pflip interrupts\n",
2327 					 enable ? "enable" : "disable");
2328 
2329 			if (enable) {
2330 				rc = dm_enable_vblank(&acrtc->base);
2331 				if (rc)
2332 					DRM_WARN("Failed to enable vblank interrupts\n");
2333 			} else {
2334 				dm_disable_vblank(&acrtc->base);
2335 			}
2336 
2337 		}
2338 	}
2340 }
2341 
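/*
 * Commit a DC state with all streams (and their planes) removed. Used on
 * suspend during GPU reset so hardware is left with no active streams.
 */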
2342 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2343 {
2344 	struct dc_state *context = NULL;
2345 	enum dc_status res = DC_ERROR_UNEXPECTED;
2346 	int i;
2347 	struct dc_stream_state *del_streams[MAX_PIPES];
2348 	int del_streams_count = 0;
2349 
2350 	memset(del_streams, 0, sizeof(del_streams));
2351 
2352 	context = dc_create_state(dc);
2353 	if (context == NULL)
2354 		goto context_alloc_fail;
2355 
2356 	dc_resource_state_copy_construct_current(dc, context);
2357 
2358 	/* First remove from context all streams */
2359 	for (i = 0; i < context->stream_count; i++) {
2360 		struct dc_stream_state *stream = context->streams[i];
2361 
2362 		del_streams[del_streams_count++] = stream;
2363 	}
2364 
2365 	/* Remove all planes for removed streams and then remove the streams */
2366 	for (i = 0; i < del_streams_count; i++) {
2367 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2368 			res = DC_FAIL_DETACH_SURFACES;
2369 			goto fail;
2370 		}
2371 
2372 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2373 		if (res != DC_OK)
2374 			goto fail;
2375 	}
2376 
2377 	res = dc_commit_state(dc, context);
2378 
2379 fail:
2380 	dc_release_state(context);
2381 
2382 context_alloc_fail:
2383 	return res;
2384 }
2385 
2386 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2387 {
2388 	int i;
2389 
2390 	if (dm->hpd_rx_offload_wq) {
2391 		for (i = 0; i < dm->dc->caps.max_links; i++)
2392 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2393 	}
2394 }
2395 
2396 static int dm_suspend(void *handle)
2397 {
2398 	struct amdgpu_device *adev = handle;
2399 	struct amdgpu_display_manager *dm = &adev->dm;
2400 	int ret = 0;
2401 
2402 	if (amdgpu_in_reset(adev)) {
2403 		mutex_lock(&dm->dc_lock);
2404 
2405 #if defined(CONFIG_DRM_AMD_DC_DCN)
2406 		dc_allow_idle_optimizations(adev->dm.dc, false);
2407 #endif
2408 
2409 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2410 
2411 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2412 
2413 		amdgpu_dm_commit_zero_streams(dm->dc);
2414 
2415 		amdgpu_dm_irq_suspend(adev);
2416 
2417 		hpd_rx_irq_work_suspend(dm);
2418 
2419 		return ret;
2420 	}
2421 
2422 	WARN_ON(adev->dm.cached_state);
2423 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2424 
2425 	s3_handle_mst(adev_to_drm(adev), true);
2426 
2427 	amdgpu_dm_irq_suspend(adev);
2428 
2429 	hpd_rx_irq_work_suspend(dm);
2430 
2431 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2432 
2433 	return 0;
2434 }
2435 
2436 struct amdgpu_dm_connector *
2437 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2438 					     struct drm_crtc *crtc)
2439 {
2440 	uint32_t i;
2441 	struct drm_connector_state *new_con_state;
2442 	struct drm_connector *connector;
2443 	struct drm_crtc *crtc_from_state;
2444 
2445 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2446 		crtc_from_state = new_con_state->crtc;
2447 
2448 		if (crtc_from_state == crtc)
2449 			return to_amdgpu_dm_connector(connector);
2450 	}
2451 
2452 	return NULL;
2453 }
2454 
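/*
 * Emulate link detection when a connector is forced on but nothing is
 * physically connected: create a fake sink matching the connector signal
 * type and try to read a locally stored EDID.
 */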
2455 static void emulated_link_detect(struct dc_link *link)
2456 {
2457 	struct dc_sink_init_data sink_init_data = { 0 };
2458 	struct display_sink_capability sink_caps = { 0 };
2459 	enum dc_edid_status edid_status;
2460 	struct dc_context *dc_ctx = link->ctx;
2461 	struct dc_sink *sink = NULL;
2462 	struct dc_sink *prev_sink = NULL;
2463 
2464 	link->type = dc_connection_none;
2465 	prev_sink = link->local_sink;
2466 
2467 	if (prev_sink)
2468 		dc_sink_release(prev_sink);
2469 
2470 	switch (link->connector_signal) {
2471 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2472 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2473 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2474 		break;
2475 	}
2476 
2477 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2478 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2479 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2480 		break;
2481 	}
2482 
2483 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2484 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2485 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2486 		break;
2487 	}
2488 
2489 	case SIGNAL_TYPE_LVDS: {
2490 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2491 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2492 		break;
2493 	}
2494 
2495 	case SIGNAL_TYPE_EDP: {
2496 		sink_caps.transaction_type =
2497 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2498 		sink_caps.signal = SIGNAL_TYPE_EDP;
2499 		break;
2500 	}
2501 
2502 	case SIGNAL_TYPE_DISPLAY_PORT: {
2503 		sink_caps.transaction_type =
2504 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2505 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2506 		break;
2507 	}
2508 
2509 	default:
2510 		DC_ERROR("Invalid connector type! signal:%d\n",
2511 			link->connector_signal);
2512 		return;
2513 	}
2514 
2515 	sink_init_data.link = link;
2516 	sink_init_data.sink_signal = sink_caps.signal;
2517 
2518 	sink = dc_sink_create(&sink_init_data);
2519 	if (!sink) {
2520 		DC_ERROR("Failed to create sink!\n");
2521 		return;
2522 	}
2523 
2524 	/* dc_sink_create returns a new reference */
2525 	link->local_sink = sink;
2526 
2527 	edid_status = dm_helpers_read_local_edid(
2528 			link->ctx,
2529 			link,
2530 			sink);
2531 
2532 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
}
2536 
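/*
 * After GPU reset, re-commit the streams in the given (cached) DC state with
 * all of their surfaces flagged for a full update so hardware state is fully
 * reprogrammed.
 */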
2537 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2538 				     struct amdgpu_display_manager *dm)
2539 {
2540 	struct {
2541 		struct dc_surface_update surface_updates[MAX_SURFACES];
2542 		struct dc_plane_info plane_infos[MAX_SURFACES];
2543 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2544 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2545 		struct dc_stream_update stream_update;
	} *bundle;
2547 	int k, m;
2548 
2549 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2550 
2551 	if (!bundle) {
2552 		dm_error("Failed to allocate update bundle\n");
2553 		goto cleanup;
2554 	}
2555 
2556 	for (k = 0; k < dc_state->stream_count; k++) {
2557 		bundle->stream_update.stream = dc_state->streams[k];
2558 
2559 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2560 			bundle->surface_updates[m].surface =
2561 				dc_state->stream_status->plane_states[m];
2562 			bundle->surface_updates[m].surface->force_full_update =
2563 				true;
2564 		}
2565 		dc_commit_updates_for_stream(
2566 			dm->dc, bundle->surface_updates,
2567 			dc_state->stream_status->plane_count,
2568 			dc_state->streams[k], &bundle->stream_update, dc_state);
2569 	}
2570 
2571 cleanup:
2572 	kfree(bundle);
2573 
2574 	return;
2575 }
2576 
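/*
 * Force DPMS off on the stream currently associated with the given link by
 * committing a dpms_off stream update under the dc_lock.
 */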
2577 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2578 {
2579 	struct dc_stream_state *stream_state;
2580 	struct amdgpu_dm_connector *aconnector = link->priv;
2581 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2582 	struct dc_stream_update stream_update;
2583 	bool dpms_off = true;
2584 
2585 	memset(&stream_update, 0, sizeof(stream_update));
2586 	stream_update.dpms_off = &dpms_off;
2587 
2588 	mutex_lock(&adev->dm.dc_lock);
2589 	stream_state = dc_stream_find_from_link(link);
2590 
2591 	if (stream_state == NULL) {
2592 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2593 		mutex_unlock(&adev->dm.dc_lock);
2594 		return;
2595 	}
2596 
2597 	stream_update.stream = stream_state;
2598 	acrtc_state->force_dpms_off = true;
2599 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2600 				     stream_state, &stream_update,
2601 				     stream_state->ctx->dc->current_state);
2602 	mutex_unlock(&adev->dm.dc_lock);
2603 }
2604 
2605 static int dm_resume(void *handle)
2606 {
2607 	struct amdgpu_device *adev = handle;
2608 	struct drm_device *ddev = adev_to_drm(adev);
2609 	struct amdgpu_display_manager *dm = &adev->dm;
2610 	struct amdgpu_dm_connector *aconnector;
2611 	struct drm_connector *connector;
2612 	struct drm_connector_list_iter iter;
2613 	struct drm_crtc *crtc;
2614 	struct drm_crtc_state *new_crtc_state;
2615 	struct dm_crtc_state *dm_new_crtc_state;
2616 	struct drm_plane *plane;
2617 	struct drm_plane_state *new_plane_state;
2618 	struct dm_plane_state *dm_new_plane_state;
2619 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2620 	enum dc_connection_type new_connection_type = dc_connection_none;
2621 	struct dc_state *dc_state;
2622 	int i, r, j;
2623 
2624 	if (amdgpu_in_reset(adev)) {
2625 		dc_state = dm->cached_dc_state;
2626 
2627 		/*
2628 		 * The dc->current_state is backed up into dm->cached_dc_state
2629 		 * before we commit 0 streams.
2630 		 *
2631 		 * DC will clear link encoder assignments on the real state
2632 		 * but the changes won't propagate over to the copy we made
2633 		 * before the 0 streams commit.
2634 		 *
2635 		 * DC expects that link encoder assignments are *not* valid
2636 		 * when committing a state, so as a workaround it needs to be
2637 		 * cleared here.
2638 		 */
2639 		link_enc_cfg_init(dm->dc, dc_state);
2640 
2641 		if (dc_enable_dmub_notifications(adev->dm.dc))
2642 			amdgpu_dm_outbox_init(adev);
2643 
2644 		r = dm_dmub_hw_init(adev);
2645 		if (r)
2646 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2647 
2648 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2649 		dc_resume(dm->dc);
2650 
2651 		amdgpu_dm_irq_resume_early(adev);
2652 
2653 		for (i = 0; i < dc_state->stream_count; i++) {
2654 			dc_state->streams[i]->mode_changed = true;
2655 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2656 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2657 					= 0xffffffff;
2658 			}
2659 		}
2660 
2661 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2662 
2663 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2664 
2665 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2666 
2667 		dc_release_state(dm->cached_dc_state);
2668 		dm->cached_dc_state = NULL;
2669 
2670 		amdgpu_dm_irq_resume_late(adev);
2671 
2672 		mutex_unlock(&dm->dc_lock);
2673 
2674 		return 0;
2675 	}
2676 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2677 	dc_release_state(dm_state->context);
2678 	dm_state->context = dc_create_state(dm->dc);
2679 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2680 	dc_resource_state_construct(dm->dc, dm_state->context);
2681 
2682 	/* Re-enable outbox interrupts for DPIA. */
2683 	if (dc_enable_dmub_notifications(adev->dm.dc))
2684 		amdgpu_dm_outbox_init(adev);
2685 
2686 	/* Before powering on DC we need to re-initialize DMUB. */
2687 	dm_dmub_hw_resume(adev);
2688 
2689 	/* power on hardware */
2690 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2691 
2692 	/* program HPD filter */
2693 	dc_resume(dm->dc);
2694 
2695 	/*
2696 	 * early enable HPD Rx IRQ, should be done before set mode as short
2697 	 * pulse interrupts are used for MST
2698 	 */
2699 	amdgpu_dm_irq_resume_early(adev);
2700 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2702 	s3_handle_mst(ddev, false);
2703 
	/* Do detection */
2705 	drm_connector_list_iter_begin(ddev, &iter);
2706 	drm_for_each_connector_iter(connector, &iter) {
2707 		aconnector = to_amdgpu_dm_connector(connector);
2708 
2709 		/*
2710 		 * this is the case when traversing through already created
2711 		 * MST connectors, should be skipped
2712 		 */
2713 		if (aconnector->mst_port)
2714 			continue;
2715 
2716 		mutex_lock(&aconnector->hpd_lock);
2717 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2718 			DRM_ERROR("KMS: Failed to detect connector\n");
2719 
2720 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2721 			emulated_link_detect(aconnector->dc_link);
2722 		else
2723 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2724 
2725 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2726 			aconnector->fake_enable = false;
2727 
2728 		if (aconnector->dc_sink)
2729 			dc_sink_release(aconnector->dc_sink);
2730 		aconnector->dc_sink = NULL;
2731 		amdgpu_dm_update_connector_after_detect(aconnector);
2732 		mutex_unlock(&aconnector->hpd_lock);
2733 	}
2734 	drm_connector_list_iter_end(&iter);
2735 
2736 	/* Force mode set in atomic commit */
2737 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2738 		new_crtc_state->active_changed = true;
2739 
2740 	/*
2741 	 * atomic_check is expected to create the dc states. We need to release
2742 	 * them here, since they were duplicated as part of the suspend
2743 	 * procedure.
2744 	 */
2745 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2746 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2747 		if (dm_new_crtc_state->stream) {
2748 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2749 			dc_stream_release(dm_new_crtc_state->stream);
2750 			dm_new_crtc_state->stream = NULL;
2751 		}
2752 	}
2753 
2754 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2755 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2756 		if (dm_new_plane_state->dc_state) {
2757 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2758 			dc_plane_state_release(dm_new_plane_state->dc_state);
2759 			dm_new_plane_state->dc_state = NULL;
2760 		}
2761 	}
2762 
2763 	drm_atomic_helper_resume(ddev, dm->cached_state);
2764 
2765 	dm->cached_state = NULL;
2766 
2767 	amdgpu_dm_irq_resume_late(adev);
2768 
2769 	amdgpu_dm_smu_write_watermarks_table(adev);
2770 
2771 	return 0;
2772 }
2773 
2774 /**
2775  * DOC: DM Lifecycle
2776  *
2777  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2778  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2779  * the base driver's device list to be initialized and torn down accordingly.
2780  *
2781  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2782  */
2783 
2784 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2785 	.name = "dm",
2786 	.early_init = dm_early_init,
2787 	.late_init = dm_late_init,
2788 	.sw_init = dm_sw_init,
2789 	.sw_fini = dm_sw_fini,
2790 	.early_fini = amdgpu_dm_early_fini,
2791 	.hw_init = dm_hw_init,
2792 	.hw_fini = dm_hw_fini,
2793 	.suspend = dm_suspend,
2794 	.resume = dm_resume,
2795 	.is_idle = dm_is_idle,
2796 	.wait_for_idle = dm_wait_for_idle,
2797 	.check_soft_reset = dm_check_soft_reset,
2798 	.soft_reset = dm_soft_reset,
2799 	.set_clockgating_state = dm_set_clockgating_state,
2800 	.set_powergating_state = dm_set_powergating_state,
2801 };
2802 
2803 const struct amdgpu_ip_block_version dm_ip_block =
2804 {
2805 	.type = AMD_IP_BLOCK_TYPE_DCE,
2806 	.major = 1,
2807 	.minor = 0,
2808 	.rev = 0,
2809 	.funcs = &amdgpu_dm_funcs,
2810 };
2811 
2812 
2813 /**
2814  * DOC: atomic
2815  *
2816  * *WIP*
2817  */
2818 
2819 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2820 	.fb_create = amdgpu_display_user_framebuffer_create,
2821 	.get_format_info = amd_get_format_info,
2822 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2823 	.atomic_check = amdgpu_dm_atomic_check,
2824 	.atomic_commit = drm_atomic_helper_commit,
2825 };
2826 
2827 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2828 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2829 };
2830 
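/*
 * Cache eDP backlight capabilities from the sink's extended DPCD caps and
 * HDR metadata: decide whether AUX backlight control should be used and
 * convert the max/min content light levels into input signal limits.
 */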
2831 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2832 {
2833 	u32 max_cll, min_cll, max, min, q, r;
2834 	struct amdgpu_dm_backlight_caps *caps;
2835 	struct amdgpu_display_manager *dm;
2836 	struct drm_connector *conn_base;
2837 	struct amdgpu_device *adev;
2838 	struct dc_link *link = NULL;
2839 	static const u8 pre_computed_values[] = {
2840 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2841 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2842 	int i;
2843 
2844 	if (!aconnector || !aconnector->dc_link)
2845 		return;
2846 
2847 	link = aconnector->dc_link;
2848 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2849 		return;
2850 
2851 	conn_base = &aconnector->base;
2852 	adev = drm_to_adev(conn_base->dev);
2853 	dm = &adev->dm;
2854 	for (i = 0; i < dm->num_of_edps; i++) {
2855 		if (link == dm->backlight_link[i])
2856 			break;
2857 	}
2858 	if (i >= dm->num_of_edps)
2859 		return;
2860 	caps = &dm->backlight_caps[i];
2861 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2862 	caps->aux_support = false;
2863 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2864 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2865 
2866 	if (caps->ext_caps->bits.oled == 1 /*||
2867 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2868 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2869 		caps->aux_support = true;
2870 
2871 	if (amdgpu_backlight == 0)
2872 		caps->aux_support = false;
2873 	else if (amdgpu_backlight == 1)
2874 		caps->aux_support = true;
2875 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need floating-point
	 * precision; to avoid this complexity level, we take advantage of the
	 * fact that CV is divided by a constant. From Euclid's division
	 * algorithm, we know that CV can be written as: CV = 32*q + r. Next,
	 * we replace CV in the Luminance expression and get
	 * 50*(2**q)*(2**(r/32)), hence we just need to pre-compute the value
	 * of r/32. For pre-computing the values we just used the following
	 * Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified at
	 * pre_computed_values.
	 */
2891 	q = max_cll >> 5;
2892 	r = max_cll % 32;
2893 	max = (1 << q) * pre_computed_values[r];
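	/* Illustrative example: max_cll = 65 gives q = 2, r = 1, so
	 * max = (1 << 2) * 51 = 204, matching 50 * 2^(65/32) ~= 204 nits.
	 */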
2894 
2895 	// min luminance: maxLum * (CV/255)^2 / 100
2896 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2897 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2898 
2899 	caps->aux_max_input_signal = max;
2900 	caps->aux_min_input_signal = min;
2901 }
2902 
2903 void amdgpu_dm_update_connector_after_detect(
2904 		struct amdgpu_dm_connector *aconnector)
2905 {
2906 	struct drm_connector *connector = &aconnector->base;
2907 	struct drm_device *dev = connector->dev;
2908 	struct dc_sink *sink;
2909 
2910 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2912 		return;
2913 
2914 	sink = aconnector->dc_link->local_sink;
2915 	if (sink)
2916 		dc_sink_retain(sink);
2917 
2918 	/*
2919 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2920 	 * the connector sink is set to either fake or physical sink depends on link status.
2921 	 * Skip if already done during boot.
2922 	 */
2923 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2924 			&& aconnector->dc_em_sink) {
2925 
2926 		/*
2927 		 * For S3 resume with headless use eml_sink to fake stream
2928 		 * because on resume connector->sink is set to NULL
2929 		 */
2930 		mutex_lock(&dev->mode_config.mutex);
2931 
2932 		if (sink) {
2933 			if (aconnector->dc_sink) {
2934 				amdgpu_dm_update_freesync_caps(connector, NULL);
2935 				/*
2936 				 * retain and release below are used to
2937 				 * bump up refcount for sink because the link doesn't point
2938 				 * to it anymore after disconnect, so on next crtc to connector
2939 				 * reshuffle by UMD we will get into unwanted dc_sink release
2940 				 */
2941 				dc_sink_release(aconnector->dc_sink);
2942 			}
2943 			aconnector->dc_sink = sink;
2944 			dc_sink_retain(aconnector->dc_sink);
2945 			amdgpu_dm_update_freesync_caps(connector,
2946 					aconnector->edid);
2947 		} else {
2948 			amdgpu_dm_update_freesync_caps(connector, NULL);
2949 			if (!aconnector->dc_sink) {
2950 				aconnector->dc_sink = aconnector->dc_em_sink;
2951 				dc_sink_retain(aconnector->dc_sink);
2952 			}
2953 		}
2954 
2955 		mutex_unlock(&dev->mode_config.mutex);
2956 
2957 		if (sink)
2958 			dc_sink_release(sink);
2959 		return;
2960 	}
2961 
2962 	/*
2963 	 * TODO: temporary guard to look for proper fix
2964 	 * if this sink is MST sink, we should not do anything
2965 	 */
2966 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2967 		dc_sink_release(sink);
2968 		return;
2969 	}
2970 
2971 	if (aconnector->dc_sink == sink) {
2972 		/*
2973 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2974 		 * Do nothing!!
2975 		 */
2976 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2977 				aconnector->connector_id);
2978 		if (sink)
2979 			dc_sink_release(sink);
2980 		return;
2981 	}
2982 
2983 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2984 		aconnector->connector_id, aconnector->dc_sink, sink);
2985 
2986 	mutex_lock(&dev->mode_config.mutex);
2987 
2988 	/*
2989 	 * 1. Update status of the drm connector
2990 	 * 2. Send an event and let userspace tell us what to do
2991 	 */
2992 	if (sink) {
2993 		/*
2994 		 * TODO: check if we still need the S3 mode update workaround.
2995 		 * If yes, put it here.
2996 		 */
2997 		if (aconnector->dc_sink) {
2998 			amdgpu_dm_update_freesync_caps(connector, NULL);
2999 			dc_sink_release(aconnector->dc_sink);
3000 		}
3001 
3002 		aconnector->dc_sink = sink;
3003 		dc_sink_retain(aconnector->dc_sink);
3004 		if (sink->dc_edid.length == 0) {
3005 			aconnector->edid = NULL;
3006 			if (aconnector->dc_link->aux_mode) {
3007 				drm_dp_cec_unset_edid(
3008 					&aconnector->dm_dp_aux.aux);
3009 			}
3010 		} else {
3011 			aconnector->edid =
3012 				(struct edid *)sink->dc_edid.raw_edid;
3013 
3014 			if (aconnector->dc_link->aux_mode)
3015 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3016 						    aconnector->edid);
3017 		}
3018 
3019 		drm_connector_update_edid_property(connector, aconnector->edid);
3020 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3021 		update_connector_ext_caps(aconnector);
3022 	} else {
3023 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3024 		amdgpu_dm_update_freesync_caps(connector, NULL);
3025 		drm_connector_update_edid_property(connector, NULL);
3026 		aconnector->num_modes = 0;
3027 		dc_sink_release(aconnector->dc_sink);
3028 		aconnector->dc_sink = NULL;
3029 		aconnector->edid = NULL;
3030 #ifdef CONFIG_DRM_AMD_DC_HDCP
3031 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3032 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3033 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3034 #endif
3035 	}
3036 
3037 	mutex_unlock(&dev->mode_config.mutex);
3038 
3039 	update_subconnector_property(aconnector);
3040 
3041 	if (sink)
3042 		dc_sink_release(sink);
3043 }
3044 
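/*
 * Core HPD (hot plug detect) handling: re-run sink detection on the link,
 * update the connector state accordingly and, when the connector is not
 * forced, notify userspace with a hotplug event.
 */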
3045 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3046 {
3047 	struct drm_connector *connector = &aconnector->base;
3048 	struct drm_device *dev = connector->dev;
3049 	enum dc_connection_type new_connection_type = dc_connection_none;
3050 	struct amdgpu_device *adev = drm_to_adev(dev);
3051 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3052 	struct dm_crtc_state *dm_crtc_state = NULL;
3053 
3054 	if (adev->dm.disable_hpd_irq)
3055 		return;
3056 
3057 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3058 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3059 					dm_con_state->base.state,
3060 					dm_con_state->base.crtc));
3061 	/*
3062 	 * In case of failure or MST no need to update connector status or notify the OS
3063 	 * since (for MST case) MST does this in its own context.
3064 	 */
3065 	mutex_lock(&aconnector->hpd_lock);
3066 
3067 #ifdef CONFIG_DRM_AMD_DC_HDCP
3068 	if (adev->dm.hdcp_workqueue) {
3069 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3070 		dm_con_state->update_hdcp = true;
3071 	}
3072 #endif
3073 	if (aconnector->fake_enable)
3074 		aconnector->fake_enable = false;
3075 
3076 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3077 		DRM_ERROR("KMS: Failed to detect connector\n");
3078 
3079 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3080 		emulated_link_detect(aconnector->dc_link);
3081 
3082 		drm_modeset_lock_all(dev);
3083 		dm_restore_drm_connector_state(dev, connector);
3084 		drm_modeset_unlock_all(dev);
3085 
3086 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3087 			drm_kms_helper_connector_hotplug_event(connector);
3088 
3089 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3090 		if (new_connection_type == dc_connection_none &&
3091 		    aconnector->dc_link->type == dc_connection_none &&
3092 		    dm_crtc_state)
3093 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3094 
3095 		amdgpu_dm_update_connector_after_detect(aconnector);
3096 
3097 		drm_modeset_lock_all(dev);
3098 		dm_restore_drm_connector_state(dev, connector);
3099 		drm_modeset_unlock_all(dev);
3100 
3101 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3102 			drm_kms_helper_connector_hotplug_event(connector);
3103 	}
3104 	mutex_unlock(&aconnector->hpd_lock);
3105 
3106 }
3107 
3108 static void handle_hpd_irq(void *param)
3109 {
3110 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3111 
3112 	handle_hpd_irq_helper(aconnector);
3113 
3114 }
3115 
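/*
 * Drain MST sideband messages signalled via the ESI bytes: read the ESI,
 * let the MST manager handle the IRQ, ACK it back to the sink, and repeat
 * until no new IRQ is pending (bounded by max_process_count).
 */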
3116 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3117 {
3118 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3119 	uint8_t dret;
3120 	bool new_irq_handled = false;
3121 	int dpcd_addr;
3122 	int dpcd_bytes_to_read;
3123 
3124 	const int max_process_count = 30;
3125 	int process_count = 0;
3126 
3127 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3128 
3129 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3130 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3131 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3132 		dpcd_addr = DP_SINK_COUNT;
3133 	} else {
3134 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3135 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3136 		dpcd_addr = DP_SINK_COUNT_ESI;
3137 	}
3138 
3139 	dret = drm_dp_dpcd_read(
3140 		&aconnector->dm_dp_aux.aux,
3141 		dpcd_addr,
3142 		esi,
3143 		dpcd_bytes_to_read);
3144 
3145 	while (dret == dpcd_bytes_to_read &&
3146 		process_count < max_process_count) {
3147 		uint8_t retry;
3148 		dret = 0;
3149 
3150 		process_count++;
3151 
3152 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3153 		/* handle HPD short pulse irq */
3154 		if (aconnector->mst_mgr.mst_state)
3155 			drm_dp_mst_hpd_irq(
3156 				&aconnector->mst_mgr,
3157 				esi,
3158 				&new_irq_handled);
3159 
3160 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3162 			const int ack_dpcd_bytes_to_write =
3163 				dpcd_bytes_to_read - 1;
3164 
3165 			for (retry = 0; retry < 3; retry++) {
3166 				uint8_t wret;
3167 
3168 				wret = drm_dp_dpcd_write(
3169 					&aconnector->dm_dp_aux.aux,
3170 					dpcd_addr + 1,
3171 					&esi[1],
3172 					ack_dpcd_bytes_to_write);
3173 				if (wret == ack_dpcd_bytes_to_write)
3174 					break;
3175 			}
3176 
3177 			/* check if there is new irq to be handled */
3178 			dret = drm_dp_dpcd_read(
3179 				&aconnector->dm_dp_aux.aux,
3180 				dpcd_addr,
3181 				esi,
3182 				dpcd_bytes_to_read);
3183 
3184 			new_irq_handled = false;
3185 		} else {
3186 			break;
3187 		}
3188 	}
3189 
3190 	if (process_count == max_process_count)
3191 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3192 }
3193 
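/*
 * Queue a work item on the per-link HPD RX offload workqueue so the heavier
 * parts of HPD RX handling run outside the interrupt handler.
 */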
3194 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3195 							union hpd_irq_data hpd_irq_data)
3196 {
3197 	struct hpd_rx_irq_offload_work *offload_work =
3198 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3199 
3200 	if (!offload_work) {
3201 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3202 		return;
3203 	}
3204 
3205 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3206 	offload_work->data = hpd_irq_data;
3207 	offload_work->offload_wq = offload_wq;
3208 
3209 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queued work to handle hpd_rx offload work\n");
3211 }
3212 
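/*
 * HPD RX (short pulse) interrupt handling: let DC process the IRQ data,
 * offload automated test and link loss handling to the per-link workqueue,
 * service MST sideband messages and, for non-MST links, re-run detection
 * when the downstream port status changed.
 */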
3213 static void handle_hpd_rx_irq(void *param)
3214 {
3215 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3216 	struct drm_connector *connector = &aconnector->base;
3217 	struct drm_device *dev = connector->dev;
3218 	struct dc_link *dc_link = aconnector->dc_link;
3219 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3220 	bool result = false;
3221 	enum dc_connection_type new_connection_type = dc_connection_none;
3222 	struct amdgpu_device *adev = drm_to_adev(dev);
3223 	union hpd_irq_data hpd_irq_data;
3224 	bool link_loss = false;
3225 	bool has_left_work = false;
3226 	int idx = aconnector->base.index;
3227 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3228 
3229 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3230 
3231 	if (adev->dm.disable_hpd_irq)
3232 		return;
3233 
3234 	/*
3235 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3236 	 * conflict, after implement i2c helper, this mutex should be
3237 	 * retired.
3238 	 */
3239 	mutex_lock(&aconnector->hpd_lock);
3240 
3241 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3242 						&link_loss, true, &has_left_work);
3243 
3244 	if (!has_left_work)
3245 		goto out;
3246 
3247 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3248 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3249 		goto out;
3250 	}
3251 
3252 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3253 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3254 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3255 			dm_handle_mst_sideband_msg(aconnector);
3256 			goto out;
3257 		}
3258 
3259 		if (link_loss) {
3260 			bool skip = false;
3261 
3262 			spin_lock(&offload_wq->offload_lock);
3263 			skip = offload_wq->is_handling_link_loss;
3264 
3265 			if (!skip)
3266 				offload_wq->is_handling_link_loss = true;
3267 
3268 			spin_unlock(&offload_wq->offload_lock);
3269 
3270 			if (!skip)
3271 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3272 
3273 			goto out;
3274 		}
3275 	}
3276 
3277 out:
3278 	if (result && !is_mst_root_connector) {
3279 		/* Downstream Port status changed. */
3280 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3281 			DRM_ERROR("KMS: Failed to detect connector\n");
3282 
3283 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3284 			emulated_link_detect(dc_link);
3285 
3286 			if (aconnector->fake_enable)
3287 				aconnector->fake_enable = false;
3288 
3289 			amdgpu_dm_update_connector_after_detect(aconnector);
3290 
3292 			drm_modeset_lock_all(dev);
3293 			dm_restore_drm_connector_state(dev, connector);
3294 			drm_modeset_unlock_all(dev);
3295 
3296 			drm_kms_helper_connector_hotplug_event(connector);
3297 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3298 
3299 			if (aconnector->fake_enable)
3300 				aconnector->fake_enable = false;
3301 
3302 			amdgpu_dm_update_connector_after_detect(aconnector);
3303 
3305 			drm_modeset_lock_all(dev);
3306 			dm_restore_drm_connector_state(dev, connector);
3307 			drm_modeset_unlock_all(dev);
3308 
3309 			drm_kms_helper_connector_hotplug_event(connector);
3310 		}
3311 	}
3312 #ifdef CONFIG_DRM_AMD_DC_HDCP
3313 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3314 		if (adev->dm.hdcp_workqueue)
3315 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3316 	}
3317 #endif
3318 
3319 	if (dc_link->type != dc_connection_mst_branch)
3320 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3321 
3322 	mutex_unlock(&aconnector->hpd_lock);
3323 }
3324 
3325 static void register_hpd_handlers(struct amdgpu_device *adev)
3326 {
3327 	struct drm_device *dev = adev_to_drm(adev);
3328 	struct drm_connector *connector;
3329 	struct amdgpu_dm_connector *aconnector;
3330 	const struct dc_link *dc_link;
3331 	struct dc_interrupt_params int_params = {0};
3332 
3333 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3334 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3335 
3336 	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
3338 
3339 		aconnector = to_amdgpu_dm_connector(connector);
3340 		dc_link = aconnector->dc_link;
3341 
3342 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3343 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3344 			int_params.irq_source = dc_link->irq_source_hpd;
3345 
3346 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3347 					handle_hpd_irq,
3348 					(void *) aconnector);
3349 		}
3350 
3351 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3352 
3353 			/* Also register for DP short pulse (hpd_rx). */
3354 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3356 
3357 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3358 					handle_hpd_rx_irq,
3359 					(void *) aconnector);
3360 
3361 			if (adev->dm.hpd_rx_offload_wq)
3362 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3363 					aconnector;
3364 		}
3365 	}
3366 }
3367 
3368 #if defined(CONFIG_DRM_AMD_DC_SI)
3369 /* Register IRQ sources and initialize IRQ callbacks */
3370 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3371 {
3372 	struct dc *dc = adev->dm.dc;
3373 	struct common_irq_params *c_irq_params;
3374 	struct dc_interrupt_params int_params = {0};
3375 	int r;
3376 	int i;
3377 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3378 
3379 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3380 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3381 
3382 	/*
3383 	 * Actions of amdgpu_irq_add_id():
3384 	 * 1. Register a set() function with base driver.
3385 	 *    Base driver will call set() function to enable/disable an
3386 	 *    interrupt in DC hardware.
3387 	 * 2. Register amdgpu_dm_irq_handler().
3388 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3389 	 *    coming from DC hardware.
3390 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3391 	 *    for acknowledging and handling. */
3392 
3393 	/* Use VBLANK interrupt */
3394 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3395 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3396 		if (r) {
3397 			DRM_ERROR("Failed to add crtc irq id!\n");
3398 			return r;
3399 		}
3400 
3401 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3402 		int_params.irq_source =
3403 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3404 
3405 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3406 
3407 		c_irq_params->adev = adev;
3408 		c_irq_params->irq_src = int_params.irq_source;
3409 
3410 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3411 				dm_crtc_high_irq, c_irq_params);
3412 	}
3413 
3414 	/* Use GRPH_PFLIP interrupt */
3415 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3416 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3417 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3418 		if (r) {
3419 			DRM_ERROR("Failed to add page flip irq id!\n");
3420 			return r;
3421 		}
3422 
3423 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3424 		int_params.irq_source =
3425 			dc_interrupt_to_irq_source(dc, i, 0);
3426 
3427 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3428 
3429 		c_irq_params->adev = adev;
3430 		c_irq_params->irq_src = int_params.irq_source;
3431 
3432 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3433 				dm_pflip_high_irq, c_irq_params);
3434 
3435 	}
3436 
3437 	/* HPD */
3438 	r = amdgpu_irq_add_id(adev, client_id,
3439 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3440 	if (r) {
3441 		DRM_ERROR("Failed to add hpd irq id!\n");
3442 		return r;
3443 	}
3444 
3445 	register_hpd_handlers(adev);
3446 
3447 	return 0;
3448 }
3449 #endif
3450 
3451 /* Register IRQ sources and initialize IRQ callbacks */
3452 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3453 {
3454 	struct dc *dc = adev->dm.dc;
3455 	struct common_irq_params *c_irq_params;
3456 	struct dc_interrupt_params int_params = {0};
3457 	int r;
3458 	int i;
3459 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3460 
3461 	if (adev->family >= AMDGPU_FAMILY_AI)
3462 		client_id = SOC15_IH_CLIENTID_DCE;
3463 
3464 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3465 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3466 
3467 	/*
3468 	 * Actions of amdgpu_irq_add_id():
3469 	 * 1. Register a set() function with base driver.
3470 	 *    Base driver will call set() function to enable/disable an
3471 	 *    interrupt in DC hardware.
3472 	 * 2. Register amdgpu_dm_irq_handler().
3473 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3474 	 *    coming from DC hardware.
3475 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3476 	 *    for acknowledging and handling. */
3477 
3478 	/* Use VBLANK interrupt */
3479 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3480 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3481 		if (r) {
3482 			DRM_ERROR("Failed to add crtc irq id!\n");
3483 			return r;
3484 		}
3485 
3486 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3487 		int_params.irq_source =
3488 			dc_interrupt_to_irq_source(dc, i, 0);
3489 
3490 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3491 
3492 		c_irq_params->adev = adev;
3493 		c_irq_params->irq_src = int_params.irq_source;
3494 
3495 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3496 				dm_crtc_high_irq, c_irq_params);
3497 	}
3498 
3499 	/* Use VUPDATE interrupt */
3500 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3501 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3502 		if (r) {
3503 			DRM_ERROR("Failed to add vupdate irq id!\n");
3504 			return r;
3505 		}
3506 
3507 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3508 		int_params.irq_source =
3509 			dc_interrupt_to_irq_source(dc, i, 0);
3510 
3511 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3512 
3513 		c_irq_params->adev = adev;
3514 		c_irq_params->irq_src = int_params.irq_source;
3515 
3516 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3517 				dm_vupdate_high_irq, c_irq_params);
3518 	}
3519 
3520 	/* Use GRPH_PFLIP interrupt */
3521 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3522 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3523 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3524 		if (r) {
3525 			DRM_ERROR("Failed to add page flip irq id!\n");
3526 			return r;
3527 		}
3528 
3529 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3530 		int_params.irq_source =
3531 			dc_interrupt_to_irq_source(dc, i, 0);
3532 
3533 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3534 
3535 		c_irq_params->adev = adev;
3536 		c_irq_params->irq_src = int_params.irq_source;
3537 
3538 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3539 				dm_pflip_high_irq, c_irq_params);
3540 
3541 	}
3542 
3543 	/* HPD */
3544 	r = amdgpu_irq_add_id(adev, client_id,
3545 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3546 	if (r) {
3547 		DRM_ERROR("Failed to add hpd irq id!\n");
3548 		return r;
3549 	}
3550 
3551 	register_hpd_handlers(adev);
3552 
3553 	return 0;
3554 }
3555 
3556 #if defined(CONFIG_DRM_AMD_DC_DCN)
3557 /* Register IRQ sources and initialize IRQ callbacks */
3558 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3559 {
3560 	struct dc *dc = adev->dm.dc;
3561 	struct common_irq_params *c_irq_params;
3562 	struct dc_interrupt_params int_params = {0};
3563 	int r;
3564 	int i;
3565 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3566 	static const unsigned int vrtl_int_srcid[] = {
3567 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3568 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3569 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3570 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3571 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3572 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3573 	};
3574 #endif
3575 
3576 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3577 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3578 
3579 	/*
3580 	 * Actions of amdgpu_irq_add_id():
3581 	 * 1. Register a set() function with base driver.
3582 	 *    Base driver will call set() function to enable/disable an
3583 	 *    interrupt in DC hardware.
3584 	 * 2. Register amdgpu_dm_irq_handler().
3585 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3586 	 *    coming from DC hardware.
3587 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3588 	 *    for acknowledging and handling.
3589 	 */
3590 
3591 	/* Use VSTARTUP interrupt */
3592 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3593 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3594 			i++) {
3595 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3596 
3597 		if (r) {
3598 			DRM_ERROR("Failed to add crtc irq id!\n");
3599 			return r;
3600 		}
3601 
3602 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3603 		int_params.irq_source =
3604 			dc_interrupt_to_irq_source(dc, i, 0);
3605 
3606 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3607 
3608 		c_irq_params->adev = adev;
3609 		c_irq_params->irq_src = int_params.irq_source;
3610 
3611 		amdgpu_dm_irq_register_interrupt(
3612 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3613 	}
3614 
3615 	/* Use otg vertical line interrupt */
3616 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3617 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3618 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3619 				vrtl_int_srcid[i], &adev->vline0_irq);
3620 
3621 		if (r) {
3622 			DRM_ERROR("Failed to add vline0 irq id!\n");
3623 			return r;
3624 		}
3625 
3626 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3627 		int_params.irq_source =
3628 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3629 
3630 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3631 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3632 			break;
3633 		}
3634 
3635 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3636 					- DC_IRQ_SOURCE_DC1_VLINE0];
3637 
3638 		c_irq_params->adev = adev;
3639 		c_irq_params->irq_src = int_params.irq_source;
3640 
3641 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3642 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3643 	}
3644 #endif
3645 
3646 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3647 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3648 	 * to trigger at end of each vblank, regardless of state of the lock,
3649 	 * matching DCE behaviour.
3650 	 */
3651 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3652 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3653 	     i++) {
3654 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3655 
3656 		if (r) {
3657 			DRM_ERROR("Failed to add vupdate irq id!\n");
3658 			return r;
3659 		}
3660 
3661 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3662 		int_params.irq_source =
3663 			dc_interrupt_to_irq_source(dc, i, 0);
3664 
3665 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3666 
3667 		c_irq_params->adev = adev;
3668 		c_irq_params->irq_src = int_params.irq_source;
3669 
3670 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3671 				dm_vupdate_high_irq, c_irq_params);
3672 	}
3673 
3674 	/* Use GRPH_PFLIP interrupt */
3675 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3676 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3677 			i++) {
3678 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3679 		if (r) {
3680 			DRM_ERROR("Failed to add page flip irq id!\n");
3681 			return r;
3682 		}
3683 
3684 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3685 		int_params.irq_source =
3686 			dc_interrupt_to_irq_source(dc, i, 0);
3687 
3688 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3689 
3690 		c_irq_params->adev = adev;
3691 		c_irq_params->irq_src = int_params.irq_source;
3692 
3693 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3694 				dm_pflip_high_irq, c_irq_params);
3695 
3696 	}
3697 
3698 	/* HPD */
3699 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3700 			&adev->hpd_irq);
3701 	if (r) {
3702 		DRM_ERROR("Failed to add hpd irq id!\n");
3703 		return r;
3704 	}
3705 
3706 	register_hpd_handlers(adev);
3707 
3708 	return 0;
3709 }
3710 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3711 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3712 {
3713 	struct dc *dc = adev->dm.dc;
3714 	struct common_irq_params *c_irq_params;
3715 	struct dc_interrupt_params int_params = {0};
3716 	int r, i;
3717 
3718 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3719 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3720 
3721 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3722 			&adev->dmub_outbox_irq);
3723 	if (r) {
3724 		DRM_ERROR("Failed to add outbox irq id!\n");
3725 		return r;
3726 	}
3727 
3728 	if (dc->ctx->dmub_srv) {
3729 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3730 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3731 		int_params.irq_source =
3732 		dc_interrupt_to_irq_source(dc, i, 0);
3733 
3734 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3735 
3736 		c_irq_params->adev = adev;
3737 		c_irq_params->irq_src = int_params.irq_source;
3738 
3739 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3740 				dm_dmub_outbox1_low_irq, c_irq_params);
3741 	}
3742 
3743 	return 0;
3744 }
3745 #endif
3746 
3747 /*
3748  * Acquires the lock for the atomic state object and returns
3749  * the new atomic state.
3750  *
3751  * This should only be called during atomic check.
3752  */
3753 int dm_atomic_get_state(struct drm_atomic_state *state,
3754 			struct dm_atomic_state **dm_state)
3755 {
3756 	struct drm_device *dev = state->dev;
3757 	struct amdgpu_device *adev = drm_to_adev(dev);
3758 	struct amdgpu_display_manager *dm = &adev->dm;
3759 	struct drm_private_state *priv_state;
3760 
3761 	if (*dm_state)
3762 		return 0;
3763 
3764 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3765 	if (IS_ERR(priv_state))
3766 		return PTR_ERR(priv_state);
3767 
3768 	*dm_state = to_dm_atomic_state(priv_state);
3769 
3770 	return 0;
3771 }
3772 
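/*
 * Find this driver's private object state in an atomic state that is already
 * being built; returns NULL if the private object is not part of the commit.
 */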
3773 static struct dm_atomic_state *
3774 dm_atomic_get_new_state(struct drm_atomic_state *state)
3775 {
3776 	struct drm_device *dev = state->dev;
3777 	struct amdgpu_device *adev = drm_to_adev(dev);
3778 	struct amdgpu_display_manager *dm = &adev->dm;
3779 	struct drm_private_obj *obj;
3780 	struct drm_private_state *new_obj_state;
3781 	int i;
3782 
3783 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3784 		if (obj->funcs == dm->atomic_obj.funcs)
3785 			return to_dm_atomic_state(new_obj_state);
3786 	}
3787 
3788 	return NULL;
3789 }
3790 
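/*
 * Duplicate the DM private state for a new atomic commit, including a copy of
 * the current DC state context.
 */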
3791 static struct drm_private_state *
3792 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3793 {
3794 	struct dm_atomic_state *old_state, *new_state;
3795 
3796 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3797 	if (!new_state)
3798 		return NULL;
3799 
3800 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3801 
3802 	old_state = to_dm_atomic_state(obj->state);
3803 
3804 	if (old_state && old_state->context)
3805 		new_state->context = dc_copy_state(old_state->context);
3806 
3807 	if (!new_state->context) {
3808 		kfree(new_state);
3809 		return NULL;
3810 	}
3811 
3812 	return &new_state->base;
3813 }
3814 
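/* Release the DC state context held by the DM private state and free it. */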
3815 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3816 				    struct drm_private_state *state)
3817 {
3818 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3819 
3820 	if (dm_state && dm_state->context)
3821 		dc_release_state(dm_state->context);
3822 
3823 	kfree(dm_state);
3824 }
3825 
3826 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3827 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3828 	.atomic_destroy_state = dm_atomic_destroy_state,
3829 };
3830 
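/*
 * Set up the DRM mode config limits and register the DM private atomic object
 * that carries the global DC state across atomic commits.
 */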
3831 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3832 {
3833 	struct dm_atomic_state *state;
3834 	int r;
3835 
3836 	adev->mode_info.mode_config_initialized = true;
3837 
3838 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3839 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3840 
3841 	adev_to_drm(adev)->mode_config.max_width = 16384;
3842 	adev_to_drm(adev)->mode_config.max_height = 16384;
3843 
3844 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3845 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3846 	/* indicates support for immediate flip */
3847 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3848 
3849 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3850 
3851 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3852 	if (!state)
3853 		return -ENOMEM;
3854 
3855 	state->context = dc_create_state(adev->dm.dc);
3856 	if (!state->context) {
3857 		kfree(state);
3858 		return -ENOMEM;
3859 	}
3860 
3861 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3862 
3863 	drm_atomic_private_obj_init(adev_to_drm(adev),
3864 				    &adev->dm.atomic_obj,
3865 				    &state->base,
3866 				    &dm_atomic_state_funcs);
3867 
3868 	r = amdgpu_display_modeset_create_props(adev);
3869 	if (r) {
3870 		dc_release_state(state->context);
3871 		kfree(state);
3872 		return r;
3873 	}
3874 
3875 	r = amdgpu_dm_audio_init(adev);
3876 	if (r) {
3877 		dc_release_state(state->context);
3878 		kfree(state);
3879 		return r;
3880 	}
3881 
3882 	return 0;
3883 }
3884 
3885 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3886 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3887 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3888 
3889 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3890 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3891 
3892 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3893 					    int bl_idx)
3894 {
3895 #if defined(CONFIG_ACPI)
3896 	struct amdgpu_dm_backlight_caps caps;
3897 
3898 	memset(&caps, 0, sizeof(caps));
3899 
3900 	if (dm->backlight_caps[bl_idx].caps_valid)
3901 		return;
3902 
3903 	amdgpu_acpi_get_backlight_caps(&caps);
3904 	if (caps.caps_valid) {
3905 		dm->backlight_caps[bl_idx].caps_valid = true;
3906 		if (caps.aux_support)
3907 			return;
3908 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3909 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3910 	} else {
3911 		dm->backlight_caps[bl_idx].min_input_signal =
3912 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3913 		dm->backlight_caps[bl_idx].max_input_signal =
3914 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3915 	}
3916 #else
3917 	if (dm->backlight_caps[bl_idx].aux_support)
3918 		return;
3919 
3920 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3921 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3922 #endif
3923 }
3924 
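/*
 * Look up the backlight range reported by firmware; returns 0 when no caps
 * are available, so callers fall back to passing brightness values through
 * unscaled.
 */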
3925 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3926 				unsigned *min, unsigned *max)
3927 {
3928 	if (!caps)
3929 		return 0;
3930 
3931 	if (caps->aux_support) {
3932 		// Firmware limits are in nits, DC API wants millinits.
3933 		*max = 1000 * caps->aux_max_input_signal;
3934 		*min = 1000 * caps->aux_min_input_signal;
3935 	} else {
3936 		// Firmware limits are 8-bit, PWM control is 16-bit.
3937 		*max = 0x101 * caps->max_input_signal;
3938 		*min = 0x101 * caps->min_input_signal;
3939 	}
3940 	return 1;
3941 }
3942 
3943 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3944 					uint32_t brightness)
3945 {
3946 	unsigned min, max;
3947 
3948 	if (!get_brightness_range(caps, &min, &max))
3949 		return brightness;
3950 
3951 	// Rescale 0..255 to min..max
3952 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3953 				       AMDGPU_MAX_BL_LEVEL);
3954 }
3955 
3956 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3957 				      uint32_t brightness)
3958 {
3959 	unsigned min, max;
3960 
3961 	if (!get_brightness_range(caps, &min, &max))
3962 		return brightness;
3963 
3964 	if (brightness < min)
3965 		return 0;
3966 	// Rescale min..max to 0..255
3967 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3968 				 max - min);
3969 }
3970 
3971 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3972 					 int bl_idx,
3973 					 u32 user_brightness)
3974 {
3975 	struct amdgpu_dm_backlight_caps caps;
3976 	struct dc_link *link;
3977 	u32 brightness;
3978 	bool rc;
3979 
3980 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3981 	caps = dm->backlight_caps[bl_idx];
3982 
3983 	dm->brightness[bl_idx] = user_brightness;
3984 	/* update scratch register */
3985 	if (bl_idx == 0)
3986 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3987 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3988 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3989 
3990 	/* Change brightness based on AUX property */
3991 	if (caps.aux_support) {
3992 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3993 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3994 		if (!rc)
3995 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3996 	} else {
3997 		rc = dc_link_set_backlight_level(link, brightness, 0);
3998 		if (!rc)
3999 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4000 	}
4001 
4002 	return rc ? 0 : 1;
4003 }
4004 
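/*
 * backlight_ops .update_status callback: map the backlight device back to its
 * eDP index and apply the requested brightness level.
 */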
4005 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4006 {
4007 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4008 	int i;
4009 
4010 	for (i = 0; i < dm->num_of_edps; i++) {
4011 		if (bd == dm->backlight_dev[i])
4012 			break;
4013 	}
4014 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4015 		i = 0;
4016 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4017 
4018 	return 0;
4019 }
4020 
4021 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4022 					 int bl_idx)
4023 {
4024 	struct amdgpu_dm_backlight_caps caps;
4025 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4026 
4027 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4028 	caps = dm->backlight_caps[bl_idx];
4029 
4030 	if (caps.aux_support) {
4031 		u32 avg, peak;
4032 		bool rc;
4033 
4034 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4035 		if (!rc)
4036 			return dm->brightness[bl_idx];
4037 		return convert_brightness_to_user(&caps, avg);
4038 	} else {
4039 		int ret = dc_link_get_backlight_level(link);
4040 
4041 		if (ret == DC_ERROR_UNEXPECTED)
4042 			return dm->brightness[bl_idx];
4043 		return convert_brightness_to_user(&caps, ret);
4044 	}
4045 }
4046 
4047 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4048 {
4049 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4050 	int i;
4051 
4052 	for (i = 0; i < dm->num_of_edps; i++) {
4053 		if (bd == dm->backlight_dev[i])
4054 			break;
4055 	}
4056 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4057 		i = 0;
4058 	return amdgpu_dm_backlight_get_level(dm, i);
4059 }
4060 
4061 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4062 	.options = BL_CORE_SUSPENDRESUME,
4063 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4064 	.update_status	= amdgpu_dm_backlight_update_status,
4065 };
4066 
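/*
 * Register a backlight class device; the caller associates it with an eDP
 * link on success.
 */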
4067 static void
4068 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4069 {
4070 	char bl_name[16];
4071 	struct backlight_properties props = { 0 };
4072 
4073 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4074 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4075 
4076 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4077 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4078 	props.type = BACKLIGHT_RAW;
4079 
4080 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4081 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4082 
4083 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4084 								       adev_to_drm(dm->adev)->dev,
4085 								       dm,
4086 								       &amdgpu_dm_backlight_ops,
4087 								       &props);
4088 
4089 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4090 		DRM_ERROR("DM: Backlight registration failed!\n");
4091 	else
4092 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4093 }
4094 #endif
4095 
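/*
 * Allocate one DRM plane and initialize it against the given DC plane caps.
 * Primary planes are tied to a single CRTC; planes beyond max_streams may be
 * used with any CRTC.
 */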
4096 static int initialize_plane(struct amdgpu_display_manager *dm,
4097 			    struct amdgpu_mode_info *mode_info, int plane_id,
4098 			    enum drm_plane_type plane_type,
4099 			    const struct dc_plane_cap *plane_cap)
4100 {
4101 	struct drm_plane *plane;
4102 	unsigned long possible_crtcs;
4103 	int ret = 0;
4104 
4105 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4106 	if (!plane) {
4107 		DRM_ERROR("KMS: Failed to allocate plane\n");
4108 		return -ENOMEM;
4109 	}
4110 	plane->type = plane_type;
4111 
4112 	/*
4113 	 * HACK: IGT tests expect that the primary plane for a CRTC
4114 	 * can only have one possible CRTC. Only expose support for
4115 	 * any CRTC if the plane is not going to be used as a primary plane
4116 	 * for a CRTC - like overlay or underlay planes.
4117 	 */
4118 	possible_crtcs = 1 << plane_id;
4119 	if (plane_id >= dm->dc->caps.max_streams)
4120 		possible_crtcs = 0xff;
4121 
4122 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4123 
4124 	if (ret) {
4125 		DRM_ERROR("KMS: Failed to initialize plane\n");
4126 		kfree(plane);
4127 		return ret;
4128 	}
4129 
4130 	if (mode_info)
4131 		mode_info->planes[plane_id] = plane;
4132 
4133 	return ret;
4134 }
4135 
4136 
4137 static void register_backlight_device(struct amdgpu_display_manager *dm,
4138 				      struct dc_link *link)
4139 {
4140 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4141 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4142 
4143 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4144 	    link->type != dc_connection_none) {
4145 		/*
4146 		 * Even if registration failed, we should continue with
4147 		 * DM initialization because not having a backlight control
4148 		 * is better than a black screen.
4149 		 */
4150 		if (!dm->backlight_dev[dm->num_of_edps])
4151 			amdgpu_dm_register_backlight_device(dm);
4152 
4153 		if (dm->backlight_dev[dm->num_of_edps]) {
4154 			dm->backlight_link[dm->num_of_edps] = link;
4155 			dm->num_of_edps++;
4156 		}
4157 	}
4158 #endif
4159 }
4160 
4161 
4162 /*
4163  * In this architecture, the association
4164  * connector -> encoder -> crtc
4165  * is not really required. The crtc and connector will hold the
4166  * display_index as an abstraction to use with the DAL component.
4167  *
4168  * Returns 0 on success
4169  */
4170 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4171 {
4172 	struct amdgpu_display_manager *dm = &adev->dm;
4173 	int32_t i;
4174 	struct amdgpu_dm_connector *aconnector = NULL;
4175 	struct amdgpu_encoder *aencoder = NULL;
4176 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4177 	uint32_t link_cnt;
4178 	int32_t primary_planes;
4179 	enum dc_connection_type new_connection_type = dc_connection_none;
4180 	const struct dc_plane_cap *plane;
4181 	bool psr_feature_enabled = false;
4182 
4183 	dm->display_indexes_num = dm->dc->caps.max_streams;
4184 	/* Update the actual number of CRTCs in use */
4185 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4186 
4187 	link_cnt = dm->dc->caps.max_links;
4188 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4189 		DRM_ERROR("DM: Failed to initialize mode config\n");
4190 		return -EINVAL;
4191 	}
4192 
4193 	/* There is one primary plane per CRTC */
4194 	primary_planes = dm->dc->caps.max_streams;
4195 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4196 
4197 	/*
4198 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4199 	 * Order is reversed to match iteration order in atomic check.
4200 	 */
4201 	for (i = (primary_planes - 1); i >= 0; i--) {
4202 		plane = &dm->dc->caps.planes[i];
4203 
4204 		if (initialize_plane(dm, mode_info, i,
4205 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4206 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4207 			goto fail;
4208 		}
4209 	}
4210 
4211 	/*
4212 	 * Initialize overlay planes, index starting after primary planes.
4213 	 * These planes have a higher DRM index than the primary planes since
4214 	 * they should be considered as having a higher z-order.
4215 	 * Order is reversed to match iteration order in atomic check.
4216 	 *
4217 	 * Only support DCN for now, and only expose one so we don't encourage
4218 	 * userspace to use up all the pipes.
4219 	 */
4220 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4221 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4222 
4223 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4224 			continue;
4225 
4226 		if (!plane->blends_with_above || !plane->blends_with_below)
4227 			continue;
4228 
4229 		if (!plane->pixel_format_support.argb8888)
4230 			continue;
4231 
4232 		if (initialize_plane(dm, NULL, primary_planes + i,
4233 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4234 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4235 			goto fail;
4236 		}
4237 
4238 		/* Only create one overlay plane. */
4239 		break;
4240 	}
4241 
4242 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4243 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4244 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4245 			goto fail;
4246 		}
4247 
4248 #if defined(CONFIG_DRM_AMD_DC_DCN)
4249 	/* Use Outbox interrupt */
4250 	switch (adev->ip_versions[DCE_HWIP][0]) {
4251 	case IP_VERSION(3, 0, 0):
4252 	case IP_VERSION(3, 1, 2):
4253 	case IP_VERSION(3, 1, 3):
4254 	case IP_VERSION(3, 1, 5):
4255 	case IP_VERSION(3, 1, 6):
4256 	case IP_VERSION(2, 1, 0):
4257 		if (register_outbox_irq_handlers(dm->adev)) {
4258 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4259 			goto fail;
4260 		}
4261 		break;
4262 	default:
4263 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4264 			      adev->ip_versions[DCE_HWIP][0]);
4265 	}
4266 
4267 	/* Determine whether to enable PSR support by default. */
4268 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4269 		switch (adev->ip_versions[DCE_HWIP][0]) {
4270 		case IP_VERSION(3, 1, 2):
4271 		case IP_VERSION(3, 1, 3):
4272 		case IP_VERSION(3, 1, 5):
4273 		case IP_VERSION(3, 1, 6):
4274 			psr_feature_enabled = true;
4275 			break;
4276 		default:
4277 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4278 			break;
4279 		}
4280 	}
4281 #endif
4282 
4283 	/* Disable vblank IRQs aggressively for power-saving. */
4284 	adev_to_drm(adev)->vblank_disable_immediate = true;
4285 
4286 	/* loops over all connectors on the board */
4287 	for (i = 0; i < link_cnt; i++) {
4288 		struct dc_link *link = NULL;
4289 
4290 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4291 			DRM_ERROR(
4292 				"KMS: Cannot support more than %d display indexes\n",
4293 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4294 			continue;
4295 		}
4296 
4297 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4298 		if (!aconnector)
4299 			goto fail;
4300 
4301 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4302 		if (!aencoder)
4303 			goto fail;
4304 
4305 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4306 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4307 			goto fail;
4308 		}
4309 
4310 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4311 			DRM_ERROR("KMS: Failed to initialize connector\n");
4312 			goto fail;
4313 		}
4314 
4315 		link = dc_get_link_at_index(dm->dc, i);
4316 
4317 		if (!dc_link_detect_sink(link, &new_connection_type))
4318 			DRM_ERROR("KMS: Failed to detect connector\n");
4319 
4320 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4321 			emulated_link_detect(link);
4322 			amdgpu_dm_update_connector_after_detect(aconnector);
4323 
4324 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4325 			amdgpu_dm_update_connector_after_detect(aconnector);
4326 			register_backlight_device(dm, link);
4327 			if (dm->num_of_edps)
4328 				update_connector_ext_caps(aconnector);
4329 			if (psr_feature_enabled)
4330 				amdgpu_dm_set_psr_caps(link);
4331 
4332 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4333 			 * PSR is also supported.
4334 			 */
4335 			if (link->psr_settings.psr_feature_enabled)
4336 				adev_to_drm(adev)->vblank_disable_immediate = false;
4337 		}
4338 
4339 
4340 	}
4341 
4342 	/* Software is initialized. Now we can register interrupt handlers. */
4343 	switch (adev->asic_type) {
4344 #if defined(CONFIG_DRM_AMD_DC_SI)
4345 	case CHIP_TAHITI:
4346 	case CHIP_PITCAIRN:
4347 	case CHIP_VERDE:
4348 	case CHIP_OLAND:
4349 		if (dce60_register_irq_handlers(dm->adev)) {
4350 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4351 			goto fail;
4352 		}
4353 		break;
4354 #endif
4355 	case CHIP_BONAIRE:
4356 	case CHIP_HAWAII:
4357 	case CHIP_KAVERI:
4358 	case CHIP_KABINI:
4359 	case CHIP_MULLINS:
4360 	case CHIP_TONGA:
4361 	case CHIP_FIJI:
4362 	case CHIP_CARRIZO:
4363 	case CHIP_STONEY:
4364 	case CHIP_POLARIS11:
4365 	case CHIP_POLARIS10:
4366 	case CHIP_POLARIS12:
4367 	case CHIP_VEGAM:
4368 	case CHIP_VEGA10:
4369 	case CHIP_VEGA12:
4370 	case CHIP_VEGA20:
4371 		if (dce110_register_irq_handlers(dm->adev)) {
4372 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4373 			goto fail;
4374 		}
4375 		break;
4376 	default:
4377 #if defined(CONFIG_DRM_AMD_DC_DCN)
4378 		switch (adev->ip_versions[DCE_HWIP][0]) {
4379 		case IP_VERSION(1, 0, 0):
4380 		case IP_VERSION(1, 0, 1):
4381 		case IP_VERSION(2, 0, 2):
4382 		case IP_VERSION(2, 0, 3):
4383 		case IP_VERSION(2, 0, 0):
4384 		case IP_VERSION(2, 1, 0):
4385 		case IP_VERSION(3, 0, 0):
4386 		case IP_VERSION(3, 0, 2):
4387 		case IP_VERSION(3, 0, 3):
4388 		case IP_VERSION(3, 0, 1):
4389 		case IP_VERSION(3, 1, 2):
4390 		case IP_VERSION(3, 1, 3):
4391 		case IP_VERSION(3, 1, 5):
4392 		case IP_VERSION(3, 1, 6):
4393 			if (dcn10_register_irq_handlers(dm->adev)) {
4394 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4395 				goto fail;
4396 			}
4397 			break;
4398 		default:
4399 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4400 					adev->ip_versions[DCE_HWIP][0]);
4401 			goto fail;
4402 		}
4403 #endif
4404 		break;
4405 	}
4406 
4407 	return 0;
4408 fail:
4409 	kfree(aencoder);
4410 	kfree(aconnector);
4411 
4412 	return -EINVAL;
4413 }
4414 
4415 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4416 {
4417 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4418 	return;
4419 }
4420 
4421 /******************************************************************************
4422  * amdgpu_display_funcs functions
4423  *****************************************************************************/
4424 
4425 /*
4426  * dm_bandwidth_update - program display watermarks
4427  *
4428  * @adev: amdgpu_device pointer
4429  *
4430  * Calculate and program the display watermarks and line buffer allocation.
4431  */
4432 static void dm_bandwidth_update(struct amdgpu_device *adev)
4433 {
4434 	/* TODO: implement later */
4435 }
4436 
4437 static const struct amdgpu_display_funcs dm_display_funcs = {
4438 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4439 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4440 	.backlight_set_level = NULL, /* never called for DC */
4441 	.backlight_get_level = NULL, /* never called for DC */
4442 	.hpd_sense = NULL,/* called unconditionally */
4443 	.hpd_set_polarity = NULL, /* called unconditionally */
4444 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4445 	.page_flip_get_scanoutpos =
4446 		dm_crtc_get_scanoutpos,/* called unconditionally */
4447 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4448 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4449 };
4450 
4451 #if defined(CONFIG_DEBUG_KERNEL_DC)
4452 
4453 static ssize_t s3_debug_store(struct device *device,
4454 			      struct device_attribute *attr,
4455 			      const char *buf,
4456 			      size_t count)
4457 {
4458 	int ret;
4459 	int s3_state;
4460 	struct drm_device *drm_dev = dev_get_drvdata(device);
4461 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4462 
4463 	ret = kstrtoint(buf, 0, &s3_state);
4464 
4465 	if (ret == 0) {
4466 		if (s3_state) {
4467 			dm_resume(adev);
4468 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4469 		} else
4470 			dm_suspend(adev);
4471 	}
4472 
4473 	return ret == 0 ? count : 0;
4474 }
4475 
4476 DEVICE_ATTR_WO(s3_debug);
4477 
4478 #endif
4479 
4480 static int dm_early_init(void *handle)
4481 {
4482 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4483 
4484 	switch (adev->asic_type) {
4485 #if defined(CONFIG_DRM_AMD_DC_SI)
4486 	case CHIP_TAHITI:
4487 	case CHIP_PITCAIRN:
4488 	case CHIP_VERDE:
4489 		adev->mode_info.num_crtc = 6;
4490 		adev->mode_info.num_hpd = 6;
4491 		adev->mode_info.num_dig = 6;
4492 		break;
4493 	case CHIP_OLAND:
4494 		adev->mode_info.num_crtc = 2;
4495 		adev->mode_info.num_hpd = 2;
4496 		adev->mode_info.num_dig = 2;
4497 		break;
4498 #endif
4499 	case CHIP_BONAIRE:
4500 	case CHIP_HAWAII:
4501 		adev->mode_info.num_crtc = 6;
4502 		adev->mode_info.num_hpd = 6;
4503 		adev->mode_info.num_dig = 6;
4504 		break;
4505 	case CHIP_KAVERI:
4506 		adev->mode_info.num_crtc = 4;
4507 		adev->mode_info.num_hpd = 6;
4508 		adev->mode_info.num_dig = 7;
4509 		break;
4510 	case CHIP_KABINI:
4511 	case CHIP_MULLINS:
4512 		adev->mode_info.num_crtc = 2;
4513 		adev->mode_info.num_hpd = 6;
4514 		adev->mode_info.num_dig = 6;
4515 		break;
4516 	case CHIP_FIJI:
4517 	case CHIP_TONGA:
4518 		adev->mode_info.num_crtc = 6;
4519 		adev->mode_info.num_hpd = 6;
4520 		adev->mode_info.num_dig = 7;
4521 		break;
4522 	case CHIP_CARRIZO:
4523 		adev->mode_info.num_crtc = 3;
4524 		adev->mode_info.num_hpd = 6;
4525 		adev->mode_info.num_dig = 9;
4526 		break;
4527 	case CHIP_STONEY:
4528 		adev->mode_info.num_crtc = 2;
4529 		adev->mode_info.num_hpd = 6;
4530 		adev->mode_info.num_dig = 9;
4531 		break;
4532 	case CHIP_POLARIS11:
4533 	case CHIP_POLARIS12:
4534 		adev->mode_info.num_crtc = 5;
4535 		adev->mode_info.num_hpd = 5;
4536 		adev->mode_info.num_dig = 5;
4537 		break;
4538 	case CHIP_POLARIS10:
4539 	case CHIP_VEGAM:
4540 		adev->mode_info.num_crtc = 6;
4541 		adev->mode_info.num_hpd = 6;
4542 		adev->mode_info.num_dig = 6;
4543 		break;
4544 	case CHIP_VEGA10:
4545 	case CHIP_VEGA12:
4546 	case CHIP_VEGA20:
4547 		adev->mode_info.num_crtc = 6;
4548 		adev->mode_info.num_hpd = 6;
4549 		adev->mode_info.num_dig = 6;
4550 		break;
4551 	default:
4552 #if defined(CONFIG_DRM_AMD_DC_DCN)
4553 		switch (adev->ip_versions[DCE_HWIP][0]) {
4554 		case IP_VERSION(2, 0, 2):
4555 		case IP_VERSION(3, 0, 0):
4556 			adev->mode_info.num_crtc = 6;
4557 			adev->mode_info.num_hpd = 6;
4558 			adev->mode_info.num_dig = 6;
4559 			break;
4560 		case IP_VERSION(2, 0, 0):
4561 		case IP_VERSION(3, 0, 2):
4562 			adev->mode_info.num_crtc = 5;
4563 			adev->mode_info.num_hpd = 5;
4564 			adev->mode_info.num_dig = 5;
4565 			break;
4566 		case IP_VERSION(2, 0, 3):
4567 		case IP_VERSION(3, 0, 3):
4568 			adev->mode_info.num_crtc = 2;
4569 			adev->mode_info.num_hpd = 2;
4570 			adev->mode_info.num_dig = 2;
4571 			break;
4572 		case IP_VERSION(1, 0, 0):
4573 		case IP_VERSION(1, 0, 1):
4574 		case IP_VERSION(3, 0, 1):
4575 		case IP_VERSION(2, 1, 0):
4576 		case IP_VERSION(3, 1, 2):
4577 		case IP_VERSION(3, 1, 3):
4578 		case IP_VERSION(3, 1, 5):
4579 		case IP_VERSION(3, 1, 6):
4580 			adev->mode_info.num_crtc = 4;
4581 			adev->mode_info.num_hpd = 4;
4582 			adev->mode_info.num_dig = 4;
4583 			break;
4584 		default:
4585 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4586 					adev->ip_versions[DCE_HWIP][0]);
4587 			return -EINVAL;
4588 		}
4589 #endif
4590 		break;
4591 	}
4592 
4593 	amdgpu_dm_set_irq_funcs(adev);
4594 
4595 	if (adev->mode_info.funcs == NULL)
4596 		adev->mode_info.funcs = &dm_display_funcs;
4597 
4598 	/*
4599 	 * Note: Do NOT change adev->audio_endpt_rreg and
4600 	 * adev->audio_endpt_wreg because they are initialised in
4601 	 * amdgpu_device_init()
4602 	 */
4603 #if defined(CONFIG_DEBUG_KERNEL_DC)
4604 	device_create_file(
4605 		adev_to_drm(adev)->dev,
4606 		&dev_attr_s3_debug);
4607 #endif
4608 
4609 	return 0;
4610 }
4611 
4612 static bool modeset_required(struct drm_crtc_state *crtc_state,
4613 			     struct dc_stream_state *new_stream,
4614 			     struct dc_stream_state *old_stream)
4615 {
4616 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4617 }
4618 
4619 static bool modereset_required(struct drm_crtc_state *crtc_state)
4620 {
4621 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4622 }
4623 
4624 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4625 {
4626 	drm_encoder_cleanup(encoder);
4627 	kfree(encoder);
4628 }
4629 
4630 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4631 	.destroy = amdgpu_dm_encoder_destroy,
4632 };
4633 
4634 
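/*
 * Look up the per-format scaling limits from the DC plane caps, expressed in
 * units of 1/1000 (1000 == 1.0 scaling factor).
 */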
4635 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4636 					 struct drm_framebuffer *fb,
4637 					 int *min_downscale, int *max_upscale)
4638 {
4639 	struct amdgpu_device *adev = drm_to_adev(dev);
4640 	struct dc *dc = adev->dm.dc;
4641 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4642 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4643 
4644 	switch (fb->format->format) {
4645 	case DRM_FORMAT_P010:
4646 	case DRM_FORMAT_NV12:
4647 	case DRM_FORMAT_NV21:
4648 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4649 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4650 		break;
4651 
4652 	case DRM_FORMAT_XRGB16161616F:
4653 	case DRM_FORMAT_ARGB16161616F:
4654 	case DRM_FORMAT_XBGR16161616F:
4655 	case DRM_FORMAT_ABGR16161616F:
4656 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4657 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4658 		break;
4659 
4660 	default:
4661 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4662 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4663 		break;
4664 	}
4665 
4666 	/*
4667 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4668 	 * scaling factor of 1.0 == 1000 units.
4669 	 */
4670 	if (*max_upscale == 1)
4671 		*max_upscale = 1000;
4672 
4673 	if (*min_downscale == 1)
4674 		*min_downscale = 1000;
4675 }
4676 
4677 
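/*
 * Translate a DRM plane state into DC src/dst/clip rectangles and validate
 * the resulting scaling ratios against the per-format plane caps.
 */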
4678 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4679 				const struct drm_plane_state *state,
4680 				struct dc_scaling_info *scaling_info)
4681 {
4682 	int scale_w, scale_h, min_downscale, max_upscale;
4683 
4684 	memset(scaling_info, 0, sizeof(*scaling_info));
4685 
4686 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4687 	scaling_info->src_rect.x = state->src_x >> 16;
4688 	scaling_info->src_rect.y = state->src_y >> 16;
4689 
4690 	/*
4691 	 * For reasons we don't (yet) fully understand a non-zero
4692 	 * src_y coordinate into an NV12 buffer can cause a
4693 	 * system hang on DCN1x.
4694 	 * To avoid hangs (and maybe be overly cautious)
4695 	 * let's reject both non-zero src_x and src_y.
4696 	 *
4697 	 * We currently know of only one use-case to reproduce a
4698 	 * scenario with non-zero src_x and src_y for NV12, which
4699 	 * is to gesture the YouTube Android app into full screen
4700 	 * on ChromeOS.
4701 	 */
4702 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4703 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4704 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4705 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4706 		return -EINVAL;
4707 
4708 	scaling_info->src_rect.width = state->src_w >> 16;
4709 	if (scaling_info->src_rect.width == 0)
4710 		return -EINVAL;
4711 
4712 	scaling_info->src_rect.height = state->src_h >> 16;
4713 	if (scaling_info->src_rect.height == 0)
4714 		return -EINVAL;
4715 
4716 	scaling_info->dst_rect.x = state->crtc_x;
4717 	scaling_info->dst_rect.y = state->crtc_y;
4718 
4719 	if (state->crtc_w == 0)
4720 		return -EINVAL;
4721 
4722 	scaling_info->dst_rect.width = state->crtc_w;
4723 
4724 	if (state->crtc_h == 0)
4725 		return -EINVAL;
4726 
4727 	scaling_info->dst_rect.height = state->crtc_h;
4728 
4729 	/* DRM doesn't specify clipping on destination output. */
4730 	scaling_info->clip_rect = scaling_info->dst_rect;
4731 
4732 	/* Validate scaling per-format with DC plane caps */
4733 	if (state->plane && state->plane->dev && state->fb) {
4734 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4735 					     &min_downscale, &max_upscale);
4736 	} else {
4737 		min_downscale = 250;
4738 		max_upscale = 16000;
4739 	}
4740 
4741 	scale_w = scaling_info->dst_rect.width * 1000 /
4742 		  scaling_info->src_rect.width;
4743 
4744 	if (scale_w < min_downscale || scale_w > max_upscale)
4745 		return -EINVAL;
4746 
4747 	scale_h = scaling_info->dst_rect.height * 1000 /
4748 		  scaling_info->src_rect.height;
4749 
4750 	if (scale_h < min_downscale || scale_h > max_upscale)
4751 		return -EINVAL;
4752 
4753 	/*
4754 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4755 	 * assume reasonable defaults based on the format.
4756 	 */
4757 
4758 	return 0;
4759 }
4760 
4761 static void
4762 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4763 				 uint64_t tiling_flags)
4764 {
4765 	/* Fill GFX8 params */
4766 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4767 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4768 
4769 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4770 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4771 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4772 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4773 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4774 
4775 		/* XXX fix me for VI */
4776 		tiling_info->gfx8.num_banks = num_banks;
4777 		tiling_info->gfx8.array_mode =
4778 				DC_ARRAY_2D_TILED_THIN1;
4779 		tiling_info->gfx8.tile_split = tile_split;
4780 		tiling_info->gfx8.bank_width = bankw;
4781 		tiling_info->gfx8.bank_height = bankh;
4782 		tiling_info->gfx8.tile_aspect = mtaspect;
4783 		tiling_info->gfx8.tile_mode =
4784 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4785 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4786 			== DC_ARRAY_1D_TILED_THIN1) {
4787 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4788 	}
4789 
4790 	tiling_info->gfx8.pipe_config =
4791 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4792 }
4793 
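/* Derive the baseline GFX9+ tiling parameters from the ASIC's GB_ADDR_CONFIG. */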
4794 static void
4795 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4796 				  union dc_tiling_info *tiling_info)
4797 {
4798 	tiling_info->gfx9.num_pipes =
4799 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4800 	tiling_info->gfx9.num_banks =
4801 		adev->gfx.config.gb_addr_config_fields.num_banks;
4802 	tiling_info->gfx9.pipe_interleave =
4803 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4804 	tiling_info->gfx9.num_shader_engines =
4805 		adev->gfx.config.gb_addr_config_fields.num_se;
4806 	tiling_info->gfx9.max_compressed_frags =
4807 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4808 	tiling_info->gfx9.num_rb_per_se =
4809 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4810 	tiling_info->gfx9.shaderEnable = 1;
4811 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4812 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4813 }
4814 
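/*
 * Ask DC whether the requested DCC parameters are supported for this format,
 * rotation and tiling; returns -EINVAL if they are not.
 */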
4815 static int
4816 validate_dcc(struct amdgpu_device *adev,
4817 	     const enum surface_pixel_format format,
4818 	     const enum dc_rotation_angle rotation,
4819 	     const union dc_tiling_info *tiling_info,
4820 	     const struct dc_plane_dcc_param *dcc,
4821 	     const struct dc_plane_address *address,
4822 	     const struct plane_size *plane_size)
4823 {
4824 	struct dc *dc = adev->dm.dc;
4825 	struct dc_dcc_surface_param input;
4826 	struct dc_surface_dcc_cap output;
4827 
4828 	memset(&input, 0, sizeof(input));
4829 	memset(&output, 0, sizeof(output));
4830 
4831 	if (!dcc->enable)
4832 		return 0;
4833 
4834 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4835 	    !dc->cap_funcs.get_dcc_compression_cap)
4836 		return -EINVAL;
4837 
4838 	input.format = format;
4839 	input.surface_size.width = plane_size->surface_size.width;
4840 	input.surface_size.height = plane_size->surface_size.height;
4841 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4842 
4843 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4844 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4845 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4846 		input.scan = SCAN_DIRECTION_VERTICAL;
4847 
4848 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4849 		return -EINVAL;
4850 
4851 	if (!output.capable)
4852 		return -EINVAL;
4853 
4854 	if (dcc->independent_64b_blks == 0 &&
4855 	    output.grph.rgb.independent_64b_blks != 0)
4856 		return -EINVAL;
4857 
4858 	return 0;
4859 }
4860 
4861 static bool
4862 modifier_has_dcc(uint64_t modifier)
4863 {
4864 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4865 }
4866 
4867 static unsigned
4868 modifier_gfx9_swizzle_mode(uint64_t modifier)
4869 {
4870 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4871 		return 0;
4872 
4873 	return AMD_FMT_MOD_GET(TILE, modifier);
4874 }
4875 
4876 static const struct drm_format_info *
4877 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4878 {
4879 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4880 }
4881 
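/*
 * Override the device defaults with the pipe/bank/packer layout encoded in an
 * AMD format modifier, if one is present.
 */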
4882 static void
4883 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4884 				    union dc_tiling_info *tiling_info,
4885 				    uint64_t modifier)
4886 {
4887 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4888 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4889 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4890 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4891 
4892 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4893 
4894 	if (!IS_AMD_FMT_MOD(modifier))
4895 		return;
4896 
4897 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4898 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4899 
4900 	if (adev->family >= AMDGPU_FAMILY_NV) {
4901 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4902 	} else {
4903 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4904 
4905 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4906 	}
4907 }
4908 
4909 enum dm_micro_swizzle {
4910 	MICRO_SWIZZLE_Z = 0,
4911 	MICRO_SWIZZLE_S = 1,
4912 	MICRO_SWIZZLE_D = 2,
4913 	MICRO_SWIZZLE_R = 3
4914 };
4915 
4916 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4917 					  uint32_t format,
4918 					  uint64_t modifier)
4919 {
4920 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4921 	const struct drm_format_info *info = drm_format_info(format);
4922 	int i;
4923 
4924 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4925 
4926 	if (!info)
4927 		return false;
4928 
4929 	/*
4930 	 * We always have to allow these modifiers:
4931 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4932 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4933 	 */
4934 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4935 	    modifier == DRM_FORMAT_MOD_INVALID) {
4936 		return true;
4937 	}
4938 
4939 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4940 	for (i = 0; i < plane->modifier_count; i++) {
4941 		if (modifier == plane->modifiers[i])
4942 			break;
4943 	}
4944 	if (i == plane->modifier_count)
4945 		return false;
4946 
4947 	/*
4948 	 * For D swizzle the canonical modifier depends on the bpp, so check
4949 	 * it here.
4950 	 */
4951 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4952 	    adev->family >= AMDGPU_FAMILY_NV) {
4953 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4954 			return false;
4955 	}
4956 
4957 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4958 	    info->cpp[0] < 8)
4959 		return false;
4960 
4961 	if (modifier_has_dcc(modifier)) {
4962 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4963 		if (info->cpp[0] != 4)
4964 			return false;
4965 		/* We support multi-planar formats, but not when combined with
4966 		 * additional DCC metadata planes. */
4967 		if (info->num_planes > 1)
4968 			return false;
4969 	}
4970 
4971 	return true;
4972 }
4973 
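/* Append a modifier to the list, doubling the backing array when it is full. */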
4974 static void
4975 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4976 {
4977 	if (!*mods)
4978 		return;
4979 
4980 	if (*cap - *size < 1) {
4981 		uint64_t new_cap = *cap * 2;
4982 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4983 
4984 		if (!new_mods) {
4985 			kfree(*mods);
4986 			*mods = NULL;
4987 			return;
4988 		}
4989 
4990 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4991 		kfree(*mods);
4992 		*mods = new_mods;
4993 		*cap = new_cap;
4994 	}
4995 
4996 	(*mods)[*size] = mod;
4997 	*size += 1;
4998 }
4999 
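/*
 * Advertise the GFX9 swizzle modes; DCC-capable variants are only exposed on
 * Raven-family parts.
 */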
5000 static void
5001 add_gfx9_modifiers(const struct amdgpu_device *adev,
5002 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5003 {
5004 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5005 	int pipe_xor_bits = min(8, pipes +
5006 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5007 	int bank_xor_bits = min(8 - pipe_xor_bits,
5008 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5009 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5010 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5011 
5012 
5013 	if (adev->family == AMDGPU_FAMILY_RV) {
5014 		/* Raven2 and later */
5015 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5016 
5017 		/*
5018 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5019 		 * doesn't support _D on DCN
5020 		 */
5021 
5022 		if (has_constant_encode) {
5023 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5024 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5025 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5026 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5027 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5028 				    AMD_FMT_MOD_SET(DCC, 1) |
5029 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5030 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5031 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5032 		}
5033 
5034 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5035 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5036 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5037 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5038 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5039 			    AMD_FMT_MOD_SET(DCC, 1) |
5040 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5041 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5042 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5043 
5044 		if (has_constant_encode) {
5045 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5046 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5047 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5048 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5049 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5050 				    AMD_FMT_MOD_SET(DCC, 1) |
5051 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5052 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5053 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5054 
5055 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5056 				    AMD_FMT_MOD_SET(RB, rb) |
5057 				    AMD_FMT_MOD_SET(PIPE, pipes));
5058 		}
5059 
5060 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5061 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5062 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5063 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5064 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5065 			    AMD_FMT_MOD_SET(DCC, 1) |
5066 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5067 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5068 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5069 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5070 			    AMD_FMT_MOD_SET(RB, rb) |
5071 			    AMD_FMT_MOD_SET(PIPE, pipes));
5072 	}
5073 
5074 	/*
5075 	 * Only supported for 64bpp on Raven, will be filtered on format in
5076 	 * dm_plane_format_mod_supported.
5077 	 */
5078 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5080 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5081 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5082 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5083 
5084 	if (adev->family == AMDGPU_FAMILY_RV) {
5085 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5086 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5087 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5088 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5089 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5090 	}
5091 
5092 	/*
5093 	 * Only supported for 64bpp on Raven, will be filtered on format in
5094 	 * dm_plane_format_mod_supported.
5095 	 */
5096 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5097 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5098 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5099 
5100 	if (adev->family == AMDGPU_FAMILY_RV) {
5101 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5102 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5103 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5104 	}
5105 }
5106 
5107 static void
5108 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5109 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5110 {
5111 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5112 
5113 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5115 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5116 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5117 		    AMD_FMT_MOD_SET(DCC, 1) |
5118 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5119 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5120 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5121 
5122 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5123 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5124 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5125 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5126 		    AMD_FMT_MOD_SET(DCC, 1) |
5127 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5128 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5129 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5130 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5131 
5132 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5133 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5134 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5135 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5136 
5137 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5138 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5139 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5140 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5141 
5142 
5143 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5144 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5145 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5146 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5147 
5148 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5149 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5150 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5151 }
5152 
5153 static void
5154 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5155 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5156 {
5157 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5158 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5159 
5160 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5161 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5162 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5163 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5164 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5165 		    AMD_FMT_MOD_SET(DCC, 1) |
5166 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5167 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5168 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5169 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5170 
5171 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5172 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5173 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5174 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5175 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5176 		    AMD_FMT_MOD_SET(DCC, 1) |
5177 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5178 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5179 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5180 
5181 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5182 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5183 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5184 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5185 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5186 		    AMD_FMT_MOD_SET(DCC, 1) |
5187 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5188 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5189 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5190 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5191 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5192 
5193 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5194 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5195 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5196 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5197 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5198 		    AMD_FMT_MOD_SET(DCC, 1) |
5199 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5200 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5201 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5202 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5203 
5204 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5205 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5206 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5207 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5208 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5209 
5210 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5211 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5212 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5213 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5214 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5215 
5216 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5217 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5218 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5219 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5220 
5221 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5222 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5223 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5224 }
5225 
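/*
 * Build the list of format modifiers advertised for a plane. Pre-GFX9
 * families get no modifier list, cursor planes only get LINEAR, and the
 * per-family helpers above fill in the tiled/DCC modifiers. The list is
 * always terminated with DRM_FORMAT_MOD_INVALID.
 */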
5226 static int
5227 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5228 {
5229 	uint64_t size = 0, capacity = 128;
5230 	*mods = NULL;
5231 
5232 	/* We have not hooked up any pre-GFX9 modifiers. */
5233 	if (adev->family < AMDGPU_FAMILY_AI)
5234 		return 0;
5235 
5236 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5237 
5238 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5239 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5240 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5241 		return *mods ? 0 : -ENOMEM;
5242 	}
5243 
5244 	switch (adev->family) {
5245 	case AMDGPU_FAMILY_AI:
5246 	case AMDGPU_FAMILY_RV:
5247 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5248 		break;
5249 	case AMDGPU_FAMILY_NV:
5250 	case AMDGPU_FAMILY_VGH:
5251 	case AMDGPU_FAMILY_YC:
5252 	case AMDGPU_FAMILY_GC_10_3_6:
5253 	case AMDGPU_FAMILY_GC_10_3_7:
5254 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5255 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5256 		else
5257 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5258 		break;
5259 	}
5260 
5261 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5262 
5263 	/* INVALID marks the end of the list. */
5264 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5265 
5266 	if (!*mods)
5267 		return -ENOMEM;
5268 
5269 	return 0;
5270 }
5271 
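/*
 * Derive the DC tiling and DCC parameters for a GFX9+ surface from the
 * framebuffer modifier, program the DCC meta surface address when DCC is
 * enabled, and validate the result with validate_dcc().
 */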
5272 static int
5273 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5274 					  const struct amdgpu_framebuffer *afb,
5275 					  const enum surface_pixel_format format,
5276 					  const enum dc_rotation_angle rotation,
5277 					  const struct plane_size *plane_size,
5278 					  union dc_tiling_info *tiling_info,
5279 					  struct dc_plane_dcc_param *dcc,
5280 					  struct dc_plane_address *address,
5281 					  const bool force_disable_dcc)
5282 {
5283 	const uint64_t modifier = afb->base.modifier;
5284 	int ret = 0;
5285 
5286 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5287 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5288 
5289 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5290 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5291 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5292 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5293 
5294 		dcc->enable = 1;
5295 		dcc->meta_pitch = afb->base.pitches[1];
5296 		dcc->independent_64b_blks = independent_64b_blks;
5297 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5298 			if (independent_64b_blks && independent_128b_blks)
5299 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5300 			else if (independent_128b_blks)
5301 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5302 			else if (independent_64b_blks && !independent_128b_blks)
5303 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5304 			else
5305 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5306 		} else {
5307 			if (independent_64b_blks)
5308 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5309 			else
5310 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5311 		}
5312 
5313 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5314 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5315 	}
5316 
5317 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5318 	if (ret)
5319 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5320 
5321 	return ret;
5322 }
5323 
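/*
 * Fill the DC surface size, pitch and address information for a plane,
 * covering both RGB and video (luma/chroma) formats, and derive the tiling
 * and DCC attributes from the modifier (GFX9+) or the tiling flags (GFX8-).
 */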
5324 static int
5325 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5326 			     const struct amdgpu_framebuffer *afb,
5327 			     const enum surface_pixel_format format,
5328 			     const enum dc_rotation_angle rotation,
5329 			     const uint64_t tiling_flags,
5330 			     union dc_tiling_info *tiling_info,
5331 			     struct plane_size *plane_size,
5332 			     struct dc_plane_dcc_param *dcc,
5333 			     struct dc_plane_address *address,
5334 			     bool tmz_surface,
5335 			     bool force_disable_dcc)
5336 {
5337 	const struct drm_framebuffer *fb = &afb->base;
5338 	int ret;
5339 
5340 	memset(tiling_info, 0, sizeof(*tiling_info));
5341 	memset(plane_size, 0, sizeof(*plane_size));
5342 	memset(dcc, 0, sizeof(*dcc));
5343 	memset(address, 0, sizeof(*address));
5344 
5345 	address->tmz_surface = tmz_surface;
5346 
5347 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5348 		uint64_t addr = afb->address + fb->offsets[0];
5349 
5350 		plane_size->surface_size.x = 0;
5351 		plane_size->surface_size.y = 0;
5352 		plane_size->surface_size.width = fb->width;
5353 		plane_size->surface_size.height = fb->height;
5354 		plane_size->surface_pitch =
5355 			fb->pitches[0] / fb->format->cpp[0];
5356 
5357 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5358 		address->grph.addr.low_part = lower_32_bits(addr);
5359 		address->grph.addr.high_part = upper_32_bits(addr);
5360 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5361 		uint64_t luma_addr = afb->address + fb->offsets[0];
5362 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5363 
5364 		plane_size->surface_size.x = 0;
5365 		plane_size->surface_size.y = 0;
5366 		plane_size->surface_size.width = fb->width;
5367 		plane_size->surface_size.height = fb->height;
5368 		plane_size->surface_pitch =
5369 			fb->pitches[0] / fb->format->cpp[0];
5370 
5371 		plane_size->chroma_size.x = 0;
5372 		plane_size->chroma_size.y = 0;
5373 		/* TODO: set these based on surface format */
5374 		plane_size->chroma_size.width = fb->width / 2;
5375 		plane_size->chroma_size.height = fb->height / 2;
5376 
5377 		plane_size->chroma_pitch =
5378 			fb->pitches[1] / fb->format->cpp[1];
5379 
5380 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5381 		address->video_progressive.luma_addr.low_part =
5382 			lower_32_bits(luma_addr);
5383 		address->video_progressive.luma_addr.high_part =
5384 			upper_32_bits(luma_addr);
5385 		address->video_progressive.chroma_addr.low_part =
5386 			lower_32_bits(chroma_addr);
5387 		address->video_progressive.chroma_addr.high_part =
5388 			upper_32_bits(chroma_addr);
5389 	}
5390 
5391 	if (adev->family >= AMDGPU_FAMILY_AI) {
5392 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5393 								rotation, plane_size,
5394 								tiling_info, dcc,
5395 								address,
5396 								force_disable_dcc);
5397 		if (ret)
5398 			return ret;
5399 	} else {
5400 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5401 	}
5402 
5403 	return 0;
5404 }
5405 
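/*
 * Derive per-pixel and global alpha blending settings from the DRM plane
 * state. Only overlay planes are considered: pre-multiplied alpha formats
 * enable per-pixel alpha, and a plane alpha below 0xffff enables global
 * alpha.
 */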
5406 static void
5407 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5408 			       bool *per_pixel_alpha, bool *global_alpha,
5409 			       int *global_alpha_value)
5410 {
5411 	*per_pixel_alpha = false;
5412 	*global_alpha = false;
5413 	*global_alpha_value = 0xff;
5414 
5415 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5416 		return;
5417 
5418 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5419 		static const uint32_t alpha_formats[] = {
5420 			DRM_FORMAT_ARGB8888,
5421 			DRM_FORMAT_RGBA8888,
5422 			DRM_FORMAT_ABGR8888,
5423 		};
5424 		uint32_t format = plane_state->fb->format->format;
5425 		unsigned int i;
5426 
5427 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5428 			if (format == alpha_formats[i]) {
5429 				*per_pixel_alpha = true;
5430 				break;
5431 			}
5432 		}
5433 	}
5434 
5435 	if (plane_state->alpha < 0xffff) {
5436 		*global_alpha = true;
5437 		*global_alpha_value = plane_state->alpha >> 8;
5438 	}
5439 }
5440 
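/*
 * Map the DRM color encoding and range properties of a YCbCr plane to a DC
 * color space. RGB formats always use sRGB.
 */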
5441 static int
5442 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5443 			    const enum surface_pixel_format format,
5444 			    enum dc_color_space *color_space)
5445 {
5446 	bool full_range;
5447 
5448 	*color_space = COLOR_SPACE_SRGB;
5449 
5450 	/* DRM color properties only affect non-RGB formats. */
5451 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5452 		return 0;
5453 
5454 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5455 
5456 	switch (plane_state->color_encoding) {
5457 	case DRM_COLOR_YCBCR_BT601:
5458 		if (full_range)
5459 			*color_space = COLOR_SPACE_YCBCR601;
5460 		else
5461 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5462 		break;
5463 
5464 	case DRM_COLOR_YCBCR_BT709:
5465 		if (full_range)
5466 			*color_space = COLOR_SPACE_YCBCR709;
5467 		else
5468 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5469 		break;
5470 
5471 	case DRM_COLOR_YCBCR_BT2020:
5472 		if (full_range)
5473 			*color_space = COLOR_SPACE_2020_YCBCR;
5474 		else
5475 			return -EINVAL;
5476 		break;
5477 
5478 	default:
5479 		return -EINVAL;
5480 	}
5481 
5482 	return 0;
5483 }
5484 
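/*
 * Convert a DRM plane state into a DC plane_info: pixel format, rotation,
 * color space, buffer/tiling/DCC attributes and blending settings.
 */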
5485 static int
5486 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5487 			    const struct drm_plane_state *plane_state,
5488 			    const uint64_t tiling_flags,
5489 			    struct dc_plane_info *plane_info,
5490 			    struct dc_plane_address *address,
5491 			    bool tmz_surface,
5492 			    bool force_disable_dcc)
5493 {
5494 	const struct drm_framebuffer *fb = plane_state->fb;
5495 	const struct amdgpu_framebuffer *afb =
5496 		to_amdgpu_framebuffer(plane_state->fb);
5497 	int ret;
5498 
5499 	memset(plane_info, 0, sizeof(*plane_info));
5500 
5501 	switch (fb->format->format) {
5502 	case DRM_FORMAT_C8:
5503 		plane_info->format =
5504 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5505 		break;
5506 	case DRM_FORMAT_RGB565:
5507 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5508 		break;
5509 	case DRM_FORMAT_XRGB8888:
5510 	case DRM_FORMAT_ARGB8888:
5511 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5512 		break;
5513 	case DRM_FORMAT_XRGB2101010:
5514 	case DRM_FORMAT_ARGB2101010:
5515 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5516 		break;
5517 	case DRM_FORMAT_XBGR2101010:
5518 	case DRM_FORMAT_ABGR2101010:
5519 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5520 		break;
5521 	case DRM_FORMAT_XBGR8888:
5522 	case DRM_FORMAT_ABGR8888:
5523 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5524 		break;
5525 	case DRM_FORMAT_NV21:
5526 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5527 		break;
5528 	case DRM_FORMAT_NV12:
5529 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5530 		break;
5531 	case DRM_FORMAT_P010:
5532 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5533 		break;
5534 	case DRM_FORMAT_XRGB16161616F:
5535 	case DRM_FORMAT_ARGB16161616F:
5536 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5537 		break;
5538 	case DRM_FORMAT_XBGR16161616F:
5539 	case DRM_FORMAT_ABGR16161616F:
5540 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5541 		break;
5542 	case DRM_FORMAT_XRGB16161616:
5543 	case DRM_FORMAT_ARGB16161616:
5544 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5545 		break;
5546 	case DRM_FORMAT_XBGR16161616:
5547 	case DRM_FORMAT_ABGR16161616:
5548 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5549 		break;
5550 	default:
5551 		DRM_ERROR(
5552 			"Unsupported screen format %p4cc\n",
5553 			&fb->format->format);
5554 		return -EINVAL;
5555 	}
5556 
5557 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5558 	case DRM_MODE_ROTATE_0:
5559 		plane_info->rotation = ROTATION_ANGLE_0;
5560 		break;
5561 	case DRM_MODE_ROTATE_90:
5562 		plane_info->rotation = ROTATION_ANGLE_90;
5563 		break;
5564 	case DRM_MODE_ROTATE_180:
5565 		plane_info->rotation = ROTATION_ANGLE_180;
5566 		break;
5567 	case DRM_MODE_ROTATE_270:
5568 		plane_info->rotation = ROTATION_ANGLE_270;
5569 		break;
5570 	default:
5571 		plane_info->rotation = ROTATION_ANGLE_0;
5572 		break;
5573 	}
5574 
5575 	plane_info->visible = true;
5576 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5577 
5578 	plane_info->layer_index = 0;
5579 
5580 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5581 					  &plane_info->color_space);
5582 	if (ret)
5583 		return ret;
5584 
5585 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5586 					   plane_info->rotation, tiling_flags,
5587 					   &plane_info->tiling_info,
5588 					   &plane_info->plane_size,
5589 					   &plane_info->dcc, address, tmz_surface,
5590 					   force_disable_dcc);
5591 	if (ret)
5592 		return ret;
5593 
5594 	fill_blending_from_plane_state(
5595 		plane_state, &plane_info->per_pixel_alpha,
5596 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5597 
5598 	return 0;
5599 }
5600 
5601 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5602 				    struct dc_plane_state *dc_plane_state,
5603 				    struct drm_plane_state *plane_state,
5604 				    struct drm_crtc_state *crtc_state)
5605 {
5606 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5607 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5608 	struct dc_scaling_info scaling_info;
5609 	struct dc_plane_info plane_info;
5610 	int ret;
5611 	bool force_disable_dcc = false;
5612 
5613 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5614 	if (ret)
5615 		return ret;
5616 
5617 	dc_plane_state->src_rect = scaling_info.src_rect;
5618 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5619 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5620 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5621 
5622 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5623 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5624 					  afb->tiling_flags,
5625 					  &plane_info,
5626 					  &dc_plane_state->address,
5627 					  afb->tmz_surface,
5628 					  force_disable_dcc);
5629 	if (ret)
5630 		return ret;
5631 
	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
5635 	dc_plane_state->plane_size = plane_info.plane_size;
5636 	dc_plane_state->rotation = plane_info.rotation;
5637 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5638 	dc_plane_state->stereo_format = plane_info.stereo_format;
5639 	dc_plane_state->tiling_info = plane_info.tiling_info;
5640 	dc_plane_state->visible = plane_info.visible;
5641 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5642 	dc_plane_state->global_alpha = plane_info.global_alpha;
5643 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5644 	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
5646 	dc_plane_state->flip_int_enabled = true;
5647 
5648 	/*
5649 	 * Always set input transfer function, since plane state is refreshed
5650 	 * every time.
5651 	 */
5652 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5653 	if (ret)
5654 		return ret;
5655 
5656 	return 0;
5657 }
5658 
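/*
 * Compute the stream source and destination rectangles from the selected
 * mode and the connector scaling/underscan properties (full-screen,
 * aspect-preserving or centered) and store them in the DC stream.
 */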
5659 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5660 					   const struct dm_connector_state *dm_state,
5661 					   struct dc_stream_state *stream)
5662 {
5663 	enum amdgpu_rmx_type rmx_type;
5664 
	struct rect src = { 0 }; /* viewport in composition space */
5666 	struct rect dst = { 0 }; /* stream addressable area */
5667 
5668 	/* no mode. nothing to be done */
5669 	if (!mode)
5670 		return;
5671 
5672 	/* Full screen scaling by default */
5673 	src.width = mode->hdisplay;
5674 	src.height = mode->vdisplay;
5675 	dst.width = stream->timing.h_addressable;
5676 	dst.height = stream->timing.v_addressable;
5677 
5678 	if (dm_state) {
5679 		rmx_type = dm_state->scaling;
5680 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5681 			if (src.width * dst.height <
5682 					src.height * dst.width) {
5683 				/* height needs less upscaling/more downscaling */
5684 				dst.width = src.width *
5685 						dst.height / src.height;
5686 			} else {
5687 				/* width needs less upscaling/more downscaling */
5688 				dst.height = src.height *
5689 						dst.width / src.width;
5690 			}
5691 		} else if (rmx_type == RMX_CENTER) {
5692 			dst = src;
5693 		}
5694 
5695 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5696 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5697 
5698 		if (dm_state->underscan_enable) {
5699 			dst.x += dm_state->underscan_hborder / 2;
5700 			dst.y += dm_state->underscan_vborder / 2;
5701 			dst.width -= dm_state->underscan_hborder;
5702 			dst.height -= dm_state->underscan_vborder;
5703 		}
5704 	}
5705 
5706 	stream->src = src;
5707 	stream->dst = dst;
5708 
5709 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5710 		      dst.x, dst.y, dst.width, dst.height);
5711 
5712 }
5713 
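/*
 * Pick a DC color depth from the connector's display info, honouring the
 * HDMI YCbCr 4:2:0 deep color caps and any bpc limit requested by userspace.
 */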
5714 static enum dc_color_depth
5715 convert_color_depth_from_display_info(const struct drm_connector *connector,
5716 				      bool is_y420, int requested_bpc)
5717 {
5718 	uint8_t bpc;
5719 
5720 	if (is_y420) {
5721 		bpc = 8;
5722 
5723 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5724 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5725 			bpc = 16;
5726 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5727 			bpc = 12;
5728 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5729 			bpc = 10;
5730 	} else {
5731 		bpc = (uint8_t)connector->display_info.bpc;
5732 		/* Assume 8 bpc by default if no bpc is specified. */
5733 		bpc = bpc ? bpc : 8;
5734 	}
5735 
5736 	if (requested_bpc > 0) {
5737 		/*
5738 		 * Cap display bpc based on the user requested value.
5739 		 *
		 * The value for state->max_bpc may not be correctly updated
5741 		 * depending on when the connector gets added to the state
5742 		 * or if this was called outside of atomic check, so it
5743 		 * can't be used directly.
5744 		 */
5745 		bpc = min_t(u8, bpc, requested_bpc);
5746 
5747 		/* Round down to the nearest even number. */
5748 		bpc = bpc - (bpc & 1);
5749 	}
5750 
5751 	switch (bpc) {
5752 	case 0:
5753 		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing
5757 		 */
5758 		return COLOR_DEPTH_888;
5759 	case 6:
5760 		return COLOR_DEPTH_666;
5761 	case 8:
5762 		return COLOR_DEPTH_888;
5763 	case 10:
5764 		return COLOR_DEPTH_101010;
5765 	case 12:
5766 		return COLOR_DEPTH_121212;
5767 	case 14:
5768 		return COLOR_DEPTH_141414;
5769 	case 16:
5770 		return COLOR_DEPTH_161616;
5771 	default:
5772 		return COLOR_DEPTH_UNDEFINED;
5773 	}
5774 }
5775 
5776 static enum dc_aspect_ratio
5777 get_aspect_ratio(const struct drm_display_mode *mode_in)
5778 {
5779 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5780 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5781 }
5782 
5783 static enum dc_color_space
5784 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5785 {
5786 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5787 
5788 	switch (dc_crtc_timing->pixel_encoding)	{
5789 	case PIXEL_ENCODING_YCBCR422:
5790 	case PIXEL_ENCODING_YCBCR444:
5791 	case PIXEL_ENCODING_YCBCR420:
5792 	{
5793 		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
5797 		 */
5798 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5799 			if (dc_crtc_timing->flags.Y_ONLY)
5800 				color_space =
5801 					COLOR_SPACE_YCBCR709_LIMITED;
5802 			else
5803 				color_space = COLOR_SPACE_YCBCR709;
5804 		} else {
5805 			if (dc_crtc_timing->flags.Y_ONLY)
5806 				color_space =
5807 					COLOR_SPACE_YCBCR601_LIMITED;
5808 			else
5809 				color_space = COLOR_SPACE_YCBCR601;
5810 		}
5811 
5812 	}
5813 	break;
5814 	case PIXEL_ENCODING_RGB:
5815 		color_space = COLOR_SPACE_SRGB;
5816 		break;
5817 
5818 	default:
5819 		WARN_ON(1);
5820 		break;
5821 	}
5822 
5823 	return color_space;
5824 }
5825 
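/*
 * Lower the colour depth until the resulting pixel clock fits within the
 * sink's maximum TMDS clock. Returns true if a usable depth was found.
 */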
5826 static bool adjust_colour_depth_from_display_info(
5827 	struct dc_crtc_timing *timing_out,
5828 	const struct drm_display_info *info)
5829 {
5830 	enum dc_color_depth depth = timing_out->display_color_depth;
5831 	int normalized_clk;
5832 	do {
5833 		normalized_clk = timing_out->pix_clk_100hz / 10;
5834 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5835 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5836 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5838 		switch (depth) {
5839 		case COLOR_DEPTH_888:
5840 			break;
5841 		case COLOR_DEPTH_101010:
5842 			normalized_clk = (normalized_clk * 30) / 24;
5843 			break;
5844 		case COLOR_DEPTH_121212:
5845 			normalized_clk = (normalized_clk * 36) / 24;
5846 			break;
5847 		case COLOR_DEPTH_161616:
5848 			normalized_clk = (normalized_clk * 48) / 24;
5849 			break;
5850 		default:
5851 			/* The above depths are the only ones valid for HDMI. */
5852 			return false;
5853 		}
5854 		if (normalized_clk <= info->max_tmds_clock) {
5855 			timing_out->display_color_depth = depth;
5856 			return true;
5857 		}
5858 	} while (--depth > COLOR_DEPTH_666);
5859 	return false;
5860 }
5861 
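/*
 * Fill the DC stream timing (pixel encoding, color depth, VIC, porches,
 * sync widths and pixel clock) from a DRM display mode, reusing the VIC
 * and sync polarities of old_stream when one is provided.
 */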
5862 static void fill_stream_properties_from_drm_display_mode(
5863 	struct dc_stream_state *stream,
5864 	const struct drm_display_mode *mode_in,
5865 	const struct drm_connector *connector,
5866 	const struct drm_connector_state *connector_state,
5867 	const struct dc_stream_state *old_stream,
5868 	int requested_bpc)
5869 {
5870 	struct dc_crtc_timing *timing_out = &stream->timing;
5871 	const struct drm_display_info *info = &connector->display_info;
5872 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5873 	struct hdmi_vendor_infoframe hv_frame;
5874 	struct hdmi_avi_infoframe avi_frame;
5875 
5876 	memset(&hv_frame, 0, sizeof(hv_frame));
5877 	memset(&avi_frame, 0, sizeof(avi_frame));
5878 
5879 	timing_out->h_border_left = 0;
5880 	timing_out->h_border_right = 0;
5881 	timing_out->v_border_top = 0;
5882 	timing_out->v_border_bottom = 0;
5883 	/* TODO: un-hardcode */
5884 	if (drm_mode_is_420_only(info, mode_in)
5885 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5886 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5887 	else if (drm_mode_is_420_also(info, mode_in)
5888 			&& aconnector->force_yuv420_output)
5889 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5890 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5891 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5892 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5893 	else
5894 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5895 
5896 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5897 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5898 		connector,
5899 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5900 		requested_bpc);
5901 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5902 	timing_out->hdmi_vic = 0;
5903 
	if (old_stream) {
5905 		timing_out->vic = old_stream->timing.vic;
5906 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5907 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5908 	} else {
5909 		timing_out->vic = drm_match_cea_mode(mode_in);
5910 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5911 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5912 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5913 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5914 	}
5915 
5916 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5917 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5918 		timing_out->vic = avi_frame.video_code;
5919 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5920 		timing_out->hdmi_vic = hv_frame.vic;
5921 	}
5922 
5923 	if (is_freesync_video_mode(mode_in, aconnector)) {
5924 		timing_out->h_addressable = mode_in->hdisplay;
5925 		timing_out->h_total = mode_in->htotal;
5926 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5927 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5928 		timing_out->v_total = mode_in->vtotal;
5929 		timing_out->v_addressable = mode_in->vdisplay;
5930 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5931 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5932 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5933 	} else {
5934 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5935 		timing_out->h_total = mode_in->crtc_htotal;
5936 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5937 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5938 		timing_out->v_total = mode_in->crtc_vtotal;
5939 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5940 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5941 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5942 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5943 	}
5944 
5945 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5946 
5947 	stream->output_color_space = get_output_color_space(timing_out);
5948 
5949 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5950 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5951 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5952 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5953 		    drm_mode_is_420_also(info, mode_in) &&
5954 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5955 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5956 			adjust_colour_depth_from_display_info(timing_out, info);
5957 		}
5958 	}
5959 }
5960 
5961 static void fill_audio_info(struct audio_info *audio_info,
5962 			    const struct drm_connector *drm_connector,
5963 			    const struct dc_sink *dc_sink)
5964 {
5965 	int i = 0;
5966 	int cea_revision = 0;
5967 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5968 
5969 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5970 	audio_info->product_id = edid_caps->product_id;
5971 
5972 	cea_revision = drm_connector->display_info.cea_rev;
5973 
5974 	strscpy(audio_info->display_name,
5975 		edid_caps->display_name,
5976 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5977 
5978 	if (cea_revision >= 3) {
5979 		audio_info->mode_count = edid_caps->audio_mode_count;
5980 
5981 		for (i = 0; i < audio_info->mode_count; ++i) {
5982 			audio_info->modes[i].format_code =
5983 					(enum audio_format_code)
5984 					(edid_caps->audio_modes[i].format_code);
5985 			audio_info->modes[i].channel_count =
5986 					edid_caps->audio_modes[i].channel_count;
5987 			audio_info->modes[i].sample_rates.all =
5988 					edid_caps->audio_modes[i].sample_rate;
5989 			audio_info->modes[i].sample_size =
5990 					edid_caps->audio_modes[i].sample_size;
5991 		}
5992 	}
5993 
5994 	audio_info->flags.all = edid_caps->speaker_flags;
5995 
5996 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5997 	if (drm_connector->latency_present[0]) {
5998 		audio_info->video_latency = drm_connector->video_latency[0];
5999 		audio_info->audio_latency = drm_connector->audio_latency[0];
6000 	}
6001 
6002 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6003 
6004 }
6005 
6006 static void
6007 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6008 				      struct drm_display_mode *dst_mode)
6009 {
6010 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6011 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6012 	dst_mode->crtc_clock = src_mode->crtc_clock;
6013 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6014 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
6016 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6017 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6018 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6019 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6020 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6021 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6022 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6023 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6024 }
6025 
6026 static void
6027 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6028 					const struct drm_display_mode *native_mode,
6029 					bool scale_enabled)
6030 {
6031 	if (scale_enabled) {
6032 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6033 	} else if (native_mode->clock == drm_mode->clock &&
6034 			native_mode->htotal == drm_mode->htotal &&
6035 			native_mode->vtotal == drm_mode->vtotal) {
6036 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6037 	} else {
		/* neither scaling nor an amdgpu-inserted mode, no need to patch */
6039 	}
6040 }
6041 
6042 static struct dc_sink *
6043 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6044 {
6045 	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
6048 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6049 
6050 	sink = dc_sink_create(&sink_init_data);
6051 	if (!sink) {
6052 		DRM_ERROR("Failed to create sink!\n");
6053 		return NULL;
6054 	}
6055 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6056 
6057 	return sink;
6058 }
6059 
6060 static void set_multisync_trigger_params(
6061 		struct dc_stream_state *stream)
6062 {
6063 	struct dc_stream_state *master = NULL;
6064 
6065 	if (stream->triggered_crtc_reset.enabled) {
6066 		master = stream->triggered_crtc_reset.event_source;
6067 		stream->triggered_crtc_reset.event =
6068 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6069 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6070 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6071 	}
6072 }
6073 
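/*
 * Pick the stream with the highest refresh rate as the master for
 * triggered CRTC resets and point every stream's event source at it.
 */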
6074 static void set_master_stream(struct dc_stream_state *stream_set[],
6075 			      int stream_count)
6076 {
6077 	int j, highest_rfr = 0, master_stream = 0;
6078 
6079 	for (j = 0;  j < stream_count; j++) {
6080 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6081 			int refresh_rate = 0;
6082 
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6085 			if (refresh_rate > highest_rfr) {
6086 				highest_rfr = refresh_rate;
6087 				master_stream = j;
6088 			}
6089 		}
6090 	}
6091 	for (j = 0;  j < stream_count; j++) {
6092 		if (stream_set[j])
6093 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6094 	}
6095 }
6096 
6097 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6098 {
6099 	int i = 0;
6100 	struct dc_stream_state *stream;
6101 
6102 	if (context->stream_count < 2)
6103 		return;
	for (i = 0; i < context->stream_count; i++) {
6105 		if (!context->streams[i])
6106 			continue;
6107 		/*
6108 		 * TODO: add a function to read AMD VSDB bits and set
6109 		 * crtc_sync_master.multi_sync_enabled flag
6110 		 * For now it's set to false
6111 		 */
6112 	}
6113 
6114 	set_master_stream(context->streams, context->stream_count);
6115 
	for (i = 0; i < context->stream_count; i++) {
6117 		stream = context->streams[i];
6118 
6119 		if (!stream)
6120 			continue;
6121 
6122 		set_multisync_trigger_params(stream);
6123 	}
6124 }
6125 
6126 #if defined(CONFIG_DRM_AMD_DC_DCN)
6127 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6128 							struct dc_sink *sink, struct dc_stream_state *stream,
6129 							struct dsc_dec_dpcd_caps *dsc_caps)
6130 {
6131 	stream->timing.flags.DSC = 0;
6132 	dsc_caps->is_dsc_supported = false;
6133 
6134 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6135 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6136 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6137 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6138 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6139 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6140 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6141 				dsc_caps);
6142 	}
6143 }
6144 
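/*
 * eDP DSC policy: compute a DSC configuration that fits the verified link
 * bandwidth, using the highest allowed bits-per-pixel when the compressed
 * stream already fits the link at that rate.
 */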
6145 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6146 				    struct dc_sink *sink, struct dc_stream_state *stream,
6147 				    struct dsc_dec_dpcd_caps *dsc_caps,
6148 				    uint32_t max_dsc_target_bpp_limit_override)
6149 {
6150 	const struct dc_link_settings *verified_link_cap = NULL;
6151 	uint32_t link_bw_in_kbps;
6152 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6153 	struct dc *dc = sink->ctx->dc;
6154 	struct dc_dsc_bw_range bw_range = {0};
6155 	struct dc_dsc_config dsc_cfg = {0};
6156 
6157 	verified_link_cap = dc_link_get_link_cap(stream->link);
6158 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6159 	edp_min_bpp_x16 = 8 * 16;
6160 	edp_max_bpp_x16 = 8 * 16;
6161 
6162 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6163 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6164 
6165 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6166 		edp_min_bpp_x16 = edp_max_bpp_x16;
6167 
6168 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6169 				dc->debug.dsc_min_slice_height_override,
6170 				edp_min_bpp_x16, edp_max_bpp_x16,
6171 				dsc_caps,
6172 				&stream->timing,
6173 				&bw_range)) {
6174 
6175 		if (bw_range.max_kbps < link_bw_in_kbps) {
6176 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6177 					dsc_caps,
6178 					dc->debug.dsc_min_slice_height_override,
6179 					max_dsc_target_bpp_limit_override,
6180 					0,
6181 					&stream->timing,
6182 					&dsc_cfg)) {
6183 				stream->timing.dsc_cfg = dsc_cfg;
6184 				stream->timing.flags.DSC = 1;
6185 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6186 			}
6187 			return;
6188 		}
6189 	}
6190 
6191 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6192 				dsc_caps,
6193 				dc->debug.dsc_min_slice_height_override,
6194 				max_dsc_target_bpp_limit_override,
6195 				link_bw_in_kbps,
6196 				&stream->timing,
6197 				&dsc_cfg)) {
6198 		stream->timing.dsc_cfg = dsc_cfg;
6199 		stream->timing.flags.DSC = 1;
6200 	}
6201 }
6202 
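/*
 * SST DSC policy: decide whether to enable DSC for this stream based on the
 * link type (eDP, SST DP receiver or DP-to-HDMI PCON), the available link
 * bandwidth and any overrides set through debugfs.
 */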
6203 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6204 										struct dc_sink *sink, struct dc_stream_state *stream,
6205 										struct dsc_dec_dpcd_caps *dsc_caps)
6206 {
6207 	struct drm_connector *drm_connector = &aconnector->base;
6208 	uint32_t link_bandwidth_kbps;
6209 	uint32_t max_dsc_target_bpp_limit_override = 0;
6210 	struct dc *dc = sink->ctx->dc;
6211 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6212 	uint32_t dsc_max_supported_bw_in_kbps;
6213 
6214 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6215 							dc_link_get_link_cap(aconnector->dc_link));
6216 
6217 	if (stream->link && stream->link->local_sink)
6218 		max_dsc_target_bpp_limit_override =
6219 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6220 
6221 	/* Set DSC policy according to dsc_clock_en */
6222 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6223 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6224 
6225 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6226 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6227 
6228 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6229 
6230 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6231 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6232 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6233 						dsc_caps,
6234 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6235 						max_dsc_target_bpp_limit_override,
6236 						link_bandwidth_kbps,
6237 						&stream->timing,
6238 						&stream->timing.dsc_cfg)) {
6239 				stream->timing.flags.DSC = 1;
6240 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6241 								 __func__, drm_connector->name);
6242 			}
6243 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6244 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6245 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6246 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6247 
6248 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6249 					max_supported_bw_in_kbps > 0 &&
6250 					dsc_max_supported_bw_in_kbps > 0)
6251 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6252 						dsc_caps,
6253 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6254 						max_dsc_target_bpp_limit_override,
6255 						dsc_max_supported_bw_in_kbps,
6256 						&stream->timing,
6257 						&stream->timing.dsc_cfg)) {
6258 					stream->timing.flags.DSC = 1;
6259 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6260 									 __func__, drm_connector->name);
6261 				}
6262 		}
6263 	}
6264 
6265 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6266 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6267 		stream->timing.flags.DSC = 1;
6268 
6269 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6270 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6271 
6272 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6273 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6274 
6275 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6276 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6277 }
6278 #endif /* CONFIG_DRM_AMD_DC_DCN */
6279 
6280 /**
6281  * DOC: FreeSync Video
6282  *
6283  * When a userspace application wants to play a video, the content follows a
6284  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
6287  *
6288  * - TV/NTSC (23.976 FPS)
6289  * - Cinema (24 FPS)
6290  * - TV/PAL (25 FPS)
6291  * - TV/NTSC (29.97 FPS)
6292  * - TV/NTSC (30 FPS)
6293  * - Cinema HFR (48 FPS)
6294  * - TV/PAL (50 FPS)
6295  * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96, 120 FPS)
6297  *
 * The list of standard video formats is not huge and can be added to the
 * connector's mode list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
6301  * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blinking during the
 * transition. For example, a video player can change the modesetting from
6306  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6307  * causing any display blink. This same concept can be applied to a mode
6308  * setting change.
6309  */
6310 static struct drm_display_mode *
6311 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6312 			  bool use_probed_modes)
6313 {
6314 	struct drm_display_mode *m, *m_pref = NULL;
6315 	u16 current_refresh, highest_refresh;
6316 	struct list_head *list_head = use_probed_modes ?
6317 						    &aconnector->base.probed_modes :
6318 						    &aconnector->base.modes;
6319 
6320 	if (aconnector->freesync_vid_base.clock != 0)
6321 		return &aconnector->freesync_vid_base;
6322 
6323 	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
6325 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6326 			m_pref = m;
6327 			break;
6328 		}
6329 	}
6330 
6331 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry */
6333 		m_pref = list_first_entry_or_null(
6334 			&aconnector->base.modes, struct drm_display_mode, head);
6335 		if (!m_pref) {
6336 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6337 			return NULL;
6338 		}
6339 	}
6340 
6341 	highest_refresh = drm_mode_vrefresh(m_pref);
6342 
6343 	/*
6344 	 * Find the mode with highest refresh rate with same resolution.
6345 	 * For some monitors, preferred mode is not the mode with highest
6346 	 * supported refresh rate.
6347 	 */
	list_for_each_entry(m, list_head, head) {
6349 		current_refresh  = drm_mode_vrefresh(m);
6350 
6351 		if (m->hdisplay == m_pref->hdisplay &&
6352 		    m->vdisplay == m_pref->vdisplay &&
6353 		    highest_refresh < current_refresh) {
6354 			highest_refresh = current_refresh;
6355 			m_pref = m;
6356 		}
6357 	}
6358 
6359 	aconnector->freesync_vid_base = *m_pref;
6360 	return m_pref;
6361 }
6362 
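/*
 * A mode is a FreeSync video mode when it matches the base (highest
 * refresh rate) mode in everything but the vertical front porch length,
 * so the refresh rate can be changed without a full modeset.
 */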
6363 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6364 				   struct amdgpu_dm_connector *aconnector)
6365 {
6366 	struct drm_display_mode *high_mode;
6367 	int timing_diff;
6368 
6369 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6370 	if (!high_mode || !mode)
6371 		return false;
6372 
6373 	timing_diff = high_mode->vtotal - mode->vtotal;
6374 
6375 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6376 	    high_mode->hdisplay != mode->hdisplay ||
6377 	    high_mode->vdisplay != mode->vdisplay ||
6378 	    high_mode->hsync_start != mode->hsync_start ||
6379 	    high_mode->hsync_end != mode->hsync_end ||
6380 	    high_mode->htotal != mode->htotal ||
6381 	    high_mode->hskew != mode->hskew ||
6382 	    high_mode->vscan != mode->vscan ||
6383 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6384 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6385 		return false;
6386 	else
6387 		return true;
6388 }
6389 
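/*
 * Create and fill a DC stream for the given connector and mode: pick the
 * preferred or FreeSync base timing, fill the stream properties, apply the
 * DSC policy, scaling and audio info, and set up PSR/VSC packets when the
 * link supports PSR.
 */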
6390 static struct dc_stream_state *
6391 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6392 		       const struct drm_display_mode *drm_mode,
6393 		       const struct dm_connector_state *dm_state,
6394 		       const struct dc_stream_state *old_stream,
6395 		       int requested_bpc)
6396 {
6397 	struct drm_display_mode *preferred_mode = NULL;
6398 	struct drm_connector *drm_connector;
6399 	const struct drm_connector_state *con_state =
6400 		dm_state ? &dm_state->base : NULL;
6401 	struct dc_stream_state *stream = NULL;
6402 	struct drm_display_mode mode = *drm_mode;
6403 	struct drm_display_mode saved_mode;
6404 	struct drm_display_mode *freesync_mode = NULL;
6405 	bool native_mode_found = false;
6406 	bool recalculate_timing = false;
6407 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6408 	int mode_refresh;
6409 	int preferred_refresh = 0;
6410 #if defined(CONFIG_DRM_AMD_DC_DCN)
6411 	struct dsc_dec_dpcd_caps dsc_caps;
6412 #endif
6413 	struct dc_sink *sink = NULL;
6414 
6415 	memset(&saved_mode, 0, sizeof(saved_mode));
6416 
6417 	if (aconnector == NULL) {
6418 		DRM_ERROR("aconnector is NULL!\n");
6419 		return stream;
6420 	}
6421 
6422 	drm_connector = &aconnector->base;
6423 
6424 	if (!aconnector->dc_sink) {
6425 		sink = create_fake_sink(aconnector);
6426 		if (!sink)
6427 			return stream;
6428 	} else {
6429 		sink = aconnector->dc_sink;
6430 		dc_sink_retain(sink);
6431 	}
6432 
6433 	stream = dc_create_stream_for_sink(sink);
6434 
6435 	if (stream == NULL) {
6436 		DRM_ERROR("Failed to create stream for sink!\n");
6437 		goto finish;
6438 	}
6439 
6440 	stream->dm_stream_context = aconnector;
6441 
6442 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6443 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6444 
6445 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6446 		/* Search for preferred mode */
6447 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6448 			native_mode_found = true;
6449 			break;
6450 		}
6451 	}
6452 	if (!native_mode_found)
6453 		preferred_mode = list_first_entry_or_null(
6454 				&aconnector->base.modes,
6455 				struct drm_display_mode,
6456 				head);
6457 
6458 	mode_refresh = drm_mode_vrefresh(&mode);
6459 
6460 	if (preferred_mode == NULL) {
6461 		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set the mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode,
		 * and the mode list may not be filled in yet.
6466 		 */
6467 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6468 	} else {
6469 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6470 		if (recalculate_timing) {
6471 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6472 			saved_mode = mode;
6473 			mode = *freesync_mode;
6474 		} else {
6475 			decide_crtc_timing_for_drm_display_mode(
6476 				&mode, preferred_mode, scale);
6477 
6478 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6479 		}
6480 	}
6481 
6482 	if (recalculate_timing)
6483 		drm_mode_set_crtcinfo(&saved_mode, 0);
6484 	else if (!dm_state)
6485 		drm_mode_set_crtcinfo(&mode, 0);
6486 
	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
6491 	if (!scale || mode_refresh != preferred_refresh)
6492 		fill_stream_properties_from_drm_display_mode(
6493 			stream, &mode, &aconnector->base, con_state, NULL,
6494 			requested_bpc);
6495 	else
6496 		fill_stream_properties_from_drm_display_mode(
6497 			stream, &mode, &aconnector->base, con_state, old_stream,
6498 			requested_bpc);
6499 
6500 #if defined(CONFIG_DRM_AMD_DC_DCN)
6501 	/* SST DSC determination policy */
6502 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6503 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6504 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6505 #endif
6506 
6507 	update_stream_scaling_settings(&mode, dm_state, stream);
6508 
6509 	fill_audio_info(
6510 		&stream->audio_info,
6511 		drm_connector,
6512 		sink);
6513 
6514 	update_stream_signal(stream, sink);
6515 
6516 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6517 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6518 
6519 	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports the VSC SDP colorimetry
		 * capability before building the VSC info packet.
		 */
6524 		stream->use_vsc_sdp_for_colorimetry = false;
6525 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6526 			stream->use_vsc_sdp_for_colorimetry =
6527 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6528 		} else {
6529 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6530 				stream->use_vsc_sdp_for_colorimetry = true;
6531 		}
6532 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6533 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6534 
6535 	}
6536 finish:
6537 	dc_sink_release(sink);
6538 
6539 	return stream;
6540 }
6541 
6542 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6543 {
6544 	drm_crtc_cleanup(crtc);
6545 	kfree(crtc);
6546 }
6547 
6548 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6549 				  struct drm_crtc_state *state)
6550 {
6551 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6552 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6554 	if (cur->stream)
6555 		dc_stream_release(cur->stream);
6556 
	__drm_atomic_helper_crtc_destroy_state(state);

6561 	kfree(state);
6562 }
6563 
6564 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6565 {
6566 	struct dm_crtc_state *state;
6567 
6568 	if (crtc->state)
6569 		dm_crtc_destroy_state(crtc, crtc->state);
6570 
6571 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6572 	if (WARN_ON(!state))
6573 		return;
6574 
6575 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6576 }
6577 
6578 static struct drm_crtc_state *
6579 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6580 {
6581 	struct dm_crtc_state *state, *cur;
6582 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6587 
6588 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6589 	if (!state)
6590 		return NULL;
6591 
6592 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6593 
6594 	if (cur->stream) {
6595 		state->stream = cur->stream;
6596 		dc_stream_retain(state->stream);
6597 	}
6598 
6599 	state->active_planes = cur->active_planes;
6600 	state->vrr_infopacket = cur->vrr_infopacket;
6601 	state->abm_level = cur->abm_level;
6602 	state->vrr_supported = cur->vrr_supported;
6603 	state->freesync_config = cur->freesync_config;
6604 	state->cm_has_degamma = cur->cm_has_degamma;
6605 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6606 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6608 
6609 	return &state->base;
6610 }
6611 
6612 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6613 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6614 {
6615 	crtc_debugfs_init(crtc);
6616 
6617 	return 0;
6618 }
6619 #endif
6620 
6621 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6622 {
6623 	enum dc_irq_source irq_source;
6624 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6625 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6626 	int rc;
6627 
6628 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6629 
6630 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6631 
6632 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6633 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6634 	return rc;
6635 }
6636 
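/*
 * Enable or disable the VBLANK interrupt for a CRTC. The VUPDATE interrupt is
 * only kept on alongside VBLANK while VRR is active, and any per-stream work
 * is queued to the vblank control workqueue, presumably so heavier work such
 * as PSR handling can run in process context instead of this path.
 */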
6637 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6638 {
6639 	enum dc_irq_source irq_source;
6640 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6641 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6642 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6643 #if defined(CONFIG_DRM_AMD_DC_DCN)
6644 	struct amdgpu_display_manager *dm = &adev->dm;
6645 	struct vblank_control_work *work;
6646 #endif
6647 	int rc = 0;
6648 
6649 	if (enable) {
6650 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6651 		if (amdgpu_dm_vrr_active(acrtc_state))
6652 			rc = dm_set_vupdate_irq(crtc, true);
6653 	} else {
6654 		/* vblank irq off -> vupdate irq off */
6655 		rc = dm_set_vupdate_irq(crtc, false);
6656 	}
6657 
6658 	if (rc)
6659 		return rc;
6660 
6661 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6662 
6663 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6664 		return -EBUSY;
6665 
6666 	if (amdgpu_in_reset(adev))
6667 		return 0;
6668 
6669 #if defined(CONFIG_DRM_AMD_DC_DCN)
6670 	if (dm->vblank_control_workqueue) {
6671 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6672 		if (!work)
6673 			return -ENOMEM;
6674 
6675 		INIT_WORK(&work->work, vblank_control_worker);
6676 		work->dm = dm;
6677 		work->acrtc = acrtc;
6678 		work->enable = enable;
6679 
6680 		if (acrtc_state->stream) {
6681 			dc_stream_retain(acrtc_state->stream);
6682 			work->stream = acrtc_state->stream;
6683 		}
6684 
6685 		queue_work(dm->vblank_control_workqueue, &work->work);
6686 	}
6687 #endif
6688 
6689 	return 0;
6690 }
6691 
6692 static int dm_enable_vblank(struct drm_crtc *crtc)
6693 {
6694 	return dm_set_vblank(crtc, true);
6695 }
6696 
6697 static void dm_disable_vblank(struct drm_crtc *crtc)
6698 {
6699 	dm_set_vblank(crtc, false);
6700 }
6701 
/* Implements only the options currently available to the driver */
6703 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6704 	.reset = dm_crtc_reset_state,
6705 	.destroy = amdgpu_dm_crtc_destroy,
6706 	.set_config = drm_atomic_helper_set_config,
6707 	.page_flip = drm_atomic_helper_page_flip,
6708 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6709 	.atomic_destroy_state = dm_crtc_destroy_state,
6710 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6711 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6712 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6713 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6714 	.enable_vblank = dm_enable_vblank,
6715 	.disable_vblank = dm_disable_vblank,
6716 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6717 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6718 	.late_register = amdgpu_dm_crtc_late_register,
6719 #endif
6720 };
6721 
6722 static enum drm_connector_status
6723 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6724 {
6725 	bool connected;
6726 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6727 
6728 	/*
6729 	 * Notes:
6730 	 * 1. This interface is NOT called in context of HPD irq.
6731 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6732 	 * makes it a bad place for *any* MST-related activity.
6733 	 */
6734 
6735 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6736 	    !aconnector->fake_enable)
6737 		connected = (aconnector->dc_sink != NULL);
6738 	else
6739 		connected = (aconnector->base.force == DRM_FORCE_ON);
6740 
6741 	update_subconnector_property(aconnector);
6742 
6743 	return (connected ? connector_status_connected :
6744 			connector_status_disconnected);
6745 }
6746 
6747 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6748 					    struct drm_connector_state *connector_state,
6749 					    struct drm_property *property,
6750 					    uint64_t val)
6751 {
6752 	struct drm_device *dev = connector->dev;
6753 	struct amdgpu_device *adev = drm_to_adev(dev);
6754 	struct dm_connector_state *dm_old_state =
6755 		to_dm_connector_state(connector->state);
6756 	struct dm_connector_state *dm_new_state =
6757 		to_dm_connector_state(connector_state);
6758 
6759 	int ret = -EINVAL;
6760 
6761 	if (property == dev->mode_config.scaling_mode_property) {
6762 		enum amdgpu_rmx_type rmx_type;
6763 
6764 		switch (val) {
6765 		case DRM_MODE_SCALE_CENTER:
6766 			rmx_type = RMX_CENTER;
6767 			break;
6768 		case DRM_MODE_SCALE_ASPECT:
6769 			rmx_type = RMX_ASPECT;
6770 			break;
6771 		case DRM_MODE_SCALE_FULLSCREEN:
6772 			rmx_type = RMX_FULL;
6773 			break;
6774 		case DRM_MODE_SCALE_NONE:
6775 		default:
6776 			rmx_type = RMX_OFF;
6777 			break;
6778 		}
6779 
6780 		if (dm_old_state->scaling == rmx_type)
6781 			return 0;
6782 
6783 		dm_new_state->scaling = rmx_type;
6784 		ret = 0;
6785 	} else if (property == adev->mode_info.underscan_hborder_property) {
6786 		dm_new_state->underscan_hborder = val;
6787 		ret = 0;
6788 	} else if (property == adev->mode_info.underscan_vborder_property) {
6789 		dm_new_state->underscan_vborder = val;
6790 		ret = 0;
6791 	} else if (property == adev->mode_info.underscan_property) {
6792 		dm_new_state->underscan_enable = val;
6793 		ret = 0;
6794 	} else if (property == adev->mode_info.abm_level_property) {
6795 		dm_new_state->abm_level = val;
6796 		ret = 0;
6797 	}
6798 
6799 	return ret;
6800 }
6801 
6802 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6803 					    const struct drm_connector_state *state,
6804 					    struct drm_property *property,
6805 					    uint64_t *val)
6806 {
6807 	struct drm_device *dev = connector->dev;
6808 	struct amdgpu_device *adev = drm_to_adev(dev);
6809 	struct dm_connector_state *dm_state =
6810 		to_dm_connector_state(state);
6811 	int ret = -EINVAL;
6812 
6813 	if (property == dev->mode_config.scaling_mode_property) {
6814 		switch (dm_state->scaling) {
6815 		case RMX_CENTER:
6816 			*val = DRM_MODE_SCALE_CENTER;
6817 			break;
6818 		case RMX_ASPECT:
6819 			*val = DRM_MODE_SCALE_ASPECT;
6820 			break;
6821 		case RMX_FULL:
6822 			*val = DRM_MODE_SCALE_FULLSCREEN;
6823 			break;
6824 		case RMX_OFF:
6825 		default:
6826 			*val = DRM_MODE_SCALE_NONE;
6827 			break;
6828 		}
6829 		ret = 0;
6830 	} else if (property == adev->mode_info.underscan_hborder_property) {
6831 		*val = dm_state->underscan_hborder;
6832 		ret = 0;
6833 	} else if (property == adev->mode_info.underscan_vborder_property) {
6834 		*val = dm_state->underscan_vborder;
6835 		ret = 0;
6836 	} else if (property == adev->mode_info.underscan_property) {
6837 		*val = dm_state->underscan_enable;
6838 		ret = 0;
6839 	} else if (property == adev->mode_info.abm_level_property) {
6840 		*val = dm_state->abm_level;
6841 		ret = 0;
6842 	}
6843 
6844 	return ret;
6845 }
6846 
6847 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6848 {
6849 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6850 
6851 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6852 }
6853 
6854 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6855 {
6856 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6857 	const struct dc_link *link = aconnector->dc_link;
6858 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6859 	struct amdgpu_display_manager *dm = &adev->dm;
6860 	int i;
6861 
6862 	/*
	 * Call only if mst_mgr was initialized earlier, since it's not done
6864 	 * for all connector types.
6865 	 */
6866 	if (aconnector->mst_mgr.dev)
6867 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6868 
6869 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6870 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6871 	for (i = 0; i < dm->num_of_edps; i++) {
6872 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6873 			backlight_device_unregister(dm->backlight_dev[i]);
6874 			dm->backlight_dev[i] = NULL;
6875 		}
6876 	}
6877 #endif
6878 
6879 	if (aconnector->dc_em_sink)
6880 		dc_sink_release(aconnector->dc_em_sink);
6881 	aconnector->dc_em_sink = NULL;
6882 	if (aconnector->dc_sink)
6883 		dc_sink_release(aconnector->dc_sink);
6884 	aconnector->dc_sink = NULL;
6885 
6886 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6887 	drm_connector_unregister(connector);
6888 	drm_connector_cleanup(connector);
6889 	if (aconnector->i2c) {
6890 		i2c_del_adapter(&aconnector->i2c->base);
6891 		kfree(aconnector->i2c);
6892 	}
6893 	kfree(aconnector->dm_dp_aux.aux.name);
6894 
6895 	kfree(connector);
6896 }
6897 
6898 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6899 {
6900 	struct dm_connector_state *state =
6901 		to_dm_connector_state(connector->state);
6902 
6903 	if (connector->state)
6904 		__drm_atomic_helper_connector_destroy_state(connector->state);
6905 
6906 	kfree(state);
6907 
6908 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6909 
6910 	if (state) {
6911 		state->scaling = RMX_OFF;
6912 		state->underscan_enable = false;
6913 		state->underscan_hborder = 0;
6914 		state->underscan_vborder = 0;
6915 		state->base.max_requested_bpc = 8;
6916 		state->vcpi_slots = 0;
6917 		state->pbn = 0;
6918 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6919 			state->abm_level = amdgpu_dm_abm_level;
6920 
6921 		__drm_atomic_helper_connector_reset(connector, &state->base);
6922 	}
6923 }
6924 
6925 struct drm_connector_state *
6926 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6927 {
6928 	struct dm_connector_state *state =
6929 		to_dm_connector_state(connector->state);
6930 
6931 	struct dm_connector_state *new_state =
6932 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6933 
6934 	if (!new_state)
6935 		return NULL;
6936 
6937 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6938 
6939 	new_state->freesync_capable = state->freesync_capable;
6940 	new_state->abm_level = state->abm_level;
6941 	new_state->scaling = state->scaling;
6942 	new_state->underscan_enable = state->underscan_enable;
6943 	new_state->underscan_hborder = state->underscan_hborder;
6944 	new_state->underscan_vborder = state->underscan_vborder;
6945 	new_state->vcpi_slots = state->vcpi_slots;
6946 	new_state->pbn = state->pbn;
6947 	return &new_state->base;
6948 }
6949 
6950 static int
6951 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6952 {
6953 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6954 		to_amdgpu_dm_connector(connector);
6955 	int r;
6956 
6957 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6958 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6959 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6960 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6961 		if (r)
6962 			return r;
6963 	}
6964 
6965 #if defined(CONFIG_DEBUG_FS)
6966 	connector_debugfs_init(amdgpu_dm_connector);
6967 #endif
6968 
6969 	return 0;
6970 }
6971 
6972 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6973 	.reset = amdgpu_dm_connector_funcs_reset,
6974 	.detect = amdgpu_dm_connector_detect,
6975 	.fill_modes = drm_helper_probe_single_connector_modes,
6976 	.destroy = amdgpu_dm_connector_destroy,
6977 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6978 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6979 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6980 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6981 	.late_register = amdgpu_dm_connector_late_register,
6982 	.early_unregister = amdgpu_dm_connector_unregister
6983 };
6984 
6985 static int get_modes(struct drm_connector *connector)
6986 {
6987 	return amdgpu_dm_connector_get_modes(connector);
6988 }
6989 
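/*
 * Create an emulated sink from the connector's EDID blob so that a forced
 * connector can be driven without a physically detected sink. If no EDID blob
 * exists, the connector is forced off instead.
 */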
6990 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6991 {
6992 	struct dc_sink_init_data init_params = {
6993 			.link = aconnector->dc_link,
6994 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6995 	};
6996 	struct edid *edid;
6997 
6998 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7000 				aconnector->base.name);
7001 
7002 		aconnector->base.force = DRM_FORCE_OFF;
7003 		aconnector->base.override_edid = false;
7004 		return;
7005 	}
7006 
7007 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7008 
7009 	aconnector->edid = edid;
7010 
7011 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7012 		aconnector->dc_link,
7013 		(uint8_t *)edid,
7014 		(edid->extensions + 1) * EDID_LENGTH,
7015 		&init_params);
7016 
7017 	if (aconnector->base.force == DRM_FORCE_ON) {
7018 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7019 		aconnector->dc_link->local_sink :
7020 		aconnector->dc_em_sink;
7021 		dc_sink_retain(aconnector->dc_sink);
7022 	}
7023 }
7024 
7025 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7026 {
7027 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7028 
7029 	/*
	 * In case of a headless boot with a forced-on DP managed connector,
	 * these settings have to be != 0 to get an initial modeset.
7032 	 */
7033 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7034 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7035 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7036 	}
7037 
7038 
7039 	aconnector->base.override_edid = true;
7040 	create_eml_sink(aconnector);
7041 }
7042 
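/*
 * Create a dc_stream_state for the sink and validate it against DC. If
 * validation fails, retry with the bpc lowered in steps of 2 down to 6; as a
 * last resort on DC_FAIL_ENC_VALIDATE, retry once with YCbCr420 forced.
 */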
7043 struct dc_stream_state *
7044 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7045 				const struct drm_display_mode *drm_mode,
7046 				const struct dm_connector_state *dm_state,
7047 				const struct dc_stream_state *old_stream)
7048 {
7049 	struct drm_connector *connector = &aconnector->base;
7050 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7051 	struct dc_stream_state *stream;
7052 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7053 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7054 	enum dc_status dc_result = DC_OK;
7055 
7056 	do {
7057 		stream = create_stream_for_sink(aconnector, drm_mode,
7058 						dm_state, old_stream,
7059 						requested_bpc);
7060 		if (stream == NULL) {
7061 			DRM_ERROR("Failed to create stream for sink!\n");
7062 			break;
7063 		}
7064 
7065 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7066 
7067 		if (dc_result != DC_OK) {
7068 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7069 				      drm_mode->hdisplay,
7070 				      drm_mode->vdisplay,
7071 				      drm_mode->clock,
7072 				      dc_result,
7073 				      dc_status_to_str(dc_result));
7074 
7075 			dc_stream_release(stream);
7076 			stream = NULL;
7077 			requested_bpc -= 2; /* lower bpc to retry validation */
7078 		}
7079 
7080 	} while (stream == NULL && requested_bpc >= 6);
7081 
7082 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7083 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7084 
7085 		aconnector->force_yuv420_output = true;
7086 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7087 						dm_state, old_stream);
7088 		aconnector->force_yuv420_output = false;
7089 	}
7090 
7091 	return stream;
7092 }
7093 
7094 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7095 				   struct drm_display_mode *mode)
7096 {
7097 	int result = MODE_ERROR;
7098 	struct dc_sink *dc_sink;
7099 	/* TODO: Unhardcode stream count */
7100 	struct dc_stream_state *stream;
7101 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7102 
7103 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7104 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7105 		return result;
7106 
7107 	/*
	 * Only run this the first time mode_valid is called, to initialize
	 * EDID management.
7110 	 */
7111 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7112 		!aconnector->dc_em_sink)
7113 		handle_edid_mgmt(aconnector);
7114 
7115 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7116 
7117 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7118 				aconnector->base.force != DRM_FORCE_ON) {
7119 		DRM_ERROR("dc_sink is NULL!\n");
7120 		goto fail;
7121 	}
7122 
7123 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7124 	if (stream) {
7125 		dc_stream_release(stream);
7126 		result = MODE_OK;
7127 	}
7128 
7129 fail:
	/* TODO: error handling */
7131 	return result;
7132 }
7133 
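/*
 * Pack the connector's HDR static metadata into a DC info packet: an HDMI
 * DRM infoframe header for HDMI sinks, or the equivalent SDP layout for
 * DP/eDP sinks.
 */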
7134 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7135 				struct dc_info_packet *out)
7136 {
7137 	struct hdmi_drm_infoframe frame;
7138 	unsigned char buf[30]; /* 26 + 4 */
7139 	ssize_t len;
7140 	int ret, i;
7141 
7142 	memset(out, 0, sizeof(*out));
7143 
7144 	if (!state->hdr_output_metadata)
7145 		return 0;
7146 
7147 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7148 	if (ret)
7149 		return ret;
7150 
7151 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7152 	if (len < 0)
7153 		return (int)len;
7154 
7155 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7156 	if (len != 30)
7157 		return -EINVAL;
7158 
7159 	/* Prepare the infopacket for DC. */
7160 	switch (state->connector->connector_type) {
7161 	case DRM_MODE_CONNECTOR_HDMIA:
7162 		out->hb0 = 0x87; /* type */
7163 		out->hb1 = 0x01; /* version */
7164 		out->hb2 = 0x1A; /* length */
7165 		out->sb[0] = buf[3]; /* checksum */
7166 		i = 1;
7167 		break;
7168 
7169 	case DRM_MODE_CONNECTOR_DisplayPort:
7170 	case DRM_MODE_CONNECTOR_eDP:
7171 		out->hb0 = 0x00; /* sdp id, zero */
7172 		out->hb1 = 0x87; /* type */
7173 		out->hb2 = 0x1D; /* payload len - 1 */
7174 		out->hb3 = (0x13 << 2); /* sdp version */
7175 		out->sb[0] = 0x01; /* version */
7176 		out->sb[1] = 0x1A; /* length */
7177 		i = 2;
7178 		break;
7179 
7180 	default:
7181 		return -EINVAL;
7182 	}
7183 
7184 	memcpy(&out->sb[i], &buf[4], 26);
7185 	out->valid = true;
7186 
7187 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7188 		       sizeof(out->sb), false);
7189 
7190 	return 0;
7191 }
7192 
7193 static int
7194 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7195 				 struct drm_atomic_state *state)
7196 {
7197 	struct drm_connector_state *new_con_state =
7198 		drm_atomic_get_new_connector_state(state, conn);
7199 	struct drm_connector_state *old_con_state =
7200 		drm_atomic_get_old_connector_state(state, conn);
7201 	struct drm_crtc *crtc = new_con_state->crtc;
7202 	struct drm_crtc_state *new_crtc_state;
7203 	int ret;
7204 
7205 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7206 
7207 	if (!crtc)
7208 		return 0;
7209 
7210 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7211 		struct dc_info_packet hdr_infopacket;
7212 
7213 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7214 		if (ret)
7215 			return ret;
7216 
7217 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7218 		if (IS_ERR(new_crtc_state))
7219 			return PTR_ERR(new_crtc_state);
7220 
7221 		/*
7222 		 * DC considers the stream backends changed if the
7223 		 * static metadata changes. Forcing the modeset also
7224 		 * gives a simple way for userspace to switch from
7225 		 * 8bpc to 10bpc when setting the metadata to enter
7226 		 * or exit HDR.
7227 		 *
7228 		 * Changing the static metadata after it's been
7229 		 * set is permissible, however. So only force a
7230 		 * modeset if we're entering or exiting HDR.
7231 		 */
7232 		new_crtc_state->mode_changed =
7233 			!old_con_state->hdr_output_metadata ||
7234 			!new_con_state->hdr_output_metadata;
7235 	}
7236 
7237 	return 0;
7238 }
7239 
7240 static const struct drm_connector_helper_funcs
7241 amdgpu_dm_connector_helper_funcs = {
7242 	/*
	 * If a second, larger display is hotplugged in fbcon mode, its higher
	 * resolution modes will be filtered out by drm_mode_validate_size() and
	 * are missing once the user starts lightdm. So we need to rebuild the
	 * modes list in the get_modes callback, not just return the mode count.
7247 	 */
7248 	.get_modes = get_modes,
7249 	.mode_valid = amdgpu_dm_connector_mode_valid,
7250 	.atomic_check = amdgpu_dm_connector_atomic_check,
7251 };
7252 
7253 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7254 {
7255 }
7256 
7257 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7258 {
7259 	struct drm_atomic_state *state = new_crtc_state->state;
7260 	struct drm_plane *plane;
7261 	int num_active = 0;
7262 
7263 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7264 		struct drm_plane_state *new_plane_state;
7265 
7266 		/* Cursor planes are "fake". */
7267 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7268 			continue;
7269 
7270 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7271 
7272 		if (!new_plane_state) {
7273 			/*
			 * The plane is enabled on the CRTC and hasn't changed
7275 			 * state. This means that it previously passed
7276 			 * validation and is therefore enabled.
7277 			 */
7278 			num_active += 1;
7279 			continue;
7280 		}
7281 
7282 		/* We need a framebuffer to be considered enabled. */
7283 		num_active += (new_plane_state->fb != NULL);
7284 	}
7285 
7286 	return num_active;
7287 }
7288 
7289 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7290 					 struct drm_crtc_state *new_crtc_state)
7291 {
7292 	struct dm_crtc_state *dm_new_crtc_state =
7293 		to_dm_crtc_state(new_crtc_state);
7294 
7295 	dm_new_crtc_state->active_planes = 0;
7296 
7297 	if (!dm_new_crtc_state->stream)
7298 		return;
7299 
7300 	dm_new_crtc_state->active_planes =
7301 		count_crtc_active_planes(new_crtc_state);
7302 }
7303 
7304 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7305 				       struct drm_atomic_state *state)
7306 {
7307 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7308 									  crtc);
7309 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7310 	struct dc *dc = adev->dm.dc;
7311 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7312 	int ret = -EINVAL;
7313 
7314 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7315 
7316 	dm_update_crtc_active_planes(crtc, crtc_state);
7317 
7318 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7319 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7320 		return ret;
7321 	}
7322 
7323 	/*
7324 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7325 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7326 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7327 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7328 	 */
7329 	if (crtc_state->enable &&
7330 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7331 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7332 		return -EINVAL;
7333 	}
7334 
7335 	/* In some use cases, like reset, no stream is attached */
7336 	if (!dm_crtc_state->stream)
7337 		return 0;
7338 
7339 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7340 		return 0;
7341 
7342 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7343 	return ret;
7344 }
7345 
7346 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7347 				      const struct drm_display_mode *mode,
7348 				      struct drm_display_mode *adjusted_mode)
7349 {
7350 	return true;
7351 }
7352 
7353 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7354 	.disable = dm_crtc_helper_disable,
7355 	.atomic_check = dm_crtc_helper_atomic_check,
7356 	.mode_fixup = dm_crtc_helper_mode_fixup,
7357 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7358 };
7359 
7360 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7361 {
7362 
7363 }
7364 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7385 
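/*
 * For MST connectors, compute the required PBN from the adjusted mode's pixel
 * clock and the negotiated bpp, then find and reserve VCPI slots for it in
 * the atomic state.
 */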
7386 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7387 					  struct drm_crtc_state *crtc_state,
7388 					  struct drm_connector_state *conn_state)
7389 {
7390 	struct drm_atomic_state *state = crtc_state->state;
7391 	struct drm_connector *connector = conn_state->connector;
7392 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7393 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7394 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7395 	struct drm_dp_mst_topology_mgr *mst_mgr;
7396 	struct drm_dp_mst_port *mst_port;
7397 	enum dc_color_depth color_depth;
7398 	int clock, bpp = 0;
7399 	bool is_y420 = false;
7400 
7401 	if (!aconnector->port || !aconnector->dc_sink)
7402 		return 0;
7403 
7404 	mst_port = aconnector->port;
7405 	mst_mgr = &aconnector->mst_port->mst_mgr;
7406 
7407 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7408 		return 0;
7409 
7410 	if (!state->duplicated) {
7411 		int max_bpc = conn_state->max_requested_bpc;
7412 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7413 				aconnector->force_yuv420_output;
7414 		color_depth = convert_color_depth_from_display_info(connector,
7415 								    is_y420,
7416 								    max_bpc);
7417 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7418 		clock = adjusted_mode->clock;
7419 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7420 	}
7421 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7422 									   mst_mgr,
7423 									   mst_port,
7424 									   dm_new_connector_state->pbn,
7425 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7426 	if (dm_new_connector_state->vcpi_slots < 0) {
7427 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7428 		return dm_new_connector_state->vcpi_slots;
7429 	}
7430 	return 0;
7431 }
7432 
7433 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7434 	.disable = dm_encoder_helper_disable,
7435 	.atomic_check = dm_encoder_helper_atomic_check
7436 };
7437 
7438 #if defined(CONFIG_DRM_AMD_DC_DCN)
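/*
 * Walk the new connector states, match each MST connector to its dc stream,
 * and program PBN/VCPI slots using the PBN chosen by the DSC fairness
 * computation; DSC is enabled on the MST port only for streams that use it.
 */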
7439 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7440 					    struct dc_state *dc_state,
7441 					    struct dsc_mst_fairness_vars *vars)
7442 {
7443 	struct dc_stream_state *stream = NULL;
7444 	struct drm_connector *connector;
7445 	struct drm_connector_state *new_con_state;
7446 	struct amdgpu_dm_connector *aconnector;
7447 	struct dm_connector_state *dm_conn_state;
7448 	int i, j;
7449 	int vcpi, pbn_div, pbn, slot_num = 0;
7450 
7451 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7452 
7453 		aconnector = to_amdgpu_dm_connector(connector);
7454 
7455 		if (!aconnector->port)
7456 			continue;
7457 
7458 		if (!new_con_state || !new_con_state->crtc)
7459 			continue;
7460 
7461 		dm_conn_state = to_dm_connector_state(new_con_state);
7462 
7463 		for (j = 0; j < dc_state->stream_count; j++) {
7464 			stream = dc_state->streams[j];
7465 			if (!stream)
7466 				continue;
7467 
7468 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7469 				break;
7470 
7471 			stream = NULL;
7472 		}
7473 
7474 		if (!stream)
7475 			continue;
7476 
7477 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
7479 		for (j = 0; j < dc_state->stream_count; j++) {
7480 			if (vars[j].aconnector == aconnector) {
7481 				pbn = vars[j].pbn;
7482 				break;
7483 			}
7484 		}
7485 
7486 		if (j == dc_state->stream_count)
7487 			continue;
7488 
7489 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7490 
7491 		if (stream->timing.flags.DSC != 1) {
7492 			dm_conn_state->pbn = pbn;
7493 			dm_conn_state->vcpi_slots = slot_num;
7494 
7495 			drm_dp_mst_atomic_enable_dsc(state,
7496 						     aconnector->port,
7497 						     dm_conn_state->pbn,
7498 						     0,
7499 						     false);
7500 			continue;
7501 		}
7502 
7503 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7504 						    aconnector->port,
7505 						    pbn, pbn_div,
7506 						    true);
7507 		if (vcpi < 0)
7508 			return vcpi;
7509 
7510 		dm_conn_state->pbn = pbn;
7511 		dm_conn_state->vcpi_slots = vcpi;
7512 	}
7513 	return 0;
7514 }
7515 #endif
7516 
7517 static void dm_drm_plane_reset(struct drm_plane *plane)
7518 {
7519 	struct dm_plane_state *amdgpu_state = NULL;
7520 
7521 	if (plane->state)
7522 		plane->funcs->atomic_destroy_state(plane, plane->state);
7523 
7524 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7525 	WARN_ON(amdgpu_state == NULL);
7526 
7527 	if (amdgpu_state)
7528 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7529 }
7530 
7531 static struct drm_plane_state *
7532 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7533 {
7534 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7535 
7536 	old_dm_plane_state = to_dm_plane_state(plane->state);
7537 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7538 	if (!dm_plane_state)
7539 		return NULL;
7540 
7541 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7542 
7543 	if (old_dm_plane_state->dc_state) {
7544 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7545 		dc_plane_state_retain(dm_plane_state->dc_state);
7546 	}
7547 
7548 	return &dm_plane_state->base;
7549 }
7550 
7551 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7552 				struct drm_plane_state *state)
7553 {
7554 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7555 
7556 	if (dm_plane_state->dc_state)
7557 		dc_plane_state_release(dm_plane_state->dc_state);
7558 
7559 	drm_atomic_helper_plane_destroy_state(plane, state);
7560 }
7561 
7562 static const struct drm_plane_funcs dm_plane_funcs = {
7563 	.update_plane	= drm_atomic_helper_update_plane,
7564 	.disable_plane	= drm_atomic_helper_disable_plane,
7565 	.destroy	= drm_primary_helper_destroy,
7566 	.reset = dm_drm_plane_reset,
7567 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7568 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7569 	.format_mod_supported = dm_plane_format_mod_supported,
7570 };
7571 
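/*
 * Pin the framebuffer BO into a displayable domain, map it into GART, record
 * its GPU address and, for newly created planes only, fill the DC buffer
 * attributes (tiling, DCC, addressing) derived from that address.
 */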
7572 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7573 				      struct drm_plane_state *new_state)
7574 {
7575 	struct amdgpu_framebuffer *afb;
7576 	struct drm_gem_object *obj;
7577 	struct amdgpu_device *adev;
7578 	struct amdgpu_bo *rbo;
7579 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7580 	struct list_head list;
7581 	struct ttm_validate_buffer tv;
7582 	struct ww_acquire_ctx ticket;
7583 	uint32_t domain;
7584 	int r;
7585 
7586 	if (!new_state->fb) {
7587 		DRM_DEBUG_KMS("No FB bound\n");
7588 		return 0;
7589 	}
7590 
7591 	afb = to_amdgpu_framebuffer(new_state->fb);
7592 	obj = new_state->fb->obj[0];
7593 	rbo = gem_to_amdgpu_bo(obj);
7594 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7595 	INIT_LIST_HEAD(&list);
7596 
7597 	tv.bo = &rbo->tbo;
7598 	tv.num_shared = 1;
7599 	list_add(&tv.head, &list);
7600 
7601 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7602 	if (r) {
7603 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7604 		return r;
7605 	}
7606 
7607 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7608 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7609 	else
7610 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7611 
7612 	r = amdgpu_bo_pin(rbo, domain);
7613 	if (unlikely(r != 0)) {
7614 		if (r != -ERESTARTSYS)
7615 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7616 		ttm_eu_backoff_reservation(&ticket, &list);
7617 		return r;
7618 	}
7619 
7620 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7621 	if (unlikely(r != 0)) {
7622 		amdgpu_bo_unpin(rbo);
7623 		ttm_eu_backoff_reservation(&ticket, &list);
7624 		DRM_ERROR("%p bind failed\n", rbo);
7625 		return r;
7626 	}
7627 
7628 	ttm_eu_backoff_reservation(&ticket, &list);
7629 
7630 	afb->address = amdgpu_bo_gpu_offset(rbo);
7631 
7632 	amdgpu_bo_ref(rbo);
7633 
7634 	/**
7635 	 * We don't do surface updates on planes that have been newly created,
7636 	 * but we also don't have the afb->address during atomic check.
7637 	 *
7638 	 * Fill in buffer attributes depending on the address here, but only on
7639 	 * newly created planes since they're not being used by DC yet and this
7640 	 * won't modify global state.
7641 	 */
7642 	dm_plane_state_old = to_dm_plane_state(plane->state);
7643 	dm_plane_state_new = to_dm_plane_state(new_state);
7644 
7645 	if (dm_plane_state_new->dc_state &&
7646 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7647 		struct dc_plane_state *plane_state =
7648 			dm_plane_state_new->dc_state;
7649 		bool force_disable_dcc = !plane_state->dcc.enable;
7650 
7651 		fill_plane_buffer_attributes(
7652 			adev, afb, plane_state->format, plane_state->rotation,
7653 			afb->tiling_flags,
7654 			&plane_state->tiling_info, &plane_state->plane_size,
7655 			&plane_state->dcc, &plane_state->address,
7656 			afb->tmz_surface, force_disable_dcc);
7657 	}
7658 
7659 	return 0;
7660 }
7661 
7662 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7663 				       struct drm_plane_state *old_state)
7664 {
7665 	struct amdgpu_bo *rbo;
7666 	int r;
7667 
7668 	if (!old_state->fb)
7669 		return;
7670 
7671 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7672 	r = amdgpu_bo_reserve(rbo, false);
7673 	if (unlikely(r)) {
7674 		DRM_ERROR("failed to reserve rbo before unpin\n");
7675 		return;
7676 	}
7677 
7678 	amdgpu_bo_unpin(rbo);
7679 	amdgpu_bo_unreserve(rbo);
7680 	amdgpu_bo_unref(&rbo);
7681 }
7682 
7683 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7684 				       struct drm_crtc_state *new_crtc_state)
7685 {
7686 	struct drm_framebuffer *fb = state->fb;
7687 	int min_downscale, max_upscale;
7688 	int min_scale = 0;
7689 	int max_scale = INT_MAX;
7690 
7691 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7692 	if (fb && state->crtc) {
7693 		/* Validate viewport to cover the case when only the position changes */
7694 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7695 			int viewport_width = state->crtc_w;
7696 			int viewport_height = state->crtc_h;
7697 
7698 			if (state->crtc_x < 0)
7699 				viewport_width += state->crtc_x;
7700 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7701 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7702 
7703 			if (state->crtc_y < 0)
7704 				viewport_height += state->crtc_y;
7705 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7706 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7707 
7708 			if (viewport_width < 0 || viewport_height < 0) {
7709 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7710 				return -EINVAL;
7711 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7712 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7713 				return -EINVAL;
7714 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7715 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7716 				return -EINVAL;
7717 			}
7718 
7719 		}
7720 
7721 		/* Get min/max allowed scaling factors from plane caps. */
7722 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7723 					     &min_downscale, &max_upscale);
7724 		/*
7725 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7726 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7727 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7728 		 */
7729 		min_scale = (1000 << 16) / max_upscale;
7730 		max_scale = (1000 << 16) / min_downscale;
7731 	}
7732 
7733 	return drm_atomic_helper_check_plane_state(
7734 		state, new_crtc_state, min_scale, max_scale, true, true);
7735 }
7736 
7737 static int dm_plane_atomic_check(struct drm_plane *plane,
7738 				 struct drm_atomic_state *state)
7739 {
7740 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7741 										 plane);
7742 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7743 	struct dc *dc = adev->dm.dc;
7744 	struct dm_plane_state *dm_plane_state;
7745 	struct dc_scaling_info scaling_info;
7746 	struct drm_crtc_state *new_crtc_state;
7747 	int ret;
7748 
7749 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7750 
7751 	dm_plane_state = to_dm_plane_state(new_plane_state);
7752 
7753 	if (!dm_plane_state->dc_state)
7754 		return 0;
7755 
7756 	new_crtc_state =
7757 		drm_atomic_get_new_crtc_state(state,
7758 					      new_plane_state->crtc);
7759 	if (!new_crtc_state)
7760 		return -EINVAL;
7761 
7762 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7763 	if (ret)
7764 		return ret;
7765 
7766 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7767 	if (ret)
7768 		return ret;
7769 
7770 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7771 		return 0;
7772 
7773 	return -EINVAL;
7774 }
7775 
7776 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7777 				       struct drm_atomic_state *state)
7778 {
7779 	/* Only support async updates on cursor planes. */
7780 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7781 		return -EINVAL;
7782 
7783 	return 0;
7784 }
7785 
7786 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7787 					 struct drm_atomic_state *state)
7788 {
7789 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7790 									   plane);
7791 	struct drm_plane_state *old_state =
7792 		drm_atomic_get_old_plane_state(state, plane);
7793 
7794 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7795 
7796 	swap(plane->state->fb, new_state->fb);
7797 
7798 	plane->state->src_x = new_state->src_x;
7799 	plane->state->src_y = new_state->src_y;
7800 	plane->state->src_w = new_state->src_w;
7801 	plane->state->src_h = new_state->src_h;
7802 	plane->state->crtc_x = new_state->crtc_x;
7803 	plane->state->crtc_y = new_state->crtc_y;
7804 	plane->state->crtc_w = new_state->crtc_w;
7805 	plane->state->crtc_h = new_state->crtc_h;
7806 
7807 	handle_cursor_update(plane, old_state);
7808 }
7809 
7810 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7811 	.prepare_fb = dm_plane_helper_prepare_fb,
7812 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7813 	.atomic_check = dm_plane_atomic_check,
7814 	.atomic_async_check = dm_plane_atomic_async_check,
7815 	.atomic_async_update = dm_plane_atomic_async_update
7816 };
7817 
7818 /*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper check.
7823  */
7824 static const uint32_t rgb_formats[] = {
7825 	DRM_FORMAT_XRGB8888,
7826 	DRM_FORMAT_ARGB8888,
7827 	DRM_FORMAT_RGBA8888,
7828 	DRM_FORMAT_XRGB2101010,
7829 	DRM_FORMAT_XBGR2101010,
7830 	DRM_FORMAT_ARGB2101010,
7831 	DRM_FORMAT_ABGR2101010,
7832 	DRM_FORMAT_XRGB16161616,
7833 	DRM_FORMAT_XBGR16161616,
7834 	DRM_FORMAT_ARGB16161616,
7835 	DRM_FORMAT_ABGR16161616,
7836 	DRM_FORMAT_XBGR8888,
7837 	DRM_FORMAT_ABGR8888,
7838 	DRM_FORMAT_RGB565,
7839 };
7840 
7841 static const uint32_t overlay_formats[] = {
7842 	DRM_FORMAT_XRGB8888,
7843 	DRM_FORMAT_ARGB8888,
7844 	DRM_FORMAT_RGBA8888,
7845 	DRM_FORMAT_XBGR8888,
7846 	DRM_FORMAT_ABGR8888,
7847 	DRM_FORMAT_RGB565
7848 };
7849 
7850 static const u32 cursor_formats[] = {
7851 	DRM_FORMAT_ARGB8888
7852 };
7853 
7854 static int get_plane_formats(const struct drm_plane *plane,
7855 			     const struct dc_plane_cap *plane_cap,
7856 			     uint32_t *formats, int max_formats)
7857 {
7858 	int i, num_formats = 0;
7859 
7860 	/*
7861 	 * TODO: Query support for each group of formats directly from
7862 	 * DC plane caps. This will require adding more formats to the
7863 	 * caps list.
7864 	 */
7865 
7866 	switch (plane->type) {
7867 	case DRM_PLANE_TYPE_PRIMARY:
7868 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7869 			if (num_formats >= max_formats)
7870 				break;
7871 
7872 			formats[num_formats++] = rgb_formats[i];
7873 		}
7874 
7875 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7876 			formats[num_formats++] = DRM_FORMAT_NV12;
7877 		if (plane_cap && plane_cap->pixel_format_support.p010)
7878 			formats[num_formats++] = DRM_FORMAT_P010;
7879 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7880 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7881 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7882 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7883 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7884 		}
7885 		break;
7886 
7887 	case DRM_PLANE_TYPE_OVERLAY:
7888 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7889 			if (num_formats >= max_formats)
7890 				break;
7891 
7892 			formats[num_formats++] = overlay_formats[i];
7893 		}
7894 		break;
7895 
7896 	case DRM_PLANE_TYPE_CURSOR:
7897 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7898 			if (num_formats >= max_formats)
7899 				break;
7900 
7901 			formats[num_formats++] = cursor_formats[i];
7902 		}
7903 		break;
7904 	}
7905 
7906 	return num_formats;
7907 }
7908 
7909 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7910 				struct drm_plane *plane,
7911 				unsigned long possible_crtcs,
7912 				const struct dc_plane_cap *plane_cap)
7913 {
7914 	uint32_t formats[32];
7915 	int num_formats;
7916 	int res = -EPERM;
7917 	unsigned int supported_rotations;
7918 	uint64_t *modifiers = NULL;
7919 
7920 	num_formats = get_plane_formats(plane, plane_cap, formats,
7921 					ARRAY_SIZE(formats));
7922 
7923 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7924 	if (res)
7925 		return res;
7926 
7927 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7928 				       &dm_plane_funcs, formats, num_formats,
7929 				       modifiers, plane->type, NULL);
7930 	kfree(modifiers);
7931 	if (res)
7932 		return res;
7933 
7934 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7935 	    plane_cap && plane_cap->per_pixel_alpha) {
7936 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7937 					  BIT(DRM_MODE_BLEND_PREMULTI);
7938 
7939 		drm_plane_create_alpha_property(plane);
7940 		drm_plane_create_blend_mode_property(plane, blend_caps);
7941 	}
7942 
7943 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7944 	    plane_cap &&
7945 	    (plane_cap->pixel_format_support.nv12 ||
7946 	     plane_cap->pixel_format_support.p010)) {
7947 		/* This only affects YUV formats. */
7948 		drm_plane_create_color_properties(
7949 			plane,
7950 			BIT(DRM_COLOR_YCBCR_BT601) |
7951 			BIT(DRM_COLOR_YCBCR_BT709) |
7952 			BIT(DRM_COLOR_YCBCR_BT2020),
7953 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7954 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7955 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7956 	}
7957 
7958 	supported_rotations =
7959 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7960 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7961 
7962 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7963 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7964 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7965 						   supported_rotations);
7966 
7967 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7968 
7969 	/* Create (reset) the plane state */
7970 	if (plane->funcs->reset)
7971 		plane->funcs->reset(plane);
7972 
7973 	return 0;
7974 }
7975 
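/*
 * Create a CRTC with the given primary plane plus a dedicated cursor plane,
 * then hook up the DM CRTC funcs/helpers and color management properties.
 */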
7976 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7977 			       struct drm_plane *plane,
7978 			       uint32_t crtc_index)
7979 {
7980 	struct amdgpu_crtc *acrtc = NULL;
7981 	struct drm_plane *cursor_plane;
7982 
7983 	int res = -ENOMEM;
7984 
7985 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7986 	if (!cursor_plane)
7987 		goto fail;
7988 
7989 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
7991 
7992 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7993 	if (!acrtc)
7994 		goto fail;
7995 
7996 	res = drm_crtc_init_with_planes(
7997 			dm->ddev,
7998 			&acrtc->base,
7999 			plane,
8000 			cursor_plane,
8001 			&amdgpu_dm_crtc_funcs, NULL);
8002 
8003 	if (res)
8004 		goto fail;
8005 
8006 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8007 
	/* Create (reset) the CRTC state */
8009 	if (acrtc->base.funcs->reset)
8010 		acrtc->base.funcs->reset(&acrtc->base);
8011 
8012 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8013 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8014 
8015 	acrtc->crtc_id = crtc_index;
8016 	acrtc->base.enabled = false;
8017 	acrtc->otg_inst = -1;
8018 
8019 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8020 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8021 				   true, MAX_COLOR_LUT_ENTRIES);
8022 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8023 
8024 	return 0;
8025 
8026 fail:
8027 	kfree(acrtc);
8028 	kfree(cursor_plane);
8029 	return res;
8030 }
8031 
8032 
8033 static int to_drm_connector_type(enum signal_type st)
8034 {
8035 	switch (st) {
8036 	case SIGNAL_TYPE_HDMI_TYPE_A:
8037 		return DRM_MODE_CONNECTOR_HDMIA;
8038 	case SIGNAL_TYPE_EDP:
8039 		return DRM_MODE_CONNECTOR_eDP;
8040 	case SIGNAL_TYPE_LVDS:
8041 		return DRM_MODE_CONNECTOR_LVDS;
8042 	case SIGNAL_TYPE_RGB:
8043 		return DRM_MODE_CONNECTOR_VGA;
8044 	case SIGNAL_TYPE_DISPLAY_PORT:
8045 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8046 		return DRM_MODE_CONNECTOR_DisplayPort;
8047 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8048 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8049 		return DRM_MODE_CONNECTOR_DVID;
8050 	case SIGNAL_TYPE_VIRTUAL:
8051 		return DRM_MODE_CONNECTOR_VIRTUAL;
8052 
8053 	default:
8054 		return DRM_MODE_CONNECTOR_Unknown;
8055 	}
8056 }
8057 
8058 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8059 {
8060 	struct drm_encoder *encoder;
8061 
8062 	/* There is only one encoder per connector */
8063 	drm_connector_for_each_possible_encoder(connector, encoder)
8064 		return encoder;
8065 
8066 	return NULL;
8067 }
8068 
8069 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8070 {
8071 	struct drm_encoder *encoder;
8072 	struct amdgpu_encoder *amdgpu_encoder;
8073 
8074 	encoder = amdgpu_dm_connector_to_encoder(connector);
8075 
8076 	if (encoder == NULL)
8077 		return;
8078 
8079 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8080 
8081 	amdgpu_encoder->native_mode.clock = 0;
8082 
8083 	if (!list_empty(&connector->probed_modes)) {
8084 		struct drm_display_mode *preferred_mode = NULL;
8085 
8086 		list_for_each_entry(preferred_mode,
8087 				    &connector->probed_modes,
8088 				    head) {
8089 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8090 				amdgpu_encoder->native_mode = *preferred_mode;
8091 
8092 			break;
8093 		}
8094 
8095 	}
8096 }
8097 
8098 static struct drm_display_mode *
8099 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8100 			     char *name,
8101 			     int hdisplay, int vdisplay)
8102 {
8103 	struct drm_device *dev = encoder->dev;
8104 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8105 	struct drm_display_mode *mode = NULL;
8106 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8107 
8108 	mode = drm_mode_duplicate(dev, native_mode);
8109 
8110 	if (mode == NULL)
8111 		return NULL;
8112 
8113 	mode->hdisplay = hdisplay;
8114 	mode->vdisplay = vdisplay;
8115 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8116 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8117 
8118 	return mode;
8119 
8120 }
8121 
8122 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8123 						 struct drm_connector *connector)
8124 {
8125 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8126 	struct drm_display_mode *mode = NULL;
8127 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8128 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8129 				to_amdgpu_dm_connector(connector);
8130 	int i;
8131 	int n;
8132 	struct mode_size {
8133 		char name[DRM_DISPLAY_MODE_LEN];
8134 		int w;
8135 		int h;
8136 	} common_modes[] = {
8137 		{  "640x480",  640,  480},
8138 		{  "800x600",  800,  600},
8139 		{ "1024x768", 1024,  768},
8140 		{ "1280x720", 1280,  720},
8141 		{ "1280x800", 1280,  800},
8142 		{"1280x1024", 1280, 1024},
8143 		{ "1440x900", 1440,  900},
8144 		{"1680x1050", 1680, 1050},
8145 		{"1600x1200", 1600, 1200},
8146 		{"1920x1080", 1920, 1080},
8147 		{"1920x1200", 1920, 1200}
8148 	};
8149 
8150 	n = ARRAY_SIZE(common_modes);
8151 
8152 	for (i = 0; i < n; i++) {
8153 		struct drm_display_mode *curmode = NULL;
8154 		bool mode_existed = false;
8155 
8156 		if (common_modes[i].w > native_mode->hdisplay ||
8157 		    common_modes[i].h > native_mode->vdisplay ||
8158 		   (common_modes[i].w == native_mode->hdisplay &&
8159 		    common_modes[i].h == native_mode->vdisplay))
8160 			continue;
8161 
8162 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8163 			if (common_modes[i].w == curmode->hdisplay &&
8164 			    common_modes[i].h == curmode->vdisplay) {
8165 				mode_existed = true;
8166 				break;
8167 			}
8168 		}
8169 
8170 		if (mode_existed)
8171 			continue;
8172 
8173 		mode = amdgpu_dm_create_common_mode(encoder,
8174 				common_modes[i].name, common_modes[i].w,
8175 				common_modes[i].h);
8176 		if (!mode)
8177 			continue;
8178 
8179 		drm_mode_probed_add(connector, mode);
8180 		amdgpu_dm_connector->num_modes++;
8181 	}
8182 }
8183 
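/*
 * For internal panels (eDP/LVDS), apply any panel-orientation quirk keyed on
 * the native mode dimensions so userspace sees the correct orientation.
 */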
8184 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8185 {
8186 	struct drm_encoder *encoder;
8187 	struct amdgpu_encoder *amdgpu_encoder;
8188 	const struct drm_display_mode *native_mode;
8189 
8190 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8191 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8192 		return;
8193 
8194 	encoder = amdgpu_dm_connector_to_encoder(connector);
8195 	if (!encoder)
8196 		return;
8197 
8198 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8199 
8200 	native_mode = &amdgpu_encoder->native_mode;
8201 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8202 		return;
8203 
8204 	drm_connector_set_panel_orientation_with_quirk(connector,
8205 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8206 						       native_mode->hdisplay,
8207 						       native_mode->vdisplay);
8208 }
8209 
8210 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8211 					      struct edid *edid)
8212 {
8213 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8214 			to_amdgpu_dm_connector(connector);
8215 
8216 	if (edid) {
8217 		/* empty probed_modes */
8218 		INIT_LIST_HEAD(&connector->probed_modes);
8219 		amdgpu_dm_connector->num_modes =
8220 				drm_add_edid_modes(connector, edid);
8221 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list could have a higher preferred
		 * resolution; for example, 3840x2160 in the base EDID
		 * preferred timing and 4096x2160 in a later DID
		 * extension block.
8229 		 */
8230 		drm_mode_sort(&connector->probed_modes);
8231 		amdgpu_dm_get_native_mode(connector);
8232 
8233 		/* Freesync capabilities are reset by calling
8234 		 * drm_add_edid_modes() and need to be
8235 		 * restored here.
8236 		 */
8237 		amdgpu_dm_update_freesync_caps(connector, edid);
8238 
8239 		amdgpu_set_panel_orientation(connector);
8240 	} else {
8241 		amdgpu_dm_connector->num_modes = 0;
8242 	}
8243 }
8244 
8245 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8246 			      struct drm_display_mode *mode)
8247 {
8248 	struct drm_display_mode *m;
8249 
8250 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8251 		if (drm_mode_equal(m, mode))
8252 			return true;
8253 	}
8254 
8255 	return false;
8256 }
8257 
8258 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8259 {
8260 	const struct drm_display_mode *m;
8261 	struct drm_display_mode *new_mode;
8262 	uint i;
8263 	uint32_t new_modes_count = 0;
8264 
8265 	/* Standard FPS values
8266 	 *
8267 	 * 23.976       - TV/NTSC
8268 	 * 24 	        - Cinema
8269 	 * 25 	        - TV/PAL
8270 	 * 29.97        - TV/NTSC
8271 	 * 30 	        - TV/NTSC
8272 	 * 48 	        - Cinema HFR
8273 	 * 50 	        - TV/PAL
8274 	 * 60 	        - Commonly used
8275 	 * 48,72,96,120 - Multiples of 24
8276 	 */
8277 	static const uint32_t common_rates[] = {
8278 		23976, 24000, 25000, 29970, 30000,
8279 		48000, 50000, 60000, 72000, 96000, 120000
8280 	};
8281 
8282 	/*
	 * Find the mode with the highest refresh rate at the same resolution
	 * as the preferred mode. Some monitors report a preferred mode whose
	 * refresh rate is lower than the highest rate they support.
8286 	 */
8287 
8288 	m = get_highest_refresh_rate_mode(aconnector, true);
8289 	if (!m)
8290 		return 0;
8291 
8292 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8293 		uint64_t target_vtotal, target_vtotal_diff;
8294 		uint64_t num, den;
8295 
8296 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8297 			continue;
8298 
8299 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8300 		    common_rates[i] > aconnector->max_vfreq * 1000)
8301 			continue;
8302 
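		/*
		 * Keep the base mode's pixel clock and htotal and solve
		 * refresh = clock * 1000 / (htotal * vtotal) for the vtotal
		 * that hits the target rate (common_rates[] is in mHz).
		 */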
8303 		num = (unsigned long long)m->clock * 1000 * 1000;
8304 		den = common_rates[i] * (unsigned long long)m->htotal;
8305 		target_vtotal = div_u64(num, den);
8306 		target_vtotal_diff = target_vtotal - m->vtotal;
8307 
8308 		/* Check for illegal modes */
8309 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8310 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8311 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8312 			continue;
8313 
8314 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8315 		if (!new_mode)
8316 			goto out;
8317 
8318 		new_mode->vtotal += (u16)target_vtotal_diff;
8319 		new_mode->vsync_start += (u16)target_vtotal_diff;
8320 		new_mode->vsync_end += (u16)target_vtotal_diff;
8321 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8322 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8323 
8324 		if (!is_duplicate_mode(aconnector, new_mode)) {
8325 			drm_mode_probed_add(&aconnector->base, new_mode);
8326 			new_modes_count += 1;
8327 		} else
8328 			drm_mode_destroy(aconnector->base.dev, new_mode);
8329 	}
8330  out:
8331 	return new_modes_count;
8332 }
8333 
8334 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8335 						   struct edid *edid)
8336 {
8337 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8338 		to_amdgpu_dm_connector(connector);
8339 
8340 	if (!edid)
8341 		return;
8342 
8343 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8344 		amdgpu_dm_connector->num_modes +=
8345 			add_fs_modes(amdgpu_dm_connector);
8346 }
8347 
8348 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8349 {
8350 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8351 			to_amdgpu_dm_connector(connector);
8352 	struct drm_encoder *encoder;
8353 	struct edid *edid = amdgpu_dm_connector->edid;
8354 
8355 	encoder = amdgpu_dm_connector_to_encoder(connector);
8356 
8357 	if (!drm_edid_is_valid(edid)) {
8358 		amdgpu_dm_connector->num_modes =
8359 				drm_add_modes_noedid(connector, 640, 480);
8360 	} else {
8361 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8362 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8363 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8364 	}
8365 	amdgpu_dm_fbc_init(connector);
8366 
8367 	return amdgpu_dm_connector->num_modes;
8368 }
8369 
8370 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8371 				     struct amdgpu_dm_connector *aconnector,
8372 				     int connector_type,
8373 				     struct dc_link *link,
8374 				     int link_index)
8375 {
8376 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8377 
8378 	/*
8379 	 * Some of the properties below require access to state, like bpc.
8380 	 * Allocate some default initial connector state with our reset helper.
8381 	 */
8382 	if (aconnector->base.funcs->reset)
8383 		aconnector->base.funcs->reset(&aconnector->base);
8384 
8385 	aconnector->connector_id = link_index;
8386 	aconnector->dc_link = link;
8387 	aconnector->base.interlace_allowed = false;
8388 	aconnector->base.doublescan_allowed = false;
8389 	aconnector->base.stereo_allowed = false;
8390 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8391 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8392 	aconnector->audio_inst = -1;
8393 	mutex_init(&aconnector->hpd_lock);
8394 
8395 	/*
8396 	 * configure support HPD hot plug connector_>polled default value is 0
8397 	 * which means HPD hot plug not supported
8398 	 */
8399 	switch (connector_type) {
8400 	case DRM_MODE_CONNECTOR_HDMIA:
8401 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8402 		aconnector->base.ycbcr_420_allowed =
8403 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8404 		break;
8405 	case DRM_MODE_CONNECTOR_DisplayPort:
8406 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8407 		link->link_enc = link_enc_cfg_get_link_enc(link);
8408 		ASSERT(link->link_enc);
8409 		if (link->link_enc)
8410 			aconnector->base.ycbcr_420_allowed =
8411 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8412 		break;
8413 	case DRM_MODE_CONNECTOR_DVID:
8414 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8415 		break;
8416 	default:
8417 		break;
8418 	}
8419 
8420 	drm_object_attach_property(&aconnector->base.base,
8421 				dm->ddev->mode_config.scaling_mode_property,
8422 				DRM_MODE_SCALE_NONE);
8423 
8424 	drm_object_attach_property(&aconnector->base.base,
8425 				adev->mode_info.underscan_property,
8426 				UNDERSCAN_OFF);
8427 	drm_object_attach_property(&aconnector->base.base,
8428 				adev->mode_info.underscan_hborder_property,
8429 				0);
8430 	drm_object_attach_property(&aconnector->base.base,
8431 				adev->mode_info.underscan_vborder_property,
8432 				0);
8433 
8434 	if (!aconnector->mst_port)
8435 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8436 
8437 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8438 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8439 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8440 
8441 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8442 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8443 		drm_object_attach_property(&aconnector->base.base,
8444 				adev->mode_info.abm_level_property, 0);
8445 	}
8446 
8447 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8448 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8449 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8450 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8451 
8452 		if (!aconnector->mst_port)
8453 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8454 
8455 #ifdef CONFIG_DRM_AMD_DC_HDCP
8456 		if (adev->dm.hdcp_workqueue)
8457 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8458 #endif
8459 	}
8460 }
8461 
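/*
 * I2C transfer callback: translate the i2c_msg array into DC i2c payloads
 * and submit them through the link's DDC service.
 */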
8462 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8463 			      struct i2c_msg *msgs, int num)
8464 {
8465 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8466 	struct ddc_service *ddc_service = i2c->ddc_service;
8467 	struct i2c_command cmd;
8468 	int i;
8469 	int result = -EIO;
8470 
8471 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8472 
8473 	if (!cmd.payloads)
8474 		return result;
8475 
8476 	cmd.number_of_payloads = num;
8477 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8478 	cmd.speed = 100;
8479 
8480 	for (i = 0; i < num; i++) {
8481 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8482 		cmd.payloads[i].address = msgs[i].addr;
8483 		cmd.payloads[i].length = msgs[i].len;
8484 		cmd.payloads[i].data = msgs[i].buf;
8485 	}
8486 
8487 	if (dc_submit_i2c(
8488 			ddc_service->ctx->dc,
8489 			ddc_service->ddc_pin->hw_info.ddc_channel,
8490 			&cmd))
8491 		result = num;
8492 
8493 	kfree(cmd.payloads);
8494 	return result;
8495 }
8496 
8497 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8498 {
8499 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8500 }
8501 
8502 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8503 	.master_xfer = amdgpu_dm_i2c_xfer,
8504 	.functionality = amdgpu_dm_i2c_func,
8505 };
8506 
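/* Allocate and initialize an i2c adapter wrapping the link's DDC service. */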
8507 static struct amdgpu_i2c_adapter *
8508 create_i2c(struct ddc_service *ddc_service,
8509 	   int link_index,
8510 	   int *res)
8511 {
8512 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8513 	struct amdgpu_i2c_adapter *i2c;
8514 
8515 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8516 	if (!i2c)
8517 		return NULL;
8518 	i2c->base.owner = THIS_MODULE;
8519 	i2c->base.class = I2C_CLASS_DDC;
8520 	i2c->base.dev.parent = &adev->pdev->dev;
8521 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8522 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8523 	i2c_set_adapdata(&i2c->base, i2c);
8524 	i2c->ddc_service = ddc_service;
8525 	if (i2c->ddc_service->ddc_pin)
8526 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8527 
8528 	return i2c;
8529 }
8530 
8531 
8532 /*
8533  * Note: this function assumes that dc_link_detect() was called for the
8534  * dc_link which will be represented by this aconnector.
8535  */
8536 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8537 				    struct amdgpu_dm_connector *aconnector,
8538 				    uint32_t link_index,
8539 				    struct amdgpu_encoder *aencoder)
8540 {
8541 	int res = 0;
8542 	int connector_type;
8543 	struct dc *dc = dm->dc;
8544 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8545 	struct amdgpu_i2c_adapter *i2c;
8546 
8547 	link->priv = aconnector;
8548 
8549 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8550 
8551 	i2c = create_i2c(link->ddc, link->link_index, &res);
8552 	if (!i2c) {
8553 		DRM_ERROR("Failed to create i2c adapter data\n");
8554 		return -ENOMEM;
8555 	}
8556 
8557 	aconnector->i2c = i2c;
8558 	res = i2c_add_adapter(&i2c->base);
8559 
8560 	if (res) {
8561 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8562 		goto out_free;
8563 	}
8564 
8565 	connector_type = to_drm_connector_type(link->connector_signal);
8566 
8567 	res = drm_connector_init_with_ddc(
8568 			dm->ddev,
8569 			&aconnector->base,
8570 			&amdgpu_dm_connector_funcs,
8571 			connector_type,
8572 			&i2c->base);
8573 
8574 	if (res) {
8575 		DRM_ERROR("connector_init failed\n");
8576 		aconnector->connector_id = -1;
8577 		goto out_free;
8578 	}
8579 
8580 	drm_connector_helper_add(
8581 			&aconnector->base,
8582 			&amdgpu_dm_connector_helper_funcs);
8583 
8584 	amdgpu_dm_connector_init_helper(
8585 		dm,
8586 		aconnector,
8587 		connector_type,
8588 		link,
8589 		link_index);
8590 
8591 	drm_connector_attach_encoder(
8592 		&aconnector->base, &aencoder->base);
8593 
8594 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8595 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8596 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8597 
8598 out_free:
8599 	if (res) {
8600 		kfree(i2c);
8601 		aconnector->i2c = NULL;
8602 	}
8603 	return res;
8604 }
8605 
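/* Return a bitmask with one bit set per CRTC the encoder may drive. */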
8606 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8607 {
8608 	switch (adev->mode_info.num_crtc) {
8609 	case 1:
8610 		return 0x1;
8611 	case 2:
8612 		return 0x3;
8613 	case 3:
8614 		return 0x7;
8615 	case 4:
8616 		return 0xf;
8617 	case 5:
8618 		return 0x1f;
8619 	case 6:
8620 	default:
8621 		return 0x3f;
8622 	}
8623 }
8624 
8625 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8626 				  struct amdgpu_encoder *aencoder,
8627 				  uint32_t link_index)
8628 {
8629 	struct amdgpu_device *adev = drm_to_adev(dev);
8630 
8631 	int res = drm_encoder_init(dev,
8632 				   &aencoder->base,
8633 				   &amdgpu_dm_encoder_funcs,
8634 				   DRM_MODE_ENCODER_TMDS,
8635 				   NULL);
8636 
8637 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8638 
8639 	if (!res)
8640 		aencoder->encoder_id = link_index;
8641 	else
8642 		aencoder->encoder_id = -1;
8643 
8644 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8645 
8646 	return res;
8647 }
8648 
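/*
 * Enable or disable the vblank and pageflip (and, when secure display is
 * built in, vline0) interrupts for a CRTC.
 */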
8649 static void manage_dm_interrupts(struct amdgpu_device *adev,
8650 				 struct amdgpu_crtc *acrtc,
8651 				 bool enable)
8652 {
8653 	/*
8654 	 * We have no guarantee that the frontend index maps to the same
8655 	 * backend index - some even map to more than one.
8656 	 *
8657 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8658 	 */
8659 	int irq_type =
8660 		amdgpu_display_crtc_idx_to_irq_type(
8661 			adev,
8662 			acrtc->crtc_id);
8663 
8664 	if (enable) {
8665 		drm_crtc_vblank_on(&acrtc->base);
8666 		amdgpu_irq_get(
8667 			adev,
8668 			&adev->pageflip_irq,
8669 			irq_type);
8670 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8671 		amdgpu_irq_get(
8672 			adev,
8673 			&adev->vline0_irq,
8674 			irq_type);
8675 #endif
8676 	} else {
8677 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8678 		amdgpu_irq_put(
8679 			adev,
8680 			&adev->vline0_irq,
8681 			irq_type);
8682 #endif
8683 		amdgpu_irq_put(
8684 			adev,
8685 			&adev->pageflip_irq,
8686 			irq_type);
8687 		drm_crtc_vblank_off(&acrtc->base);
8688 	}
8689 }
8690 
8691 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8692 				      struct amdgpu_crtc *acrtc)
8693 {
8694 	int irq_type =
8695 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8696 
8697 	/**
8698 	 * This reads the current state for the IRQ and force reapplies
8699 	 * the setting to hardware.
8700 	 */
8701 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8702 }
8703 
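/* Return true if the scaling or underscan settings changed between states. */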
8704 static bool
8705 is_scaling_state_different(const struct dm_connector_state *dm_state,
8706 			   const struct dm_connector_state *old_dm_state)
8707 {
8708 	if (dm_state->scaling != old_dm_state->scaling)
8709 		return true;
8710 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8711 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8712 			return true;
8713 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8714 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8715 			return true;
8716 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8717 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8718 		return true;
8719 	return false;
8720 }
8721 
8722 #ifdef CONFIG_DRM_AMD_DC_HDCP
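/*
 * Decide whether an HDCP update is required for this connector, normalizing
 * the requested content protection state along the way.
 */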
8723 static bool is_content_protection_different(struct drm_connector_state *state,
8724 					    const struct drm_connector_state *old_state,
8725 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8726 {
8727 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8728 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8729 
8730 	/* Handle: Type0/1 change */
8731 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8732 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8733 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8734 		return true;
8735 	}
8736 
	/* CP is being re-enabled, ignore this
8738 	 *
8739 	 * Handles:	ENABLED -> DESIRED
8740 	 */
8741 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8742 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8743 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8744 		return false;
8745 	}
8746 
8747 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8748 	 *
8749 	 * Handles:	UNDESIRED -> ENABLED
8750 	 */
8751 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8752 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8753 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8754 
8755 	/* Stream removed and re-enabled
8756 	 *
8757 	 * Can sometimes overlap with the HPD case,
8758 	 * thus set update_hdcp to false to avoid
8759 	 * setting HDCP multiple times.
8760 	 *
8761 	 * Handles:	DESIRED -> DESIRED (Special case)
8762 	 */
8763 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8764 		state->crtc && state->crtc->enabled &&
8765 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8766 		dm_con_state->update_hdcp = false;
8767 		return true;
8768 	}
8769 
8770 	/* Hot-plug, headless s3, dpms
8771 	 *
8772 	 * Only start HDCP if the display is connected/enabled.
8773 	 * update_hdcp flag will be set to false until the next
8774 	 * HPD comes in.
8775 	 *
8776 	 * Handles:	DESIRED -> DESIRED (Special case)
8777 	 */
8778 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8779 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8780 		dm_con_state->update_hdcp = false;
8781 		return true;
8782 	}
8783 
8784 	/*
8785 	 * Handles:	UNDESIRED -> UNDESIRED
8786 	 *		DESIRED -> DESIRED
8787 	 *		ENABLED -> ENABLED
8788 	 */
8789 	if (old_state->content_protection == state->content_protection)
8790 		return false;
8791 
8792 	/*
8793 	 * Handles:	UNDESIRED -> DESIRED
8794 	 *		DESIRED -> UNDESIRED
8795 	 *		ENABLED -> UNDESIRED
8796 	 */
8797 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8798 		return true;
8799 
8800 	/*
8801 	 * Handles:	DESIRED -> ENABLED
8802 	 */
8803 	return false;
8804 }
8805 
8806 #endif
8807 static void remove_stream(struct amdgpu_device *adev,
8808 			  struct amdgpu_crtc *acrtc,
8809 			  struct dc_stream_state *stream)
8810 {
8811 	/* this is the update mode case */
8812 
8813 	acrtc->otg_inst = -1;
8814 	acrtc->enabled = false;
8815 }
8816 
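/*
 * Compute the DC cursor position from the cursor plane state, folding any
 * negative on-screen coordinates into the hotspot offsets.
 */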
8817 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8818 			       struct dc_cursor_position *position)
8819 {
8820 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8821 	int x, y;
8822 	int xorigin = 0, yorigin = 0;
8823 
8824 	if (!crtc || !plane->state->fb)
8825 		return 0;
8826 
8827 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8828 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8829 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8830 			  __func__,
8831 			  plane->state->crtc_w,
8832 			  plane->state->crtc_h);
8833 		return -EINVAL;
8834 	}
8835 
8836 	x = plane->state->crtc_x;
8837 	y = plane->state->crtc_y;
8838 
8839 	if (x <= -amdgpu_crtc->max_cursor_width ||
8840 	    y <= -amdgpu_crtc->max_cursor_height)
8841 		return 0;
8842 
8843 	if (x < 0) {
8844 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8845 		x = 0;
8846 	}
8847 	if (y < 0) {
8848 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8849 		y = 0;
8850 	}
8851 	position->enable = true;
8852 	position->translate_by_source = true;
8853 	position->x = x;
8854 	position->y = y;
8855 	position->x_hotspot = xorigin;
8856 	position->y_hotspot = yorigin;
8857 
8858 	return 0;
8859 }
8860 
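/* Program the cursor attributes and position for the plane's CRTC stream. */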
8861 static void handle_cursor_update(struct drm_plane *plane,
8862 				 struct drm_plane_state *old_plane_state)
8863 {
8864 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8865 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8866 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8867 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8868 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8869 	uint64_t address = afb ? afb->address : 0;
8870 	struct dc_cursor_position position = {0};
8871 	struct dc_cursor_attributes attributes;
8872 	int ret;
8873 
8874 	if (!plane->state->fb && !old_plane_state->fb)
8875 		return;
8876 
8877 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8878 		      __func__,
8879 		      amdgpu_crtc->crtc_id,
8880 		      plane->state->crtc_w,
8881 		      plane->state->crtc_h);
8882 
8883 	ret = get_cursor_position(plane, crtc, &position);
8884 	if (ret)
8885 		return;
8886 
8887 	if (!position.enable) {
8888 		/* turn off cursor */
8889 		if (crtc_state && crtc_state->stream) {
8890 			mutex_lock(&adev->dm.dc_lock);
8891 			dc_stream_set_cursor_position(crtc_state->stream,
8892 						      &position);
8893 			mutex_unlock(&adev->dm.dc_lock);
8894 		}
8895 		return;
8896 	}
8897 
8898 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8899 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8900 
8901 	memset(&attributes, 0, sizeof(attributes));
8902 	attributes.address.high_part = upper_32_bits(address);
8903 	attributes.address.low_part  = lower_32_bits(address);
8904 	attributes.width             = plane->state->crtc_w;
8905 	attributes.height            = plane->state->crtc_h;
8906 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8907 	attributes.rotation_angle    = 0;
8908 	attributes.attribute_flags.value = 0;
8909 
8910 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8911 
8912 	if (crtc_state->stream) {
8913 		mutex_lock(&adev->dm.dc_lock);
8914 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8915 							 &attributes))
8916 			DRM_ERROR("DC failed to set cursor attributes\n");
8917 
8918 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8919 						   &position))
8920 			DRM_ERROR("DC failed to set cursor position\n");
8921 		mutex_unlock(&adev->dm.dc_lock);
8922 	}
8923 }
8924 
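/*
 * Hand the pending pageflip event over to the pageflip interrupt handler
 * and mark the flip as submitted.
 */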
8925 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8926 {
8927 
8928 	assert_spin_locked(&acrtc->base.dev->event_lock);
8929 	WARN_ON(acrtc->event);
8930 
8931 	acrtc->event = acrtc->base.state->event;
8932 
8933 	/* Set the flip status */
8934 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8935 
8936 	/* Mark this event as consumed */
8937 	acrtc->base.state->event = NULL;
8938 
8939 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8940 		     acrtc->crtc_id);
8941 }
8942 
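/*
 * Recompute the VRR adjust ranges and VRR infopacket for a stream around a
 * page flip and mirror the result into the CRTC's interrupt parameters.
 */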
8943 static void update_freesync_state_on_stream(
8944 	struct amdgpu_display_manager *dm,
8945 	struct dm_crtc_state *new_crtc_state,
8946 	struct dc_stream_state *new_stream,
8947 	struct dc_plane_state *surface,
8948 	u32 flip_timestamp_in_us)
8949 {
8950 	struct mod_vrr_params vrr_params;
8951 	struct dc_info_packet vrr_infopacket = {0};
8952 	struct amdgpu_device *adev = dm->adev;
8953 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8954 	unsigned long flags;
8955 	bool pack_sdp_v1_3 = false;
8956 
8957 	if (!new_stream)
8958 		return;
8959 
8960 	/*
8961 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8962 	 * For now it's sufficient to just guard against these conditions.
8963 	 */
8964 
8965 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8966 		return;
8967 
8968 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
8970 
8971 	if (surface) {
8972 		mod_freesync_handle_preflip(
8973 			dm->freesync_module,
8974 			surface,
8975 			new_stream,
8976 			flip_timestamp_in_us,
8977 			&vrr_params);
8978 
8979 		if (adev->family < AMDGPU_FAMILY_AI &&
8980 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8981 			mod_freesync_handle_v_update(dm->freesync_module,
8982 						     new_stream, &vrr_params);
8983 
8984 			/* Need to call this before the frame ends. */
8985 			dc_stream_adjust_vmin_vmax(dm->dc,
8986 						   new_crtc_state->stream,
8987 						   &vrr_params.adjust);
8988 		}
8989 	}
8990 
8991 	mod_freesync_build_vrr_infopacket(
8992 		dm->freesync_module,
8993 		new_stream,
8994 		&vrr_params,
8995 		PACKET_TYPE_VRR,
8996 		TRANSFER_FUNC_UNKNOWN,
8997 		&vrr_infopacket,
8998 		pack_sdp_v1_3);
8999 
9000 	new_crtc_state->freesync_timing_changed |=
9001 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9002 			&vrr_params.adjust,
9003 			sizeof(vrr_params.adjust)) != 0);
9004 
9005 	new_crtc_state->freesync_vrr_info_changed |=
9006 		(memcmp(&new_crtc_state->vrr_infopacket,
9007 			&vrr_infopacket,
9008 			sizeof(vrr_infopacket)) != 0);
9009 
9010 	acrtc->dm_irq_params.vrr_params = vrr_params;
9011 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9012 
9013 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9014 	new_stream->vrr_infopacket = vrr_infopacket;
9015 
9016 	if (new_crtc_state->freesync_vrr_info_changed)
9017 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9018 			      new_crtc_state->base.crtc->base.id,
9019 			      (int)new_crtc_state->base.vrr_enabled,
9020 			      (int)vrr_params.state);
9021 
9022 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9023 }
9024 
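/*
 * Recompute the FreeSync/VRR configuration for a stream and copy it into the
 * CRTC's IRQ parameters for use by the interrupt handlers.
 */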
9025 static void update_stream_irq_parameters(
9026 	struct amdgpu_display_manager *dm,
9027 	struct dm_crtc_state *new_crtc_state)
9028 {
9029 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9030 	struct mod_vrr_params vrr_params;
9031 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9032 	struct amdgpu_device *adev = dm->adev;
9033 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9034 	unsigned long flags;
9035 
9036 	if (!new_stream)
9037 		return;
9038 
9039 	/*
9040 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9041 	 * For now it's sufficient to just guard against these conditions.
9042 	 */
9043 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9044 		return;
9045 
9046 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9047 	vrr_params = acrtc->dm_irq_params.vrr_params;
9048 
9049 	if (new_crtc_state->vrr_supported &&
9050 	    config.min_refresh_in_uhz &&
9051 	    config.max_refresh_in_uhz) {
9052 		/*
9053 		 * if freesync compatible mode was set, config.state will be set
9054 		 * in atomic check
9055 		 */
9056 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9057 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9058 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9059 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9060 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9061 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9062 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9063 		} else {
9064 			config.state = new_crtc_state->base.vrr_enabled ?
9065 						     VRR_STATE_ACTIVE_VARIABLE :
9066 						     VRR_STATE_INACTIVE;
9067 		}
9068 	} else {
9069 		config.state = VRR_STATE_UNSUPPORTED;
9070 	}
9071 
9072 	mod_freesync_build_vrr_params(dm->freesync_module,
9073 				      new_stream,
9074 				      &config, &vrr_params);
9075 
9076 	new_crtc_state->freesync_timing_changed |=
9077 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9078 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9079 
9080 	new_crtc_state->freesync_config = config;
9081 	/* Copy state for access from DM IRQ handler */
9082 	acrtc->dm_irq_params.freesync_config = config;
9083 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9084 	acrtc->dm_irq_params.vrr_params = vrr_params;
9085 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9086 }
9087 
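/*
 * Take or drop the vupdate IRQ and vblank reference when VRR transitions
 * between active and inactive.
 */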
9088 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9089 					    struct dm_crtc_state *new_state)
9090 {
9091 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9092 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9093 
9094 	if (!old_vrr_active && new_vrr_active) {
9095 		/* Transition VRR inactive -> active:
9096 		 * While VRR is active, we must not disable vblank irq, as a
9097 		 * reenable after disable would compute bogus vblank/pflip
9098 		 * timestamps if it likely happened inside display front-porch.
9099 		 *
9100 		 * We also need vupdate irq for the actual core vblank handling
9101 		 * at end of vblank.
9102 		 */
9103 		dm_set_vupdate_irq(new_state->base.crtc, true);
9104 		drm_crtc_vblank_get(new_state->base.crtc);
9105 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9106 				 __func__, new_state->base.crtc->base.id);
9107 	} else if (old_vrr_active && !new_vrr_active) {
9108 		/* Transition VRR active -> inactive:
9109 		 * Allow vblank irq disable again for fixed refresh rate.
9110 		 */
9111 		dm_set_vupdate_irq(new_state->base.crtc, false);
9112 		drm_crtc_vblank_put(new_state->base.crtc);
9113 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9114 				 __func__, new_state->base.crtc->base.id);
9115 	}
9116 }
9117 
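/* Flush cursor updates for every cursor plane in the atomic state. */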
9118 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9119 {
9120 	struct drm_plane *plane;
9121 	struct drm_plane_state *old_plane_state;
9122 	int i;
9123 
9124 	/*
9125 	 * TODO: Make this per-stream so we don't issue redundant updates for
9126 	 * commits with multiple streams.
9127 	 */
9128 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9129 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9130 			handle_cursor_update(plane, old_plane_state);
9131 }
9132 
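/*
 * Program all plane updates for a single CRTC: build a dc_surface_update
 * bundle, throttle page flips against the target vblank, and commit the
 * updates to DC along with any stream-level changes.
 */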
9133 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9134 				    struct dc_state *dc_state,
9135 				    struct drm_device *dev,
9136 				    struct amdgpu_display_manager *dm,
9137 				    struct drm_crtc *pcrtc,
9138 				    bool wait_for_vblank)
9139 {
9140 	uint32_t i;
9141 	uint64_t timestamp_ns;
9142 	struct drm_plane *plane;
9143 	struct drm_plane_state *old_plane_state, *new_plane_state;
9144 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9145 	struct drm_crtc_state *new_pcrtc_state =
9146 			drm_atomic_get_new_crtc_state(state, pcrtc);
9147 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9148 	struct dm_crtc_state *dm_old_crtc_state =
9149 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9150 	int planes_count = 0, vpos, hpos;
9151 	long r;
9152 	unsigned long flags;
9153 	struct amdgpu_bo *abo;
9154 	uint32_t target_vblank, last_flip_vblank;
9155 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9156 	bool pflip_present = false;
9157 	struct {
9158 		struct dc_surface_update surface_updates[MAX_SURFACES];
9159 		struct dc_plane_info plane_infos[MAX_SURFACES];
9160 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9161 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9162 		struct dc_stream_update stream_update;
9163 	} *bundle;
9164 
9165 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9166 
9167 	if (!bundle) {
9168 		dm_error("Failed to allocate update bundle\n");
9169 		goto cleanup;
9170 	}
9171 
9172 	/*
9173 	 * Disable the cursor first if we're disabling all the planes.
9174 	 * It'll remain on the screen after the planes are re-enabled
9175 	 * if we don't.
9176 	 */
9177 	if (acrtc_state->active_planes == 0)
9178 		amdgpu_dm_commit_cursors(state);
9179 
9180 	/* update planes when needed */
9181 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9182 		struct drm_crtc *crtc = new_plane_state->crtc;
9183 		struct drm_crtc_state *new_crtc_state;
9184 		struct drm_framebuffer *fb = new_plane_state->fb;
9185 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9186 		bool plane_needs_flip;
9187 		struct dc_plane_state *dc_plane;
9188 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9189 
9190 		/* Cursor plane is handled after stream updates */
9191 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9192 			continue;
9193 
9194 		if (!fb || !crtc || pcrtc != crtc)
9195 			continue;
9196 
9197 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9198 		if (!new_crtc_state->active)
9199 			continue;
9200 
9201 		dc_plane = dm_new_plane_state->dc_state;
9202 
9203 		bundle->surface_updates[planes_count].surface = dc_plane;
9204 		if (new_pcrtc_state->color_mgmt_changed) {
9205 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9206 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9207 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9208 		}
9209 
9210 		fill_dc_scaling_info(dm->adev, new_plane_state,
9211 				     &bundle->scaling_infos[planes_count]);
9212 
9213 		bundle->surface_updates[planes_count].scaling_info =
9214 			&bundle->scaling_infos[planes_count];
9215 
9216 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9217 
9218 		pflip_present = pflip_present || plane_needs_flip;
9219 
9220 		if (!plane_needs_flip) {
9221 			planes_count += 1;
9222 			continue;
9223 		}
9224 
9225 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9226 
9227 		/*
9228 		 * Wait for all fences on this FB. Do limited wait to avoid
9229 		 * deadlock during GPU reset when this fence will not signal
9230 		 * but we hold reservation lock for the BO.
9231 		 */
9232 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9233 					  msecs_to_jiffies(5000));
9234 		if (unlikely(r <= 0))
9235 			DRM_ERROR("Waiting for fences timed out!");
9236 
9237 		fill_dc_plane_info_and_addr(
9238 			dm->adev, new_plane_state,
9239 			afb->tiling_flags,
9240 			&bundle->plane_infos[planes_count],
9241 			&bundle->flip_addrs[planes_count].address,
9242 			afb->tmz_surface, false);
9243 
9244 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9245 				 new_plane_state->plane->index,
9246 				 bundle->plane_infos[planes_count].dcc.enable);
9247 
9248 		bundle->surface_updates[planes_count].plane_info =
9249 			&bundle->plane_infos[planes_count];
9250 
9251 		/*
9252 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9254 		 */
9255 		bundle->flip_addrs[planes_count].flip_immediate =
9256 			crtc->state->async_flip &&
9257 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9258 
9259 		timestamp_ns = ktime_get_ns();
9260 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9261 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9262 		bundle->surface_updates[planes_count].surface = dc_plane;
9263 
9264 		if (!bundle->surface_updates[planes_count].surface) {
9265 			DRM_ERROR("No surface for CRTC: id=%d\n",
9266 					acrtc_attach->crtc_id);
9267 			continue;
9268 		}
9269 
9270 		if (plane == pcrtc->primary)
9271 			update_freesync_state_on_stream(
9272 				dm,
9273 				acrtc_state,
9274 				acrtc_state->stream,
9275 				dc_plane,
9276 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9277 
9278 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9279 				 __func__,
9280 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9281 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9282 
9283 		planes_count += 1;
9284 
9285 	}
9286 
9287 	if (pflip_present) {
9288 		if (!vrr_active) {
9289 			/* Use old throttling in non-vrr fixed refresh rate mode
9290 			 * to keep flip scheduling based on target vblank counts
9291 			 * working in a backwards compatible way, e.g., for
9292 			 * clients using the GLX_OML_sync_control extension or
9293 			 * DRI3/Present extension with defined target_msc.
9294 			 */
9295 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9296 		}
9297 		else {
9298 			/* For variable refresh rate mode only:
9299 			 * Get vblank of last completed flip to avoid > 1 vrr
9300 			 * flips per video frame by use of throttling, but allow
9301 			 * flip programming anywhere in the possibly large
9302 			 * variable vrr vblank interval for fine-grained flip
9303 			 * timing control and more opportunity to avoid stutter
9304 			 * on late submission of flips.
9305 			 */
9306 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9307 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9308 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9309 		}
9310 
9311 		target_vblank = last_flip_vblank + wait_for_vblank;
9312 
9313 		/*
9314 		 * Wait until we're out of the vertical blank period before the one
9315 		 * targeted by the flip
9316 		 */
9317 		while ((acrtc_attach->enabled &&
9318 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9319 							    0, &vpos, &hpos, NULL,
9320 							    NULL, &pcrtc->hwmode)
9321 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9322 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9323 			(int)(target_vblank -
9324 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9325 			usleep_range(1000, 1100);
9326 		}
9327 
9328 		/**
9329 		 * Prepare the flip event for the pageflip interrupt to handle.
9330 		 *
9331 		 * This only works in the case where we've already turned on the
9332 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9333 		 * from 0 -> n planes we have to skip a hardware generated event
9334 		 * and rely on sending it from software.
9335 		 */
9336 		if (acrtc_attach->base.state->event &&
9337 		    acrtc_state->active_planes > 0 &&
9338 		    !acrtc_state->force_dpms_off) {
9339 			drm_crtc_vblank_get(pcrtc);
9340 
9341 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9342 
9343 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9344 			prepare_flip_isr(acrtc_attach);
9345 
9346 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9347 		}
9348 
9349 		if (acrtc_state->stream) {
9350 			if (acrtc_state->freesync_vrr_info_changed)
9351 				bundle->stream_update.vrr_infopacket =
9352 					&acrtc_state->stream->vrr_infopacket;
9353 		}
9354 	}
9355 
9356 	/* Update the planes if changed or disable if we don't have any. */
9357 	if ((planes_count || acrtc_state->active_planes == 0) &&
9358 		acrtc_state->stream) {
9359 #if defined(CONFIG_DRM_AMD_DC_DCN)
9360 		/*
9361 		 * If PSR or idle optimizations are enabled then flush out
9362 		 * any pending work before hardware programming.
9363 		 */
9364 		if (dm->vblank_control_workqueue)
9365 			flush_workqueue(dm->vblank_control_workqueue);
9366 #endif
9367 
9368 		bundle->stream_update.stream = acrtc_state->stream;
9369 		if (new_pcrtc_state->mode_changed) {
9370 			bundle->stream_update.src = acrtc_state->stream->src;
9371 			bundle->stream_update.dst = acrtc_state->stream->dst;
9372 		}
9373 
9374 		if (new_pcrtc_state->color_mgmt_changed) {
9375 			/*
9376 			 * TODO: This isn't fully correct since we've actually
9377 			 * already modified the stream in place.
9378 			 */
9379 			bundle->stream_update.gamut_remap =
9380 				&acrtc_state->stream->gamut_remap_matrix;
9381 			bundle->stream_update.output_csc_transform =
9382 				&acrtc_state->stream->csc_color_matrix;
9383 			bundle->stream_update.out_transfer_func =
9384 				acrtc_state->stream->out_transfer_func;
9385 		}
9386 
9387 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9388 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9389 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9390 
9391 		/*
9392 		 * If FreeSync state on the stream has changed then we need to
9393 		 * re-adjust the min/max bounds now that DC doesn't handle this
9394 		 * as part of commit.
9395 		 */
9396 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9397 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9398 			dc_stream_adjust_vmin_vmax(
9399 				dm->dc, acrtc_state->stream,
9400 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9401 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9402 		}
9403 		mutex_lock(&dm->dc_lock);
9404 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9405 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9406 			amdgpu_dm_psr_disable(acrtc_state->stream);
9407 
9408 		dc_commit_updates_for_stream(dm->dc,
9409 						     bundle->surface_updates,
9410 						     planes_count,
9411 						     acrtc_state->stream,
9412 						     &bundle->stream_update,
9413 						     dc_state);
9414 
9415 		/**
9416 		 * Enable or disable the interrupts on the backend.
9417 		 *
9418 		 * Most pipes are put into power gating when unused.
9419 		 *
9420 		 * When power gating is enabled on a pipe we lose the
9421 		 * interrupt enablement state when power gating is disabled.
9422 		 *
9423 		 * So we need to update the IRQ control state in hardware
9424 		 * whenever the pipe turns on (since it could be previously
9425 		 * power gated) or off (since some pipes can't be power gated
9426 		 * on some ASICs).
9427 		 */
9428 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9429 			dm_update_pflip_irq_state(drm_to_adev(dev),
9430 						  acrtc_attach);
9431 
9432 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9433 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9434 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9435 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9436 
9437 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9438 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9439 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9440 			struct amdgpu_dm_connector *aconn =
9441 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9442 
9443 			if (aconn->psr_skip_count > 0)
9444 				aconn->psr_skip_count--;
9445 
9446 			/* Allow PSR when skip count is 0. */
9447 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9448 		} else {
9449 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9450 		}
9451 
9452 		mutex_unlock(&dm->dc_lock);
9453 	}
9454 
9455 	/*
9456 	 * Update cursor state *after* programming all the planes.
9457 	 * This avoids redundant programming in the case where we're going
9458 	 * to be disabling a single plane - those pipes are being disabled.
9459 	 */
9460 	if (acrtc_state->active_planes)
9461 		amdgpu_dm_commit_cursors(state);
9462 
9463 cleanup:
9464 	kfree(bundle);
9465 }
9466 
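/*
 * Notify the audio driver about connector audio instances that were removed
 * or added by this commit.
 */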
9467 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9468 				   struct drm_atomic_state *state)
9469 {
9470 	struct amdgpu_device *adev = drm_to_adev(dev);
9471 	struct amdgpu_dm_connector *aconnector;
9472 	struct drm_connector *connector;
9473 	struct drm_connector_state *old_con_state, *new_con_state;
9474 	struct drm_crtc_state *new_crtc_state;
9475 	struct dm_crtc_state *new_dm_crtc_state;
9476 	const struct dc_stream_status *status;
9477 	int i, inst;
9478 
9479 	/* Notify device removals. */
9480 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9481 		if (old_con_state->crtc != new_con_state->crtc) {
9482 			/* CRTC changes require notification. */
9483 			goto notify;
9484 		}
9485 
9486 		if (!new_con_state->crtc)
9487 			continue;
9488 
9489 		new_crtc_state = drm_atomic_get_new_crtc_state(
9490 			state, new_con_state->crtc);
9491 
9492 		if (!new_crtc_state)
9493 			continue;
9494 
9495 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9496 			continue;
9497 
9498 	notify:
9499 		aconnector = to_amdgpu_dm_connector(connector);
9500 
9501 		mutex_lock(&adev->dm.audio_lock);
9502 		inst = aconnector->audio_inst;
9503 		aconnector->audio_inst = -1;
9504 		mutex_unlock(&adev->dm.audio_lock);
9505 
9506 		amdgpu_dm_audio_eld_notify(adev, inst);
9507 	}
9508 
9509 	/* Notify audio device additions. */
9510 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9511 		if (!new_con_state->crtc)
9512 			continue;
9513 
9514 		new_crtc_state = drm_atomic_get_new_crtc_state(
9515 			state, new_con_state->crtc);
9516 
9517 		if (!new_crtc_state)
9518 			continue;
9519 
9520 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9521 			continue;
9522 
9523 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9524 		if (!new_dm_crtc_state->stream)
9525 			continue;
9526 
9527 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9528 		if (!status)
9529 			continue;
9530 
9531 		aconnector = to_amdgpu_dm_connector(connector);
9532 
9533 		mutex_lock(&adev->dm.audio_lock);
9534 		inst = status->audio_inst;
9535 		aconnector->audio_inst = inst;
9536 		mutex_unlock(&adev->dm.audio_lock);
9537 
9538 		amdgpu_dm_audio_eld_notify(adev, inst);
9539 	}
9540 }
9541 
9542 /*
9543  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9544  * @crtc_state: the DRM CRTC state
9545  * @stream_state: the DC stream state.
9546  *
9547  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9548  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9549  */
9550 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9551 						struct dc_stream_state *stream_state)
9552 {
9553 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9554 }
9555 
9556 /**
9557  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9558  * @state: The atomic state to commit
9559  *
9560  * This will tell DC to commit the constructed DC state from atomic_check,
9561  * programming the hardware. Any failures here implies a hardware failure, since
9562  * atomic check should have filtered anything non-kosher.
9563  */
9564 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9565 {
9566 	struct drm_device *dev = state->dev;
9567 	struct amdgpu_device *adev = drm_to_adev(dev);
9568 	struct amdgpu_display_manager *dm = &adev->dm;
9569 	struct dm_atomic_state *dm_state;
9570 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9571 	uint32_t i, j;
9572 	struct drm_crtc *crtc;
9573 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9574 	unsigned long flags;
9575 	bool wait_for_vblank = true;
9576 	struct drm_connector *connector;
9577 	struct drm_connector_state *old_con_state, *new_con_state;
9578 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9579 	int crtc_disable_count = 0;
9580 	bool mode_set_reset_required = false;
9581 
9582 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9583 
9584 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9585 
9586 	dm_state = dm_atomic_get_new_state(state);
9587 	if (dm_state && dm_state->context) {
9588 		dc_state = dm_state->context;
9589 	} else {
9590 		/* No state changes, retain current state. */
9591 		dc_state_temp = dc_create_state(dm->dc);
9592 		ASSERT(dc_state_temp);
9593 		dc_state = dc_state_temp;
9594 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9595 	}
9596 
9597 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9598 				       new_crtc_state, i) {
9599 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9600 
9601 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9602 
9603 		if (old_crtc_state->active &&
9604 		    (!new_crtc_state->active ||
9605 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9606 			manage_dm_interrupts(adev, acrtc, false);
9607 			dc_stream_release(dm_old_crtc_state->stream);
9608 		}
9609 	}
9610 
9611 	drm_atomic_helper_calc_timestamping_constants(state);
9612 
9613 	/* update changed items */
9614 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9615 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9616 
9617 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9618 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9619 
9620 		DRM_DEBUG_ATOMIC(
9621 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9622 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9623 			"connectors_changed:%d\n",
9624 			acrtc->crtc_id,
9625 			new_crtc_state->enable,
9626 			new_crtc_state->active,
9627 			new_crtc_state->planes_changed,
9628 			new_crtc_state->mode_changed,
9629 			new_crtc_state->active_changed,
9630 			new_crtc_state->connectors_changed);
9631 
9632 		/* Disable cursor if disabling crtc */
9633 		if (old_crtc_state->active && !new_crtc_state->active) {
9634 			struct dc_cursor_position position;
9635 
9636 			memset(&position, 0, sizeof(position));
9637 			mutex_lock(&dm->dc_lock);
9638 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9639 			mutex_unlock(&dm->dc_lock);
9640 		}
9641 
9642 		/* Copy all transient state flags into dc state */
9643 		if (dm_new_crtc_state->stream) {
9644 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9645 							    dm_new_crtc_state->stream);
9646 		}
9647 
		/* Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
9651 
9652 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9653 
9654 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9655 
9656 			if (!dm_new_crtc_state->stream) {
9657 				/*
9658 				 * this could happen because of issues with
9659 				 * userspace notifications delivery.
9660 				 * In this case userspace tries to set mode on
9661 				 * display which is disconnected in fact.
9662 				 * dc_sink is NULL in this case on aconnector.
9663 				 * We expect reset mode will come soon.
9664 				 *
9665 				 * This can also happen when unplug is done
9666 				 * during resume sequence ended
9667 				 *
9668 				 * In this case, we want to pretend we still
9669 				 * have a sink to keep the pipe running so that
9670 				 * hw state is consistent with the sw state
9671 				 */
9672 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9673 						__func__, acrtc->base.base.id);
9674 				continue;
9675 			}
9676 
9677 			if (dm_old_crtc_state->stream)
9678 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9679 
9680 			pm_runtime_get_noresume(dev->dev);
9681 
9682 			acrtc->enabled = true;
9683 			acrtc->hw_mode = new_crtc_state->mode;
9684 			crtc->hwmode = new_crtc_state->mode;
9685 			mode_set_reset_required = true;
9686 		} else if (modereset_required(new_crtc_state)) {
9687 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9688 			/* i.e. reset mode */
9689 			if (dm_old_crtc_state->stream)
9690 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9691 
9692 			mode_set_reset_required = true;
9693 		}
9694 	} /* for_each_crtc_in_state() */
9695 
9696 	if (dc_state) {
		/* If there was a mode set or reset, disable eDP PSR. */
9698 		if (mode_set_reset_required) {
9699 #if defined(CONFIG_DRM_AMD_DC_DCN)
9700 			if (dm->vblank_control_workqueue)
9701 				flush_workqueue(dm->vblank_control_workqueue);
9702 #endif
9703 			amdgpu_dm_psr_disable_all(dm);
9704 		}
9705 
9706 		dm_enable_per_frame_crtc_master_sync(dc_state);
9707 		mutex_lock(&dm->dc_lock);
9708 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9709 #if defined(CONFIG_DRM_AMD_DC_DCN)
		/* Allow idle optimizations when the vblank count is 0 (display off). */
		if (dm->active_vblank_irq_count == 0)
			dc_allow_idle_optimizations(dm->dc, true);
9713 #endif
9714 		mutex_unlock(&dm->dc_lock);
9715 	}
9716 
9717 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9718 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9719 
9720 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9721 
9722 		if (dm_new_crtc_state->stream != NULL) {
9723 			const struct dc_stream_status *status =
9724 					dc_stream_get_status(dm_new_crtc_state->stream);
9725 
9726 			if (!status)
9727 				status = dc_stream_get_status_from_state(dc_state,
9728 									 dm_new_crtc_state->stream);
9729 			if (!status)
9730 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9731 			else
9732 				acrtc->otg_inst = status->primary_otg_inst;
9733 		}
9734 	}
9735 #ifdef CONFIG_DRM_AMD_DC_HDCP
9736 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9737 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9738 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9739 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9740 
9741 		new_crtc_state = NULL;
9742 
9743 		if (acrtc)
9744 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9745 
9746 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9747 
9748 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9749 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9750 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9751 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9752 			dm_new_con_state->update_hdcp = true;
9753 			continue;
9754 		}
9755 
9756 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9757 			hdcp_update_display(
9758 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9759 				new_con_state->hdcp_content_type,
9760 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9761 	}
9762 #endif
9763 
9764 	/* Handle connector state changes */
9765 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9766 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9767 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9768 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9769 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9770 		struct dc_stream_update stream_update;
9771 		struct dc_info_packet hdr_packet;
9772 		struct dc_stream_status *status = NULL;
9773 		bool abm_changed, hdr_changed, scaling_changed;
9774 
9775 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9776 		memset(&stream_update, 0, sizeof(stream_update));
9777 
9778 		if (acrtc) {
9779 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9780 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9781 		}
9782 
9783 		/* Skip any modesets/resets */
9784 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9785 			continue;
9786 
9787 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9788 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9789 
9790 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9791 							     dm_old_con_state);
9792 
9793 		abm_changed = dm_new_crtc_state->abm_level !=
9794 			      dm_old_crtc_state->abm_level;
9795 
9796 		hdr_changed =
9797 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9798 
9799 		if (!scaling_changed && !abm_changed && !hdr_changed)
9800 			continue;
9801 
9802 		stream_update.stream = dm_new_crtc_state->stream;
9803 		if (scaling_changed) {
9804 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9805 					dm_new_con_state, dm_new_crtc_state->stream);
9806 
9807 			stream_update.src = dm_new_crtc_state->stream->src;
9808 			stream_update.dst = dm_new_crtc_state->stream->dst;
9809 		}
9810 
9811 		if (abm_changed) {
9812 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9813 
9814 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9815 		}
9816 
9817 		if (hdr_changed) {
9818 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9819 			stream_update.hdr_static_metadata = &hdr_packet;
9820 		}
9821 
9822 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9823 
9824 		if (WARN_ON(!status))
9825 			continue;
9826 
9827 		WARN_ON(!status->plane_count);
9828 
9829 		/*
9830 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9831 		 * Here we create an empty update on each plane.
9832 		 * To fix this, DC should permit updating only stream properties.
9833 		 */
9834 		for (j = 0; j < status->plane_count; j++)
9835 			dummy_updates[j].surface = status->plane_states[0];
9836 
9837 
9838 		mutex_lock(&dm->dc_lock);
9839 		dc_commit_updates_for_stream(dm->dc,
9840 						     dummy_updates,
9841 						     status->plane_count,
9842 						     dm_new_crtc_state->stream,
9843 						     &stream_update,
9844 						     dc_state);
9845 		mutex_unlock(&dm->dc_lock);
9846 	}
9847 
9848 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9849 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9850 				      new_crtc_state, i) {
9851 		if (old_crtc_state->active && !new_crtc_state->active)
9852 			crtc_disable_count++;
9853 
9854 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9855 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9856 
9857 		/* For freesync config update on crtc state and params for irq */
9858 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9859 
9860 		/* Handle vrr on->off / off->on transitions */
9861 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9862 						dm_new_crtc_state);
9863 	}
9864 
9865 	/**
9866 	 * Enable interrupts for CRTCs that are newly enabled or went through
9867 	 * a modeset. It was intentionally deferred until after the front end
9868 	 * state was modified to wait until the OTG was on and so the IRQ
9869 	 * handlers didn't access stale or invalid state.
9870 	 */
9871 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9872 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9873 #ifdef CONFIG_DEBUG_FS
9874 		bool configure_crc = false;
9875 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9876 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9877 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9878 #endif
9879 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9880 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9881 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9882 #endif
9883 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9884 
9885 		if (new_crtc_state->active &&
9886 		    (!old_crtc_state->active ||
9887 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9888 			dc_stream_retain(dm_new_crtc_state->stream);
9889 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9890 			manage_dm_interrupts(adev, acrtc, true);
9891 
9892 #ifdef CONFIG_DEBUG_FS
9893 			/**
9894 			 * Frontend may have changed so reapply the CRC capture
9895 			 * settings for the stream.
9896 			 */
9897 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9898 
9899 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9900 				configure_crc = true;
9901 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9902 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9903 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9904 					acrtc->dm_irq_params.crc_window.update_win = true;
9905 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9906 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9907 					crc_rd_wrk->crtc = crtc;
9908 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9909 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9910 				}
9911 #endif
9912 			}
9913 
9914 			if (configure_crc)
9915 				if (amdgpu_dm_crtc_configure_crc_source(
9916 					crtc, dm_new_crtc_state, cur_crc_src))
9917 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9918 #endif
9919 		}
9920 	}
9921 
9922 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9923 		if (new_crtc_state->async_flip)
9924 			wait_for_vblank = false;
9925 
9926 	/* update planes when needed per crtc*/
9927 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9928 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9929 
9930 		if (dm_new_crtc_state->stream)
9931 			amdgpu_dm_commit_planes(state, dc_state, dev,
9932 						dm, crtc, wait_for_vblank);
9933 	}
9934 
9935 	/* Update audio instances for each connector. */
9936 	amdgpu_dm_commit_audio(dev, state);
9937 
9938 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9939 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9940 	/* restore the backlight level */
9941 	for (i = 0; i < dm->num_of_edps; i++) {
9942 		if (dm->backlight_dev[i] &&
9943 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9944 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9945 	}
9946 #endif
9947 	/*
	 * Send the vblank event for any CRTC whose event was not handled in
	 * the flip path and mark the event consumed for
	 * drm_atomic_helper_commit_hw_done().
9950 	 */
9951 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9952 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9953 
9954 		if (new_crtc_state->event)
9955 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9956 
9957 		new_crtc_state->event = NULL;
9958 	}
9959 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9960 
9961 	/* Signal HW programming completion */
9962 	drm_atomic_helper_commit_hw_done(state);
9963 
9964 	if (wait_for_vblank)
9965 		drm_atomic_helper_wait_for_flip_done(dev, state);
9966 
9967 	drm_atomic_helper_cleanup_planes(dev, state);
9968 
9969 	/* return the stolen vga memory back to VRAM */
9970 	if (!adev->mman.keep_stolen_vga_memory)
9971 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9972 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9973 
9974 	/*
9975 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9976 	 * so we can put the GPU into runtime suspend if we're not driving any
9977 	 * displays anymore
9978 	 */
9979 	for (i = 0; i < crtc_disable_count; i++)
9980 		pm_runtime_put_autosuspend(dev->dev);
9981 	pm_runtime_mark_last_busy(dev->dev);
9982 
9983 	if (dc_state_temp)
9984 		dc_release_state(dc_state_temp);
9985 }
9986 
9987 
9988 static int dm_force_atomic_commit(struct drm_connector *connector)
9989 {
9990 	int ret = 0;
9991 	struct drm_device *ddev = connector->dev;
9992 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9993 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9994 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9995 	struct drm_connector_state *conn_state;
9996 	struct drm_crtc_state *crtc_state;
9997 	struct drm_plane_state *plane_state;
9998 
9999 	if (!state)
10000 		return -ENOMEM;
10001 
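	/*
	 * Reuse the acquire context installed in mode_config; it is only
	 * valid while the caller holds the modeset locks
	 * (drm_modeset_lock_all()).
	 */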
10002 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10003 
10004 	/* Construct an atomic state to restore previous display setting */
10005 
10006 	/*
10007 	 * Attach connectors to drm_atomic_state
10008 	 */
10009 	conn_state = drm_atomic_get_connector_state(state, connector);
10010 
10011 	ret = PTR_ERR_OR_ZERO(conn_state);
10012 	if (ret)
10013 		goto out;
10014 
10015 	/* Attach crtc to drm_atomic_state*/
10016 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10017 
10018 	ret = PTR_ERR_OR_ZERO(crtc_state);
10019 	if (ret)
10020 		goto out;
10021 
10022 	/* force a restore */
10023 	crtc_state->mode_changed = true;
10024 
10025 	/* Attach plane to drm_atomic_state */
10026 	plane_state = drm_atomic_get_plane_state(state, plane);
10027 
10028 	ret = PTR_ERR_OR_ZERO(plane_state);
10029 	if (ret)
10030 		goto out;
10031 
10032 	/* Call commit internally with the state we just constructed */
10033 	ret = drm_atomic_commit(state);
10034 
10035 out:
10036 	drm_atomic_state_put(state);
10037 	if (ret)
10038 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10039 
10040 	return ret;
10041 }
10042 
10043 /*
10044  * This function handles all cases when set mode does not come upon hotplug.
10045  * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support.
10047  */
10048 void dm_restore_drm_connector_state(struct drm_device *dev,
10049 				    struct drm_connector *connector)
10050 {
10051 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10052 	struct amdgpu_crtc *disconnected_acrtc;
10053 	struct dm_crtc_state *acrtc_state;
10054 
10055 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10056 		return;
10057 
10058 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10059 	if (!disconnected_acrtc)
10060 		return;
10061 
10062 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10063 	if (!acrtc_state->stream)
10064 		return;
10065 
10066 	/*
10067 	 * If the previous sink is not released and different from the current,
10068 	 * we deduce we are in a state where we can not rely on usermode call
10069 	 * to turn on the display, so we do it here
10070 	 */
10071 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10072 		dm_force_atomic_commit(&aconnector->base);
10073 }
10074 
10075 /*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
10078  */
10079 static int do_aquire_global_lock(struct drm_device *dev,
10080 				 struct drm_atomic_state *state)
10081 {
10082 	struct drm_crtc *crtc;
10083 	struct drm_crtc_commit *commit;
10084 	long ret;
10085 
10086 	/*
	 * Adding all modeset locks to the acquire_ctx ensures that when the
	 * framework releases the context, the extra locks we take here are
	 * released as well.
10090 	 */
10091 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10092 	if (ret)
10093 		return ret;
10094 
10095 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10096 		spin_lock(&crtc->commit_lock);
10097 		commit = list_first_entry_or_null(&crtc->commit_list,
10098 				struct drm_crtc_commit, commit_entry);
10099 		if (commit)
10100 			drm_crtc_commit_get(commit);
10101 		spin_unlock(&crtc->commit_lock);
10102 
10103 		if (!commit)
10104 			continue;
10105 
10106 		/*
		 * Make sure all pending HW programming completed and
		 * page flips done, waiting up to 10 seconds for each.
10109 		 */
10110 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10111 
10112 		if (ret > 0)
10113 			ret = wait_for_completion_interruptible_timeout(
10114 					&commit->flip_done, 10*HZ);
10115 
10116 		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
10119 
10120 		drm_crtc_commit_put(commit);
10121 	}
10122 
10123 	return ret < 0 ? ret : 0;
10124 }
10125 
10126 static void get_freesync_config_for_crtc(
10127 	struct dm_crtc_state *new_crtc_state,
10128 	struct dm_connector_state *new_con_state)
10129 {
10130 	struct mod_freesync_config config = {0};
10131 	struct amdgpu_dm_connector *aconnector =
10132 			to_amdgpu_dm_connector(new_con_state->base.connector);
10133 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10134 	int vrefresh = drm_mode_vrefresh(mode);
10135 	bool fs_vid_mode = false;
10136 
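	/*
	 * VRR is supported when the connector reports FreeSync capability and
	 * the mode's nominal refresh rate falls within the sink's supported
	 * range.
	 */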
10137 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10138 					vrefresh >= aconnector->min_vfreq &&
10139 					vrefresh <= aconnector->max_vfreq;
10140 
10141 	if (new_crtc_state->vrr_supported) {
10142 		new_crtc_state->stream->ignore_msa_timing_param = true;
10143 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10144 
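		/* min/max_vfreq are in Hz; mod_freesync expects micro-Hz */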
10145 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10146 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10147 		config.vsif_supported = true;
10148 		config.btr = true;
10149 
10150 		if (fs_vid_mode) {
10151 			config.state = VRR_STATE_ACTIVE_FIXED;
10152 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10153 			goto out;
10154 		} else if (new_crtc_state->base.vrr_enabled) {
10155 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10156 		} else {
10157 			config.state = VRR_STATE_INACTIVE;
10158 		}
10159 	}
10160 out:
10161 	new_crtc_state->freesync_config = config;
10162 }
10163 
10164 static void reset_freesync_config_for_crtc(
10165 	struct dm_crtc_state *new_crtc_state)
10166 {
10167 	new_crtc_state->vrr_supported = false;
10168 
10169 	memset(&new_crtc_state->vrr_infopacket, 0,
10170 	       sizeof(new_crtc_state->vrr_infopacket));
10171 }
10172 
10173 static bool
10174 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10175 				 struct drm_crtc_state *new_crtc_state)
10176 {
10177 	struct drm_display_mode old_mode, new_mode;
10178 
10179 	if (!old_crtc_state || !new_crtc_state)
10180 		return false;
10181 
10182 	old_mode = old_crtc_state->mode;
10183 	new_mode = new_crtc_state->mode;
10184 
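	/*
	 * Treat the timing as unchanged for FreeSync purposes when only the
	 * vertical front porch differs: vtotal, vsync_start and vsync_end may
	 * move together, but the vsync pulse width and every horizontal
	 * parameter must stay the same.
	 */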
10185 	if (old_mode.clock       == new_mode.clock &&
10186 	    old_mode.hdisplay    == new_mode.hdisplay &&
10187 	    old_mode.vdisplay    == new_mode.vdisplay &&
10188 	    old_mode.htotal      == new_mode.htotal &&
10189 	    old_mode.vtotal      != new_mode.vtotal &&
10190 	    old_mode.hsync_start == new_mode.hsync_start &&
10191 	    old_mode.vsync_start != new_mode.vsync_start &&
10192 	    old_mode.hsync_end   == new_mode.hsync_end &&
10193 	    old_mode.vsync_end   != new_mode.vsync_end &&
10194 	    old_mode.hskew       == new_mode.hskew &&
10195 	    old_mode.vscan       == new_mode.vscan &&
10196 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10197 	    (new_mode.vsync_end - new_mode.vsync_start))
10198 		return true;
10199 
10200 	return false;
10201 }
10202 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10204 	uint64_t num, den, res;
10205 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10206 
10207 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10208 
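	/*
	 * mode.clock is in kHz; convert it to a fixed refresh rate in
	 * micro-Hz: (clock * 1000 Hz) * 1000000 / (htotal * vtotal).
	 */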
10209 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10210 	den = (unsigned long long)new_crtc_state->mode.htotal *
10211 	      (unsigned long long)new_crtc_state->mode.vtotal;
10212 
10213 	res = div_u64(num, den);
10214 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10215 }
10216 
10217 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10218 			 struct drm_atomic_state *state,
10219 			 struct drm_crtc *crtc,
10220 			 struct drm_crtc_state *old_crtc_state,
10221 			 struct drm_crtc_state *new_crtc_state,
10222 			 bool enable,
10223 			 bool *lock_and_validation_needed)
10224 {
10225 	struct dm_atomic_state *dm_state = NULL;
10226 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10227 	struct dc_stream_state *new_stream;
10228 	int ret = 0;
10229 
10230 	/*
10231 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10232 	 * update changed items
10233 	 */
10234 	struct amdgpu_crtc *acrtc = NULL;
10235 	struct amdgpu_dm_connector *aconnector = NULL;
10236 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10237 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10238 
10239 	new_stream = NULL;
10240 
10241 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10242 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10243 	acrtc = to_amdgpu_crtc(crtc);
10244 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10245 
10246 	/* TODO This hack should go away */
10247 	if (aconnector && enable) {
10248 		/* Make sure fake sink is created in plug-in scenario */
10249 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10250 							    &aconnector->base);
10251 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10252 							    &aconnector->base);
10253 
10254 		if (IS_ERR(drm_new_conn_state)) {
10255 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10256 			goto fail;
10257 		}
10258 
10259 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10260 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10261 
10262 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10263 			goto skip_modeset;
10264 
10265 		new_stream = create_validate_stream_for_sink(aconnector,
10266 							     &new_crtc_state->mode,
10267 							     dm_new_conn_state,
10268 							     dm_old_crtc_state->stream);
10269 
10270 		/*
		 * We can end up with no stream on ACTION_SET if a display
		 * was disconnected during S3. In this case it is not an
		 * error: the OS will be updated after detection, and
		 * will do the right thing on the next atomic commit.
10275 		 */
10276 
10277 		if (!new_stream) {
10278 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10279 					__func__, acrtc->base.base.id);
10280 			ret = -ENOMEM;
10281 			goto fail;
10282 		}
10283 
10284 		/*
10285 		 * TODO: Check VSDB bits to decide whether this should
10286 		 * be enabled or not.
10287 		 */
10288 		new_stream->triggered_crtc_reset.enabled =
10289 			dm->force_timing_sync;
10290 
10291 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10292 
10293 		ret = fill_hdr_info_packet(drm_new_conn_state,
10294 					   &new_stream->hdr_static_metadata);
10295 		if (ret)
10296 			goto fail;
10297 
10298 		/*
10299 		 * If we already removed the old stream from the context
10300 		 * (and set the new stream to NULL) then we can't reuse
10301 		 * the old stream even if the stream and scaling are unchanged.
10302 		 * We'll hit the BUG_ON and black screen.
10303 		 *
10304 		 * TODO: Refactor this function to allow this check to work
10305 		 * in all conditions.
10306 		 */
10307 		if (dm_new_crtc_state->stream &&
10308 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10309 			goto skip_modeset;
10310 
10311 		if (dm_new_crtc_state->stream &&
10312 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10313 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10314 			new_crtc_state->mode_changed = false;
10315 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10316 					 new_crtc_state->mode_changed);
10317 		}
10318 	}
10319 
10320 	/* mode_changed flag may get updated above, need to check again */
10321 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10322 		goto skip_modeset;
10323 
10324 	DRM_DEBUG_ATOMIC(
10325 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10326 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10327 		"connectors_changed:%d\n",
10328 		acrtc->crtc_id,
10329 		new_crtc_state->enable,
10330 		new_crtc_state->active,
10331 		new_crtc_state->planes_changed,
10332 		new_crtc_state->mode_changed,
10333 		new_crtc_state->active_changed,
10334 		new_crtc_state->connectors_changed);
10335 
10336 	/* Remove stream for any changed/disabled CRTC */
10337 	if (!enable) {
10338 
10339 		if (!dm_old_crtc_state->stream)
10340 			goto skip_modeset;
10341 
10342 		if (dm_new_crtc_state->stream &&
10343 		    is_timing_unchanged_for_freesync(new_crtc_state,
10344 						     old_crtc_state)) {
10345 			new_crtc_state->mode_changed = false;
10346 			DRM_DEBUG_DRIVER(
10347 				"Mode change not required for front porch change, "
10348 				"setting mode_changed to %d",
10349 				new_crtc_state->mode_changed);
10350 
10351 			set_freesync_fixed_config(dm_new_crtc_state);
10352 
10353 			goto skip_modeset;
10354 		} else if (aconnector &&
10355 			   is_freesync_video_mode(&new_crtc_state->mode,
10356 						  aconnector)) {
10357 			struct drm_display_mode *high_mode;
10358 
10359 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10360 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10361 				set_freesync_fixed_config(dm_new_crtc_state);
10362 			}
10363 		}
10364 
10365 		ret = dm_atomic_get_state(state, &dm_state);
10366 		if (ret)
10367 			goto fail;
10368 
10369 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10370 				crtc->base.id);
10371 
10372 		/* i.e. reset mode */
10373 		if (dc_remove_stream_from_ctx(
10374 				dm->dc,
10375 				dm_state->context,
10376 				dm_old_crtc_state->stream) != DC_OK) {
10377 			ret = -EINVAL;
10378 			goto fail;
10379 		}
10380 
10381 		dc_stream_release(dm_old_crtc_state->stream);
10382 		dm_new_crtc_state->stream = NULL;
10383 
10384 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10385 
10386 		*lock_and_validation_needed = true;
10387 
	} else { /* Add stream for any updated/enabled CRTC */
10389 		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when newly added MST connectors are not found in the existing
		 * crtc_state in chained (daisy-chain) mode.
10392 		 * TODO: need to dig out the root cause of that
10393 		 */
10394 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10395 			goto skip_modeset;
10396 
10397 		if (modereset_required(new_crtc_state))
10398 			goto skip_modeset;
10399 
10400 		if (modeset_required(new_crtc_state, new_stream,
10401 				     dm_old_crtc_state->stream)) {
10402 
10403 			WARN_ON(dm_new_crtc_state->stream);
10404 
10405 			ret = dm_atomic_get_state(state, &dm_state);
10406 			if (ret)
10407 				goto fail;
10408 
10409 			dm_new_crtc_state->stream = new_stream;
10410 
10411 			dc_stream_retain(new_stream);
10412 
10413 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10414 					 crtc->base.id);
10415 
10416 			if (dc_add_stream_to_ctx(
10417 					dm->dc,
10418 					dm_state->context,
10419 					dm_new_crtc_state->stream) != DC_OK) {
10420 				ret = -EINVAL;
10421 				goto fail;
10422 			}
10423 
10424 			*lock_and_validation_needed = true;
10425 		}
10426 	}
10427 
10428 skip_modeset:
10429 	/* Release extra reference */
10430 	if (new_stream)
		dc_stream_release(new_stream);
10432 
10433 	/*
10434 	 * We want to do dc stream updates that do not require a
10435 	 * full modeset below.
10436 	 */
10437 	if (!(enable && aconnector && new_crtc_state->active))
10438 		return 0;
10439 	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already in it),
10443 	 * 2. Has a valid connector attached, and
10444 	 * 3. Is currently active and enabled.
10445 	 * => The dc stream state currently exists.
10446 	 */
10447 	BUG_ON(dm_new_crtc_state->stream == NULL);
10448 
10449 	/* Scaling or underscan settings */
10450 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10451 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10452 		update_stream_scaling_settings(
10453 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10454 
10455 	/* ABM settings */
10456 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10457 
10458 	/*
10459 	 * Color management settings. We also update color properties
10460 	 * when a modeset is needed, to ensure it gets reprogrammed.
10461 	 */
10462 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10463 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10464 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10465 		if (ret)
10466 			goto fail;
10467 	}
10468 
10469 	/* Update Freesync settings. */
10470 	get_freesync_config_for_crtc(dm_new_crtc_state,
10471 				     dm_new_conn_state);
10472 
10473 	return ret;
10474 
10475 fail:
10476 	if (new_stream)
10477 		dc_stream_release(new_stream);
10478 	return ret;
10479 }
10480 
10481 static bool should_reset_plane(struct drm_atomic_state *state,
10482 			       struct drm_plane *plane,
10483 			       struct drm_plane_state *old_plane_state,
10484 			       struct drm_plane_state *new_plane_state)
10485 {
10486 	struct drm_plane *other;
10487 	struct drm_plane_state *old_other_state, *new_other_state;
10488 	struct drm_crtc_state *new_crtc_state;
10489 	int i;
10490 
10491 	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
10495 	 */
10496 	if (state->allow_modeset)
10497 		return true;
10498 
10499 	/* Exit early if we know that we're adding or removing the plane. */
10500 	if (old_plane_state->crtc != new_plane_state->crtc)
10501 		return true;
10502 
10503 	/* old crtc == new_crtc == NULL, plane not in context. */
10504 	if (!new_plane_state->crtc)
10505 		return false;
10506 
10507 	new_crtc_state =
10508 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10509 
10510 	if (!new_crtc_state)
10511 		return true;
10512 
10513 	/* CRTC Degamma changes currently require us to recreate planes. */
10514 	if (new_crtc_state->color_mgmt_changed)
10515 		return true;
10516 
10517 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10518 		return true;
10519 
10520 	/*
10521 	 * If there are any new primary or overlay planes being added or
10522 	 * removed then the z-order can potentially change. To ensure
10523 	 * correct z-order and pipe acquisition the current DC architecture
10524 	 * requires us to remove and recreate all existing planes.
10525 	 *
10526 	 * TODO: Come up with a more elegant solution for this.
10527 	 */
10528 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10531 			continue;
10532 
10533 		if (old_other_state->crtc != new_plane_state->crtc &&
10534 		    new_other_state->crtc != new_plane_state->crtc)
10535 			continue;
10536 
10537 		if (old_other_state->crtc != new_other_state->crtc)
10538 			return true;
10539 
10540 		/* Src/dst size and scaling updates. */
10541 		if (old_other_state->src_w != new_other_state->src_w ||
10542 		    old_other_state->src_h != new_other_state->src_h ||
10543 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10544 		    old_other_state->crtc_h != new_other_state->crtc_h)
10545 			return true;
10546 
10547 		/* Rotation / mirroring updates. */
10548 		if (old_other_state->rotation != new_other_state->rotation)
10549 			return true;
10550 
10551 		/* Blending updates. */
10552 		if (old_other_state->pixel_blend_mode !=
10553 		    new_other_state->pixel_blend_mode)
10554 			return true;
10555 
10556 		/* Alpha updates. */
10557 		if (old_other_state->alpha != new_other_state->alpha)
10558 			return true;
10559 
10560 		/* Colorspace changes. */
10561 		if (old_other_state->color_range != new_other_state->color_range ||
10562 		    old_other_state->color_encoding != new_other_state->color_encoding)
10563 			return true;
10564 
10565 		/* Framebuffer checks fall at the end. */
10566 		if (!old_other_state->fb || !new_other_state->fb)
10567 			continue;
10568 
10569 		/* Pixel format changes can require bandwidth updates. */
10570 		if (old_other_state->fb->format != new_other_state->fb->format)
10571 			return true;
10572 
10573 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10574 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10575 
10576 		/* Tiling and DCC changes also require bandwidth updates. */
10577 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10578 		    old_afb->base.modifier != new_afb->base.modifier)
10579 			return true;
10580 	}
10581 
10582 	return false;
10583 }
10584 
10585 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10586 			      struct drm_plane_state *new_plane_state,
10587 			      struct drm_framebuffer *fb)
10588 {
10589 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10590 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10591 	unsigned int pitch;
10592 	bool linear;
10593 
10594 	if (fb->width > new_acrtc->max_cursor_width ||
10595 	    fb->height > new_acrtc->max_cursor_height) {
10596 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10597 				 new_plane_state->fb->width,
10598 				 new_plane_state->fb->height);
10599 		return -EINVAL;
10600 	}
10601 	if (new_plane_state->src_w != fb->width << 16 ||
10602 	    new_plane_state->src_h != fb->height << 16) {
10603 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10604 		return -EINVAL;
10605 	}
10606 
10607 	/* Pitch in pixels */
10608 	pitch = fb->pitches[0] / fb->format->cpp[0];
10609 
10610 	if (fb->width != pitch) {
10611 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10612 				 fb->width, pitch);
10613 		return -EINVAL;
10614 	}
10615 
10616 	switch (pitch) {
10617 	case 64:
10618 	case 128:
10619 	case 256:
10620 		/* FB pitch is supported by cursor plane */
10621 		break;
10622 	default:
10623 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10624 		return -EINVAL;
10625 	}
10626 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10629 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10630 		if (adev->family < AMDGPU_FAMILY_AI) {
10631 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10632 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10633 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10634 		} else {
10635 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10636 		}
10637 		if (!linear) {
10638 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10639 			return -EINVAL;
10640 		}
10641 	}
10642 
10643 	return 0;
10644 }
10645 
10646 static int dm_update_plane_state(struct dc *dc,
10647 				 struct drm_atomic_state *state,
10648 				 struct drm_plane *plane,
10649 				 struct drm_plane_state *old_plane_state,
10650 				 struct drm_plane_state *new_plane_state,
10651 				 bool enable,
10652 				 bool *lock_and_validation_needed)
10653 {
10654 
10655 	struct dm_atomic_state *dm_state = NULL;
10656 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10657 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10658 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10659 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10660 	struct amdgpu_crtc *new_acrtc;
10661 	bool needs_reset;
10662 	int ret = 0;
10663 
10664 
10665 	new_plane_crtc = new_plane_state->crtc;
10666 	old_plane_crtc = old_plane_state->crtc;
10667 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10668 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10669 
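	/*
	 * Cursor planes are never added to the DC context here; only basic
	 * position, size and FB checks are performed before returning early.
	 */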
10670 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10671 		if (!enable || !new_plane_crtc ||
10672 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10673 			return 0;
10674 
10675 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10676 
10677 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10678 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10679 			return -EINVAL;
10680 		}
10681 
10682 		if (new_plane_state->fb) {
10683 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10684 						 new_plane_state->fb);
10685 			if (ret)
10686 				return ret;
10687 		}
10688 
10689 		return 0;
10690 	}
10691 
10692 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10693 					 new_plane_state);
10694 
10695 	/* Remove any changed/removed planes */
10696 	if (!enable) {
10697 		if (!needs_reset)
10698 			return 0;
10699 
10700 		if (!old_plane_crtc)
10701 			return 0;
10702 
10703 		old_crtc_state = drm_atomic_get_old_crtc_state(
10704 				state, old_plane_crtc);
10705 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10706 
10707 		if (!dm_old_crtc_state->stream)
10708 			return 0;
10709 
10710 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10711 				plane->base.id, old_plane_crtc->base.id);
10712 
10713 		ret = dm_atomic_get_state(state, &dm_state);
10714 		if (ret)
10715 			return ret;
10716 
10717 		if (!dc_remove_plane_from_context(
10718 				dc,
10719 				dm_old_crtc_state->stream,
10720 				dm_old_plane_state->dc_state,
10721 				dm_state->context)) {
10722 
10723 			return -EINVAL;
10724 		}
10725 
10726 
10727 		dc_plane_state_release(dm_old_plane_state->dc_state);
10728 		dm_new_plane_state->dc_state = NULL;
10729 
10730 		*lock_and_validation_needed = true;
10731 
10732 	} else { /* Add new planes */
10733 		struct dc_plane_state *dc_new_plane_state;
10734 
10735 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10736 			return 0;
10737 
10738 		if (!new_plane_crtc)
10739 			return 0;
10740 
10741 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10742 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10743 
10744 		if (!dm_new_crtc_state->stream)
10745 			return 0;
10746 
10747 		if (!needs_reset)
10748 			return 0;
10749 
10750 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10751 		if (ret)
10752 			return ret;
10753 
10754 		WARN_ON(dm_new_plane_state->dc_state);
10755 
10756 		dc_new_plane_state = dc_create_plane_state(dc);
10757 		if (!dc_new_plane_state)
10758 			return -ENOMEM;
10759 
10760 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10761 				 plane->base.id, new_plane_crtc->base.id);
10762 
10763 		ret = fill_dc_plane_attributes(
10764 			drm_to_adev(new_plane_crtc->dev),
10765 			dc_new_plane_state,
10766 			new_plane_state,
10767 			new_crtc_state);
10768 		if (ret) {
10769 			dc_plane_state_release(dc_new_plane_state);
10770 			return ret;
10771 		}
10772 
10773 		ret = dm_atomic_get_state(state, &dm_state);
10774 		if (ret) {
10775 			dc_plane_state_release(dc_new_plane_state);
10776 			return ret;
10777 		}
10778 
10779 		/*
10780 		 * Any atomic check errors that occur after this will
10781 		 * not need a release. The plane state will be attached
10782 		 * to the stream, and therefore part of the atomic
10783 		 * state. It'll be released when the atomic state is
10784 		 * cleaned.
10785 		 */
10786 		if (!dc_add_plane_to_context(
10787 				dc,
10788 				dm_new_crtc_state->stream,
10789 				dc_new_plane_state,
10790 				dm_state->context)) {
10791 
10792 			dc_plane_state_release(dc_new_plane_state);
10793 			return -EINVAL;
10794 		}
10795 
10796 		dm_new_plane_state->dc_state = dc_new_plane_state;
10797 
10798 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10799 
		/*
		 * Tell DC to do a full surface update every time there
10801 		 * is a plane change. Inefficient, but works for now.
10802 		 */
10803 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10804 
10805 		*lock_and_validation_needed = true;
10806 	}
10807 
10808 
10809 	return ret;
10810 }
10811 
10812 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10813 				       int *src_w, int *src_h)
10814 {
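	/*
	 * src_w/src_h are 16.16 fixed point; shift down to whole pixels and
	 * swap width/height for 90/270 degree rotations.
	 */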
10815 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10816 	case DRM_MODE_ROTATE_90:
10817 	case DRM_MODE_ROTATE_270:
10818 		*src_w = plane_state->src_h >> 16;
10819 		*src_h = plane_state->src_w >> 16;
10820 		break;
10821 	case DRM_MODE_ROTATE_0:
10822 	case DRM_MODE_ROTATE_180:
10823 	default:
10824 		*src_w = plane_state->src_w >> 16;
10825 		*src_h = plane_state->src_h >> 16;
10826 		break;
10827 	}
10828 }
10829 
10830 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10831 				struct drm_crtc *crtc,
10832 				struct drm_crtc_state *new_crtc_state)
10833 {
10834 	struct drm_plane *cursor = crtc->cursor, *underlying;
10835 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10836 	int i;
10837 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10838 	int cursor_src_w, cursor_src_h;
10839 	int underlying_src_w, underlying_src_h;
10840 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check the cursor plane's
	 * blending properties match the underlying planes'.
	 */
10845 
10846 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10850 
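	/*
	 * Compute the cursor's scaling factors in 1/1000 units (CRTC size over
	 * source size) so they can be compared against each underlying plane.
	 */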
10851 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10852 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10853 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10854 
10855 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10856 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10857 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10858 			continue;
10859 
10860 		/* Ignore disabled planes */
10861 		if (!new_underlying_state->fb)
10862 			continue;
10863 
10864 		dm_get_oriented_plane_size(new_underlying_state,
10865 					   &underlying_src_w, &underlying_src_h);
10866 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10867 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10868 
10869 		if (cursor_scale_w != underlying_scale_w ||
10870 		    cursor_scale_h != underlying_scale_h) {
10871 			drm_dbg_atomic(crtc->dev,
10872 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10873 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10874 			return -EINVAL;
10875 		}
10876 
10877 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10878 		if (new_underlying_state->crtc_x <= 0 &&
10879 		    new_underlying_state->crtc_y <= 0 &&
10880 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10881 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10882 			break;
10883 	}
10884 
10885 	return 0;
10886 }
10887 
10888 #if defined(CONFIG_DRM_AMD_DC_DCN)
10889 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10890 {
10891 	struct drm_connector *connector;
10892 	struct drm_connector_state *conn_state, *old_conn_state;
10893 	struct amdgpu_dm_connector *aconnector = NULL;
10894 	int i;
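
	/*
	 * Find the first MST connector driven by this CRTC; when the new
	 * connector state has no CRTC assigned, fall back to the old state.
	 */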
10895 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10896 		if (!conn_state->crtc)
10897 			conn_state = old_conn_state;
10898 
10899 		if (conn_state->crtc != crtc)
10900 			continue;
10901 
10902 		aconnector = to_amdgpu_dm_connector(connector);
10903 		if (!aconnector->port || !aconnector->mst_port)
10904 			aconnector = NULL;
10905 		else
10906 			break;
10907 	}
10908 
10909 	if (!aconnector)
10910 		return 0;
10911 
10912 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10913 }
10914 #endif
10915 
10916 /**
10917  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10918  * @dev: The DRM device
10919  * @state: The atomic state to commit
10920  *
10921  * Validate that the given atomic state is programmable by DC into hardware.
10922  * This involves constructing a &struct dc_state reflecting the new hardware
10923  * state we wish to commit, then querying DC to see if it is programmable. It's
10924  * important not to modify the existing DC state. Otherwise, atomic_check
10925  * may unexpectedly commit hardware changes.
10926  *
10927  * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
10932  *
10933  * Note that DM adds the affected connectors for all CRTCs in state, when that
10934  * might not seem necessary. This is because DC stream creation requires the
10935  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10936  * be possible but non-trivial - a possible TODO item.
10937  *
 * Return: 0 on success, or a negative error code if validation failed.
10939  */
10940 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10941 				  struct drm_atomic_state *state)
10942 {
10943 	struct amdgpu_device *adev = drm_to_adev(dev);
10944 	struct dm_atomic_state *dm_state = NULL;
10945 	struct dc *dc = adev->dm.dc;
10946 	struct drm_connector *connector;
10947 	struct drm_connector_state *old_con_state, *new_con_state;
10948 	struct drm_crtc *crtc;
10949 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10950 	struct drm_plane *plane;
10951 	struct drm_plane_state *old_plane_state, *new_plane_state;
10952 	enum dc_status status;
10953 	int ret, i;
10954 	bool lock_and_validation_needed = false;
10955 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10956 #if defined(CONFIG_DRM_AMD_DC_DCN)
10957 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10958 	struct drm_dp_mst_topology_state *mst_state;
10959 	struct drm_dp_mst_topology_mgr *mgr;
10960 #endif
10961 
10962 	trace_amdgpu_dm_atomic_check_begin(state);
10963 
10964 	ret = drm_atomic_helper_check_modeset(dev, state);
10965 	if (ret) {
10966 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10967 		goto fail;
10968 	}
10969 
10970 	/* Check connector changes */
10971 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10972 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10973 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10974 
10975 		/* Skip connectors that are disabled or part of modeset already. */
10976 		if (!old_con_state->crtc && !new_con_state->crtc)
10977 			continue;
10978 
10979 		if (!new_con_state->crtc)
10980 			continue;
10981 
10982 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10983 		if (IS_ERR(new_crtc_state)) {
10984 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10985 			ret = PTR_ERR(new_crtc_state);
10986 			goto fail;
10987 		}
10988 
10989 		if (dm_old_con_state->abm_level !=
10990 		    dm_new_con_state->abm_level)
10991 			new_crtc_state->connectors_changed = true;
10992 	}
10993 
10994 #if defined(CONFIG_DRM_AMD_DC_DCN)
10995 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10996 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10997 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10998 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10999 				if (ret) {
11000 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11001 					goto fail;
11002 				}
11003 			}
11004 		}
11005 		pre_validate_dsc(state, &dm_state, vars);
11006 	}
11007 #endif
11008 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11009 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11010 
11011 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11012 		    !new_crtc_state->color_mgmt_changed &&
11013 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11014 			dm_old_crtc_state->dsc_force_changed == false)
11015 			continue;
11016 
11017 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11018 		if (ret) {
11019 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11020 			goto fail;
11021 		}
11022 
11023 		if (!new_crtc_state->enable)
11024 			continue;
11025 
11026 		ret = drm_atomic_add_affected_connectors(state, crtc);
11027 		if (ret) {
11028 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11029 			goto fail;
11030 		}
11031 
11032 		ret = drm_atomic_add_affected_planes(state, crtc);
11033 		if (ret) {
11034 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11035 			goto fail;
11036 		}
11037 
11038 		if (dm_old_crtc_state->dsc_force_changed)
11039 			new_crtc_state->mode_changed = true;
11040 	}
11041 
11042 	/*
11043 	 * Add all primary and overlay planes on the CRTC to the state
11044 	 * whenever a plane is enabled to maintain correct z-ordering
11045 	 * and to enable fast surface updates.
11046 	 */
11047 	drm_for_each_crtc(crtc, dev) {
11048 		bool modified = false;
11049 
11050 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11051 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11052 				continue;
11053 
11054 			if (new_plane_state->crtc == crtc ||
11055 			    old_plane_state->crtc == crtc) {
11056 				modified = true;
11057 				break;
11058 			}
11059 		}
11060 
11061 		if (!modified)
11062 			continue;
11063 
11064 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11065 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11066 				continue;
11067 
11068 			new_plane_state =
11069 				drm_atomic_get_plane_state(state, plane);
11070 
11071 			if (IS_ERR(new_plane_state)) {
11072 				ret = PTR_ERR(new_plane_state);
11073 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11074 				goto fail;
11075 			}
11076 		}
11077 	}
11078 
	/* Remove existing planes if they are modified */
11080 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11081 		ret = dm_update_plane_state(dc, state, plane,
11082 					    old_plane_state,
11083 					    new_plane_state,
11084 					    false,
11085 					    &lock_and_validation_needed);
11086 		if (ret) {
11087 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11088 			goto fail;
11089 		}
11090 	}
11091 
11092 	/* Disable all crtcs which require disable */
11093 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11094 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11095 					   old_crtc_state,
11096 					   new_crtc_state,
11097 					   false,
11098 					   &lock_and_validation_needed);
11099 		if (ret) {
11100 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11101 			goto fail;
11102 		}
11103 	}
11104 
11105 	/* Enable all crtcs which require enable */
11106 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11107 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11108 					   old_crtc_state,
11109 					   new_crtc_state,
11110 					   true,
11111 					   &lock_and_validation_needed);
11112 		if (ret) {
11113 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11114 			goto fail;
11115 		}
11116 	}
11117 
11118 	/* Add new/modified planes */
11119 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11120 		ret = dm_update_plane_state(dc, state, plane,
11121 					    old_plane_state,
11122 					    new_plane_state,
11123 					    true,
11124 					    &lock_and_validation_needed);
11125 		if (ret) {
11126 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11127 			goto fail;
11128 		}
11129 	}
11130 
11131 	/* Run this here since we want to validate the streams we created */
11132 	ret = drm_atomic_helper_check_planes(dev, state);
11133 	if (ret) {
11134 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11135 		goto fail;
11136 	}
11137 
11138 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11139 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11140 		if (dm_new_crtc_state->mpo_requested)
11141 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11142 	}
11143 
11144 	/* Check cursor planes scaling */
11145 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11146 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11147 		if (ret) {
11148 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11149 			goto fail;
11150 		}
11151 	}
11152 
11153 	if (state->legacy_cursor_update) {
11154 		/*
11155 		 * This is a fast cursor update coming from the plane update
11156 		 * helper, check if it can be done asynchronously for better
11157 		 * performance.
11158 		 */
11159 		state->async_update =
11160 			!drm_atomic_helper_async_check(dev, state);
11161 
11162 		/*
11163 		 * Skip the remaining global validation if this is an async
11164 		 * update. Cursor updates can be done without affecting
11165 		 * state or bandwidth calcs and this avoids the performance
11166 		 * penalty of locking the private state object and
11167 		 * allocating a new dc_state.
11168 		 */
11169 		if (state->async_update)
11170 			return 0;
11171 	}
11172 
	/* Check scaling and underscan changes */
	/*
	 * TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
11176 	 * decide how to handle.
11177 	 */
11178 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11179 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11180 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11181 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11182 
11183 		/* Skip any modesets/resets */
11184 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11185 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11186 			continue;
11187 
		/* Skip anything that is not a scaling or underscan change */
11189 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11190 			continue;
11191 
11192 		lock_and_validation_needed = true;
11193 	}
11194 
11195 #if defined(CONFIG_DRM_AMD_DC_DCN)
11196 	/* set the slot info for each mst_state based on the link encoding format */
11197 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11198 		struct amdgpu_dm_connector *aconnector;
11199 		struct drm_connector *connector;
11200 		struct drm_connector_list_iter iter;
11201 		u8 link_coding_cap;
11202 
		if (!mgr->mst_state)
11204 			continue;
11205 
11206 		drm_connector_list_iter_begin(dev, &iter);
11207 		drm_for_each_connector_iter(connector, &iter) {
11208 			int id = connector->index;
11209 
11210 			if (id == mst_state->mgr->conn_base_id) {
11211 				aconnector = to_amdgpu_dm_connector(connector);
11212 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11213 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11214 
11215 				break;
11216 			}
11217 		}
		drm_connector_list_iter_end(&iter);
	}
11221 #endif
11222 	/**
11223 	 * Streams and planes are reset when there are changes that affect
11224 	 * bandwidth. Anything that affects bandwidth needs to go through
11225 	 * DC global validation to ensure that the configuration can be applied
11226 	 * to hardware.
11227 	 *
11228 	 * We have to currently stall out here in atomic_check for outstanding
11229 	 * commits to finish in this case because our IRQ handlers reference
11230 	 * DRM state directly - we can end up disabling interrupts too early
11231 	 * if we don't.
11232 	 *
11233 	 * TODO: Remove this stall and drop DM state private objects.
11234 	 */
11235 	if (lock_and_validation_needed) {
11236 		ret = dm_atomic_get_state(state, &dm_state);
11237 		if (ret) {
11238 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11239 			goto fail;
11240 		}
11241 
11242 		ret = do_aquire_global_lock(dev, state);
11243 		if (ret) {
11244 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11245 			goto fail;
11246 		}
11247 
11248 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}
11253 
11254 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11255 		if (ret) {
11256 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11257 			goto fail;
11258 		}
11259 #endif
11260 
11261 		/*
11262 		 * Perform validation of MST topology in the state:
11263 		 * We need to perform MST atomic check before calling
11264 		 * dc_validate_global_state(), or there is a chance
11265 		 * to get stuck in an infinite loop and hang eventually.
11266 		 */
11267 		ret = drm_dp_mst_atomic_check(state);
11268 		if (ret) {
11269 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11270 			goto fail;
11271 		}
11272 		status = dc_validate_global_state(dc, dm_state->context, true);
11273 		if (status != DC_OK) {
11274 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11275 				       dc_status_to_str(status), status);
11276 			ret = -EINVAL;
11277 			goto fail;
11278 		}
11279 	} else {
11280 		/*
11281 		 * The commit is a fast update. Fast updates shouldn't change
11282 		 * the DC context, affect global validation, and can have their
11283 		 * commit work done in parallel with other commits not touching
11284 		 * the same resource. If we have a new DC context as part of
11285 		 * the DM atomic state from validation we need to free it and
11286 		 * retain the existing one instead.
11287 		 *
11288 		 * Furthermore, since the DM atomic state only contains the DC
11289 		 * context and can safely be annulled, we can free the state
11290 		 * and clear the associated private object now to free
11291 		 * some memory and avoid a possible use-after-free later.
11292 		 */
11293 
11294 		for (i = 0; i < state->num_private_objs; i++) {
11295 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11296 
11297 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
11299 
11300 				dm_atomic_destroy_state(obj,
11301 						state->private_objs[i].state);
11302 
11303 				/* If i is not at the end of the array then the
11304 				 * last element needs to be moved to where i was
11305 				 * before the array can safely be truncated.
11306 				 */
11307 				if (i != j)
11308 					state->private_objs[i] =
11309 						state->private_objs[j];
11310 
11311 				state->private_objs[j].ptr = NULL;
11312 				state->private_objs[j].state = NULL;
11313 				state->private_objs[j].old_state = NULL;
11314 				state->private_objs[j].new_state = NULL;
11315 
11316 				state->num_private_objs = j;
11317 				break;
11318 			}
11319 		}
11320 	}
11321 
11322 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11324 		struct dm_crtc_state *dm_new_crtc_state =
11325 			to_dm_crtc_state(new_crtc_state);
11326 
11327 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11328 							 UPDATE_TYPE_FULL :
11329 							 UPDATE_TYPE_FAST;
11330 	}
11331 
11332 	/* Must be success */
11333 	WARN_ON(ret);
11334 
11335 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11336 
11337 	return ret;
11338 
11339 fail:
11340 	if (ret == -EDEADLK)
11341 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11342 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11343 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11344 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11346 
11347 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11348 
11349 	return ret;
11350 }
11351 
11352 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11353 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11354 {
11355 	uint8_t dpcd_data;
11356 	bool capable = false;
11357 
11358 	if (amdgpu_dm_connector->dc_link &&
11359 		dm_helpers_dp_read_dpcd(
11360 				NULL,
11361 				amdgpu_dm_connector->dc_link,
11362 				DP_DOWN_STREAM_PORT_COUNT,
11363 				&dpcd_data,
11364 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11366 	}
11367 
11368 	return capable;
11369 }
11370 
11371 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11372 		unsigned int offset,
11373 		unsigned int total_length,
11374 		uint8_t *data,
11375 		unsigned int length,
11376 		struct amdgpu_hdmi_vsdb_info *vsdb)
11377 {
11378 	bool res;
11379 	union dmub_rb_cmd cmd;
11380 	struct dmub_cmd_send_edid_cea *input;
11381 	struct dmub_cmd_edid_cea_output *output;
11382 
11383 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11384 		return false;
11385 
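	/* Build a DMUB EDID_CEA command carrying one chunk of the CEA block */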
11386 	memset(&cmd, 0, sizeof(cmd));
11387 
11388 	input = &cmd.edid_cea.data.input;
11389 
11390 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11391 	cmd.edid_cea.header.sub_type = 0;
11392 	cmd.edid_cea.header.payload_bytes =
11393 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11394 	input->offset = offset;
11395 	input->length = length;
11396 	input->cea_total_length = total_length;
11397 	memcpy(input->payload, data, length);
11398 
11399 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11400 	if (!res) {
11401 		DRM_ERROR("EDID CEA parser failed\n");
11402 		return false;
11403 	}
11404 
11405 	output = &cmd.edid_cea.data.output;
11406 
11407 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11408 		if (!output->ack.success) {
11409 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11410 					output->ack.offset);
11411 		}
11412 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11413 		if (!output->amd_vsdb.vsdb_found)
11414 			return false;
11415 
11416 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11417 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11418 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11419 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11420 	} else {
11421 		DRM_WARN("Unknown EDID CEA parser results\n");
11422 		return false;
11423 	}
11424 
11425 	return true;
11426 }
11427 
11428 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11429 		uint8_t *edid_ext, int len,
11430 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11431 {
11432 	int i;
11433 
11434 	/* send extension block to DMCU for parsing */
11435 	for (i = 0; i < len; i += 8) {
11436 		bool res;
11437 		int offset;
11438 
		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
11444 			/* EDID block sent completed, expect result */
11445 			int version, min_rate, max_rate;
11446 
11447 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11448 			if (res) {
11449 				/* amd vsdb found */
11450 				vsdb_info->freesync_supported = 1;
11451 				vsdb_info->amd_vsdb_version = version;
11452 				vsdb_info->min_refresh_rate_hz = min_rate;
11453 				vsdb_info->max_refresh_rate_hz = max_rate;
11454 				return true;
11455 			}
11456 			/* not amd vsdb */
11457 			return false;
11458 		}
11459 
		/* check for ack */
11461 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11462 		if (!res)
11463 			return false;
11464 	}
11465 
11466 	return false;
11467 }
11468 
11469 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11470 		uint8_t *edid_ext, int len,
11471 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11472 {
11473 	int i;
11474 
	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11478 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11479 			return false;
11480 	}
11481 
11482 	return vsdb_info->freesync_supported;
11483 }
11484 
11485 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11486 		uint8_t *edid_ext, int len,
11487 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11488 {
11489 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11490 
11491 	if (adev->dm.dmub_srv)
11492 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11493 	else
11494 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11495 }
11496 
11497 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11498 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11499 {
11500 	uint8_t *edid_ext = NULL;
11501 	int i;
11502 	bool valid_vsdb_found = false;
11503 
11504 	/*----- drm_find_cea_extension() -----*/
11505 	/* No EDID or EDID extensions */
11506 	if (edid == NULL || edid->extensions == 0)
11507 		return -ENODEV;
11508 
11509 	/* Find CEA extension */
11510 	for (i = 0; i < edid->extensions; i++) {
11511 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11512 		if (edid_ext[0] == CEA_EXT)
11513 			break;
11514 	}
11515 
11516 	if (i == edid->extensions)
11517 		return -ENODEV;
11518 
11519 	/*----- cea_db_offsets() -----*/
11520 	if (edid_ext[0] != CEA_EXT)
11521 		return -ENODEV;
11522 
11523 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11524 
11525 	return valid_vsdb_found ? i : -ENODEV;
11526 }
11527 
11528 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11529 					struct edid *edid)
11530 {
11531 	int i = 0;
11532 	struct detailed_timing *timing;
11533 	struct detailed_non_pixel *data;
11534 	struct detailed_data_monitor_range *range;
11535 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11536 			to_amdgpu_dm_connector(connector);
11537 	struct dm_connector_state *dm_con_state = NULL;
11538 	struct dc_sink *sink;
11539 
11540 	struct drm_device *dev = connector->dev;
11541 	struct amdgpu_device *adev = drm_to_adev(dev);
11542 	bool freesync_capable = false;
11543 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11544 
11545 	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
11547 		goto update;
11548 	}
11549 
11550 	sink = amdgpu_dm_connector->dc_sink ?
11551 		amdgpu_dm_connector->dc_sink :
11552 		amdgpu_dm_connector->dc_em_sink;
11553 
11554 	if (!edid || !sink) {
11555 		dm_con_state = to_dm_connector_state(connector->state);
11556 
11557 		amdgpu_dm_connector->min_vfreq = 0;
11558 		amdgpu_dm_connector->max_vfreq = 0;
11559 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11560 		connector->display_info.monitor_range.min_vfreq = 0;
11561 		connector->display_info.monitor_range.max_vfreq = 0;
11562 		freesync_capable = false;
11563 
11564 		goto update;
11565 	}
11566 
11567 	dm_con_state = to_dm_connector_state(connector->state);
11568 
11569 	if (!adev->dm.freesync_module)
11570 		goto update;
11571 
	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    sink->sink_signal == SIGNAL_TYPE_EDP) {
11575 		bool edid_check_required = false;
11576 
11577 		if (edid) {
11578 			edid_check_required = is_dp_capable_without_timing_msa(
11579 						adev->dm.dc,
11580 						amdgpu_dm_connector);
11581 		}
11582 
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
11585 			for (i = 0; i < 4; i++) {
11586 
11587 				timing	= &edid->detailed_timings[i];
11588 				data	= &timing->data.other_data;
11589 				range	= &data->data.range;
11590 				/*
11591 				 * Check if monitor has continuous frequency mode
11592 				 */
11593 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11594 					continue;
				/*
				 * Only the "range limits only" flag (flags == 1) is
				 * handled: it indicates no additional timing
				 * information is provided. Default GTF, GTF secondary
				 * curve and CVT descriptors are not supported.
				 */
11601 				if (range->flags != 1)
11602 					continue;
11603 
11604 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11605 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11606 				amdgpu_dm_connector->pixel_clock_mhz =
11607 					range->pixel_clock_mhz * 10;
11608 
11609 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11610 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11611 
11612 				break;
11613 			}
11614 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11620 		}
11621 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11622 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
11627 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11628 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11629 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11630 				freesync_capable = true;
11631 
11632 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11633 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11634 		}
11635 	}
11636 
11637 update:
11638 	if (dm_con_state)
11639 		dm_con_state->freesync_capable = freesync_capable;
11640 
11641 	if (connector->vrr_capable_property)
11642 		drm_connector_set_vrr_capable_property(connector,
11643 						       freesync_capable);
11644 }
11645 
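/*
 * Apply the force_timing_sync setting to every stream in the current DC
 * state and retrigger CRTC synchronization.
 */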
11646 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11647 {
11648 	struct amdgpu_device *adev = drm_to_adev(dev);
11649 	struct dc *dc = adev->dm.dc;
11650 	int i;
11651 
11652 	mutex_lock(&adev->dm.dc_lock);
11653 	if (dc->current_state) {
11654 		for (i = 0; i < dc->current_state->stream_count; ++i)
11655 			dc->current_state->streams[i]
11656 				->triggered_crtc_reset.enabled =
11657 				adev->dm.force_timing_sync;
11658 
11659 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11660 		dc_trigger_sync(dc, dc->current_state);
11661 	}
11662 	mutex_unlock(&adev->dm.dc_lock);
11663 }
11664 
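/* Register write helper for DC: writes through CGS and records a trace event. */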
11665 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11666 		       uint32_t value, const char *func_name)
11667 {
11668 #ifdef DM_CHECK_ADDR_0
11669 	if (address == 0) {
		DC_ERR("invalid register write. address = 0\n");
11671 		return;
11672 	}
11673 #endif
11674 	cgs_write_register(ctx->cgs_device, address, value);
11675 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11676 }
11677 
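/* Register read helper for DC: reads through CGS and records a trace event. */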
11678 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11679 			  const char *func_name)
11680 {
11681 	uint32_t value;
11682 #ifdef DM_CHECK_ADDR_0
11683 	if (address == 0) {
11684 		DC_ERR("invalid register read; address = 0\n");
11685 		return 0;
11686 	}
11687 #endif
11688 
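	/*
	 * Register reads are not supported while a DMUB register offload
	 * sequence is being gathered (unless it is collecting burst writes);
	 * treat this as a programming error and return 0.
	 */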
11689 	if (ctx->dmub_srv &&
11690 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11691 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11692 		ASSERT(false);
11693 		return 0;
11694 	}
11695 
11696 	value = cgs_read_register(ctx->cgs_device, address);
11697 
11698 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11699 
11700 	return value;
11701 }
11702 
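/*
 * Translate the completion status of an asynchronous DMUB access into the
 * synchronous return value and operation_result expected by the caller:
 * the AUX reply length for AUX commands and 0 for SET_CONFIG on success,
 * -1 on failure.
 */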
11703 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11704 						struct dc_context *ctx,
11705 						uint8_t status_type,
11706 						uint32_t *operation_result)
11707 {
11708 	struct amdgpu_device *adev = ctx->driver_context;
11709 	int return_status = -1;
11710 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11711 
11712 	if (is_cmd_aux) {
11713 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11714 			return_status = p_notify->aux_reply.length;
11715 			*operation_result = p_notify->result;
11716 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11717 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11718 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11719 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11720 		} else {
11721 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11722 		}
11723 	} else {
11724 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11725 			return_status = 0;
11726 			*operation_result = p_notify->sc_status;
11727 		} else {
11728 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11729 		}
11730 	}
11731 
11732 	return return_status;
11733 }
11734 
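/*
 * Submit an AUX or SET_CONFIG request to the DMUB firmware and wait (up to
 * 10 seconds) for the asynchronous reply before translating it into a
 * synchronous result for the caller.
 */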
11735 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11736 	unsigned int link_index, void *cmd_payload, void *operation_result)
11737 {
11738 	struct amdgpu_device *adev = ctx->driver_context;
11739 	int ret = 0;
11740 
	if (is_cmd_aux) {
		if (!dc_process_dmub_aux_transfer_async(ctx->dc,
				link_index, (struct aux_payload *)cmd_payload))
			return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_FAIL,
					(uint32_t *)operation_result);
11744 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11745 					(struct set_config_cmd_payload *)cmd_payload,
11746 					adev->dm.dmub_notify)) {
11747 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11748 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11749 					(uint32_t *)operation_result);
11750 	}
11751 
11752 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11753 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout() timed out!");
11755 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11756 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11757 				(uint32_t *)operation_result);
11758 	}
11759 
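	/* On an AUX ACK, copy the reply data back into the caller's payload */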
11760 	if (is_cmd_aux) {
11761 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11762 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11763 
11764 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11765 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11766 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11767 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11768 				       adev->dm.dmub_notify->aux_reply.length);
11769 			}
11770 		}
11771 	}
11772 
11773 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11774 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11775 			(uint32_t *)operation_result);
11776 }
11777 
11778 /*
11779  * Check whether seamless boot is supported.
11780  *
11781  * So far we only support seamless boot on CHIP_VANGOGH.
11782  * If everything goes well, we may consider expanding
11783  * seamless boot to other ASICs.
11784  */
11785 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11786 {
11787 	switch (adev->asic_type) {
11788 	case CHIP_VANGOGH:
11789 		if (!adev->mman.keep_stolen_vga_memory)
11790 			return true;
11791 		break;
11792 	default:
11793 		break;
11794 	}
11795 
11796 	return false;
11797 }
11798