xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision f066af882b3755c5cdd2574e860433750c6bce1e)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39 
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51 
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59 
60 #include "ivsrcid/ivsrcid_vislands30.h"
61 
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69 
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
134 
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137 	switch (link->dpcd_caps.dongle_type) {
138 	case DISPLAY_DONGLE_NONE:
139 		return DRM_MODE_SUBCONNECTOR_Native;
140 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 		return DRM_MODE_SUBCONNECTOR_VGA;
142 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 		return DRM_MODE_SUBCONNECTOR_DVID;
145 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 		return DRM_MODE_SUBCONNECTOR_HDMIA;
148 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 	default:
150 		return DRM_MODE_SUBCONNECTOR_Unknown;
151 	}
152 }
153 
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156 	struct dc_link *link = aconnector->dc_link;
157 	struct drm_connector *connector = &aconnector->base;
158 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 
160 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161 		return;
162 
163 	if (aconnector->dc_sink)
164 		subconnector = get_subconnector_type(link);
165 
166 	drm_object_property_set_value(&connector->base,
167 			connector->dev->mode_config.dp_subconnector_property,
168 			subconnector);
169 }
170 
171 /*
172  * initializes drm_device display-related structures, based on the information
173  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
174  * drm_encoder and drm_mode_config.
175  *
176  * Returns 0 on success
177  */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 				struct drm_plane *plane,
184 				unsigned long possible_crtcs,
185 				const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 			       struct drm_plane *plane,
188 			       uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
191 				    uint32_t link_index,
192 				    struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 				  struct amdgpu_encoder *aencoder,
195 				  uint32_t link_index);
196 
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 				  struct drm_atomic_state *state);
203 
204 static void handle_cursor_update(struct drm_plane *plane,
205 				 struct drm_plane_state *old_plane_state);
206 
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212 
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
215 
216 static bool
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218 				 struct drm_crtc_state *new_crtc_state);
219 /*
220  * dm_vblank_get_counter
221  *
222  * @brief
223  * Get counter for number of vertical blanks
224  *
225  * @param
226  * struct amdgpu_device *adev - [in] desired amdgpu device
227  * int crtc - [in] which CRTC to get the counter from
228  *
229  * @return
230  * Counter for vertical blanks
231  */
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
233 {
234 	struct amdgpu_crtc *acrtc;
235 
236 	if (crtc >= adev->mode_info.num_crtc)
237 		return 0;
238 
239 	acrtc = adev->mode_info.crtcs[crtc];
240 	if (acrtc->dm_irq_params.stream == NULL) {
241 		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
242 			  crtc);
243 		return 0;
244 	}
245 
246 	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
247 }
248 
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250 				  u32 *vbl, u32 *position)
251 {
252 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
253 
254 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
255 		return -EINVAL;
256 	else {
257 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258 
259 		if (acrtc->dm_irq_params.stream == NULL) {
260 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261 				  crtc);
262 			return 0;
263 		}
264 
265 		/*
266 		 * TODO rework base driver to use values directly.
267 		 * for now parse it back into reg-format
268 		 */
269 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
270 					 &v_blank_start,
271 					 &v_blank_end,
272 					 &h_position,
273 					 &v_position);
274 
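		/* Pack register-style: vertical position in the low 16 bits,
		 * horizontal in the high 16 bits; vblank start/end use the
		 * same packing.
		 */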
275 		*position = v_position | (h_position << 16);
276 		*vbl = v_blank_start | (v_blank_end << 16);
277 	}
278 
279 	return 0;
280 }
281 
282 static bool dm_is_idle(void *handle)
283 {
284 	/* XXX todo */
285 	return true;
286 }
287 
288 static int dm_wait_for_idle(void *handle)
289 {
290 	/* XXX todo */
291 	return 0;
292 }
293 
294 static bool dm_check_soft_reset(void *handle)
295 {
296 	return false;
297 }
298 
299 static int dm_soft_reset(void *handle)
300 {
301 	/* XXX todo */
302 	return 0;
303 }
304 
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
307 		     int otg_inst)
308 {
309 	struct drm_device *dev = adev_to_drm(adev);
310 	struct drm_crtc *crtc;
311 	struct amdgpu_crtc *amdgpu_crtc;
312 
313 	if (otg_inst == -1) {
314 		WARN_ON(1);
315 		return adev->mode_info.crtcs[0];
316 	}
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
345 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: common IRQ parameters, used to look up the flipped CRTC
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
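	/* Pageflip IRQ sources are laid out per OTG instance, so subtracting
	 * the IRQ_TYPE_PFLIP base recovers which OTG (and hence CRTC) the
	 * interrupt belongs to.
	 */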
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
381 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
382 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	/* Unexpected: flip completed with no event queued. */
396 	WARN_ON(!e);
397 
398 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
399 
400 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
401 	if (!vrr_active ||
402 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403 				      &v_blank_end, &hpos, &vpos) ||
404 	    (vpos < v_blank_start)) {
405 		/* Update to correct count and vblank timestamp if racing with
406 		 * vblank irq. This also updates to the correct vblank timestamp
407 		 * even in VRR mode, as scanout is past the front-porch atm.
408 		 */
409 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
410 
411 		/* Wake up userspace by sending the pageflip event with proper
412 		 * count and timestamp of vblank of flip completion.
413 		 */
414 		if (e) {
415 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
416 
417 			/* Event sent, so done with vblank for this flip */
418 			drm_crtc_vblank_put(&amdgpu_crtc->base);
419 		}
420 	} else if (e) {
421 		/* VRR active and inside front-porch: vblank count and
422 		 * timestamp for pageflip event will only be up to date after
423 		 * drm_crtc_handle_vblank() has been executed from late vblank
424 		 * irq handler after start of back-porch (vline 0). We queue the
425 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
426 		 * updated timestamp and count, once it runs after us.
427 		 *
428 		 * We need to open-code this instead of using the helper
429 		 * drm_crtc_arm_vblank_event(), as that helper would
430 		 * call drm_crtc_accurate_vblank_count(), which we must
431 		 * not call in VRR mode while we are in front-porch!
432 		 */
433 
434 		/* sequence will be replaced by real count during send-out. */
435 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436 		e->pipe = amdgpu_crtc->crtc_id;
437 
438 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
439 		e = NULL;
440 	}
441 
442 	/* Keep track of vblank of this flip for flip throttling. We use the
443 	 * cooked hw counter, as that one incremented at start of this vblank
444 	 * of pageflip completion, so last_flip_vblank is the forbidden count
445 	 * for queueing new pageflips if vsync + VRR is enabled.
446 	 */
447 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
448 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
449 
450 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
452 
453 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
455 		     vrr_active, (int) !e);
456 }
457 
458 static void dm_vupdate_high_irq(void *interrupt_params)
459 {
460 	struct common_irq_params *irq_params = interrupt_params;
461 	struct amdgpu_device *adev = irq_params->adev;
462 	struct amdgpu_crtc *acrtc;
463 	struct drm_device *drm_dev;
464 	struct drm_vblank_crtc *vblank;
465 	ktime_t frame_duration_ns, previous_timestamp;
466 	unsigned long flags;
467 	int vrr_active;
468 
469 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
470 
471 	if (acrtc) {
472 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
473 		drm_dev = acrtc->base.dev;
474 		vblank = &drm_dev->vblank[acrtc->base.index];
475 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
476 		frame_duration_ns = vblank->time - previous_timestamp;
477 
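		/* With a valid duration, trace the measured refresh rate:
		 * NSEC_PER_SEC / frame_duration_ns gives the rate in Hz.
		 */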
478 		if (frame_duration_ns > 0) {
479 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
480 						frame_duration_ns,
481 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
482 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
483 		}
484 
485 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
486 			      acrtc->crtc_id,
487 			      vrr_active);
488 
489 		/* Core vblank handling is done here after end of front-porch in
490 		 * vrr mode, as vblank timestamping will give valid results
491 		 * while now done after front-porch. This will also deliver
492 		 * page-flip completion events that have been queued to us
493 		 * if a pageflip happened inside front-porch.
494 		 */
495 		if (vrr_active) {
496 			drm_crtc_handle_vblank(&acrtc->base);
497 
498 			/* BTR processing for pre-DCE12 ASICs */
499 			if (acrtc->dm_irq_params.stream &&
500 			    adev->family < AMDGPU_FAMILY_AI) {
501 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
502 				mod_freesync_handle_v_update(
503 				    adev->dm.freesync_module,
504 				    acrtc->dm_irq_params.stream,
505 				    &acrtc->dm_irq_params.vrr_params);
506 
507 				dc_stream_adjust_vmin_vmax(
508 				    adev->dm.dc,
509 				    acrtc->dm_irq_params.stream,
510 				    &acrtc->dm_irq_params.vrr_params.adjust);
511 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
512 			}
513 		}
514 	}
515 }
516 
517 /**
518  * dm_crtc_high_irq() - Handles CRTC interrupt
519  * @interrupt_params: used for determining the CRTC instance
520  *
521  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
522  * event handler.
523  */
524 static void dm_crtc_high_irq(void *interrupt_params)
525 {
526 	struct common_irq_params *irq_params = interrupt_params;
527 	struct amdgpu_device *adev = irq_params->adev;
528 	struct amdgpu_crtc *acrtc;
529 	unsigned long flags;
530 	int vrr_active;
531 
532 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
533 	if (!acrtc)
534 		return;
535 
536 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
537 
538 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
539 		      vrr_active, acrtc->dm_irq_params.active_planes);
540 
541 	/*
542 	 * Core vblank handling at start of front-porch is only possible
543 	 * in non-vrr mode, as only there vblank timestamping will give
544 	 * valid results while done in front-porch. Otherwise defer it
545 	 * to dm_vupdate_high_irq after end of front-porch.
546 	 */
547 	if (!vrr_active)
548 		drm_crtc_handle_vblank(&acrtc->base);
549 
550 	/*
551 	 * Following stuff must happen at start of vblank, for crc
552 	 * computation and below-the-range btr support in vrr mode.
553 	 */
554 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
555 
556 	/* BTR updates need to happen before VUPDATE on Vega and above. */
557 	if (adev->family < AMDGPU_FAMILY_AI)
558 		return;
559 
560 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
561 
562 	if (acrtc->dm_irq_params.stream &&
563 	    acrtc->dm_irq_params.vrr_params.supported &&
564 	    acrtc->dm_irq_params.freesync_config.state ==
565 		    VRR_STATE_ACTIVE_VARIABLE) {
566 		mod_freesync_handle_v_update(adev->dm.freesync_module,
567 					     acrtc->dm_irq_params.stream,
568 					     &acrtc->dm_irq_params.vrr_params);
569 
570 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
571 					   &acrtc->dm_irq_params.vrr_params.adjust);
572 	}
573 
574 	/*
575 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
576 	 * In that case, pageflip completion interrupts won't fire and pageflip
577 	 * completion events won't get delivered. Prevent this by sending
578 	 * pending pageflip events from here if a flip is still pending.
579 	 *
580 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
581 	 * avoid race conditions between flip programming and completion,
582 	 * which could cause too early flip completion events.
583 	 */
584 	if (adev->family >= AMDGPU_FAMILY_RV &&
585 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
586 	    acrtc->dm_irq_params.active_planes == 0) {
587 		if (acrtc->event) {
588 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
589 			acrtc->event = NULL;
590 			drm_crtc_vblank_put(&acrtc->base);
591 		}
592 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
593 	}
594 
595 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
596 }
597 
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt_params: interrupt parameters
603  *
604  * Used to set the CRC window and read out the CRC value at vertical line 0.
605  */
606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
607 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
608 {
609 	struct common_irq_params *irq_params = interrupt_params;
610 	struct amdgpu_device *adev = irq_params->adev;
611 	struct amdgpu_crtc *acrtc;
612 
613 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
614 
615 	if (!acrtc)
616 		return;
617 
618 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
619 }
620 #endif
621 #endif
622 
623 static int dm_set_clockgating_state(void *handle,
624 		  enum amd_clockgating_state state)
625 {
626 	return 0;
627 }
628 
629 static int dm_set_powergating_state(void *handle,
630 		  enum amd_powergating_state state)
631 {
632 	return 0;
633 }
634 
635 /* Prototypes of private functions */
636 static int dm_early_init(void *handle);
637 
638 /* Allocate memory for FBC compressed data */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
640 {
641 	struct drm_device *dev = connector->dev;
642 	struct amdgpu_device *adev = drm_to_adev(dev);
643 	struct dm_compressor_info *compressor = &adev->dm.compressor;
644 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645 	struct drm_display_mode *mode;
646 	unsigned long max_size = 0;
647 
648 	if (adev->dm.dc->fbc_compressor == NULL)
649 		return;
650 
651 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
652 		return;
653 
654 	if (compressor->bo_ptr)
655 		return;
656 
658 	list_for_each_entry(mode, &connector->modes, head) {
659 		if (max_size < mode->htotal * mode->vtotal)
660 			max_size = mode->htotal * mode->vtotal;
661 	}
662 
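	/* Size one compressed buffer for the largest mode, assuming 4 bytes
	 * per pixel.
	 */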
663 	if (max_size) {
664 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666 			    &compressor->gpu_addr, &compressor->cpu_addr);
667 
668 		if (r) {
669 			DRM_ERROR("DM: Failed to initialize FBC\n");
670 		} else {
671 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672 			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
673 		}
674 	}
675 }
678 
679 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
680 					  int pipe, bool *enabled,
681 					  unsigned char *buf, int max_bytes)
682 {
683 	struct drm_device *dev = dev_get_drvdata(kdev);
684 	struct amdgpu_device *adev = drm_to_adev(dev);
685 	struct drm_connector *connector;
686 	struct drm_connector_list_iter conn_iter;
687 	struct amdgpu_dm_connector *aconnector;
688 	int ret = 0;
689 
690 	*enabled = false;
691 
692 	mutex_lock(&adev->dm.audio_lock);
693 
694 	drm_connector_list_iter_begin(dev, &conn_iter);
695 	drm_for_each_connector_iter(connector, &conn_iter) {
696 		aconnector = to_amdgpu_dm_connector(connector);
697 		if (aconnector->audio_inst != port)
698 			continue;
699 
700 		*enabled = true;
701 		ret = drm_eld_size(connector->eld);
702 		memcpy(buf, connector->eld, min(max_bytes, ret));
703 
704 		break;
705 	}
706 	drm_connector_list_iter_end(&conn_iter);
707 
708 	mutex_unlock(&adev->dm.audio_lock);
709 
710 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
711 
712 	return ret;
713 }
714 
715 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
716 	.get_eld = amdgpu_dm_audio_component_get_eld,
717 };
718 
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720 				       struct device *hda_kdev, void *data)
721 {
722 	struct drm_device *dev = dev_get_drvdata(kdev);
723 	struct amdgpu_device *adev = drm_to_adev(dev);
724 	struct drm_audio_component *acomp = data;
725 
726 	acomp->ops = &amdgpu_dm_audio_component_ops;
727 	acomp->dev = kdev;
728 	adev->dm.audio_component = acomp;
729 
730 	return 0;
731 }
732 
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734 					  struct device *hda_kdev, void *data)
735 {
736 	struct drm_device *dev = dev_get_drvdata(kdev);
737 	struct amdgpu_device *adev = drm_to_adev(dev);
738 	struct drm_audio_component *acomp = data;
739 
740 	acomp->ops = NULL;
741 	acomp->dev = NULL;
742 	adev->dm.audio_component = NULL;
743 }
744 
745 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
746 	.bind	= amdgpu_dm_audio_component_bind,
747 	.unbind	= amdgpu_dm_audio_component_unbind,
748 };
749 
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
751 {
752 	int i, ret;
753 
754 	if (!amdgpu_audio)
755 		return 0;
756 
757 	adev->mode_info.audio.enabled = true;
758 
759 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
760 
761 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762 		adev->mode_info.audio.pin[i].channels = -1;
763 		adev->mode_info.audio.pin[i].rate = -1;
764 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
765 		adev->mode_info.audio.pin[i].status_bits = 0;
766 		adev->mode_info.audio.pin[i].category_code = 0;
767 		adev->mode_info.audio.pin[i].connected = false;
768 		adev->mode_info.audio.pin[i].id =
769 			adev->dm.dc->res_pool->audios[i]->inst;
770 		adev->mode_info.audio.pin[i].offset = 0;
771 	}
772 
773 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
774 	if (ret < 0)
775 		return ret;
776 
777 	adev->dm.audio_registered = true;
778 
779 	return 0;
780 }
781 
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
783 {
784 	if (!amdgpu_audio)
785 		return;
786 
787 	if (!adev->mode_info.audio.enabled)
788 		return;
789 
790 	if (adev->dm.audio_registered) {
791 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792 		adev->dm.audio_registered = false;
793 	}
794 
795 	/* TODO: Disable audio? */
796 
797 	adev->mode_info.audio.enabled = false;
798 }
799 
800 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
801 {
802 	struct drm_audio_component *acomp = adev->dm.audio_component;
803 
804 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
806 
807 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
808 						 pin, -1);
809 	}
810 }
811 
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
813 {
814 	const struct dmcub_firmware_header_v1_0 *hdr;
815 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
818 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819 	struct abm *abm = adev->dm.dc->res_pool->abm;
820 	struct dmub_srv_hw_params hw_params;
821 	enum dmub_status status;
822 	const unsigned char *fw_inst_const, *fw_bss_data;
823 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
824 	bool has_hw_support;
825 
826 	if (!dmub_srv)
827 		/* DMUB isn't supported on the ASIC. */
828 		return 0;
829 
830 	if (!fb_info) {
831 		DRM_ERROR("No framebuffer info for DMUB service.\n");
832 		return -EINVAL;
833 	}
834 
835 	if (!dmub_fw) {
836 		/* Firmware required for DMUB support. */
837 		DRM_ERROR("No firmware provided for DMUB.\n");
838 		return -EINVAL;
839 	}
840 
841 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842 	if (status != DMUB_STATUS_OK) {
843 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
844 		return -EINVAL;
845 	}
846 
847 	if (!has_hw_support) {
848 		DRM_INFO("DMUB unsupported on ASIC\n");
849 		return 0;
850 	}
851 
852 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
853 
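	/* The ucode array in the blob is laid out as: PSP header, inst_const
	 * (code), PSP footer, then bss/data; carve out pointers to the two
	 * pieces that get copied into framebuffer windows below.
	 */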
854 	fw_inst_const = dmub_fw->data +
855 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
856 			PSP_HEADER_BYTES;
857 
858 	fw_bss_data = dmub_fw->data +
859 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860 		      le32_to_cpu(hdr->inst_const_bytes);
861 
862 	/* Copy firmware and bios info into FB memory. */
863 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
865 
866 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
867 
868 	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869 	 * amdgpu_ucode_init_single_fw will load the fw_inst_const part of the
870 	 * DMUB firmware into CW0; otherwise, the firmware backdoor load is
871 	 * done here by dm_dmub_hw_init.
872 	 */
873 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
875 				fw_inst_const_size);
876 	}
877 
878 	if (fw_bss_data_size)
879 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880 		       fw_bss_data, fw_bss_data_size);
881 
882 	/* Copy firmware bios info into FB memory. */
883 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
884 	       adev->bios_size);
885 
886 	/* Reset regions that need to be reset. */
887 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
889 
890 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
892 
893 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
895 
896 	/* Initialize hardware. */
897 	memset(&hw_params, 0, sizeof(hw_params));
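	/* fb_base is the GPU address where VRAM begins; fb_offset is the
	 * CPU-visible aperture base, letting DMUB relate the two views of
	 * framebuffer memory.
	 */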
898 	hw_params.fb_base = adev->gmc.fb_start;
899 	hw_params.fb_offset = adev->gmc.aper_base;
900 
901 	/* backdoor load firmware and trigger dmub running */
902 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903 		hw_params.load_inst_const = true;
904 
905 	if (dmcu)
906 		hw_params.psp_version = dmcu->psp_version;
907 
908 	for (i = 0; i < fb_info->num_fb; ++i)
909 		hw_params.fb[i] = &fb_info->fb[i];
910 
911 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
912 	if (status != DMUB_STATUS_OK) {
913 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
914 		return -EINVAL;
915 	}
916 
917 	/* Wait for firmware load to finish. */
918 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919 	if (status != DMUB_STATUS_OK)
920 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
921 
922 	/* Init DMCU and ABM if available. */
923 	if (dmcu && abm) {
924 		dmcu->funcs->dmcu_init(dmcu);
925 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
926 	}
927 
928 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929 	if (!adev->dm.dc->ctx->dmub_srv) {
930 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
931 		return -ENOMEM;
932 	}
933 
934 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935 		 adev->dm.dmcub_fw_version);
936 
937 	return 0;
938 }
939 
940 #define DMUB_TRACE_MAX_READ 64
941 static void dm_dmub_trace_high_irq(void *interrupt_params)
942 {
943 	struct common_irq_params *irq_params = interrupt_params;
944 	struct amdgpu_device *adev = irq_params->adev;
945 	struct amdgpu_display_manager *dm = &adev->dm;
946 	struct dmcub_trace_buf_entry entry = { 0 };
947 	uint32_t count = 0;
948 
949 	do {
950 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
951 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
952 							entry.param0, entry.param1);
953 
954 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
955 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
956 		} else
957 			break;
958 
959 		count++;
960 
961 	} while (count < DMUB_TRACE_MAX_READ);
962 
963 	ASSERT(count <= DMUB_TRACE_MAX_READ);
964 }
965 
966 #if defined(CONFIG_DRM_AMD_DC_DCN)
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
968 {
969 	uint64_t pt_base;
970 	uint32_t logical_addr_low;
971 	uint32_t logical_addr_high;
972 	uint32_t agp_base, agp_bot, agp_top;
973 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
974 
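	/* The system aperture registers are programmed in 256KB (1 << 18)
	 * units; the AGP aperture registers below use 16MB (1 << 24) units.
	 */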
975 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
977 
978 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
979 		/*
980 		 * Raven2 has a HW issue that it is unable to use the vram which
981 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
982 		 * workaround that increase system aperture high address (add 1)
983 		 * to get rid of the VM fault and hardware hang.
984 		 */
985 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
986 	else
987 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
988 
989 	agp_base = 0;
990 	agp_bot = adev->gmc.agp_start >> 24;
991 	agp_top = adev->gmc.agp_end >> 24;
992 
993 
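	/* GART page table addresses are 4KB-aligned (hence the >> 12); the
	 * high parts carry bits 47:44 of the 48-bit GPU address.
	 */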
994 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999 	page_table_base.low_part = lower_32_bits(pt_base);
1000 
1001 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1003 
1004 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1005 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1007 
1008 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1011 
1012 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1015 
1016 	pa_config->is_hvm_enabled = 0;
1017 
1018 }
1019 #endif
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
1021 static void event_mall_stutter(struct work_struct *work)
1022 {
1024 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025 	struct amdgpu_display_manager *dm = vblank_work->dm;
1026 
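	/* Refcount, under dc_lock, how many CRTCs have vblank IRQs enabled;
	 * idle optimizations (MALL) are only allowed once it reaches zero.
	 */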
1027 	mutex_lock(&dm->dc_lock);
1028 
1029 	if (vblank_work->enable)
1030 		dm->active_vblank_irq_count++;
1031 	else if (dm->active_vblank_irq_count)
1032 		dm->active_vblank_irq_count--;
1033 
1034 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1035 
1036 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1037 
1038 	mutex_unlock(&dm->dc_lock);
1039 }
1040 
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1042 {
1044 	int max_caps = dc->caps.max_links;
1045 	struct vblank_workqueue *vblank_work;
1046 	int i = 0;
1047 
1048 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1050 		kfree(vblank_work);
1051 		return NULL;
1052 	}
1053 
1054 	for (i = 0; i < max_caps; i++)
1055 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1056 
1057 	return vblank_work;
1058 }
1059 #endif
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1061 {
1062 	struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064 	struct dc_callback_init init_params;
1065 #endif
1066 	int r;
1067 
1068 	adev->dm.ddev = adev_to_drm(adev);
1069 	adev->dm.adev = adev;
1070 
1071 	/* Zero all the fields */
1072 	memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074 	memset(&init_params, 0, sizeof(init_params));
1075 #endif
1076 
1077 	mutex_init(&adev->dm.dc_lock);
1078 	mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080 	spin_lock_init(&adev->dm.vblank_lock);
1081 #endif
1082 
1083 	if (amdgpu_dm_irq_init(adev)) {
1084 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1085 		goto error;
1086 	}
1087 
1088 	init_data.asic_id.chip_family = adev->family;
1089 
1090 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1092 
1093 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1094 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095 	init_data.asic_id.atombios_base_address =
1096 		adev->mode_info.atom_context->bios;
1097 
1098 	init_data.driver = adev;
1099 
1100 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1101 
1102 	if (!adev->dm.cgs_device) {
1103 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1104 		goto error;
1105 	}
1106 
1107 	init_data.cgs_device = adev->dm.cgs_device;
1108 
1109 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1110 
1111 	switch (adev->asic_type) {
1112 	case CHIP_CARRIZO:
1113 	case CHIP_STONEY:
1114 	case CHIP_RAVEN:
1115 	case CHIP_RENOIR:
1116 		init_data.flags.gpu_vm_support = true;
1117 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118 			init_data.flags.disable_dmcu = true;
1119 		break;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1121 	case CHIP_VANGOGH:
1122 		init_data.flags.gpu_vm_support = true;
1123 		break;
1124 #endif
1125 	default:
1126 		break;
1127 	}
1128 
1129 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130 		init_data.flags.fbc_support = true;
1131 
1132 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133 		init_data.flags.multi_mon_pp_mclk_switch = true;
1134 
1135 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136 		init_data.flags.disable_fractional_pwm = true;
1137 
1138 	init_data.flags.power_down_display_on_boot = true;
1139 
1140 	INIT_LIST_HEAD(&adev->dm.da_list);
1141 	/* Display Core create. */
1142 	adev->dm.dc = dc_create(&init_data);
1143 
1144 	if (adev->dm.dc) {
1145 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1146 	} else {
1147 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1148 		goto error;
1149 	}
1150 
1151 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1154 	}
1155 
1156 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157 		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1158 
1159 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160 		adev->dm.dc->debug.disable_stutter = true;
1161 
1162 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163 		adev->dm.dc->debug.disable_dsc = true;
1164 
1165 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166 		adev->dm.dc->debug.disable_clock_gate = true;
1167 
1168 	r = dm_dmub_hw_init(adev);
1169 	if (r) {
1170 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1171 		goto error;
1172 	}
1173 
1174 	dc_hardware_init(adev->dm.dc);
1175 
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
1177 	if (adev->apu_flags) {
1178 		struct dc_phy_addr_space_config pa_config;
1179 
1180 		mmhub_read_system_context(adev, &pa_config);
1181 
1182 		// Call the DC init_memory func
1183 		dc_setup_system_context(adev->dm.dc, &pa_config);
1184 	}
1185 #endif
1186 
1187 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188 	if (!adev->dm.freesync_module)
1189 		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1190 	else
1191 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1192 				adev->dm.freesync_module);
1194 
1195 	amdgpu_dm_init_color_mod();
1196 
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198 	if (adev->dm.dc->caps.max_links > 0) {
1199 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1200 
1201 		if (!adev->dm.vblank_workqueue)
1202 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1203 		else
1204 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1205 	}
1206 #endif
1207 
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1211 
1212 		if (!adev->dm.hdcp_workqueue)
1213 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1214 		else
1215 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1216 
1217 		dc_init_callbacks(adev->dm.dc, &init_params);
1218 	}
1219 #endif
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1222 #endif
1223 	if (amdgpu_dm_initialize_drm_device(adev)) {
1224 		DRM_ERROR(
1225 		"amdgpu: failed to initialize sw for display support.\n");
1226 		goto error;
1227 	}
1228 
1229 	/* create fake encoders for MST */
1230 	dm_dp_create_fake_mst_encoders(adev);
1231 
1232 	/* TODO: Add_display_info? */
1233 
1234 	/* TODO use dynamic cursor width */
1235 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1237 
1238 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1239 		DRM_ERROR(
1240 		"amdgpu: failed to initialize vblank support.\n");
1241 		goto error;
1242 	}
1243 
1245 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1246 
1247 	return 0;
1248 error:
1249 	amdgpu_dm_fini(adev);
1250 
1251 	return -EINVAL;
1252 }
1253 
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1255 {
1256 	int i;
1257 
1258 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1260 	}
1261 
1262 	amdgpu_dm_audio_fini(adev);
1263 
1264 	amdgpu_dm_destroy_drm_device(&adev->dm);
1265 
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267 	if (adev->dm.crc_rd_wrk) {
1268 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269 		kfree(adev->dm.crc_rd_wrk);
1270 		adev->dm.crc_rd_wrk = NULL;
1271 	}
1272 #endif
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274 	if (adev->dm.hdcp_workqueue) {
1275 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276 		adev->dm.hdcp_workqueue = NULL;
1277 	}
1278 
1279 	if (adev->dm.dc)
1280 		dc_deinit_callbacks(adev->dm.dc);
1281 #endif
1282 
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284 	if (adev->dm.vblank_workqueue) {
1285 		adev->dm.vblank_workqueue->dm = NULL;
1286 		kfree(adev->dm.vblank_workqueue);
1287 		adev->dm.vblank_workqueue = NULL;
1288 	}
1289 #endif
1290 
1291 	if (adev->dm.dc->ctx->dmub_srv) {
1292 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293 		adev->dm.dc->ctx->dmub_srv = NULL;
1294 	}
1295 
1296 	if (adev->dm.dmub_bo)
1297 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298 				      &adev->dm.dmub_bo_gpu_addr,
1299 				      &adev->dm.dmub_bo_cpu_addr);
1300 
1301 	/* DC Destroy TODO: Replace destroy DAL */
1302 	if (adev->dm.dc)
1303 		dc_destroy(&adev->dm.dc);
1304 	/*
1305 	 * TODO: pageflip, vblank interrupt
1306 	 *
1307 	 * amdgpu_dm_irq_fini(adev);
1308 	 */
1309 
1310 	if (adev->dm.cgs_device) {
1311 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312 		adev->dm.cgs_device = NULL;
1313 	}
1314 	if (adev->dm.freesync_module) {
1315 		mod_freesync_destroy(adev->dm.freesync_module);
1316 		adev->dm.freesync_module = NULL;
1317 	}
1318 
1319 	mutex_destroy(&adev->dm.audio_lock);
1320 	mutex_destroy(&adev->dm.dc_lock);
1323 }
1324 
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1326 {
1327 	const char *fw_name_dmcu = NULL;
1328 	int r;
1329 	const struct dmcu_firmware_header_v1_0 *hdr;
1330 
1331 	switch (adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1333 	case CHIP_TAHITI:
1334 	case CHIP_PITCAIRN:
1335 	case CHIP_VERDE:
1336 	case CHIP_OLAND:
1337 #endif
1338 	case CHIP_BONAIRE:
1339 	case CHIP_HAWAII:
1340 	case CHIP_KAVERI:
1341 	case CHIP_KABINI:
1342 	case CHIP_MULLINS:
1343 	case CHIP_TONGA:
1344 	case CHIP_FIJI:
1345 	case CHIP_CARRIZO:
1346 	case CHIP_STONEY:
1347 	case CHIP_POLARIS11:
1348 	case CHIP_POLARIS10:
1349 	case CHIP_POLARIS12:
1350 	case CHIP_VEGAM:
1351 	case CHIP_VEGA10:
1352 	case CHIP_VEGA12:
1353 	case CHIP_VEGA20:
1354 	case CHIP_NAVI10:
1355 	case CHIP_NAVI14:
1356 	case CHIP_RENOIR:
1357 	case CHIP_SIENNA_CICHLID:
1358 	case CHIP_NAVY_FLOUNDER:
1359 	case CHIP_DIMGREY_CAVEFISH:
1360 	case CHIP_VANGOGH:
1361 		return 0;
1362 	case CHIP_NAVI12:
1363 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1364 		break;
1365 	case CHIP_RAVEN:
1366 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1370 		else
1371 			return 0;
1372 		break;
1373 	default:
1374 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1375 		return -EINVAL;
1376 	}
1377 
1378 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1380 		return 0;
1381 	}
1382 
1383 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1384 	if (r == -ENOENT) {
1385 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387 		adev->dm.fw_dmcu = NULL;
1388 		return 0;
1389 	}
1390 	if (r) {
1391 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1392 			fw_name_dmcu);
1393 		return r;
1394 	}
1395 
1396 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1397 	if (r) {
1398 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1399 			fw_name_dmcu);
1400 		release_firmware(adev->dm.fw_dmcu);
1401 		adev->dm.fw_dmcu = NULL;
1402 		return r;
1403 	}
1404 
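	/* The DMCU image is registered as two PSP ucodes: the ERAM payload
	 * (total ucode size minus the interrupt vectors) and the interrupt
	 * vector table (INTV), each padded to a page boundary.
	 */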
1405 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408 	adev->firmware.fw_size +=
1409 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1410 
1411 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413 	adev->firmware.fw_size +=
1414 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1415 
1416 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1417 
1418 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1419 
1420 	return 0;
1421 }
1422 
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425 	struct amdgpu_device *adev = ctx;
1426 
1427 	return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429 
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431 				     uint32_t value)
1432 {
1433 	struct amdgpu_device *adev = ctx;
1434 
1435 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437 
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440 	struct dmub_srv_create_params create_params;
1441 	struct dmub_srv_region_params region_params;
1442 	struct dmub_srv_region_info region_info;
1443 	struct dmub_srv_fb_params fb_params;
1444 	struct dmub_srv_fb_info *fb_info;
1445 	struct dmub_srv *dmub_srv;
1446 	const struct dmcub_firmware_header_v1_0 *hdr;
1447 	const char *fw_name_dmub;
1448 	enum dmub_asic dmub_asic;
1449 	enum dmub_status status;
1450 	int r;
1451 
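	/* Overall flow: pick the per-ASIC firmware, create the DMUB service,
	 * compute the region layout, back it with a single VRAM allocation,
	 * then derive the per-window framebuffer info from that allocation.
	 */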
1452 	switch (adev->asic_type) {
1453 	case CHIP_RENOIR:
1454 		dmub_asic = DMUB_ASIC_DCN21;
1455 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458 		break;
1459 	case CHIP_SIENNA_CICHLID:
1460 		dmub_asic = DMUB_ASIC_DCN30;
1461 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462 		break;
1463 	case CHIP_NAVY_FLOUNDER:
1464 		dmub_asic = DMUB_ASIC_DCN30;
1465 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466 		break;
1467 	case CHIP_VANGOGH:
1468 		dmub_asic = DMUB_ASIC_DCN301;
1469 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470 		break;
1471 	case CHIP_DIMGREY_CAVEFISH:
1472 		dmub_asic = DMUB_ASIC_DCN302;
1473 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474 		break;
1475 
1476 	default:
1477 		/* ASIC doesn't support DMUB. */
1478 		return 0;
1479 	}
1480 
1481 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482 	if (r) {
1483 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484 		return 0;
1485 	}
1486 
1487 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488 	if (r) {
1489 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490 		return 0;
1491 	}
1492 
1493 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1494 
1495 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1496 
1497 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1498 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1499 			AMDGPU_UCODE_ID_DMCUB;
1500 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1501 			adev->dm.dmub_fw;
1502 		adev->firmware.fw_size +=
1503 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1504 
1505 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1506 			 adev->dm.dmcub_fw_version);
1507 	}
1508 
1509 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510 	dmub_srv = adev->dm.dmub_srv;
1511 
1512 	if (!dmub_srv) {
1513 		DRM_ERROR("Failed to allocate DMUB service!\n");
1514 		return -ENOMEM;
1515 	}
1516 
1517 	memset(&create_params, 0, sizeof(create_params));
1518 	create_params.user_ctx = adev;
1519 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521 	create_params.asic = dmub_asic;
1522 
1523 	/* Create the DMUB service. */
1524 	status = dmub_srv_create(dmub_srv, &create_params);
1525 	if (status != DMUB_STATUS_OK) {
1526 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1527 		return -EINVAL;
1528 	}
1529 
1530 	/* Calculate the size of all the regions for the DMUB service. */
1531 	memset(&region_params, 0, sizeof(region_params));
1532 
1533 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536 	region_params.vbios_size = adev->bios_size;
1537 	region_params.fw_bss_data = region_params.bss_data_size ?
1538 		adev->dm.dmub_fw->data +
1539 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541 	region_params.fw_inst_const =
1542 		adev->dm.dmub_fw->data +
1543 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544 		PSP_HEADER_BYTES;
1545 
1546 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547 					   &region_info);
1548 
1549 	if (status != DMUB_STATUS_OK) {
1550 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551 		return -EINVAL;
1552 	}
1553 
1554 	/*
1555 	 * Allocate a framebuffer based on the total size of all the regions.
1556 	 * TODO: Move this into GART.
1557 	 */
1558 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560 				    &adev->dm.dmub_bo_gpu_addr,
1561 				    &adev->dm.dmub_bo_cpu_addr);
1562 	if (r)
1563 		return r;
1564 
1565 	/* Rebase the regions on the framebuffer address. */
1566 	memset(&fb_params, 0, sizeof(fb_params));
1567 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569 	fb_params.region_info = &region_info;
1570 
1571 	adev->dm.dmub_fb_info =
1572 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573 	fb_info = adev->dm.dmub_fb_info;
1574 
1575 	if (!fb_info) {
1576 		DRM_ERROR(
1577 			"Failed to allocate framebuffer info for DMUB service!\n");
1578 		return -ENOMEM;
1579 	}
1580 
1581 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582 	if (status != DMUB_STATUS_OK) {
1583 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584 		return -EINVAL;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
1590 static int dm_sw_init(void *handle)
1591 {
1592 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593 	int r;
1594 
1595 	r = dm_dmub_sw_init(adev);
1596 	if (r)
1597 		return r;
1598 
1599 	return load_dmcu_fw(adev);
1600 }
1601 
1602 static int dm_sw_fini(void *handle)
1603 {
1604 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605 
1606 	kfree(adev->dm.dmub_fb_info);
1607 	adev->dm.dmub_fb_info = NULL;
1608 
1609 	if (adev->dm.dmub_srv) {
1610 		dmub_srv_destroy(adev->dm.dmub_srv);
1611 		adev->dm.dmub_srv = NULL;
1612 	}
1613 
1614 	release_firmware(adev->dm.dmub_fw);
1615 	adev->dm.dmub_fw = NULL;
1616 
1617 	release_firmware(adev->dm.fw_dmcu);
1618 	adev->dm.fw_dmcu = NULL;
1619 
1620 	return 0;
1621 }
1622 
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625 	struct amdgpu_dm_connector *aconnector;
1626 	struct drm_connector *connector;
1627 	struct drm_connector_list_iter iter;
1628 	int ret = 0;
1629 
1630 	drm_connector_list_iter_begin(dev, &iter);
1631 	drm_for_each_connector_iter(connector, &iter) {
1632 		aconnector = to_amdgpu_dm_connector(connector);
1633 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634 		    aconnector->mst_mgr.aux) {
1635 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636 					 aconnector,
1637 					 aconnector->base.base.id);
1638 
1639 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640 			if (ret < 0) {
1641 				DRM_ERROR("DM_MST: Failed to start MST\n");
1642 				aconnector->dc_link->type =
1643 					dc_connection_single;
1644 				break;
1645 			}
1646 		}
1647 	}
1648 	drm_connector_list_iter_end(&iter);
1649 
1650 	return ret;
1651 }
1652 
1653 static int dm_late_init(void *handle)
1654 {
1655 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657 	struct dmcu_iram_parameters params;
1658 	unsigned int linear_lut[16];
1659 	int i;
1660 	struct dmcu *dmcu = NULL;
1661 	bool ret = true;
1662 
1663 	dmcu = adev->dm.dc->res_pool->dmcu;
1664 
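	/* Identity backlight transfer curve: 16 evenly spaced points from
	 * 0 to 0xFFFF.
	 */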
1665 	for (i = 0; i < 16; i++)
1666 		linear_lut[i] = 0xFFFF * i / 15;
1667 
1668 	params.set = 0;
1669 	params.backlight_ramping_start = 0xCCCC;
1670 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1671 	params.backlight_lut_array_size = 16;
1672 	params.backlight_lut_array = linear_lut;
1673 
1674 	/* Minimum backlight level after ABM reduction; don't allow below 1%:
1675 	 * 0xFFFF * 0.01 = 0x28F
1676 	 */
1677 	params.min_abm_backlight = 0x28F;
1678 
1679 	/* In the case where ABM is implemented on dmcub,
1680 	 * the dmcu object will be NULL.
1681 	 * ABM 2.4 and up are implemented on dmcub.
1682 	 */
1683 	if (dmcu)
1684 		ret = dmcu_load_iram(dmcu, params);
1685 	else if (adev->dm.dc->ctx->dmub_srv)
1686 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1687 
1688 	if (!ret)
1689 		return -EINVAL;
1690 
1691 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1692 }
1693 
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696 	struct amdgpu_dm_connector *aconnector;
1697 	struct drm_connector *connector;
1698 	struct drm_connector_list_iter iter;
1699 	struct drm_dp_mst_topology_mgr *mgr;
1700 	int ret;
1701 	bool need_hotplug = false;
1702 
1703 	drm_connector_list_iter_begin(dev, &iter);
1704 	drm_for_each_connector_iter(connector, &iter) {
1705 		aconnector = to_amdgpu_dm_connector(connector);
1706 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707 		    aconnector->mst_port)
1708 			continue;
1709 
1710 		mgr = &aconnector->mst_mgr;
1711 
1712 		if (suspend) {
1713 			drm_dp_mst_topology_mgr_suspend(mgr);
1714 		} else {
1715 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716 			if (ret < 0) {
1717 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718 				need_hotplug = true;
1719 			}
1720 		}
1721 	}
1722 	drm_connector_list_iter_end(&iter);
1723 
1724 	if (need_hotplug)
1725 		drm_kms_helper_hotplug_event(dev);
1726 }
1727 
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730 	struct smu_context *smu = &adev->smu;
1731 	int ret = 0;
1732 
1733 	if (!is_support_sw_smu(adev))
1734 		return 0;
1735 
1736 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1737 	 * depends on the Windows driver's DC implementation.
1738 	 * For Navi1x, the clock settings of the DCN watermarks are fixed; the
1739 	 * settings should be passed to SMU during boot up and on resume from
1740 	 * S3.
1741 	 * Boot up: DC calculates the DCN watermark clock settings within
1742 	 * dc_create / dcn20_resource_construct,
1743 	 * then calls the pplib functions below to pass the settings to SMU:
1744 	 * smu_set_watermarks_for_clock_ranges
1745 	 * smu_set_watermarks_table
1746 	 * navi10_set_watermarks_table
1747 	 * smu_write_watermarks_table
1748 	 *
1749 	 * For Renoir, the clock settings of the DCN watermarks are also fixed
1750 	 * values. DC implements a different flow for the Windows driver:
1751 	 * dc_hardware_init / dc_set_power_state
1752 	 * dcn10_init_hw
1753 	 * notify_wm_ranges
1754 	 * set_wm_ranges
1755 	 * -- Linux
1756 	 * smu_set_watermarks_for_clock_ranges
1757 	 * renoir_set_watermarks_table
1758 	 * smu_write_watermarks_table
1759 	 *
1760 	 * For Linux,
1761 	 * dc_hardware_init -> amdgpu_dm_init
1762 	 * dc_set_power_state --> dm_resume
1763 	 *
1764 	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1765 	 */
1766 	switch (adev->asic_type) {
1767 	case CHIP_NAVI10:
1768 	case CHIP_NAVI14:
1769 	case CHIP_NAVI12:
1770 		break;
1771 	default:
1772 		return 0;
1773 	}
1774 
1775 	ret = smu_write_watermarks_table(smu);
1776 	if (ret) {
1777 		DRM_ERROR("Failed to update WMTABLE!\n");
1778 		return ret;
1779 	}
1780 
1781 	return 0;
1782 }
1783 
1784 /**
1785  * dm_hw_init() - Initialize DC device
1786  * @handle: The base driver device containing the amdgpu_dm device.
1787  *
1788  * Initialize the &struct amdgpu_display_manager device. This involves calling
1789  * the initializers of each DM component, then populating the struct with them.
1790  *
1791  * Although the function implies hardware initialization, both hardware and
1792  * software are initialized here. Splitting them out to their relevant init
1793  * hooks is a future TODO item.
1794  *
1795  * Some notable things that are initialized here:
1796  *
1797  * - Display Core, both software and hardware
1798  * - DC modules that we need (freesync and color management)
1799  * - DRM software states
1800  * - Interrupt sources and handlers
1801  * - Vblank support
1802  * - Debug FS entries, if enabled
1803  */
1804 static int dm_hw_init(void *handle)
1805 {
1806 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807 	/* Create DAL display manager */
1808 	amdgpu_dm_init(adev);
1809 	amdgpu_dm_hpd_init(adev);
1810 
1811 	return 0;
1812 }
1813 
1814 /**
1815  * dm_hw_fini() - Teardown DC device
1816  * @handle: The base driver device containing the amdgpu_dm device.
1817  *
1818  * Teardown components within &struct amdgpu_display_manager that require
1819  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820  * were loaded. Also flush IRQ workqueues and disable them.
1821  */
1822 static int dm_hw_fini(void *handle)
1823 {
1824 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825 
1826 	amdgpu_dm_hpd_fini(adev);
1827 
1828 	amdgpu_dm_irq_fini(adev);
1829 	amdgpu_dm_fini(adev);
1830 	return 0;
1831 }
1832 
1833 
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836 
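/*
 * Enable or disable the pflip and vblank interrupts for every CRTC in the
 * given DC state that has at least one plane; used to quiesce and restore
 * display interrupts around GPU reset.
 */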
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838 				 struct dc_state *state, bool enable)
1839 {
1840 	enum dc_irq_source irq_source;
1841 	struct amdgpu_crtc *acrtc;
1842 	int rc = -EBUSY;
1843 	int i = 0;
1844 
1845 	for (i = 0; i < state->stream_count; i++) {
1846 		acrtc = get_crtc_by_otg_inst(
1847 				adev, state->stream_status[i].primary_otg_inst);
1848 
1849 		if (acrtc && state->stream_status[i].plane_count != 0) {
1850 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
1853 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1854 			if (rc)
1855 				DRM_WARN("Failed to %s pflip interrupts\n",
1856 					 enable ? "enable" : "disable");
1857 
1858 			if (enable) {
1859 				rc = dm_enable_vblank(&acrtc->base);
1860 				if (rc)
1861 					DRM_WARN("Failed to enable vblank interrupts\n");
1862 			} else {
1863 				dm_disable_vblank(&acrtc->base);
1864 			}
1865 
1866 		}
1867 	}
1868 
1869 }
1870 
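/*
 * Build a copy of the current DC state with every stream (and its planes)
 * removed, validate it and commit it, driving the display hardware to an
 * idle state ahead of GPU reset.
 */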
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1872 {
1873 	struct dc_state *context = NULL;
1874 	enum dc_status res = DC_ERROR_UNEXPECTED;
1875 	int i;
1876 	struct dc_stream_state *del_streams[MAX_PIPES];
1877 	int del_streams_count = 0;
1878 
1879 	memset(del_streams, 0, sizeof(del_streams));
1880 
1881 	context = dc_create_state(dc);
1882 	if (context == NULL)
1883 		goto context_alloc_fail;
1884 
1885 	dc_resource_state_copy_construct_current(dc, context);
1886 
1887 	/* First, remove all streams from the context */
1888 	for (i = 0; i < context->stream_count; i++) {
1889 		struct dc_stream_state *stream = context->streams[i];
1890 
1891 		del_streams[del_streams_count++] = stream;
1892 	}
1893 
1894 	/* Remove all planes for removed streams and then remove the streams */
1895 	for (i = 0; i < del_streams_count; i++) {
1896 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897 			res = DC_FAIL_DETACH_SURFACES;
1898 			goto fail;
1899 		}
1900 
1901 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902 		if (res != DC_OK)
1903 			goto fail;
1904 	}
1905 
1906 
1907 	res = dc_validate_global_state(dc, context, false);
1908 
1909 	if (res != DC_OK) {
1910 		DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
1911 		goto fail;
1912 	}
1913 
1914 	res = dc_commit_state(dc, context);
1915 
1916 fail:
1917 	dc_release_state(context);
1918 
1919 context_alloc_fail:
1920 	return res;
1921 }
1922 
1923 static int dm_suspend(void *handle)
1924 {
1925 	struct amdgpu_device *adev = handle;
1926 	struct amdgpu_display_manager *dm = &adev->dm;
1927 	int ret = 0;
1928 
1929 	if (amdgpu_in_reset(adev)) {
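		/*
		 * dc_lock is taken here and deliberately left held across the
		 * GPU reset; dm_resume() releases it once the cached DC state
		 * has been recommitted.
		 */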
1930 		mutex_lock(&dm->dc_lock);
1931 
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933 		dc_allow_idle_optimizations(adev->dm.dc, false);
1934 #endif
1935 
1936 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937 
1938 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939 
1940 		amdgpu_dm_commit_zero_streams(dm->dc);
1941 
1942 		amdgpu_dm_irq_suspend(adev);
1943 
1944 		return ret;
1945 	}
1946 
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948 	amdgpu_dm_crtc_secure_display_suspend(adev);
1949 #endif
1950 	WARN_ON(adev->dm.cached_state);
1951 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1952 
1953 	s3_handle_mst(adev_to_drm(adev), true);
1954 
1955 	amdgpu_dm_irq_suspend(adev);
1956 
1957 
1958 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959 
1960 	return 0;
1961 }
1962 
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965 					     struct drm_crtc *crtc)
1966 {
1967 	uint32_t i;
1968 	struct drm_connector_state *new_con_state;
1969 	struct drm_connector *connector;
1970 	struct drm_crtc *crtc_from_state;
1971 
1972 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973 		crtc_from_state = new_con_state->crtc;
1974 
1975 		if (crtc_from_state == crtc)
1976 			return to_amdgpu_dm_connector(connector);
1977 	}
1978 
1979 	return NULL;
1980 }
1981 
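/*
 * Emulate link detection for connectors the user has forced on: derive the
 * sink capabilities from the connector signal type, create a matching sink
 * and attempt to read a local EDID for it.
 */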
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984 	struct dc_sink_init_data sink_init_data = { 0 };
1985 	struct display_sink_capability sink_caps = { 0 };
1986 	enum dc_edid_status edid_status;
1987 	struct dc_context *dc_ctx = link->ctx;
1988 	struct dc_sink *sink = NULL;
1989 	struct dc_sink *prev_sink = NULL;
1990 
1991 	link->type = dc_connection_none;
1992 	prev_sink = link->local_sink;
1993 
1994 	if (prev_sink)
1995 		dc_sink_release(prev_sink);
1996 
1997 	switch (link->connector_signal) {
1998 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1999 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001 		break;
2002 	}
2003 
2004 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007 		break;
2008 	}
2009 
2010 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013 		break;
2014 	}
2015 
2016 	case SIGNAL_TYPE_LVDS: {
2017 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2019 		break;
2020 	}
2021 
2022 	case SIGNAL_TYPE_EDP: {
2023 		sink_caps.transaction_type =
2024 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025 		sink_caps.signal = SIGNAL_TYPE_EDP;
2026 		break;
2027 	}
2028 
2029 	case SIGNAL_TYPE_DISPLAY_PORT: {
2030 		sink_caps.transaction_type =
2031 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033 		break;
2034 	}
2035 
2036 	default:
2037 		DC_ERROR("Invalid connector type! signal:%d\n",
2038 			link->connector_signal);
2039 		return;
2040 	}
2041 
2042 	sink_init_data.link = link;
2043 	sink_init_data.sink_signal = sink_caps.signal;
2044 
2045 	sink = dc_sink_create(&sink_init_data);
2046 	if (!sink) {
2047 		DC_ERROR("Failed to create sink!\n");
2048 		return;
2049 	}
2050 
2051 	/* dc_sink_create returns a new reference */
2052 	link->local_sink = sink;
2053 
2054 	edid_status = dm_helpers_read_local_edid(
2055 			link->ctx,
2056 			link,
2057 			sink);
2058 
2059 	if (edid_status != EDID_OK)
2060 		DC_ERROR("Failed to read EDID\n");
2061 
2062 }
2063 
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065 				     struct amdgpu_display_manager *dm)
2066 {
2067 	struct {
2068 		struct dc_surface_update surface_updates[MAX_SURFACES];
2069 		struct dc_plane_info plane_infos[MAX_SURFACES];
2070 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072 		struct dc_stream_update stream_update;
2073 	} *bundle;
2074 	int k, m;
2075 
2076 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077 
2078 	if (!bundle) {
2079 		dm_error("Failed to allocate update bundle\n");
2080 		goto cleanup;
2081 	}
2082 
2083 	for (k = 0; k < dc_state->stream_count; k++) {
2084 		bundle->stream_update.stream = dc_state->streams[k];
2085 
2086 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2087 			bundle->surface_updates[m].surface =
2088 				dc_state->stream_status[k].plane_states[m];
2089 			bundle->surface_updates[m].surface->force_full_update =
2090 				true;
2091 		}
2092 		dc_commit_updates_for_stream(
2093 			dm->dc, bundle->surface_updates,
2094 			dc_state->stream_status[k].plane_count,
2095 			dc_state->streams[k], &bundle->stream_update, dc_state);
2096 	}
2097 
2098 cleanup:
2099 	kfree(bundle);
2100 
2101 	return;
2102 }
2103 
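/*
 * Commit a dpms_off stream update for the stream currently driving the
 * given link, under the DC lock.
 */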
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106 	struct dc_stream_state *stream_state;
2107 	struct amdgpu_dm_connector *aconnector = link->priv;
2108 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109 	struct dc_stream_update stream_update;
2110 	bool dpms_off = true;
2111 
2112 	memset(&stream_update, 0, sizeof(stream_update));
2113 	stream_update.dpms_off = &dpms_off;
2114 
2115 	mutex_lock(&adev->dm.dc_lock);
2116 	stream_state = dc_stream_find_from_link(link);
2117 
2118 	if (stream_state == NULL) {
2119 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120 		mutex_unlock(&adev->dm.dc_lock);
2121 		return;
2122 	}
2123 
2124 	stream_update.stream = stream_state;
2125 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126 				     stream_state, &stream_update,
2127 				     stream_state->ctx->dc->current_state);
2128 	mutex_unlock(&adev->dm.dc_lock);
2129 }
2130 
2131 static int dm_resume(void *handle)
2132 {
2133 	struct amdgpu_device *adev = handle;
2134 	struct drm_device *ddev = adev_to_drm(adev);
2135 	struct amdgpu_display_manager *dm = &adev->dm;
2136 	struct amdgpu_dm_connector *aconnector;
2137 	struct drm_connector *connector;
2138 	struct drm_connector_list_iter iter;
2139 	struct drm_crtc *crtc;
2140 	struct drm_crtc_state *new_crtc_state;
2141 	struct dm_crtc_state *dm_new_crtc_state;
2142 	struct drm_plane *plane;
2143 	struct drm_plane_state *new_plane_state;
2144 	struct dm_plane_state *dm_new_plane_state;
2145 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146 	enum dc_connection_type new_connection_type = dc_connection_none;
2147 	struct dc_state *dc_state;
2148 	int i, r, j;
2149 
2150 	if (amdgpu_in_reset(adev)) {
2151 		dc_state = dm->cached_dc_state;
2152 
2153 		r = dm_dmub_hw_init(adev);
2154 		if (r)
2155 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156 
2157 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158 		dc_resume(dm->dc);
2159 
2160 		amdgpu_dm_irq_resume_early(adev);
2161 
2162 		for (i = 0; i < dc_state->stream_count; i++) {
2163 			dc_state->streams[i]->mode_changed = true;
2164 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2165 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2166 					= 0xffffffff;
2167 			}
2168 		}
2169 
2170 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171 
2172 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173 
2174 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175 
2176 		dc_release_state(dm->cached_dc_state);
2177 		dm->cached_dc_state = NULL;
2178 
2179 		amdgpu_dm_irq_resume_late(adev);
2180 
2181 		mutex_unlock(&dm->dc_lock);
2182 
2183 		return 0;
2184 	}
2185 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186 	dc_release_state(dm_state->context);
2187 	dm_state->context = dc_create_state(dm->dc);
2188 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189 	dc_resource_state_construct(dm->dc, dm_state->context);
2190 
2191 	/* Before powering on DC we need to re-initialize DMUB. */
2192 	r = dm_dmub_hw_init(adev);
2193 	if (r)
2194 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195 
2196 	/* power on hardware */
2197 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198 
2199 	/* program HPD filter */
2200 	dc_resume(dm->dc);
2201 
2202 	/*
2203 	 * Enable HPD Rx IRQ early: this must be done before setting a mode,
2204 	 * since short-pulse interrupts are used for MST.
2205 	 */
2206 	amdgpu_dm_irq_resume_early(adev);
2207 
2208 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2209 	s3_handle_mst(ddev, false);
2210 
2211 	/* Do detection */
2212 	drm_connector_list_iter_begin(ddev, &iter);
2213 	drm_for_each_connector_iter(connector, &iter) {
2214 		aconnector = to_amdgpu_dm_connector(connector);
2215 
2216 		/*
2217 		 * This is the case when traversing through already-created
2218 		 * MST connectors; they should be skipped.
2219 		 */
2220 		if (aconnector->mst_port)
2221 			continue;
2222 
2223 		mutex_lock(&aconnector->hpd_lock);
2224 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225 			DRM_ERROR("KMS: Failed to detect connector\n");
2226 
2227 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2228 			emulated_link_detect(aconnector->dc_link);
2229 		else
2230 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231 
2232 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233 			aconnector->fake_enable = false;
2234 
2235 		if (aconnector->dc_sink)
2236 			dc_sink_release(aconnector->dc_sink);
2237 		aconnector->dc_sink = NULL;
2238 		amdgpu_dm_update_connector_after_detect(aconnector);
2239 		mutex_unlock(&aconnector->hpd_lock);
2240 	}
2241 	drm_connector_list_iter_end(&iter);
2242 
2243 	/* Force mode set in atomic commit */
2244 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245 		new_crtc_state->active_changed = true;
2246 
2247 	/*
2248 	 * atomic_check is expected to create the dc states. We need to release
2249 	 * them here, since they were duplicated as part of the suspend
2250 	 * procedure.
2251 	 */
2252 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254 		if (dm_new_crtc_state->stream) {
2255 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256 			dc_stream_release(dm_new_crtc_state->stream);
2257 			dm_new_crtc_state->stream = NULL;
2258 		}
2259 	}
2260 
2261 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263 		if (dm_new_plane_state->dc_state) {
2264 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265 			dc_plane_state_release(dm_new_plane_state->dc_state);
2266 			dm_new_plane_state->dc_state = NULL;
2267 		}
2268 	}
2269 
2270 	drm_atomic_helper_resume(ddev, dm->cached_state);
2271 
2272 	dm->cached_state = NULL;
2273 
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275 	amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277 
2278 	amdgpu_dm_irq_resume_late(adev);
2279 
2280 	amdgpu_dm_smu_write_watermarks_table(adev);
2281 
2282 	return 0;
2283 }
2284 
2285 /**
2286  * DOC: DM Lifecycle
2287  *
2288  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294 
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296 	.name = "dm",
2297 	.early_init = dm_early_init,
2298 	.late_init = dm_late_init,
2299 	.sw_init = dm_sw_init,
2300 	.sw_fini = dm_sw_fini,
2301 	.hw_init = dm_hw_init,
2302 	.hw_fini = dm_hw_fini,
2303 	.suspend = dm_suspend,
2304 	.resume = dm_resume,
2305 	.is_idle = dm_is_idle,
2306 	.wait_for_idle = dm_wait_for_idle,
2307 	.check_soft_reset = dm_check_soft_reset,
2308 	.soft_reset = dm_soft_reset,
2309 	.set_clockgating_state = dm_set_clockgating_state,
2310 	.set_powergating_state = dm_set_powergating_state,
2311 };
2312 
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315 	.type = AMD_IP_BLOCK_TYPE_DCE,
2316 	.major = 1,
2317 	.minor = 0,
2318 	.rev = 0,
2319 	.funcs = &amdgpu_dm_funcs,
2320 };
2321 
2322 
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328 
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330 	.fb_create = amdgpu_display_user_framebuffer_create,
2331 	.get_format_info = amd_get_format_info,
2332 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2333 	.atomic_check = amdgpu_dm_atomic_check,
2334 	.atomic_commit = drm_atomic_helper_commit,
2335 };
2336 
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340 
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343 	u32 max_cll, min_cll, max, min, q, r;
2344 	struct amdgpu_dm_backlight_caps *caps;
2345 	struct amdgpu_display_manager *dm;
2346 	struct drm_connector *conn_base;
2347 	struct amdgpu_device *adev;
2348 	struct dc_link *link = NULL;
2349 	static const u8 pre_computed_values[] = {
2350 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352 
2353 	if (!aconnector || !aconnector->dc_link)
2354 		return;
2355 
2356 	link = aconnector->dc_link;
2357 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2358 		return;
2359 
2360 	conn_base = &aconnector->base;
2361 	adev = drm_to_adev(conn_base->dev);
2362 	dm = &adev->dm;
2363 	caps = &dm->backlight_caps;
2364 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365 	caps->aux_support = false;
2366 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368 
2369 	if (caps->ext_caps->bits.oled == 1 ||
2370 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372 		caps->aux_support = true;
2373 
2374 	if (amdgpu_backlight == 0)
2375 		caps->aux_support = false;
2376 	else if (amdgpu_backlight == 1)
2377 		caps->aux_support = true;
2378 
2379 	/* From the specification (CTA-861-G), the maximum luminance is
2380 	 * calculated as:
2381 	 *	Luminance = 50*2**(CV/32)
2382 	 * where CV is a one-byte value.
2383 	 * Evaluating this expression would require floating-point precision;
2384 	 * to avoid that complexity, we take advantage of the fact that CV is
2385 	 * divided by a constant. By Euclid's division algorithm, CV can be
2386 	 * written as CV = 32*q + r. Substituting this into the Luminance
2387 	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to
2388 	 * pre-compute the values of 50*2**(r/32). The table was generated
2389 	 * with the following Ruby line:
2390 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391 	 * The results of the above expression can be verified against
2392 	 * pre_computed_values.
2393 	 */
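	/* A worked example: max_cll = 70 gives q = 2 and r = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which
	 * matches 50*2**(70/32) ~= 227.8.
	 */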
2394 	q = max_cll >> 5;
2395 	r = max_cll % 32;
2396 	max = (1 << q) * pre_computed_values[r];
2397 
2398 	// min luminance: maxLum * (CV/255)^2 / 100, computed in integer
2399 	// arithmetic by deferring the divisions to the end
2400 	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2401 
2402 	caps->aux_max_input_signal = max;
2403 	caps->aux_min_input_signal = min;
2404 }
2405 
2406 void amdgpu_dm_update_connector_after_detect(
2407 		struct amdgpu_dm_connector *aconnector)
2408 {
2409 	struct drm_connector *connector = &aconnector->base;
2410 	struct drm_device *dev = connector->dev;
2411 	struct dc_sink *sink;
2412 
2413 	/* MST handled by drm_mst framework */
2414 	if (aconnector->mst_mgr.mst_state == true)
2415 	if (aconnector->mst_mgr.mst_state)
2416 
2417 	sink = aconnector->dc_link->local_sink;
2418 	if (sink)
2419 		dc_sink_retain(sink);
2420 
2421 	/*
2422 	 * An EDID-managed connector gets its first update only in the
2423 	 * mode_valid hook; its sink is then set to either the fake or the
2424 	 * physical sink, depending on link status. Skip if done during boot.
2425 	 */
2426 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427 			&& aconnector->dc_em_sink) {
2428 
2429 		/*
2430 		 * For headless S3 resume, use the emulated sink to fake a
2431 		 * stream, because connector->sink is set to NULL on resume.
2432 		 */
2433 		mutex_lock(&dev->mode_config.mutex);
2434 
2435 		if (sink) {
2436 			if (aconnector->dc_sink) {
2437 				amdgpu_dm_update_freesync_caps(connector, NULL);
2438 				/*
2439 				 * The retain/release below bump the sink's refcount:
2440 				 * the link no longer points to it after disconnect,
2441 				 * so the next crtc-to-connector reshuffle by the UMD
2442 				 * would otherwise trigger an unwanted dc_sink release.
2443 				 */
2444 				dc_sink_release(aconnector->dc_sink);
2445 			}
2446 			aconnector->dc_sink = sink;
2447 			dc_sink_retain(aconnector->dc_sink);
2448 			amdgpu_dm_update_freesync_caps(connector,
2449 					aconnector->edid);
2450 		} else {
2451 			amdgpu_dm_update_freesync_caps(connector, NULL);
2452 			if (!aconnector->dc_sink) {
2453 				aconnector->dc_sink = aconnector->dc_em_sink;
2454 				dc_sink_retain(aconnector->dc_sink);
2455 			}
2456 		}
2457 
2458 		mutex_unlock(&dev->mode_config.mutex);
2459 
2460 		if (sink)
2461 			dc_sink_release(sink);
2462 		return;
2463 	}
2464 
2465 	/*
2466 	 * TODO: temporary guard while looking for a proper fix.
2467 	 * If this sink is an MST sink, we should not do anything.
2468 	 */
2469 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470 		dc_sink_release(sink);
2471 		return;
2472 	}
2473 
2474 	if (aconnector->dc_sink == sink) {
2475 		/*
2476 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477 		 * Do nothing!!
2478 		 */
2479 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480 				aconnector->connector_id);
2481 		if (sink)
2482 			dc_sink_release(sink);
2483 		return;
2484 	}
2485 
2486 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487 		aconnector->connector_id, aconnector->dc_sink, sink);
2488 
2489 	mutex_lock(&dev->mode_config.mutex);
2490 
2491 	/*
2492 	 * 1. Update status of the drm connector
2493 	 * 2. Send an event and let userspace tell us what to do
2494 	 */
2495 	if (sink) {
2496 		/*
2497 		 * TODO: check if we still need the S3 mode update workaround.
2498 		 * If yes, put it here.
2499 		 */
2500 		if (aconnector->dc_sink) {
2501 			amdgpu_dm_update_freesync_caps(connector, NULL);
2502 			dc_sink_release(aconnector->dc_sink);
2503 		}
2504 
2505 		aconnector->dc_sink = sink;
2506 		dc_sink_retain(aconnector->dc_sink);
2507 		if (sink->dc_edid.length == 0) {
2508 			aconnector->edid = NULL;
2509 			if (aconnector->dc_link->aux_mode) {
2510 				drm_dp_cec_unset_edid(
2511 					&aconnector->dm_dp_aux.aux);
2512 			}
2513 		} else {
2514 			aconnector->edid =
2515 				(struct edid *)sink->dc_edid.raw_edid;
2516 
2517 			drm_connector_update_edid_property(connector,
2518 							   aconnector->edid);
2519 			if (aconnector->dc_link->aux_mode)
2520 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521 						    aconnector->edid);
2522 		}
2523 
2524 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525 		update_connector_ext_caps(aconnector);
2526 	} else {
2527 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528 		amdgpu_dm_update_freesync_caps(connector, NULL);
2529 		drm_connector_update_edid_property(connector, NULL);
2530 		aconnector->num_modes = 0;
2531 		dc_sink_release(aconnector->dc_sink);
2532 		aconnector->dc_sink = NULL;
2533 		aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539 	}
2540 
2541 	mutex_unlock(&dev->mode_config.mutex);
2542 
2543 	update_subconnector_property(aconnector);
2544 
2545 	if (sink)
2546 		dc_sink_release(sink);
2547 }
2548 
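/*
 * Low-IRQ-context handler for HPD (hot plug detect) interrupts: re-runs link
 * detection and, if the connector changed, restores the DRM connector state
 * and sends a hotplug event to userspace.
 */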
2549 static void handle_hpd_irq(void *param)
2550 {
2551 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552 	struct drm_connector *connector = &aconnector->base;
2553 	struct drm_device *dev = connector->dev;
2554 	enum dc_connection_type new_connection_type = dc_connection_none;
2555 #ifdef CONFIG_DRM_AMD_DC_HDCP
2556 	struct amdgpu_device *adev = drm_to_adev(dev);
2557 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559 
2560 	/*
2561 	 * On failure, or for MST, there is no need to update the connector
2562 	 * status or notify the OS, since MST does this in its own context.
2563 	 */
2564 	mutex_lock(&aconnector->hpd_lock);
2565 
2566 #ifdef CONFIG_DRM_AMD_DC_HDCP
2567 	if (adev->dm.hdcp_workqueue) {
2568 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2569 		dm_con_state->update_hdcp = true;
2570 	}
2571 #endif
2572 	if (aconnector->fake_enable)
2573 		aconnector->fake_enable = false;
2574 
2575 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2576 		DRM_ERROR("KMS: Failed to detect connector\n");
2577 
2578 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2579 		emulated_link_detect(aconnector->dc_link);
2580 
2581 
2582 		drm_modeset_lock_all(dev);
2583 		dm_restore_drm_connector_state(dev, connector);
2584 		drm_modeset_unlock_all(dev);
2585 
2586 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2587 			drm_kms_helper_hotplug_event(dev);
2588 
2589 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2590 		if (new_connection_type == dc_connection_none &&
2591 		    aconnector->dc_link->type == dc_connection_none)
2592 			dm_set_dpms_off(aconnector->dc_link);
2593 
2594 		amdgpu_dm_update_connector_after_detect(aconnector);
2595 
2596 		drm_modeset_lock_all(dev);
2597 		dm_restore_drm_connector_state(dev, connector);
2598 		drm_modeset_unlock_all(dev);
2599 
2600 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2601 			drm_kms_helper_hotplug_event(dev);
2602 	}
2603 	mutex_unlock(&aconnector->hpd_lock);
2604 
2605 }
2606 
2607 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2608 {
2609 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2610 	uint8_t dret;
2611 	bool new_irq_handled = false;
2612 	int dpcd_addr;
2613 	int dpcd_bytes_to_read;
2614 
2615 	const int max_process_count = 30;
2616 	int process_count = 0;
2617 
2618 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2619 
2620 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2621 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2622 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2623 		dpcd_addr = DP_SINK_COUNT;
2624 	} else {
2625 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2626 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2627 		dpcd_addr = DP_SINK_COUNT_ESI;
2628 	}
2629 
2630 	dret = drm_dp_dpcd_read(
2631 		&aconnector->dm_dp_aux.aux,
2632 		dpcd_addr,
2633 		esi,
2634 		dpcd_bytes_to_read);
2635 
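	/*
	 * Service MST sideband ESI interrupts in a loop: hand each batch to
	 * the MST manager, ACK it back to the sink, then re-read until the
	 * sink stops raising new IRQs or max_process_count is reached.
	 */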
2636 	while (dret == dpcd_bytes_to_read &&
2637 		process_count < max_process_count) {
2638 		uint8_t retry;
2639 		dret = 0;
2640 
2641 		process_count++;
2642 
2643 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2644 		/* handle HPD short pulse irq */
2645 		if (aconnector->mst_mgr.mst_state)
2646 			drm_dp_mst_hpd_irq(
2647 				&aconnector->mst_mgr,
2648 				esi,
2649 				&new_irq_handled);
2650 
2651 		if (new_irq_handled) {
2652 			/* ACK at DPCD to notify downstream */
2653 			const int ack_dpcd_bytes_to_write =
2654 				dpcd_bytes_to_read - 1;
2655 
2656 			for (retry = 0; retry < 3; retry++) {
2657 				uint8_t wret;
2658 
2659 				wret = drm_dp_dpcd_write(
2660 					&aconnector->dm_dp_aux.aux,
2661 					dpcd_addr + 1,
2662 					&esi[1],
2663 					ack_dpcd_bytes_to_write);
2664 				if (wret == ack_dpcd_bytes_to_write)
2665 					break;
2666 			}
2667 
2668 			/* check if there is a new irq to be handled */
2669 			dret = drm_dp_dpcd_read(
2670 				&aconnector->dm_dp_aux.aux,
2671 				dpcd_addr,
2672 				esi,
2673 				dpcd_bytes_to_read);
2674 
2675 			new_irq_handled = false;
2676 		} else {
2677 			break;
2678 		}
2679 	}
2680 
2681 	if (process_count == max_process_count)
2682 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2683 }
2684 
2685 static void handle_hpd_rx_irq(void *param)
2686 {
2687 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2688 	struct drm_connector *connector = &aconnector->base;
2689 	struct drm_device *dev = connector->dev;
2690 	struct dc_link *dc_link = aconnector->dc_link;
2691 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2692 	bool result = false;
2693 	enum dc_connection_type new_connection_type = dc_connection_none;
2694 	struct amdgpu_device *adev = drm_to_adev(dev);
2695 	union hpd_irq_data hpd_irq_data;
2696 
2697 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2698 
2699 	/*
2700 	 * TODO: Temporarily hold a mutex so that the HPD interrupt does
2701 	 * not race on the GPIO; once an i2c helper is implemented, this
2702 	 * mutex should be retired.
2703 	 */
2704 	if (dc_link->type != dc_connection_mst_branch)
2705 		mutex_lock(&aconnector->hpd_lock);
2706 
2707 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2708 
2709 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2710 		(dc_link->type == dc_connection_mst_branch)) {
2711 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2712 			result = true;
2713 			dm_handle_hpd_rx_irq(aconnector);
2714 			goto out;
2715 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2716 			result = false;
2717 			dm_handle_hpd_rx_irq(aconnector);
2718 			goto out;
2719 		}
2720 	}
2721 
2722 	mutex_lock(&adev->dm.dc_lock);
2723 #ifdef CONFIG_DRM_AMD_DC_HDCP
2724 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2725 #else
2726 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2727 #endif
2728 	mutex_unlock(&adev->dm.dc_lock);
2729 
2730 out:
2731 	if (result && !is_mst_root_connector) {
2732 		/* Downstream Port status changed. */
2733 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2734 			DRM_ERROR("KMS: Failed to detect connector\n");
2735 
2736 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2737 			emulated_link_detect(dc_link);
2738 
2739 			if (aconnector->fake_enable)
2740 				aconnector->fake_enable = false;
2741 
2742 			amdgpu_dm_update_connector_after_detect(aconnector);
2743 
2744 
2745 			drm_modeset_lock_all(dev);
2746 			dm_restore_drm_connector_state(dev, connector);
2747 			drm_modeset_unlock_all(dev);
2748 
2749 			drm_kms_helper_hotplug_event(dev);
2750 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2751 
2752 			if (aconnector->fake_enable)
2753 				aconnector->fake_enable = false;
2754 
2755 			amdgpu_dm_update_connector_after_detect(aconnector);
2756 
2757 
2758 			drm_modeset_lock_all(dev);
2759 			dm_restore_drm_connector_state(dev, connector);
2760 			drm_modeset_unlock_all(dev);
2761 
2762 			drm_kms_helper_hotplug_event(dev);
2763 		}
2764 	}
2765 #ifdef CONFIG_DRM_AMD_DC_HDCP
2766 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2767 		if (adev->dm.hdcp_workqueue)
2768 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2769 	}
2770 #endif
2771 
2772 	if (dc_link->type != dc_connection_mst_branch) {
2773 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2774 		mutex_unlock(&aconnector->hpd_lock);
2775 	}
2776 }
2777 
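/*
 * Walk the connector list and register HPD and HPD RX (DP short pulse)
 * interrupt handlers for every link that exposes a valid IRQ source.
 */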
2778 static void register_hpd_handlers(struct amdgpu_device *adev)
2779 {
2780 	struct drm_device *dev = adev_to_drm(adev);
2781 	struct drm_connector *connector;
2782 	struct amdgpu_dm_connector *aconnector;
2783 	const struct dc_link *dc_link;
2784 	struct dc_interrupt_params int_params = {0};
2785 
2786 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2787 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2788 
2789 	list_for_each_entry(connector,
2790 			&dev->mode_config.connector_list, head) {
2791 
2792 		aconnector = to_amdgpu_dm_connector(connector);
2793 		dc_link = aconnector->dc_link;
2794 
2795 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2796 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2797 			int_params.irq_source = dc_link->irq_source_hpd;
2798 
2799 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800 					handle_hpd_irq,
2801 					(void *) aconnector);
2802 		}
2803 
2804 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2805 
2806 			/* Also register for DP short pulse (hpd_rx). */
2807 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2808 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2809 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2810 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2811 					handle_hpd_rx_irq,
2812 					(void *) aconnector);
2813 		}
2814 	}
2815 }
2816 
2817 #if defined(CONFIG_DRM_AMD_DC_SI)
2818 /* Register IRQ sources and initialize IRQ callbacks */
2819 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2820 {
2821 	struct dc *dc = adev->dm.dc;
2822 	struct common_irq_params *c_irq_params;
2823 	struct dc_interrupt_params int_params = {0};
2824 	int r;
2825 	int i;
2826 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2827 
2828 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2829 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2830 
2831 	/*
2832 	 * Actions of amdgpu_irq_add_id():
2833 	 * 1. Register a set() function with base driver.
2834 	 *    Base driver will call set() function to enable/disable an
2835 	 *    interrupt in DC hardware.
2836 	 * 2. Register amdgpu_dm_irq_handler().
2837 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2838 	 *    coming from DC hardware.
2839 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2840 	 *    for acknowledging and handling. */
2841 
2842 	/* Use VBLANK interrupt */
2843 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2844 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2845 		if (r) {
2846 			DRM_ERROR("Failed to add crtc irq id!\n");
2847 			return r;
2848 		}
2849 
2850 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2851 		int_params.irq_source =
2852 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2853 
2854 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2855 
2856 		c_irq_params->adev = adev;
2857 		c_irq_params->irq_src = int_params.irq_source;
2858 
2859 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2860 				dm_crtc_high_irq, c_irq_params);
2861 	}
2862 
2863 	/* Use GRPH_PFLIP interrupt */
2864 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2865 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2866 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2867 		if (r) {
2868 			DRM_ERROR("Failed to add page flip irq id!\n");
2869 			return r;
2870 		}
2871 
2872 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2873 		int_params.irq_source =
2874 			dc_interrupt_to_irq_source(dc, i, 0);
2875 
2876 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2877 
2878 		c_irq_params->adev = adev;
2879 		c_irq_params->irq_src = int_params.irq_source;
2880 
2881 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2882 				dm_pflip_high_irq, c_irq_params);
2883 
2884 	}
2885 
2886 	/* HPD */
2887 	r = amdgpu_irq_add_id(adev, client_id,
2888 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2889 	if (r) {
2890 		DRM_ERROR("Failed to add hpd irq id!\n");
2891 		return r;
2892 	}
2893 
2894 	register_hpd_handlers(adev);
2895 
2896 	return 0;
2897 }
2898 #endif
2899 
2900 /* Register IRQ sources and initialize IRQ callbacks */
2901 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2902 {
2903 	struct dc *dc = adev->dm.dc;
2904 	struct common_irq_params *c_irq_params;
2905 	struct dc_interrupt_params int_params = {0};
2906 	int r;
2907 	int i;
2908 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2909 
2910 	if (adev->asic_type >= CHIP_VEGA10)
2911 		client_id = SOC15_IH_CLIENTID_DCE;
2912 
2913 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2914 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2915 
2916 	/*
2917 	 * Actions of amdgpu_irq_add_id():
2918 	 * 1. Register a set() function with base driver.
2919 	 *    Base driver will call set() function to enable/disable an
2920 	 *    interrupt in DC hardware.
2921 	 * 2. Register amdgpu_dm_irq_handler().
2922 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2923 	 *    coming from DC hardware.
2924 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2925 	 *    for acknowledging and handling. */
2926 
2927 	/* Use VBLANK interrupt */
2928 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2929 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2930 		if (r) {
2931 			DRM_ERROR("Failed to add crtc irq id!\n");
2932 			return r;
2933 		}
2934 
2935 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2936 		int_params.irq_source =
2937 			dc_interrupt_to_irq_source(dc, i, 0);
2938 
2939 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2940 
2941 		c_irq_params->adev = adev;
2942 		c_irq_params->irq_src = int_params.irq_source;
2943 
2944 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2945 				dm_crtc_high_irq, c_irq_params);
2946 	}
2947 
2948 	/* Use VUPDATE interrupt */
2949 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2950 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2951 		if (r) {
2952 			DRM_ERROR("Failed to add vupdate irq id!\n");
2953 			return r;
2954 		}
2955 
2956 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2957 		int_params.irq_source =
2958 			dc_interrupt_to_irq_source(dc, i, 0);
2959 
2960 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2961 
2962 		c_irq_params->adev = adev;
2963 		c_irq_params->irq_src = int_params.irq_source;
2964 
2965 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2966 				dm_vupdate_high_irq, c_irq_params);
2967 	}
2968 
2969 	/* Use GRPH_PFLIP interrupt */
2970 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2971 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2972 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2973 		if (r) {
2974 			DRM_ERROR("Failed to add page flip irq id!\n");
2975 			return r;
2976 		}
2977 
2978 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2979 		int_params.irq_source =
2980 			dc_interrupt_to_irq_source(dc, i, 0);
2981 
2982 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2983 
2984 		c_irq_params->adev = adev;
2985 		c_irq_params->irq_src = int_params.irq_source;
2986 
2987 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2988 				dm_pflip_high_irq, c_irq_params);
2989 
2990 	}
2991 
2992 	/* HPD */
2993 	r = amdgpu_irq_add_id(adev, client_id,
2994 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2995 	if (r) {
2996 		DRM_ERROR("Failed to add hpd irq id!\n");
2997 		return r;
2998 	}
2999 
3000 	register_hpd_handlers(adev);
3001 
3002 	return 0;
3003 }
3004 
3005 #if defined(CONFIG_DRM_AMD_DC_DCN)
3006 /* Register IRQ sources and initialize IRQ callbacks */
3007 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3008 {
3009 	struct dc *dc = adev->dm.dc;
3010 	struct common_irq_params *c_irq_params;
3011 	struct dc_interrupt_params int_params = {0};
3012 	int r;
3013 	int i;
3014 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3015 	static const unsigned int vrtl_int_srcid[] = {
3016 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3017 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3018 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3019 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3020 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3021 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3022 	};
3023 #endif
3024 
3025 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3026 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3027 
3028 	/*
3029 	 * Actions of amdgpu_irq_add_id():
3030 	 * 1. Register a set() function with base driver.
3031 	 *    Base driver will call set() function to enable/disable an
3032 	 *    interrupt in DC hardware.
3033 	 * 2. Register amdgpu_dm_irq_handler().
3034 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3035 	 *    coming from DC hardware.
3036 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3037 	 *    for acknowledging and handling.
3038 	 */
3039 
3040 	/* Use VSTARTUP interrupt */
3041 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3042 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3043 			i++) {
3044 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3045 
3046 		if (r) {
3047 			DRM_ERROR("Failed to add crtc irq id!\n");
3048 			return r;
3049 		}
3050 
3051 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3052 		int_params.irq_source =
3053 			dc_interrupt_to_irq_source(dc, i, 0);
3054 
3055 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3056 
3057 		c_irq_params->adev = adev;
3058 		c_irq_params->irq_src = int_params.irq_source;
3059 
3060 		amdgpu_dm_irq_register_interrupt(
3061 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3062 	}
3063 
3064 	/* Use otg vertical line interrupt */
3065 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3066 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3067 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3068 				vrtl_int_srcid[i], &adev->vline0_irq);
3069 
3070 		if (r) {
3071 			DRM_ERROR("Failed to add vline0 irq id!\n");
3072 			return r;
3073 		}
3074 
3075 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3076 		int_params.irq_source =
3077 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3078 
3079 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3080 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3081 			break;
3082 		}
3083 
3084 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3085 					- DC_IRQ_SOURCE_DC1_VLINE0];
3086 
3087 		c_irq_params->adev = adev;
3088 		c_irq_params->irq_src = int_params.irq_source;
3089 
3090 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3091 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3092 	}
3093 #endif
3094 
3095 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3096 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3097 	 * to trigger at end of each vblank, regardless of state of the lock,
3098 	 * matching DCE behaviour.
3099 	 */
3100 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3101 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3102 	     i++) {
3103 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3104 
3105 		if (r) {
3106 			DRM_ERROR("Failed to add vupdate irq id!\n");
3107 			return r;
3108 		}
3109 
3110 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3111 		int_params.irq_source =
3112 			dc_interrupt_to_irq_source(dc, i, 0);
3113 
3114 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3115 
3116 		c_irq_params->adev = adev;
3117 		c_irq_params->irq_src = int_params.irq_source;
3118 
3119 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3120 				dm_vupdate_high_irq, c_irq_params);
3121 	}
3122 
3123 	/* Use GRPH_PFLIP interrupt */
3124 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3125 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3126 			i++) {
3127 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3128 		if (r) {
3129 			DRM_ERROR("Failed to add page flip irq id!\n");
3130 			return r;
3131 		}
3132 
3133 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3134 		int_params.irq_source =
3135 			dc_interrupt_to_irq_source(dc, i, 0);
3136 
3137 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3138 
3139 		c_irq_params->adev = adev;
3140 		c_irq_params->irq_src = int_params.irq_source;
3141 
3142 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3143 				dm_pflip_high_irq, c_irq_params);
3144 
3145 	}
3146 
3147 	if (dc->ctx->dmub_srv) {
3148 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3149 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3150 
3151 		if (r) {
3152 			DRM_ERROR("Failed to add dmub trace irq id!\n");
3153 			return r;
3154 		}
3155 
3156 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3157 		int_params.irq_source =
3158 			dc_interrupt_to_irq_source(dc, i, 0);
3159 
3160 		c_irq_params = &adev->dm.dmub_trace_params[0];
3161 
3162 		c_irq_params->adev = adev;
3163 		c_irq_params->irq_src = int_params.irq_source;
3164 
3165 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3166 				dm_dmub_trace_high_irq, c_irq_params);
3167 	}
3168 
3169 	/* HPD */
3170 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3171 			&adev->hpd_irq);
3172 	if (r) {
3173 		DRM_ERROR("Failed to add hpd irq id!\n");
3174 		return r;
3175 	}
3176 
3177 	register_hpd_handlers(adev);
3178 
3179 	return 0;
3180 }
3181 #endif
3182 
3183 /*
3184  * Acquires the lock for the atomic state object and returns
3185  * the new atomic state.
3186  *
3187  * This should only be called during atomic check.
3188  */
3189 static int dm_atomic_get_state(struct drm_atomic_state *state,
3190 			       struct dm_atomic_state **dm_state)
3191 {
3192 	struct drm_device *dev = state->dev;
3193 	struct amdgpu_device *adev = drm_to_adev(dev);
3194 	struct amdgpu_display_manager *dm = &adev->dm;
3195 	struct drm_private_state *priv_state;
3196 
3197 	if (*dm_state)
3198 		return 0;
3199 
3200 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3201 	if (IS_ERR(priv_state))
3202 		return PTR_ERR(priv_state);
3203 
3204 	*dm_state = to_dm_atomic_state(priv_state);
3205 
3206 	return 0;
3207 }
3208 
3209 static struct dm_atomic_state *
3210 dm_atomic_get_new_state(struct drm_atomic_state *state)
3211 {
3212 	struct drm_device *dev = state->dev;
3213 	struct amdgpu_device *adev = drm_to_adev(dev);
3214 	struct amdgpu_display_manager *dm = &adev->dm;
3215 	struct drm_private_obj *obj;
3216 	struct drm_private_state *new_obj_state;
3217 	int i;
3218 
3219 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3220 		if (obj->funcs == dm->atomic_obj.funcs)
3221 			return to_dm_atomic_state(new_obj_state);
3222 	}
3223 
3224 	return NULL;
3225 }
3226 
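/*
 * Duplicate the DM private atomic state; the DC state context is deep-copied
 * so that atomic check can modify it without touching the committed state.
 */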
3227 static struct drm_private_state *
3228 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3229 {
3230 	struct dm_atomic_state *old_state, *new_state;
3231 
3232 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3233 	if (!new_state)
3234 		return NULL;
3235 
3236 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3237 
3238 	old_state = to_dm_atomic_state(obj->state);
3239 
3240 	if (old_state && old_state->context)
3241 		new_state->context = dc_copy_state(old_state->context);
3242 
3243 	if (!new_state->context) {
3244 		kfree(new_state);
3245 		return NULL;
3246 	}
3247 
3248 	return &new_state->base;
3249 }
3250 
3251 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3252 				    struct drm_private_state *state)
3253 {
3254 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3255 
3256 	if (dm_state && dm_state->context)
3257 		dc_release_state(dm_state->context);
3258 
3259 	kfree(dm_state);
3260 }
3261 
3262 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3263 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3264 	.atomic_destroy_state = dm_atomic_destroy_state,
3265 };
3266 
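/*
 * Set up the DRM mode-config parameters and create the DM private atomic
 * object whose state wraps the global DC state consumed by atomic check.
 */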
3267 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3268 {
3269 	struct dm_atomic_state *state;
3270 	int r;
3271 
3272 	adev->mode_info.mode_config_initialized = true;
3273 
3274 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3275 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3276 
3277 	adev_to_drm(adev)->mode_config.max_width = 16384;
3278 	adev_to_drm(adev)->mode_config.max_height = 16384;
3279 
3280 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3281 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3282 	/* indicates support for immediate flip */
3283 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3284 
3285 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3286 
3287 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3288 	if (!state)
3289 		return -ENOMEM;
3290 
3291 	state->context = dc_create_state(adev->dm.dc);
3292 	if (!state->context) {
3293 		kfree(state);
3294 		return -ENOMEM;
3295 	}
3296 
3297 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3298 
3299 	drm_atomic_private_obj_init(adev_to_drm(adev),
3300 				    &adev->dm.atomic_obj,
3301 				    &state->base,
3302 				    &dm_atomic_state_funcs);
3303 
3304 	r = amdgpu_display_modeset_create_props(adev);
3305 	if (r) {
3306 		dc_release_state(state->context);
3307 		kfree(state);
3308 		return r;
3309 	}
3310 
3311 	r = amdgpu_dm_audio_init(adev);
3312 	if (r) {
3313 		dc_release_state(state->context);
3314 		kfree(state);
3315 		return r;
3316 	}
3317 
3318 	return 0;
3319 }
3320 
3321 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3322 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3323 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3324 
3325 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3326 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3327 
3328 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3329 {
3330 #if defined(CONFIG_ACPI)
3331 	struct amdgpu_dm_backlight_caps caps;
3332 
3333 	memset(&caps, 0, sizeof(caps));
3334 
3335 	if (dm->backlight_caps.caps_valid)
3336 		return;
3337 
3338 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3339 	if (caps.caps_valid) {
3340 		dm->backlight_caps.caps_valid = true;
3341 		if (caps.aux_support)
3342 			return;
3343 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3344 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3345 	} else {
3346 		dm->backlight_caps.min_input_signal =
3347 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3348 		dm->backlight_caps.max_input_signal =
3349 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3350 	}
3351 #else
3352 	if (dm->backlight_caps.aux_support)
3353 		return;
3354 
3355 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3356 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3357 #endif
3358 }
3359 
3360 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3361 				unsigned *min, unsigned *max)
3362 {
3363 	if (!caps)
3364 		return 0;
3365 
3366 	if (caps->aux_support) {
3367 		// Firmware limits are in nits, DC API wants millinits.
3368 		*max = 1000 * caps->aux_max_input_signal;
3369 		*min = 1000 * caps->aux_min_input_signal;
3370 	} else {
3371 		// Firmware limits are 8-bit, PWM control is 16-bit.
3372 		*max = 0x101 * caps->max_input_signal;
3373 		*min = 0x101 * caps->min_input_signal;
3374 	}
3375 	return 1;
3376 }
3377 
3378 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3379 					uint32_t brightness)
3380 {
3381 	unsigned min, max;
3382 
3383 	if (!get_brightness_range(caps, &min, &max))
3384 		return brightness;
3385 
3386 	// Rescale 0..255 to min..max
3387 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3388 				       AMDGPU_MAX_BL_LEVEL);
3389 }
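/* A worked example: with the default PWM limits (min_input_signal = 12,
 * max_input_signal = 255) the range is 3084..65535, so a user brightness of
 * 128 rescales to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432.
 */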
3390 
3391 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3392 				      uint32_t brightness)
3393 {
3394 	unsigned min, max;
3395 
3396 	if (!get_brightness_range(caps, &min, &max))
3397 		return brightness;
3398 
3399 	if (brightness < min)
3400 		return 0;
3401 	// Rescale min..max to 0..255
3402 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3403 				 max - min);
3404 }
3405 
3406 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3407 {
3408 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3409 	struct amdgpu_dm_backlight_caps caps;
3410 	struct dc_link *link = NULL;
3411 	u32 brightness;
3412 	bool rc;
3413 
3414 	amdgpu_dm_update_backlight_caps(dm);
3415 	caps = dm->backlight_caps;
3416 
3417 	link = (struct dc_link *)dm->backlight_link;
3418 
3419 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3420 	// Change brightness based on AUX property
3421 	if (caps.aux_support)
3422 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3423 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3424 	else
3425 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3426 
3427 	return rc ? 0 : 1;
3428 }
3429 
3430 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3431 {
3432 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3433 	struct amdgpu_dm_backlight_caps caps;
3434 
3435 	amdgpu_dm_update_backlight_caps(dm);
3436 	caps = dm->backlight_caps;
3437 
3438 	if (caps.aux_support) {
3439 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3440 		u32 avg, peak;
3441 		bool rc;
3442 
3443 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3444 		if (!rc)
3445 			return bd->props.brightness;
3446 		return convert_brightness_to_user(&caps, avg);
3447 	} else {
3448 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3449 
3450 		if (ret == DC_ERROR_UNEXPECTED)
3451 			return bd->props.brightness;
3452 		return convert_brightness_to_user(&caps, ret);
3453 	}
3454 }
3455 
3456 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3457 	.options = BL_CORE_SUSPENDRESUME,
3458 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3459 	.update_status	= amdgpu_dm_backlight_update_status,
3460 };
3461 
3462 static void
3463 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3464 {
3465 	char bl_name[16];
3466 	struct backlight_properties props = { 0 };
3467 
3468 	amdgpu_dm_update_backlight_caps(dm);
3469 
3470 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3471 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3472 	props.type = BACKLIGHT_RAW;
3473 
3474 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3475 		 adev_to_drm(dm->adev)->primary->index);
3476 
3477 	dm->backlight_dev = backlight_device_register(bl_name,
3478 						      adev_to_drm(dm->adev)->dev,
3479 						      dm,
3480 						      &amdgpu_dm_backlight_ops,
3481 						      &props);
3482 
3483 	if (IS_ERR(dm->backlight_dev))
3484 		DRM_ERROR("DM: Backlight registration failed!\n");
3485 	else
3486 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3487 }
3488 
3489 #endif
3490 
3491 static int initialize_plane(struct amdgpu_display_manager *dm,
3492 			    struct amdgpu_mode_info *mode_info, int plane_id,
3493 			    enum drm_plane_type plane_type,
3494 			    const struct dc_plane_cap *plane_cap)
3495 {
3496 	struct drm_plane *plane;
3497 	unsigned long possible_crtcs;
3498 	int ret = 0;
3499 
3500 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3501 	if (!plane) {
3502 		DRM_ERROR("KMS: Failed to allocate plane\n");
3503 		return -ENOMEM;
3504 	}
3505 	plane->type = plane_type;
3506 
3507 	/*
3508 	 * HACK: IGT tests expect that the primary plane for a CRTC
3509 	 * can only have one possible CRTC. Only expose support for
3510 	 * any CRTC if the plane isn't going to be used as a primary
3511 	 * plane for a CRTC - i.e. for overlay or underlay planes.
3512 	 */
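	/*
	 * Illustrative (editorial): plane 0 is therefore tied to CRTC 0
	 * (possible_crtcs = 0x1), plane 1 to CRTC 1 (0x2), and so on, while
	 * planes at or beyond max_streams - the overlays - advertise 0xff,
	 * i.e. any of up to eight CRTCs.
	 */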
3513 	possible_crtcs = 1 << plane_id;
3514 	if (plane_id >= dm->dc->caps.max_streams)
3515 		possible_crtcs = 0xff;
3516 
3517 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3518 
3519 	if (ret) {
3520 		DRM_ERROR("KMS: Failed to initialize plane\n");
3521 		kfree(plane);
3522 		return ret;
3523 	}
3524 
3525 	if (mode_info)
3526 		mode_info->planes[plane_id] = plane;
3527 
3528 	return ret;
3529 }
3530 
3532 static void register_backlight_device(struct amdgpu_display_manager *dm,
3533 				      struct dc_link *link)
3534 {
3535 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3536 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3537 
3538 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3539 	    link->type != dc_connection_none) {
3540 		/*
3541 		 * Even if registration fails, we should continue with
3542 		 * DM initialization because not having a backlight control
3543 		 * is better than a black screen.
3544 		 */
3545 		amdgpu_dm_register_backlight_device(dm);
3546 
3547 		if (dm->backlight_dev)
3548 			dm->backlight_link = link;
3549 	}
3550 #endif
3551 }
3552 
3554 /*
3555  * In this architecture, the association
3556  * connector -> encoder -> crtc
3557  * is not really required. The crtc and connector will hold the
3558  * display_index as an abstraction to use with the DAL component.
3559  *
3560  * Returns 0 on success
3561  */
3562 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3563 {
3564 	struct amdgpu_display_manager *dm = &adev->dm;
3565 	int32_t i;
3566 	struct amdgpu_dm_connector *aconnector = NULL;
3567 	struct amdgpu_encoder *aencoder = NULL;
3568 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3569 	uint32_t link_cnt;
3570 	int32_t primary_planes;
3571 	enum dc_connection_type new_connection_type = dc_connection_none;
3572 	const struct dc_plane_cap *plane;
3573 
3574 	dm->display_indexes_num = dm->dc->caps.max_streams;
3575 	/* Update the actual number of CRTCs in use */
3576 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3577 
3578 	link_cnt = dm->dc->caps.max_links;
3579 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3580 		DRM_ERROR("DM: Failed to initialize mode config\n");
3581 		return -EINVAL;
3582 	}
3583 
3584 	/* There is one primary plane per CRTC */
3585 	primary_planes = dm->dc->caps.max_streams;
3586 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3587 
3588 	/*
3589 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3590 	 * Order is reversed to match iteration order in atomic check.
3591 	 */
3592 	for (i = (primary_planes - 1); i >= 0; i--) {
3593 		plane = &dm->dc->caps.planes[i];
3594 
3595 		if (initialize_plane(dm, mode_info, i,
3596 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3597 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3598 			goto fail;
3599 		}
3600 	}
3601 
3602 	/*
3603 	 * Initialize overlay planes, index starting after primary planes.
3604 	 * These planes have a higher DRM index than the primary planes since
3605 	 * they should be considered as having a higher z-order.
3606 	 * Order is reversed to match iteration order in atomic check.
3607 	 *
3608 	 * Only support DCN for now, and only expose one so we don't encourage
3609 	 * userspace to use up all the pipes.
3610 	 */
3611 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3612 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3613 
3614 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3615 			continue;
3616 
3617 		if (!plane->blends_with_above || !plane->blends_with_below)
3618 			continue;
3619 
3620 		if (!plane->pixel_format_support.argb8888)
3621 			continue;
3622 
3623 		if (initialize_plane(dm, NULL, primary_planes + i,
3624 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3625 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3626 			goto fail;
3627 		}
3628 
3629 		/* Only create one overlay plane. */
3630 		break;
3631 	}
3632 
3633 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3634 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3635 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3636 			goto fail;
3637 		}
3638 
3639 	/* Loop over all connectors on the board */
3640 	for (i = 0; i < link_cnt; i++) {
3641 		struct dc_link *link = NULL;
3642 
3643 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3644 			DRM_ERROR(
3645 				"KMS: Cannot support more than %d display indexes\n",
3646 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3647 			continue;
3648 		}
3649 
3650 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3651 		if (!aconnector)
3652 			goto fail;
3653 
3654 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3655 		if (!aencoder)
3656 			goto fail;
3657 
3658 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3659 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3660 			goto fail;
3661 		}
3662 
3663 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3664 			DRM_ERROR("KMS: Failed to initialize connector\n");
3665 			goto fail;
3666 		}
3667 
3668 		link = dc_get_link_at_index(dm->dc, i);
3669 
3670 		if (!dc_link_detect_sink(link, &new_connection_type))
3671 			DRM_ERROR("KMS: Failed to detect connector\n");
3672 
3673 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3674 			emulated_link_detect(link);
3675 			amdgpu_dm_update_connector_after_detect(aconnector);
3676 
3677 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3678 			amdgpu_dm_update_connector_after_detect(aconnector);
3679 			register_backlight_device(dm, link);
3680 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3681 				amdgpu_dm_set_psr_caps(link);
3682 		}
3685 	}
3686 
3687 	/* Software is initialized. Now we can register interrupt handlers. */
3688 	switch (adev->asic_type) {
3689 #if defined(CONFIG_DRM_AMD_DC_SI)
3690 	case CHIP_TAHITI:
3691 	case CHIP_PITCAIRN:
3692 	case CHIP_VERDE:
3693 	case CHIP_OLAND:
3694 		if (dce60_register_irq_handlers(dm->adev)) {
3695 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3696 			goto fail;
3697 		}
3698 		break;
3699 #endif
3700 	case CHIP_BONAIRE:
3701 	case CHIP_HAWAII:
3702 	case CHIP_KAVERI:
3703 	case CHIP_KABINI:
3704 	case CHIP_MULLINS:
3705 	case CHIP_TONGA:
3706 	case CHIP_FIJI:
3707 	case CHIP_CARRIZO:
3708 	case CHIP_STONEY:
3709 	case CHIP_POLARIS11:
3710 	case CHIP_POLARIS10:
3711 	case CHIP_POLARIS12:
3712 	case CHIP_VEGAM:
3713 	case CHIP_VEGA10:
3714 	case CHIP_VEGA12:
3715 	case CHIP_VEGA20:
3716 		if (dce110_register_irq_handlers(dm->adev)) {
3717 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3718 			goto fail;
3719 		}
3720 		break;
3721 #if defined(CONFIG_DRM_AMD_DC_DCN)
3722 	case CHIP_RAVEN:
3723 	case CHIP_NAVI12:
3724 	case CHIP_NAVI10:
3725 	case CHIP_NAVI14:
3726 	case CHIP_RENOIR:
3727 	case CHIP_SIENNA_CICHLID:
3728 	case CHIP_NAVY_FLOUNDER:
3729 	case CHIP_DIMGREY_CAVEFISH:
3730 	case CHIP_VANGOGH:
3731 		if (dcn10_register_irq_handlers(dm->adev)) {
3732 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3733 			goto fail;
3734 		}
3735 		break;
3736 #endif
3737 	default:
3738 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3739 		goto fail;
3740 	}
3741 
3742 	return 0;
3743 fail:
3744 	kfree(aencoder);
3745 	kfree(aconnector);
3746 
3747 	return -EINVAL;
3748 }
3749 
3750 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3751 {
3752 	drm_mode_config_cleanup(dm->ddev);
3753 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3755 }
3756 
3757 /******************************************************************************
3758  * amdgpu_display_funcs functions
3759  *****************************************************************************/
3760 
3761 /*
3762  * dm_bandwidth_update - program display watermarks
3763  *
3764  * @adev: amdgpu_device pointer
3765  *
3766  * Calculate and program the display watermarks and line buffer allocation.
3767  */
3768 static void dm_bandwidth_update(struct amdgpu_device *adev)
3769 {
3770 	/* TODO: implement later */
3771 }
3772 
3773 static const struct amdgpu_display_funcs dm_display_funcs = {
3774 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3775 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3776 	.backlight_set_level = NULL, /* never called for DC */
3777 	.backlight_get_level = NULL, /* never called for DC */
3778 	.hpd_sense = NULL,/* called unconditionally */
3779 	.hpd_set_polarity = NULL, /* called unconditionally */
3780 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3781 	.page_flip_get_scanoutpos =
3782 		dm_crtc_get_scanoutpos,/* called unconditionally */
3783 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3784 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3785 };
3786 
3787 #if defined(CONFIG_DEBUG_KERNEL_DC)
3788 
3789 static ssize_t s3_debug_store(struct device *device,
3790 			      struct device_attribute *attr,
3791 			      const char *buf,
3792 			      size_t count)
3793 {
3794 	int ret;
3795 	int s3_state;
3796 	struct drm_device *drm_dev = dev_get_drvdata(device);
3797 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3798 
3799 	ret = kstrtoint(buf, 0, &s3_state);
3800 
3801 	if (ret == 0) {
3802 		if (s3_state) {
3803 			dm_resume(adev);
3804 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3805 		} else
3806 			dm_suspend(adev);
3807 	}
3808 
3809 	return ret == 0 ? count : 0;
3810 }
3811 
3812 DEVICE_ATTR_WO(s3_debug);
3813 
3814 #endif
3815 
3816 static int dm_early_init(void *handle)
3817 {
3818 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3819 
3820 	switch (adev->asic_type) {
3821 #if defined(CONFIG_DRM_AMD_DC_SI)
3822 	case CHIP_TAHITI:
3823 	case CHIP_PITCAIRN:
3824 	case CHIP_VERDE:
3825 		adev->mode_info.num_crtc = 6;
3826 		adev->mode_info.num_hpd = 6;
3827 		adev->mode_info.num_dig = 6;
3828 		break;
3829 	case CHIP_OLAND:
3830 		adev->mode_info.num_crtc = 2;
3831 		adev->mode_info.num_hpd = 2;
3832 		adev->mode_info.num_dig = 2;
3833 		break;
3834 #endif
3835 	case CHIP_BONAIRE:
3836 	case CHIP_HAWAII:
3837 		adev->mode_info.num_crtc = 6;
3838 		adev->mode_info.num_hpd = 6;
3839 		adev->mode_info.num_dig = 6;
3840 		break;
3841 	case CHIP_KAVERI:
3842 		adev->mode_info.num_crtc = 4;
3843 		adev->mode_info.num_hpd = 6;
3844 		adev->mode_info.num_dig = 7;
3845 		break;
3846 	case CHIP_KABINI:
3847 	case CHIP_MULLINS:
3848 		adev->mode_info.num_crtc = 2;
3849 		adev->mode_info.num_hpd = 6;
3850 		adev->mode_info.num_dig = 6;
3851 		break;
3852 	case CHIP_FIJI:
3853 	case CHIP_TONGA:
3854 		adev->mode_info.num_crtc = 6;
3855 		adev->mode_info.num_hpd = 6;
3856 		adev->mode_info.num_dig = 7;
3857 		break;
3858 	case CHIP_CARRIZO:
3859 		adev->mode_info.num_crtc = 3;
3860 		adev->mode_info.num_hpd = 6;
3861 		adev->mode_info.num_dig = 9;
3862 		break;
3863 	case CHIP_STONEY:
3864 		adev->mode_info.num_crtc = 2;
3865 		adev->mode_info.num_hpd = 6;
3866 		adev->mode_info.num_dig = 9;
3867 		break;
3868 	case CHIP_POLARIS11:
3869 	case CHIP_POLARIS12:
3870 		adev->mode_info.num_crtc = 5;
3871 		adev->mode_info.num_hpd = 5;
3872 		adev->mode_info.num_dig = 5;
3873 		break;
3874 	case CHIP_POLARIS10:
3875 	case CHIP_VEGAM:
3876 		adev->mode_info.num_crtc = 6;
3877 		adev->mode_info.num_hpd = 6;
3878 		adev->mode_info.num_dig = 6;
3879 		break;
3880 	case CHIP_VEGA10:
3881 	case CHIP_VEGA12:
3882 	case CHIP_VEGA20:
3883 		adev->mode_info.num_crtc = 6;
3884 		adev->mode_info.num_hpd = 6;
3885 		adev->mode_info.num_dig = 6;
3886 		break;
3887 #if defined(CONFIG_DRM_AMD_DC_DCN)
3888 	case CHIP_RAVEN:
3889 	case CHIP_RENOIR:
3890 	case CHIP_VANGOGH:
3891 		adev->mode_info.num_crtc = 4;
3892 		adev->mode_info.num_hpd = 4;
3893 		adev->mode_info.num_dig = 4;
3894 		break;
3895 	case CHIP_NAVI10:
3896 	case CHIP_NAVI12:
3897 	case CHIP_SIENNA_CICHLID:
3898 	case CHIP_NAVY_FLOUNDER:
3899 		adev->mode_info.num_crtc = 6;
3900 		adev->mode_info.num_hpd = 6;
3901 		adev->mode_info.num_dig = 6;
3902 		break;
3903 	case CHIP_NAVI14:
3904 	case CHIP_DIMGREY_CAVEFISH:
3905 		adev->mode_info.num_crtc = 5;
3906 		adev->mode_info.num_hpd = 5;
3907 		adev->mode_info.num_dig = 5;
3908 		break;
3909 #endif
3910 	default:
3911 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3912 		return -EINVAL;
3913 	}
3914 
3915 	amdgpu_dm_set_irq_funcs(adev);
3916 
3917 	if (adev->mode_info.funcs == NULL)
3918 		adev->mode_info.funcs = &dm_display_funcs;
3919 
3920 	/*
3921 	 * Note: Do NOT change adev->audio_endpt_rreg and
3922 	 * adev->audio_endpt_wreg because they are initialised in
3923 	 * amdgpu_device_init()
3924 	 */
3925 #if defined(CONFIG_DEBUG_KERNEL_DC)
3926 	device_create_file(
3927 		adev_to_drm(adev)->dev,
3928 		&dev_attr_s3_debug);
3929 #endif
3930 
3931 	return 0;
3932 }
3933 
3934 static bool modeset_required(struct drm_crtc_state *crtc_state,
3935 			     struct dc_stream_state *new_stream,
3936 			     struct dc_stream_state *old_stream)
3937 {
3938 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3939 }
3940 
3941 static bool modereset_required(struct drm_crtc_state *crtc_state)
3942 {
3943 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3944 }
3945 
3946 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3947 {
3948 	drm_encoder_cleanup(encoder);
3949 	kfree(encoder);
3950 }
3951 
3952 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3953 	.destroy = amdgpu_dm_encoder_destroy,
3954 };
3955 
3956 
3957 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3958 					 struct drm_framebuffer *fb,
3959 					 int *min_downscale, int *max_upscale)
3960 {
3961 	struct amdgpu_device *adev = drm_to_adev(dev);
3962 	struct dc *dc = adev->dm.dc;
3963 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3964 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3965 
3966 	switch (fb->format->format) {
3967 	case DRM_FORMAT_P010:
3968 	case DRM_FORMAT_NV12:
3969 	case DRM_FORMAT_NV21:
3970 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3971 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3972 		break;
3973 
3974 	case DRM_FORMAT_XRGB16161616F:
3975 	case DRM_FORMAT_ARGB16161616F:
3976 	case DRM_FORMAT_XBGR16161616F:
3977 	case DRM_FORMAT_ABGR16161616F:
3978 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3979 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3980 		break;
3981 
3982 	default:
3983 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3984 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3985 		break;
3986 	}
3987 
3988 	/*
3989 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
3990 	 * use a scaling factor of 1.0 == 1000 units.
3991 	 */
3992 	if (*max_upscale == 1)
3993 		*max_upscale = 1000;
3994 
3995 	if (*min_downscale == 1)
3996 		*min_downscale = 1000;
3997 }
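
/*
 * Worked example (editorial): scale factors are in units of 0.001, so a
 * min_downscale of 250 allows shrinking a plane to 25% of its source size
 * and a max_upscale of 16000 allows growing it 16x; these are exactly the
 * fallback limits fill_dc_scaling_info() below uses when no plane or
 * framebuffer is attached.
 */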
3998 
4000 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4001 				struct dc_scaling_info *scaling_info)
4002 {
4003 	int scale_w, scale_h, min_downscale, max_upscale;
4004 
4005 	memset(scaling_info, 0, sizeof(*scaling_info));
4006 
4007 	/* Source coordinates are 16.16 fixed point; ignore the fractional part for now... */
4008 	scaling_info->src_rect.x = state->src_x >> 16;
4009 	scaling_info->src_rect.y = state->src_y >> 16;
4010 
4011 	scaling_info->src_rect.width = state->src_w >> 16;
4012 	if (scaling_info->src_rect.width == 0)
4013 		return -EINVAL;
4014 
4015 	scaling_info->src_rect.height = state->src_h >> 16;
4016 	if (scaling_info->src_rect.height == 0)
4017 		return -EINVAL;
4018 
4019 	scaling_info->dst_rect.x = state->crtc_x;
4020 	scaling_info->dst_rect.y = state->crtc_y;
4021 
4022 	if (state->crtc_w == 0)
4023 		return -EINVAL;
4024 
4025 	scaling_info->dst_rect.width = state->crtc_w;
4026 
4027 	if (state->crtc_h == 0)
4028 		return -EINVAL;
4029 
4030 	scaling_info->dst_rect.height = state->crtc_h;
4031 
4032 	/* DRM doesn't specify clipping on destination output. */
4033 	scaling_info->clip_rect = scaling_info->dst_rect;
4034 
4035 	/* Validate scaling per-format with DC plane caps */
4036 	if (state->plane && state->plane->dev && state->fb) {
4037 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4038 					     &min_downscale, &max_upscale);
4039 	} else {
4040 		min_downscale = 250;
4041 		max_upscale = 16000;
4042 	}
4043 
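	/*
	 * Illustrative (editorial): scaling a 1920 pixel wide source onto a
	 * 960 pixel destination gives scale_w = 960 * 1000 / 1920 = 500
	 * (0.5x), which the fallback limits of 250..16000 accept.
	 */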
4044 	scale_w = scaling_info->dst_rect.width * 1000 /
4045 		  scaling_info->src_rect.width;
4046 
4047 	if (scale_w < min_downscale || scale_w > max_upscale)
4048 		return -EINVAL;
4049 
4050 	scale_h = scaling_info->dst_rect.height * 1000 /
4051 		  scaling_info->src_rect.height;
4052 
4053 	if (scale_h < min_downscale || scale_h > max_upscale)
4054 		return -EINVAL;
4055 
4056 	/*
4057 	 * The "scaling_quality" can be ignored for now: quality = 0 makes DC
4058 	 * assume reasonable defaults based on the format.
4059 	 */
4060 
4061 	return 0;
4062 }
4063 
4064 static void
4065 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4066 				 uint64_t tiling_flags)
4067 {
4068 	/* Fill GFX8 params */
4069 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4070 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4071 
4072 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4073 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4074 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4075 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4076 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4077 
4078 		/* XXX fix me for VI */
4079 		tiling_info->gfx8.num_banks = num_banks;
4080 		tiling_info->gfx8.array_mode =
4081 				DC_ARRAY_2D_TILED_THIN1;
4082 		tiling_info->gfx8.tile_split = tile_split;
4083 		tiling_info->gfx8.bank_width = bankw;
4084 		tiling_info->gfx8.bank_height = bankh;
4085 		tiling_info->gfx8.tile_aspect = mtaspect;
4086 		tiling_info->gfx8.tile_mode =
4087 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4088 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4089 			== DC_ARRAY_1D_TILED_THIN1) {
4090 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4091 	}
4092 
4093 	tiling_info->gfx8.pipe_config =
4094 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4095 }
4096 
4097 static void
4098 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4099 				  union dc_tiling_info *tiling_info)
4100 {
4101 	tiling_info->gfx9.num_pipes =
4102 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4103 	tiling_info->gfx9.num_banks =
4104 		adev->gfx.config.gb_addr_config_fields.num_banks;
4105 	tiling_info->gfx9.pipe_interleave =
4106 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4107 	tiling_info->gfx9.num_shader_engines =
4108 		adev->gfx.config.gb_addr_config_fields.num_se;
4109 	tiling_info->gfx9.max_compressed_frags =
4110 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4111 	tiling_info->gfx9.num_rb_per_se =
4112 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4113 	tiling_info->gfx9.shaderEnable = 1;
4114 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4115 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4116 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4117 	    adev->asic_type == CHIP_VANGOGH)
4118 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4119 }
4120 
4121 static int
4122 validate_dcc(struct amdgpu_device *adev,
4123 	     const enum surface_pixel_format format,
4124 	     const enum dc_rotation_angle rotation,
4125 	     const union dc_tiling_info *tiling_info,
4126 	     const struct dc_plane_dcc_param *dcc,
4127 	     const struct dc_plane_address *address,
4128 	     const struct plane_size *plane_size)
4129 {
4130 	struct dc *dc = adev->dm.dc;
4131 	struct dc_dcc_surface_param input;
4132 	struct dc_surface_dcc_cap output;
4133 
4134 	memset(&input, 0, sizeof(input));
4135 	memset(&output, 0, sizeof(output));
4136 
4137 	if (!dcc->enable)
4138 		return 0;
4139 
4140 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4141 	    !dc->cap_funcs.get_dcc_compression_cap)
4142 		return -EINVAL;
4143 
4144 	input.format = format;
4145 	input.surface_size.width = plane_size->surface_size.width;
4146 	input.surface_size.height = plane_size->surface_size.height;
4147 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4148 
4149 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4150 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4151 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4152 		input.scan = SCAN_DIRECTION_VERTICAL;
4153 
4154 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4155 		return -EINVAL;
4156 
4157 	if (!output.capable)
4158 		return -EINVAL;
4159 
4160 	if (dcc->independent_64b_blks == 0 &&
4161 	    output.grph.rgb.independent_64b_blks != 0)
4162 		return -EINVAL;
4163 
4164 	return 0;
4165 }
4166 
4167 static bool
4168 modifier_has_dcc(uint64_t modifier)
4169 {
4170 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4171 }
4172 
4173 static unsigned
4174 modifier_gfx9_swizzle_mode(uint64_t modifier)
4175 {
4176 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4177 		return 0;
4178 
4179 	return AMD_FMT_MOD_GET(TILE, modifier);
4180 }
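
/*
 * Editorial note: as used by this driver, the two low bits of the swizzle
 * mode returned here select the micro tile ordering;
 * dm_plane_format_mod_supported() below masks with "& 3" to recover it as
 * an enum dm_micro_swizzle value.
 */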
4181 
4182 static const struct drm_format_info *
4183 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4184 {
4185 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4186 }
4187 
4188 static void
4189 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4190 				    union dc_tiling_info *tiling_info,
4191 				    uint64_t modifier)
4192 {
4193 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4194 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4195 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4196 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4197 
4198 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4199 
4200 	if (!IS_AMD_FMT_MOD(modifier))
4201 		return;
4202 
4203 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4204 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4205 
4206 	if (adev->family >= AMDGPU_FAMILY_NV) {
4207 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4208 	} else {
4209 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4210 
4211 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4212 	}
4213 }
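
/*
 * Worked example (editorial, following the arithmetic above): a modifier
 * with PIPE_XOR_BITS = 5 yields pipes_log2 = min(4, 5) = 4, so
 * num_pipes = 1 << 4 = 16 and num_shader_engines = 1 << (5 - 4) = 2.
 */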
4214 
4215 enum dm_micro_swizzle {
4216 	MICRO_SWIZZLE_Z = 0,
4217 	MICRO_SWIZZLE_S = 1,
4218 	MICRO_SWIZZLE_D = 2,
4219 	MICRO_SWIZZLE_R = 3
4220 };
4221 
4222 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4223 					  uint32_t format,
4224 					  uint64_t modifier)
4225 {
4226 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4227 	const struct drm_format_info *info = drm_format_info(format);
4228 
4229 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4230 
4231 	if (!info)
4232 		return false;
4233 
4234 	/*
4235 	 * We always have to allow this modifier, because core DRM still
4236 	 * checks LINEAR support if userspace does not provide modifiers.
4237 	 */
4238 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4239 		return true;
4240 
4241 	/*
4242 	 * The arbitrary tiling support for multiplane formats has not been hooked
4243 	 * up.
4244 	 */
4245 	if (info->num_planes > 1)
4246 		return false;
4247 
4248 	/*
4249 	 * For D swizzle the canonical modifier depends on the bpp, so check
4250 	 * it here.
4251 	 */
4252 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4253 	    adev->family >= AMDGPU_FAMILY_NV) {
4254 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4255 			return false;
4256 	}
4257 
4258 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4259 	    info->cpp[0] < 8)
4260 		return false;
4261 
4262 	if (modifier_has_dcc(modifier)) {
4263 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4264 		if (info->cpp[0] != 4)
4265 			return false;
4266 	}
4267 
4268 	return true;
4269 }
4270 
4271 static void
4272 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4273 {
4274 	if (!*mods)
4275 		return;
4276 
4277 	if (*cap - *size < 1) {
4278 		uint64_t new_cap = *cap * 2;
4279 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4280 
4281 		if (!new_mods) {
4282 			kfree(*mods);
4283 			*mods = NULL;
4284 			return;
4285 		}
4286 
4287 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4288 		kfree(*mods);
4289 		*mods = new_mods;
4290 		*cap = new_cap;
4291 	}
4292 
4293 	(*mods)[*size] = mod;
4294 	*size += 1;
4295 }
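
/*
 * Editorial note: add_modifier() grows the array by doubling, so repeated
 * appends are amortized O(1); on allocation failure it frees the list and
 * leaves *mods NULL, which get_plane_modifiers() below reports as -ENOMEM.
 */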
4296 
4297 static void
4298 add_gfx9_modifiers(const struct amdgpu_device *adev,
4299 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4300 {
4301 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4302 	int pipe_xor_bits = min(8, pipes +
4303 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4304 	int bank_xor_bits = min(8 - pipe_xor_bits,
4305 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4306 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4307 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4308 
4310 	if (adev->family == AMDGPU_FAMILY_RV) {
4311 		/* Raven2 and later */
4312 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4313 
4314 		/*
4315 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4316 		 * doesn't support _D on DCN
4317 		 */
4318 
4319 		if (has_constant_encode) {
4320 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4321 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4322 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4323 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4324 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4325 				    AMD_FMT_MOD_SET(DCC, 1) |
4326 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4327 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4328 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4329 		}
4330 
4331 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4332 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4333 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4334 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4335 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4336 			    AMD_FMT_MOD_SET(DCC, 1) |
4337 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4338 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4339 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4340 
4341 		if (has_constant_encode) {
4342 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4343 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4344 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4345 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4346 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4347 				    AMD_FMT_MOD_SET(DCC, 1) |
4348 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4349 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4350 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4351 
4352 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4353 				    AMD_FMT_MOD_SET(RB, rb) |
4354 				    AMD_FMT_MOD_SET(PIPE, pipes));
4355 		}
4356 
4357 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4358 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4359 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4360 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4361 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4362 			    AMD_FMT_MOD_SET(DCC, 1) |
4363 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4364 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4365 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4366 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4367 			    AMD_FMT_MOD_SET(RB, rb) |
4368 			    AMD_FMT_MOD_SET(PIPE, pipes));
4369 	}
4370 
4371 	/*
4372 	 * Only supported for 64bpp on Raven, will be filtered on format in
4373 	 * dm_plane_format_mod_supported.
4374 	 */
4375 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4376 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4377 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4378 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4379 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4380 
4381 	if (adev->family == AMDGPU_FAMILY_RV) {
4382 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4383 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4384 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4385 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4386 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4387 	}
4388 
4389 	/*
4390 	 * Only supported for 64bpp on Raven, will be filtered on format in
4391 	 * dm_plane_format_mod_supported.
4392 	 */
4393 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4394 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4395 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4396 
4397 	if (adev->family == AMDGPU_FAMILY_RV) {
4398 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4399 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4400 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4401 	}
4402 }
4403 
4404 static void
4405 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4406 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4407 {
4408 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4409 
4410 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4411 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4412 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4413 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4414 		    AMD_FMT_MOD_SET(DCC, 1) |
4415 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4416 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4417 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4418 
4419 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4420 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4421 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4422 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4423 		    AMD_FMT_MOD_SET(DCC, 1) |
4424 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4425 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4426 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4427 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4428 
4429 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4430 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4431 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4432 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4433 
4434 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4435 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4436 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4437 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4438 
4440 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4441 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4442 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4443 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4444 
4445 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4446 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4447 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4448 }
4449 
4450 static void
4451 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4452 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4453 {
4454 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4455 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4456 
4457 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4458 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4459 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4460 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4461 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4462 		    AMD_FMT_MOD_SET(DCC, 1) |
4463 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4464 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4465 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4466 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4467 
4468 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4469 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4470 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4471 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4472 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4473 		    AMD_FMT_MOD_SET(DCC, 1) |
4474 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4475 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4476 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4477 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4478 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4479 
4480 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4481 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4482 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4483 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4484 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4485 
4486 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4487 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4488 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4489 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4490 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4491 
4492 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4493 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4494 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4495 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4496 
4497 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4498 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4499 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4500 }
4501 
4502 static int
4503 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4504 {
4505 	uint64_t size = 0, capacity = 128;
4506 	*mods = NULL;
4507 
4508 	/* We have not hooked up any pre-GFX9 modifiers. */
4509 	if (adev->family < AMDGPU_FAMILY_AI)
4510 		return 0;
4511 
4512 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4513 
4514 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4515 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4516 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4517 		return *mods ? 0 : -ENOMEM;
4518 	}
4519 
4520 	switch (adev->family) {
4521 	case AMDGPU_FAMILY_AI:
4522 	case AMDGPU_FAMILY_RV:
4523 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4524 		break;
4525 	case AMDGPU_FAMILY_NV:
4526 	case AMDGPU_FAMILY_VGH:
4527 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4528 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4529 		else
4530 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4531 		break;
4532 	}
4533 
4534 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4535 
4536 	/* INVALID marks the end of the list. */
4537 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4538 
4539 	if (!*mods)
4540 		return -ENOMEM;
4541 
4542 	return 0;
4543 }
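
/*
 * Illustrative (editorial): for a cursor plane the resulting list is just
 * { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }, with INVALID acting
 * as the terminator; other plane types get the family-specific GFX9/GFX10
 * modifiers followed by the same LINEAR + INVALID tail.
 */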
4544 
4545 static int
4546 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4547 					  const struct amdgpu_framebuffer *afb,
4548 					  const enum surface_pixel_format format,
4549 					  const enum dc_rotation_angle rotation,
4550 					  const struct plane_size *plane_size,
4551 					  union dc_tiling_info *tiling_info,
4552 					  struct dc_plane_dcc_param *dcc,
4553 					  struct dc_plane_address *address,
4554 					  const bool force_disable_dcc)
4555 {
4556 	const uint64_t modifier = afb->base.modifier;
4557 	int ret;
4558 
4559 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4560 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4561 
4562 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4563 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4564 
4565 		dcc->enable = 1;
4566 		dcc->meta_pitch = afb->base.pitches[1];
4567 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4568 
4569 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4570 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4571 	}
4572 
4573 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4574 	if (ret)
4575 		return ret;
4576 
4577 	return 0;
4578 }
4579 
4580 static int
4581 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4582 			     const struct amdgpu_framebuffer *afb,
4583 			     const enum surface_pixel_format format,
4584 			     const enum dc_rotation_angle rotation,
4585 			     const uint64_t tiling_flags,
4586 			     union dc_tiling_info *tiling_info,
4587 			     struct plane_size *plane_size,
4588 			     struct dc_plane_dcc_param *dcc,
4589 			     struct dc_plane_address *address,
4590 			     bool tmz_surface,
4591 			     bool force_disable_dcc)
4592 {
4593 	const struct drm_framebuffer *fb = &afb->base;
4594 	int ret;
4595 
4596 	memset(tiling_info, 0, sizeof(*tiling_info));
4597 	memset(plane_size, 0, sizeof(*plane_size));
4598 	memset(dcc, 0, sizeof(*dcc));
4599 	memset(address, 0, sizeof(*address));
4600 
4601 	address->tmz_surface = tmz_surface;
4602 
4603 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4604 		uint64_t addr = afb->address + fb->offsets[0];
4605 
4606 		plane_size->surface_size.x = 0;
4607 		plane_size->surface_size.y = 0;
4608 		plane_size->surface_size.width = fb->width;
4609 		plane_size->surface_size.height = fb->height;
4610 		plane_size->surface_pitch =
4611 			fb->pitches[0] / fb->format->cpp[0];
4612 
4613 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4614 		address->grph.addr.low_part = lower_32_bits(addr);
4615 		address->grph.addr.high_part = upper_32_bits(addr);
4616 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4617 		uint64_t luma_addr = afb->address + fb->offsets[0];
4618 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4619 
4620 		plane_size->surface_size.x = 0;
4621 		plane_size->surface_size.y = 0;
4622 		plane_size->surface_size.width = fb->width;
4623 		plane_size->surface_size.height = fb->height;
4624 		plane_size->surface_pitch =
4625 			fb->pitches[0] / fb->format->cpp[0];
4626 
4627 		plane_size->chroma_size.x = 0;
4628 		plane_size->chroma_size.y = 0;
4629 		/* TODO: set these based on surface format */
4630 		plane_size->chroma_size.width = fb->width / 2;
4631 		plane_size->chroma_size.height = fb->height / 2;
4632 
4633 		plane_size->chroma_pitch =
4634 			fb->pitches[1] / fb->format->cpp[1];
4635 
4636 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4637 		address->video_progressive.luma_addr.low_part =
4638 			lower_32_bits(luma_addr);
4639 		address->video_progressive.luma_addr.high_part =
4640 			upper_32_bits(luma_addr);
4641 		address->video_progressive.chroma_addr.low_part =
4642 			lower_32_bits(chroma_addr);
4643 		address->video_progressive.chroma_addr.high_part =
4644 			upper_32_bits(chroma_addr);
4645 	}
4646 
4647 	if (adev->family >= AMDGPU_FAMILY_AI) {
4648 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4649 								rotation, plane_size,
4650 								tiling_info, dcc,
4651 								address,
4652 								force_disable_dcc);
4653 		if (ret)
4654 			return ret;
4655 	} else {
4656 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4657 	}
4658 
4659 	return 0;
4660 }
4661 
4662 static void
4663 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4664 			       bool *per_pixel_alpha, bool *global_alpha,
4665 			       int *global_alpha_value)
4666 {
4667 	*per_pixel_alpha = false;
4668 	*global_alpha = false;
4669 	*global_alpha_value = 0xff;
4670 
4671 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4672 		return;
4673 
4674 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4675 		static const uint32_t alpha_formats[] = {
4676 			DRM_FORMAT_ARGB8888,
4677 			DRM_FORMAT_RGBA8888,
4678 			DRM_FORMAT_ABGR8888,
4679 		};
4680 		uint32_t format = plane_state->fb->format->format;
4681 		unsigned int i;
4682 
4683 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4684 			if (format == alpha_formats[i]) {
4685 				*per_pixel_alpha = true;
4686 				break;
4687 			}
4688 		}
4689 	}
4690 
4691 	if (plane_state->alpha < 0xffff) {
4692 		*global_alpha = true;
4693 		*global_alpha_value = plane_state->alpha >> 8;
4694 	}
4695 }
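
/*
 * Illustrative (editorial): plane_state->alpha is the 16-bit DRM alpha
 * property, so a value of 0x8000 enables global alpha with
 * global_alpha_value = 0x8000 >> 8 = 0x80.
 */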
4696 
4697 static int
4698 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4699 			    const enum surface_pixel_format format,
4700 			    enum dc_color_space *color_space)
4701 {
4702 	bool full_range;
4703 
4704 	*color_space = COLOR_SPACE_SRGB;
4705 
4706 	/* DRM color properties only affect non-RGB formats. */
4707 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4708 		return 0;
4709 
4710 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4711 
4712 	switch (plane_state->color_encoding) {
4713 	case DRM_COLOR_YCBCR_BT601:
4714 		if (full_range)
4715 			*color_space = COLOR_SPACE_YCBCR601;
4716 		else
4717 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4718 		break;
4719 
4720 	case DRM_COLOR_YCBCR_BT709:
4721 		if (full_range)
4722 			*color_space = COLOR_SPACE_YCBCR709;
4723 		else
4724 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4725 		break;
4726 
4727 	case DRM_COLOR_YCBCR_BT2020:
4728 		if (full_range)
4729 			*color_space = COLOR_SPACE_2020_YCBCR;
4730 		else
4731 			return -EINVAL;
4732 		break;
4733 
4734 	default:
4735 		return -EINVAL;
4736 	}
4737 
4738 	return 0;
4739 }
4740 
4741 static int
4742 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4743 			    const struct drm_plane_state *plane_state,
4744 			    const uint64_t tiling_flags,
4745 			    struct dc_plane_info *plane_info,
4746 			    struct dc_plane_address *address,
4747 			    bool tmz_surface,
4748 			    bool force_disable_dcc)
4749 {
4750 	const struct drm_framebuffer *fb = plane_state->fb;
4751 	const struct amdgpu_framebuffer *afb =
4752 		to_amdgpu_framebuffer(plane_state->fb);
4753 	int ret;
4754 
4755 	memset(plane_info, 0, sizeof(*plane_info));
4756 
4757 	switch (fb->format->format) {
4758 	case DRM_FORMAT_C8:
4759 		plane_info->format =
4760 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4761 		break;
4762 	case DRM_FORMAT_RGB565:
4763 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4764 		break;
4765 	case DRM_FORMAT_XRGB8888:
4766 	case DRM_FORMAT_ARGB8888:
4767 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4768 		break;
4769 	case DRM_FORMAT_XRGB2101010:
4770 	case DRM_FORMAT_ARGB2101010:
4771 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4772 		break;
4773 	case DRM_FORMAT_XBGR2101010:
4774 	case DRM_FORMAT_ABGR2101010:
4775 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4776 		break;
4777 	case DRM_FORMAT_XBGR8888:
4778 	case DRM_FORMAT_ABGR8888:
4779 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4780 		break;
4781 	case DRM_FORMAT_NV21:
4782 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4783 		break;
4784 	case DRM_FORMAT_NV12:
4785 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4786 		break;
4787 	case DRM_FORMAT_P010:
4788 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4789 		break;
4790 	case DRM_FORMAT_XRGB16161616F:
4791 	case DRM_FORMAT_ARGB16161616F:
4792 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4793 		break;
4794 	case DRM_FORMAT_XBGR16161616F:
4795 	case DRM_FORMAT_ABGR16161616F:
4796 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4797 		break;
4798 	default:
4799 		DRM_ERROR(
4800 			"Unsupported screen format %p4cc\n",
4801 			&fb->format->format);
4802 		return -EINVAL;
4803 	}
4804 
4805 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4806 	case DRM_MODE_ROTATE_0:
4807 		plane_info->rotation = ROTATION_ANGLE_0;
4808 		break;
4809 	case DRM_MODE_ROTATE_90:
4810 		plane_info->rotation = ROTATION_ANGLE_90;
4811 		break;
4812 	case DRM_MODE_ROTATE_180:
4813 		plane_info->rotation = ROTATION_ANGLE_180;
4814 		break;
4815 	case DRM_MODE_ROTATE_270:
4816 		plane_info->rotation = ROTATION_ANGLE_270;
4817 		break;
4818 	default:
4819 		plane_info->rotation = ROTATION_ANGLE_0;
4820 		break;
4821 	}
4822 
4823 	plane_info->visible = true;
4824 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4825 
4826 	plane_info->layer_index = 0;
4827 
4828 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4829 					  &plane_info->color_space);
4830 	if (ret)
4831 		return ret;
4832 
4833 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4834 					   plane_info->rotation, tiling_flags,
4835 					   &plane_info->tiling_info,
4836 					   &plane_info->plane_size,
4837 					   &plane_info->dcc, address, tmz_surface,
4838 					   force_disable_dcc);
4839 	if (ret)
4840 		return ret;
4841 
4842 	fill_blending_from_plane_state(
4843 		plane_state, &plane_info->per_pixel_alpha,
4844 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4845 
4846 	return 0;
4847 }
4848 
4849 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4850 				    struct dc_plane_state *dc_plane_state,
4851 				    struct drm_plane_state *plane_state,
4852 				    struct drm_crtc_state *crtc_state)
4853 {
4854 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4855 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4856 	struct dc_scaling_info scaling_info;
4857 	struct dc_plane_info plane_info;
4858 	int ret;
4859 	bool force_disable_dcc = false;
4860 
4861 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4862 	if (ret)
4863 		return ret;
4864 
4865 	dc_plane_state->src_rect = scaling_info.src_rect;
4866 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4867 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4868 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4869 
4870 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4871 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4872 					  afb->tiling_flags,
4873 					  &plane_info,
4874 					  &dc_plane_state->address,
4875 					  afb->tmz_surface,
4876 					  force_disable_dcc);
4877 	if (ret)
4878 		return ret;
4879 
4880 	dc_plane_state->format = plane_info.format;
4881 	dc_plane_state->color_space = plane_info.color_space;
4883 	dc_plane_state->plane_size = plane_info.plane_size;
4884 	dc_plane_state->rotation = plane_info.rotation;
4885 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4886 	dc_plane_state->stereo_format = plane_info.stereo_format;
4887 	dc_plane_state->tiling_info = plane_info.tiling_info;
4888 	dc_plane_state->visible = plane_info.visible;
4889 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4890 	dc_plane_state->global_alpha = plane_info.global_alpha;
4891 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4892 	dc_plane_state->dcc = plane_info.dcc;
4893 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4894 	dc_plane_state->flip_int_enabled = true;
4895 
4896 	/*
4897 	 * Always set input transfer function, since plane state is refreshed
4898 	 * every time.
4899 	 */
4900 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4901 	if (ret)
4902 		return ret;
4903 
4904 	return 0;
4905 }
4906 
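/*
 * Worked example (editorial) for the scaling below: a 1280x1024 mode on a
 * 1920x1080 stream with RMX_ASPECT keeps the aspect ratio by reducing
 * dst.width to 1280 * 1080 / 1024 = 1350, then centers it with
 * dst.x = (1920 - 1350) / 2 = 285.
 */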
4907 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4908 					   const struct dm_connector_state *dm_state,
4909 					   struct dc_stream_state *stream)
4910 {
4911 	enum amdgpu_rmx_type rmx_type;
4912 
4913 	struct rect src = { 0 }; /* viewport in composition space */
4914 	struct rect dst = { 0 }; /* stream addressable area */
4915 
4916 	/* No mode; nothing to be done. */
4917 	if (!mode)
4918 		return;
4919 
4920 	/* Full screen scaling by default */
4921 	src.width = mode->hdisplay;
4922 	src.height = mode->vdisplay;
4923 	dst.width = stream->timing.h_addressable;
4924 	dst.height = stream->timing.v_addressable;
4925 
4926 	if (dm_state) {
4927 		rmx_type = dm_state->scaling;
4928 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4929 			if (src.width * dst.height <
4930 					src.height * dst.width) {
4931 				/* height needs less upscaling/more downscaling */
4932 				dst.width = src.width *
4933 						dst.height / src.height;
4934 			} else {
4935 				/* width needs less upscaling/more downscaling */
4936 				dst.height = src.height *
4937 						dst.width / src.width;
4938 			}
4939 		} else if (rmx_type == RMX_CENTER) {
4940 			dst = src;
4941 		}
4942 
4943 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4944 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4945 
4946 		if (dm_state->underscan_enable) {
4947 			dst.x += dm_state->underscan_hborder / 2;
4948 			dst.y += dm_state->underscan_vborder / 2;
4949 			dst.width -= dm_state->underscan_hborder;
4950 			dst.height -= dm_state->underscan_vborder;
4951 		}
4952 	}
4953 
4954 	stream->src = src;
4955 	stream->dst = dst;
4956 
4957 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4958 		      dst.x, dst.y, dst.width, dst.height);
4959 
4960 }
4961 
4962 static enum dc_color_depth
4963 convert_color_depth_from_display_info(const struct drm_connector *connector,
4964 				      bool is_y420, int requested_bpc)
4965 {
4966 	uint8_t bpc;
4967 
4968 	if (is_y420) {
4969 		bpc = 8;
4970 
4971 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4972 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4973 			bpc = 16;
4974 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4975 			bpc = 12;
4976 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4977 			bpc = 10;
4978 	} else {
4979 		bpc = (uint8_t)connector->display_info.bpc;
4980 		/* Assume 8 bpc by default if no bpc is specified. */
4981 		bpc = bpc ? bpc : 8;
4982 	}
4983 
4984 	if (requested_bpc > 0) {
4985 		/*
4986 		 * Cap display bpc based on the user requested value.
4987 		 *
4988 		 * The value for state->max_bpc may not be correctly updated
4989 		 * depending on when the connector gets added to the state
4990 		 * or if this was called outside of atomic check, so it
4991 		 * can't be used directly.
4992 		 */
4993 		bpc = min_t(u8, bpc, requested_bpc);
4994 
4995 		/* Round down to the nearest even number. */
4996 		bpc = bpc - (bpc & 1);
4997 	}
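
	/*
	 * Illustrative (editorial): a native 12 bpc display with
	 * requested_bpc = 11 is first capped to 11, then rounded down to
	 * the even value 10.
	 */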
4998 
4999 	switch (bpc) {
5000 	case 0:
5001 		/*
5002 		 * Temporary workaround: DRM doesn't parse color depth for
5003 		 * EDID revisions before 1.4.
5004 		 * TODO: fix EDID parsing
5005 		 */
5006 		return COLOR_DEPTH_888;
5007 	case 6:
5008 		return COLOR_DEPTH_666;
5009 	case 8:
5010 		return COLOR_DEPTH_888;
5011 	case 10:
5012 		return COLOR_DEPTH_101010;
5013 	case 12:
5014 		return COLOR_DEPTH_121212;
5015 	case 14:
5016 		return COLOR_DEPTH_141414;
5017 	case 16:
5018 		return COLOR_DEPTH_161616;
5019 	default:
5020 		return COLOR_DEPTH_UNDEFINED;
5021 	}
5022 }
5023 
5024 static enum dc_aspect_ratio
5025 get_aspect_ratio(const struct drm_display_mode *mode_in)
5026 {
5027 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5028 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5029 }
5030 
5031 static enum dc_color_space
5032 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5033 {
5034 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5035 
5036 	switch (dc_crtc_timing->pixel_encoding)	{
5037 	case PIXEL_ENCODING_YCBCR422:
5038 	case PIXEL_ENCODING_YCBCR444:
5039 	case PIXEL_ENCODING_YCBCR420:
5040 	{
5041 		/*
5042 		 * 27030 kHz is the separation point between HDTV and SDTV
5043 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5044 		 * respectively.
5045 		 */
5046 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5047 			if (dc_crtc_timing->flags.Y_ONLY)
5048 				color_space =
5049 					COLOR_SPACE_YCBCR709_LIMITED;
5050 			else
5051 				color_space = COLOR_SPACE_YCBCR709;
5052 		} else {
5053 			if (dc_crtc_timing->flags.Y_ONLY)
5054 				color_space =
5055 					COLOR_SPACE_YCBCR601_LIMITED;
5056 			else
5057 				color_space = COLOR_SPACE_YCBCR601;
5058 		}
5059 
5060 	}
5061 	break;
5062 	case PIXEL_ENCODING_RGB:
5063 		color_space = COLOR_SPACE_SRGB;
5064 		break;
5065 
5066 	default:
5067 		WARN_ON(1);
5068 		break;
5069 	}
5070 
5071 	return color_space;
5072 }
5073 
5074 static bool adjust_colour_depth_from_display_info(
5075 	struct dc_crtc_timing *timing_out,
5076 	const struct drm_display_info *info)
5077 {
5078 	enum dc_color_depth depth = timing_out->display_color_depth;
5079 	int normalized_clk;
5080 	do {
5081 		normalized_clk = timing_out->pix_clk_100hz / 10;
5082 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5083 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5084 			normalized_clk /= 2;
5085 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
5086 		switch (depth) {
5087 		case COLOR_DEPTH_888:
5088 			break;
5089 		case COLOR_DEPTH_101010:
5090 			normalized_clk = (normalized_clk * 30) / 24;
5091 			break;
5092 		case COLOR_DEPTH_121212:
5093 			normalized_clk = (normalized_clk * 36) / 24;
5094 			break;
5095 		case COLOR_DEPTH_161616:
5096 			normalized_clk = (normalized_clk * 48) / 24;
5097 			break;
5098 		default:
5099 			/* The above depths are the only ones valid for HDMI. */
5100 			return false;
5101 		}
5102 		if (normalized_clk <= info->max_tmds_clock) {
5103 			timing_out->display_color_depth = depth;
5104 			return true;
5105 		}
5106 	} while (--depth > COLOR_DEPTH_666);
5107 	return false;
5108 }
5109 
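/*
 * Translate a drm_display_mode plus connector state into DC's
 * dc_crtc_timing: pixel encoding, colour depth, sync polarities, VIC and
 * the h/v addressable/blanking geometry. When old_stream is given, its
 * VIC and sync polarities are carried over so a scaled mode keeps the
 * original timing identity.
 */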
5110 static void fill_stream_properties_from_drm_display_mode(
5111 	struct dc_stream_state *stream,
5112 	const struct drm_display_mode *mode_in,
5113 	const struct drm_connector *connector,
5114 	const struct drm_connector_state *connector_state,
5115 	const struct dc_stream_state *old_stream,
5116 	int requested_bpc)
5117 {
5118 	struct dc_crtc_timing *timing_out = &stream->timing;
5119 	const struct drm_display_info *info = &connector->display_info;
5120 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5121 	struct hdmi_vendor_infoframe hv_frame;
5122 	struct hdmi_avi_infoframe avi_frame;
5123 
5124 	memset(&hv_frame, 0, sizeof(hv_frame));
5125 	memset(&avi_frame, 0, sizeof(avi_frame));
5126 
5127 	timing_out->h_border_left = 0;
5128 	timing_out->h_border_right = 0;
5129 	timing_out->v_border_top = 0;
5130 	timing_out->v_border_bottom = 0;
5131 	/* TODO: un-hardcode */
5132 	if (drm_mode_is_420_only(info, mode_in)
5133 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5134 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5135 	else if (drm_mode_is_420_also(info, mode_in)
5136 			&& aconnector->force_yuv420_output)
5137 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5138 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5139 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5140 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5141 	else
5142 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5143 
5144 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5145 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5146 		connector,
5147 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5148 		requested_bpc);
5149 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5150 	timing_out->hdmi_vic = 0;
5151 
5152 	if (old_stream) {
5153 		timing_out->vic = old_stream->timing.vic;
5154 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5155 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5156 	} else {
5157 		timing_out->vic = drm_match_cea_mode(mode_in);
5158 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5159 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5160 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5161 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5162 	}
5163 
5164 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5165 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5166 		timing_out->vic = avi_frame.video_code;
5167 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5168 		timing_out->hdmi_vic = hv_frame.vic;
5169 	}
5170 
5171 	if (is_freesync_video_mode(mode_in, aconnector)) {
5172 		timing_out->h_addressable = mode_in->hdisplay;
5173 		timing_out->h_total = mode_in->htotal;
5174 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5175 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5176 		timing_out->v_total = mode_in->vtotal;
5177 		timing_out->v_addressable = mode_in->vdisplay;
5178 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5179 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5180 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5181 	} else {
5182 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5183 		timing_out->h_total = mode_in->crtc_htotal;
5184 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5185 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5186 		timing_out->v_total = mode_in->crtc_vtotal;
5187 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5188 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5189 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5190 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5191 	}
5192 
5193 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5194 
5195 	stream->output_color_space = get_output_color_space(timing_out);
5196 
5197 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5198 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5199 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5200 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5201 		    drm_mode_is_420_also(info, mode_in) &&
5202 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5203 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5204 			adjust_colour_depth_from_display_info(timing_out, info);
5205 		}
5206 	}
5207 }
5208 
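/*
 * Copy the audio capabilities parsed from the EDID (CEA revision >= 3
 * short audio descriptors) into DC's audio_info, along with the
 * progressive video/audio latencies advertised by the sink.
 */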
5209 static void fill_audio_info(struct audio_info *audio_info,
5210 			    const struct drm_connector *drm_connector,
5211 			    const struct dc_sink *dc_sink)
5212 {
5213 	int i = 0;
5214 	int cea_revision = 0;
5215 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5216 
5217 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5218 	audio_info->product_id = edid_caps->product_id;
5219 
5220 	cea_revision = drm_connector->display_info.cea_rev;
5221 
5222 	strscpy(audio_info->display_name,
5223 		edid_caps->display_name,
5224 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5225 
5226 	if (cea_revision >= 3) {
5227 		audio_info->mode_count = edid_caps->audio_mode_count;
5228 
5229 		for (i = 0; i < audio_info->mode_count; ++i) {
5230 			audio_info->modes[i].format_code =
5231 					(enum audio_format_code)
5232 					(edid_caps->audio_modes[i].format_code);
5233 			audio_info->modes[i].channel_count =
5234 					edid_caps->audio_modes[i].channel_count;
5235 			audio_info->modes[i].sample_rates.all =
5236 					edid_caps->audio_modes[i].sample_rate;
5237 			audio_info->modes[i].sample_size =
5238 					edid_caps->audio_modes[i].sample_size;
5239 		}
5240 	}
5241 
5242 	audio_info->flags.all = edid_caps->speaker_flags;
5243 
5244 	/* TODO: We only check progressive mode; check interlaced mode too */
5245 	if (drm_connector->latency_present[0]) {
5246 		audio_info->video_latency = drm_connector->video_latency[0];
5247 		audio_info->audio_latency = drm_connector->audio_latency[0];
5248 	}
5249 
5250 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5251 
5252 }
5253 
5254 static void
5255 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5256 				      struct drm_display_mode *dst_mode)
5257 {
5258 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5259 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5260 	dst_mode->crtc_clock = src_mode->crtc_clock;
5261 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5262 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5263 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5264 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5265 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5266 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5267 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5268 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5269 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5270 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5271 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5272 }
5273 
5274 static void
5275 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5276 					const struct drm_display_mode *native_mode,
5277 					bool scale_enabled)
5278 {
5279 	if (scale_enabled) {
5280 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5281 	} else if (native_mode->clock == drm_mode->clock &&
5282 			native_mode->htotal == drm_mode->htotal &&
5283 			native_mode->vtotal == drm_mode->vtotal) {
5284 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5285 	} else {
5286 		/* no scaling and no amdgpu-inserted mode: no need to patch */
5287 	}
5288 }
5289 
5290 static struct dc_sink *
5291 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5292 {
5293 	struct dc_sink_init_data sink_init_data = { 0 };
5294 	struct dc_sink *sink = NULL;
5295 	sink_init_data.link = aconnector->dc_link;
5296 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5297 
5298 	sink = dc_sink_create(&sink_init_data);
5299 	if (!sink) {
5300 		DRM_ERROR("Failed to create sink!\n");
5301 		return NULL;
5302 	}
5303 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5304 
5305 	return sink;
5306 }
5307 
5308 static void set_multisync_trigger_params(
5309 		struct dc_stream_state *stream)
5310 {
5311 	struct dc_stream_state *master = NULL;
5312 
5313 	if (stream->triggered_crtc_reset.enabled) {
5314 		master = stream->triggered_crtc_reset.event_source;
5315 		stream->triggered_crtc_reset.event =
5316 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5317 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5318 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5319 	}
5320 }
5321 
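/*
 * The stream with the highest refresh rate becomes the multisync master;
 * refresh is derived as pix_clk / (h_total * v_total). Illustrative
 * numbers: 148500 kHz / (2200 * 1125) = 60 Hz for 1080p60.
 */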
5322 static void set_master_stream(struct dc_stream_state *stream_set[],
5323 			      int stream_count)
5324 {
5325 	int j, highest_rfr = 0, master_stream = 0;
5326 
5327 	for (j = 0;  j < stream_count; j++) {
5328 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5329 			int refresh_rate = 0;
5330 
5331 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5332 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5333 			if (refresh_rate > highest_rfr) {
5334 				highest_rfr = refresh_rate;
5335 				master_stream = j;
5336 			}
5337 		}
5338 	}
5339 	for (j = 0;  j < stream_count; j++) {
5340 		if (stream_set[j])
5341 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5342 	}
5343 }
5344 
5345 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5346 {
5347 	int i = 0;
5348 	struct dc_stream_state *stream;
5349 
5350 	if (context->stream_count < 2)
5351 		return;
5352 	for (i = 0; i < context->stream_count ; i++) {
5353 		if (!context->streams[i])
5354 			continue;
5355 		/*
5356 		 * TODO: add a function to read AMD VSDB bits and set
5357 		 * crtc_sync_master.multi_sync_enabled flag
5358 		 * For now it's set to false
5359 		 */
5360 	}
5361 
5362 	set_master_stream(context->streams, context->stream_count);
5363 
5364 	for (i = 0; i < context->stream_count ; i++) {
5365 		stream = context->streams[i];
5366 
5367 		if (!stream)
5368 			continue;
5369 
5370 		set_multisync_trigger_params(stream);
5371 	}
5372 }
5373 
5374 static struct drm_display_mode *
5375 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5376 			  bool use_probed_modes)
5377 {
5378 	struct drm_display_mode *m, *m_pref = NULL;
5379 	u16 current_refresh, highest_refresh;
5380 	struct list_head *list_head = use_probed_modes ?
5381 						    &aconnector->base.probed_modes :
5382 						    &aconnector->base.modes;
5383 
5384 	if (aconnector->freesync_vid_base.clock != 0)
5385 		return &aconnector->freesync_vid_base;
5386 
5387 	/* Find the preferred mode */
5388 	list_for_each_entry (m, list_head, head) {
5389 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5390 			m_pref = m;
5391 			break;
5392 		}
5393 	}
5394 
5395 	if (!m_pref) {
5396 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
5397 		m_pref = list_first_entry_or_null(
5398 			&aconnector->base.modes, struct drm_display_mode, head);
5399 		if (!m_pref) {
5400 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5401 			return NULL;
5402 		}
5403 	}
5404 
5405 	highest_refresh = drm_mode_vrefresh(m_pref);
5406 
5407 	/*
5408 	 * Find the mode with the highest refresh rate at the same resolution.
5409 	 * For some monitors, the preferred mode is not the one with the
5410 	 * highest supported refresh rate.
5411 	 */
5412 	list_for_each_entry (m, list_head, head) {
5413 		current_refresh  = drm_mode_vrefresh(m);
5414 
5415 		if (m->hdisplay == m_pref->hdisplay &&
5416 		    m->vdisplay == m_pref->vdisplay &&
5417 		    highest_refresh < current_refresh) {
5418 			highest_refresh = current_refresh;
5419 			m_pref = m;
5420 		}
5421 	}
5422 
5423 	aconnector->freesync_vid_base = *m_pref;
5424 	return m_pref;
5425 }
5426 
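/*
 * A mode qualifies as a freesync video mode when it matches the cached
 * highest-refresh base mode in everything except vtotal, with the whole
 * difference absorbed by the vertical front porch (same vsync width and
 * same back porch).
 */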
5427 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5428 				   struct amdgpu_dm_connector *aconnector)
5429 {
5430 	struct drm_display_mode *high_mode;
5431 	int timing_diff;
5432 
5433 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5434 	if (!high_mode || !mode)
5435 		return false;
5436 
5437 	timing_diff = high_mode->vtotal - mode->vtotal;
5438 
5439 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5440 	    high_mode->hdisplay != mode->hdisplay ||
5441 	    high_mode->vdisplay != mode->vdisplay ||
5442 	    high_mode->hsync_start != mode->hsync_start ||
5443 	    high_mode->hsync_end != mode->hsync_end ||
5444 	    high_mode->htotal != mode->htotal ||
5445 	    high_mode->hskew != mode->hskew ||
5446 	    high_mode->vscan != mode->vscan ||
5447 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5448 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5449 		return false;
5450 	else
5451 		return true;
5452 }
5453 
5454 static struct dc_stream_state *
5455 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5456 		       const struct drm_display_mode *drm_mode,
5457 		       const struct dm_connector_state *dm_state,
5458 		       const struct dc_stream_state *old_stream,
5459 		       int requested_bpc)
5460 {
5461 	struct drm_display_mode *preferred_mode = NULL;
5462 	struct drm_connector *drm_connector;
5463 	const struct drm_connector_state *con_state =
5464 		dm_state ? &dm_state->base : NULL;
5465 	struct dc_stream_state *stream = NULL;
5466 	struct drm_display_mode mode = *drm_mode;
5467 	struct drm_display_mode saved_mode;
5468 	struct drm_display_mode *freesync_mode = NULL;
5469 	bool native_mode_found = false;
5470 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5471 	int mode_refresh;
5472 	int preferred_refresh = 0;
5473 #if defined(CONFIG_DRM_AMD_DC_DCN)
5474 	struct dsc_dec_dpcd_caps dsc_caps;
5475 	uint32_t link_bandwidth_kbps;
5476 #endif
5477 	struct dc_sink *sink = NULL;
5478 
5479 	memset(&saved_mode, 0, sizeof(saved_mode));
5480 
5481 	if (aconnector == NULL) {
5482 		DRM_ERROR("aconnector is NULL!\n");
5483 		return stream;
5484 	}
5485 
5486 	drm_connector = &aconnector->base;
5487 
5488 	if (!aconnector->dc_sink) {
5489 		sink = create_fake_sink(aconnector);
5490 		if (!sink)
5491 			return stream;
5492 	} else {
5493 		sink = aconnector->dc_sink;
5494 		dc_sink_retain(sink);
5495 	}
5496 
5497 	stream = dc_create_stream_for_sink(sink);
5498 
5499 	if (stream == NULL) {
5500 		DRM_ERROR("Failed to create stream for sink!\n");
5501 		goto finish;
5502 	}
5503 
5504 	stream->dm_stream_context = aconnector;
5505 
5506 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5507 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5508 
5509 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5510 		/* Search for preferred mode */
5511 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5512 			native_mode_found = true;
5513 			break;
5514 		}
5515 	}
5516 	if (!native_mode_found)
5517 		preferred_mode = list_first_entry_or_null(
5518 				&aconnector->base.modes,
5519 				struct drm_display_mode,
5520 				head);
5521 
5522 	mode_refresh = drm_mode_vrefresh(&mode);
5523 
5524 	if (preferred_mode == NULL) {
5525 		/*
5526 		 * This may not be an error: the use case is when we have no
5527 		 * usermode calls to reset and set the mode upon hotplug. In
5528 		 * that case we call set mode ourselves to restore the previous
5529 		 * mode, and the mode list may not yet be filled in.
5530 		 */
5531 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5532 	} else {
5533 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5534 				 is_freesync_video_mode(&mode, aconnector);
5535 		if (recalculate_timing) {
5536 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5537 			saved_mode = mode;
5538 			mode = *freesync_mode;
5539 		} else {
5540 			decide_crtc_timing_for_drm_display_mode(
5541 				&mode, preferred_mode,
5542 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5543 		}
5544 
5545 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5546 	}
5547 
5548 	if (recalculate_timing)
5549 		drm_mode_set_crtcinfo(&saved_mode, 0);
5550 	else if (!dm_state)
5551 		drm_mode_set_crtcinfo(&mode, 0);
5552 
5553 	/*
5554 	 * If scaling is enabled and the refresh rate didn't change,
5555 	 * we copy the VIC and polarities of the old timings.
5556 	 */
5557 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5558 		fill_stream_properties_from_drm_display_mode(
5559 			stream, &mode, &aconnector->base, con_state, NULL,
5560 			requested_bpc);
5561 	else
5562 		fill_stream_properties_from_drm_display_mode(
5563 			stream, &mode, &aconnector->base, con_state, old_stream,
5564 			requested_bpc);
5565 
5566 	stream->timing.flags.DSC = 0;
5567 
5568 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5569 #if defined(CONFIG_DRM_AMD_DC_DCN)
5570 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5571 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5572 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5573 				      &dsc_caps);
5574 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5575 							     dc_link_get_link_cap(aconnector->dc_link));
5576 
5577 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5578 			/* Set DSC policy according to dsc_clock_en */
5579 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5580 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5581 
5582 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5583 						  &dsc_caps,
5584 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5585 						  0,
5586 						  link_bandwidth_kbps,
5587 						  &stream->timing,
5588 						  &stream->timing.dsc_cfg))
5589 				stream->timing.flags.DSC = 1;
5590 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5591 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5592 				stream->timing.flags.DSC = 1;
5593 
5594 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5595 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5596 
5597 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5598 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5599 
5600 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5601 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5602 		}
5603 #endif
5604 	}
5605 
5606 	update_stream_scaling_settings(&mode, dm_state, stream);
5607 
5608 	fill_audio_info(
5609 		&stream->audio_info,
5610 		drm_connector,
5611 		sink);
5612 
5613 	update_stream_signal(stream, sink);
5614 
5615 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5616 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5617 
5618 	if (stream->link->psr_settings.psr_feature_enabled) {
5619 		/*
5620 		 * Decide whether the stream supports the VSC SDP colorimetry
5621 		 * capability before building the VSC info packet.
5622 		 */
5623 		stream->use_vsc_sdp_for_colorimetry = false;
5624 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5625 			stream->use_vsc_sdp_for_colorimetry =
5626 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5627 		} else {
5628 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5629 				stream->use_vsc_sdp_for_colorimetry = true;
5630 		}
5631 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5632 	}
5633 finish:
5634 	dc_sink_release(sink);
5635 
5636 	return stream;
5637 }
5638 
5639 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5640 {
5641 	drm_crtc_cleanup(crtc);
5642 	kfree(crtc);
5643 }
5644 
5645 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5646 				  struct drm_crtc_state *state)
5647 {
5648 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5649 
5650 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
5651 	if (cur->stream)
5652 		dc_stream_release(cur->stream);
5653 
5655 	__drm_atomic_helper_crtc_destroy_state(state);
5656 
5658 	kfree(state);
5659 }
5660 
5661 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5662 {
5663 	struct dm_crtc_state *state;
5664 
5665 	if (crtc->state)
5666 		dm_crtc_destroy_state(crtc, crtc->state);
5667 
5668 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5669 	if (WARN_ON(!state))
5670 		return;
5671 
5672 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5673 }
5674 
5675 static struct drm_crtc_state *
5676 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5677 {
5678 	struct dm_crtc_state *state, *cur;
5679 
5680 	if (WARN_ON(!crtc->state))
5681 		return NULL;
5682 
5683 	cur = to_dm_crtc_state(crtc->state);
5684 
5685 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5686 	if (!state)
5687 		return NULL;
5688 
5689 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5690 
5691 	if (cur->stream) {
5692 		state->stream = cur->stream;
5693 		dc_stream_retain(state->stream);
5694 	}
5695 
5696 	state->active_planes = cur->active_planes;
5697 	state->vrr_infopacket = cur->vrr_infopacket;
5698 	state->abm_level = cur->abm_level;
5699 	state->vrr_supported = cur->vrr_supported;
5700 	state->freesync_config = cur->freesync_config;
5701 	state->cm_has_degamma = cur->cm_has_degamma;
5702 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5703 	/* TODO: Duplicate dc_stream once the stream object is flattened */
5704 
5705 	return &state->base;
5706 }
5707 
5708 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5709 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5710 {
5711 	crtc_debugfs_init(crtc);
5712 
5713 	return 0;
5714 }
5715 #endif
5716 
5717 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5718 {
5719 	enum dc_irq_source irq_source;
5720 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5721 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5722 	int rc;
5723 
5724 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5725 
5726 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5727 
5728 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5729 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5730 	return rc;
5731 }
5732 
5733 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5734 {
5735 	enum dc_irq_source irq_source;
5736 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5737 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5738 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5739 #if defined(CONFIG_DRM_AMD_DC_DCN)
5740 	struct amdgpu_display_manager *dm = &adev->dm;
5741 	unsigned long flags;
5742 #endif
5743 	int rc = 0;
5744 
5745 	if (enable) {
5746 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5747 		if (amdgpu_dm_vrr_active(acrtc_state))
5748 			rc = dm_set_vupdate_irq(crtc, true);
5749 	} else {
5750 		/* vblank irq off -> vupdate irq off */
5751 		rc = dm_set_vupdate_irq(crtc, false);
5752 	}
5753 
5754 	if (rc)
5755 		return rc;
5756 
5757 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5758 
5759 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5760 		return -EBUSY;
5761 
5762 	if (amdgpu_in_reset(adev))
5763 		return 0;
5764 
5765 #if defined(CONFIG_DRM_AMD_DC_DCN)
5766 	spin_lock_irqsave(&dm->vblank_lock, flags);
5767 	dm->vblank_workqueue->dm = dm;
5768 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5769 	dm->vblank_workqueue->enable = enable;
5770 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5771 	schedule_work(&dm->vblank_workqueue->mall_work);
5772 #endif
5773 
5774 	return 0;
5775 }
5776 
5777 static int dm_enable_vblank(struct drm_crtc *crtc)
5778 {
5779 	return dm_set_vblank(crtc, true);
5780 }
5781 
5782 static void dm_disable_vblank(struct drm_crtc *crtc)
5783 {
5784 	dm_set_vblank(crtc, false);
5785 }
5786 
5787 /* Only the options currently available for the driver are implemented */
5788 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5789 	.reset = dm_crtc_reset_state,
5790 	.destroy = amdgpu_dm_crtc_destroy,
5791 	.set_config = drm_atomic_helper_set_config,
5792 	.page_flip = drm_atomic_helper_page_flip,
5793 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5794 	.atomic_destroy_state = dm_crtc_destroy_state,
5795 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5796 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5797 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5798 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5799 	.enable_vblank = dm_enable_vblank,
5800 	.disable_vblank = dm_disable_vblank,
5801 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5802 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5803 	.late_register = amdgpu_dm_crtc_late_register,
5804 #endif
5805 };
5806 
5807 static enum drm_connector_status
5808 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5809 {
5810 	bool connected;
5811 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5812 
5813 	/*
5814 	 * Notes:
5815 	 * 1. This interface is NOT called in context of HPD irq.
5816 	 * 2. This interface *is called* in the context of a user-mode ioctl,
5817 	 * which makes it a bad place for *any* MST-related activity.
5818 	 */
5819 
5820 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5821 	    !aconnector->fake_enable)
5822 		connected = (aconnector->dc_sink != NULL);
5823 	else
5824 		connected = (aconnector->base.force == DRM_FORCE_ON);
5825 
5826 	update_subconnector_property(aconnector);
5827 
5828 	return (connected ? connector_status_connected :
5829 			connector_status_disconnected);
5830 }
5831 
5832 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5833 					    struct drm_connector_state *connector_state,
5834 					    struct drm_property *property,
5835 					    uint64_t val)
5836 {
5837 	struct drm_device *dev = connector->dev;
5838 	struct amdgpu_device *adev = drm_to_adev(dev);
5839 	struct dm_connector_state *dm_old_state =
5840 		to_dm_connector_state(connector->state);
5841 	struct dm_connector_state *dm_new_state =
5842 		to_dm_connector_state(connector_state);
5843 
5844 	int ret = -EINVAL;
5845 
5846 	if (property == dev->mode_config.scaling_mode_property) {
5847 		enum amdgpu_rmx_type rmx_type;
5848 
5849 		switch (val) {
5850 		case DRM_MODE_SCALE_CENTER:
5851 			rmx_type = RMX_CENTER;
5852 			break;
5853 		case DRM_MODE_SCALE_ASPECT:
5854 			rmx_type = RMX_ASPECT;
5855 			break;
5856 		case DRM_MODE_SCALE_FULLSCREEN:
5857 			rmx_type = RMX_FULL;
5858 			break;
5859 		case DRM_MODE_SCALE_NONE:
5860 		default:
5861 			rmx_type = RMX_OFF;
5862 			break;
5863 		}
5864 
5865 		if (dm_old_state->scaling == rmx_type)
5866 			return 0;
5867 
5868 		dm_new_state->scaling = rmx_type;
5869 		ret = 0;
5870 	} else if (property == adev->mode_info.underscan_hborder_property) {
5871 		dm_new_state->underscan_hborder = val;
5872 		ret = 0;
5873 	} else if (property == adev->mode_info.underscan_vborder_property) {
5874 		dm_new_state->underscan_vborder = val;
5875 		ret = 0;
5876 	} else if (property == adev->mode_info.underscan_property) {
5877 		dm_new_state->underscan_enable = val;
5878 		ret = 0;
5879 	} else if (property == adev->mode_info.abm_level_property) {
5880 		dm_new_state->abm_level = val;
5881 		ret = 0;
5882 	}
5883 
5884 	return ret;
5885 }
5886 
5887 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5888 					    const struct drm_connector_state *state,
5889 					    struct drm_property *property,
5890 					    uint64_t *val)
5891 {
5892 	struct drm_device *dev = connector->dev;
5893 	struct amdgpu_device *adev = drm_to_adev(dev);
5894 	struct dm_connector_state *dm_state =
5895 		to_dm_connector_state(state);
5896 	int ret = -EINVAL;
5897 
5898 	if (property == dev->mode_config.scaling_mode_property) {
5899 		switch (dm_state->scaling) {
5900 		case RMX_CENTER:
5901 			*val = DRM_MODE_SCALE_CENTER;
5902 			break;
5903 		case RMX_ASPECT:
5904 			*val = DRM_MODE_SCALE_ASPECT;
5905 			break;
5906 		case RMX_FULL:
5907 			*val = DRM_MODE_SCALE_FULLSCREEN;
5908 			break;
5909 		case RMX_OFF:
5910 		default:
5911 			*val = DRM_MODE_SCALE_NONE;
5912 			break;
5913 		}
5914 		ret = 0;
5915 	} else if (property == adev->mode_info.underscan_hborder_property) {
5916 		*val = dm_state->underscan_hborder;
5917 		ret = 0;
5918 	} else if (property == adev->mode_info.underscan_vborder_property) {
5919 		*val = dm_state->underscan_vborder;
5920 		ret = 0;
5921 	} else if (property == adev->mode_info.underscan_property) {
5922 		*val = dm_state->underscan_enable;
5923 		ret = 0;
5924 	} else if (property == adev->mode_info.abm_level_property) {
5925 		*val = dm_state->abm_level;
5926 		ret = 0;
5927 	}
5928 
5929 	return ret;
5930 }
5931 
5932 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5933 {
5934 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5935 
5936 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5937 }
5938 
5939 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5940 {
5941 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5942 	const struct dc_link *link = aconnector->dc_link;
5943 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5944 	struct amdgpu_display_manager *dm = &adev->dm;
5945 
5946 	/*
5947 	 * Call only if mst_mgr was initialized before, since it's not done
5948 	 * for all connector types.
5949 	 */
5950 	if (aconnector->mst_mgr.dev)
5951 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5952 
5953 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5954 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5955 
5956 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5957 	    link->type != dc_connection_none &&
5958 	    dm->backlight_dev) {
5959 		backlight_device_unregister(dm->backlight_dev);
5960 		dm->backlight_dev = NULL;
5961 	}
5962 #endif
5963 
5964 	if (aconnector->dc_em_sink)
5965 		dc_sink_release(aconnector->dc_em_sink);
5966 	aconnector->dc_em_sink = NULL;
5967 	if (aconnector->dc_sink)
5968 		dc_sink_release(aconnector->dc_sink);
5969 	aconnector->dc_sink = NULL;
5970 
5971 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5972 	drm_connector_unregister(connector);
5973 	drm_connector_cleanup(connector);
5974 	if (aconnector->i2c) {
5975 		i2c_del_adapter(&aconnector->i2c->base);
5976 		kfree(aconnector->i2c);
5977 	}
5978 	kfree(aconnector->dm_dp_aux.aux.name);
5979 
5980 	kfree(connector);
5981 }
5982 
5983 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5984 {
5985 	struct dm_connector_state *state =
5986 		to_dm_connector_state(connector->state);
5987 
5988 	if (connector->state)
5989 		__drm_atomic_helper_connector_destroy_state(connector->state);
5990 
5991 	kfree(state);
5992 
5993 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5994 
5995 	if (state) {
5996 		state->scaling = RMX_OFF;
5997 		state->underscan_enable = false;
5998 		state->underscan_hborder = 0;
5999 		state->underscan_vborder = 0;
6000 		state->base.max_requested_bpc = 8;
6001 		state->vcpi_slots = 0;
6002 		state->pbn = 0;
6003 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6004 			state->abm_level = amdgpu_dm_abm_level;
6005 
6006 		__drm_atomic_helper_connector_reset(connector, &state->base);
6007 	}
6008 }
6009 
6010 struct drm_connector_state *
6011 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6012 {
6013 	struct dm_connector_state *state =
6014 		to_dm_connector_state(connector->state);
6015 
6016 	struct dm_connector_state *new_state =
6017 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6018 
6019 	if (!new_state)
6020 		return NULL;
6021 
6022 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6023 
6024 	new_state->freesync_capable = state->freesync_capable;
6025 	new_state->abm_level = state->abm_level;
6026 	new_state->scaling = state->scaling;
6027 	new_state->underscan_enable = state->underscan_enable;
6028 	new_state->underscan_hborder = state->underscan_hborder;
6029 	new_state->underscan_vborder = state->underscan_vborder;
6030 	new_state->vcpi_slots = state->vcpi_slots;
6031 	new_state->pbn = state->pbn;
6032 	return &new_state->base;
6033 }
6034 
6035 static int
6036 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6037 {
6038 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6039 		to_amdgpu_dm_connector(connector);
6040 	int r;
6041 
6042 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6043 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6044 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6045 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6046 		if (r)
6047 			return r;
6048 	}
6049 
6050 #if defined(CONFIG_DEBUG_FS)
6051 	connector_debugfs_init(amdgpu_dm_connector);
6052 #endif
6053 
6054 	return 0;
6055 }
6056 
6057 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6058 	.reset = amdgpu_dm_connector_funcs_reset,
6059 	.detect = amdgpu_dm_connector_detect,
6060 	.fill_modes = drm_helper_probe_single_connector_modes,
6061 	.destroy = amdgpu_dm_connector_destroy,
6062 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6063 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6064 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6065 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6066 	.late_register = amdgpu_dm_connector_late_register,
6067 	.early_unregister = amdgpu_dm_connector_unregister
6068 };
6069 
6070 static int get_modes(struct drm_connector *connector)
6071 {
6072 	return amdgpu_dm_connector_get_modes(connector);
6073 }
6074 
6075 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6076 {
6077 	struct dc_sink_init_data init_params = {
6078 			.link = aconnector->dc_link,
6079 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6080 	};
6081 	struct edid *edid;
6082 
6083 	if (!aconnector->base.edid_blob_ptr) {
6084 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6085 				aconnector->base.name);
6086 
6087 		aconnector->base.force = DRM_FORCE_OFF;
6088 		aconnector->base.override_edid = false;
6089 		return;
6090 	}
6091 
6092 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6093 
6094 	aconnector->edid = edid;
6095 
6096 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6097 		aconnector->dc_link,
6098 		(uint8_t *)edid,
6099 		(edid->extensions + 1) * EDID_LENGTH,
6100 		&init_params);
6101 
6102 	if (aconnector->base.force == DRM_FORCE_ON) {
6103 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6104 		aconnector->dc_link->local_sink :
6105 		aconnector->dc_em_sink;
6106 		dc_sink_retain(aconnector->dc_sink);
6107 	}
6108 }
6109 
6110 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6111 {
6112 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6113 
6114 	/*
6115 	 * In case of a headless boot with force-on for a DP-managed connector,
6116 	 * these settings have to be != 0 to get an initial modeset.
6117 	 */
6118 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6119 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6120 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6121 	}
6122 
6124 	aconnector->base.override_edid = true;
6125 	create_eml_sink(aconnector);
6126 }
6127 
6128 static struct dc_stream_state *
6129 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6130 				const struct drm_display_mode *drm_mode,
6131 				const struct dm_connector_state *dm_state,
6132 				const struct dc_stream_state *old_stream)
6133 {
6134 	struct drm_connector *connector = &aconnector->base;
6135 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6136 	struct dc_stream_state *stream;
6137 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6138 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6139 	enum dc_status dc_result = DC_OK;
6140 
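	/*
	 * Build a stream and let DC validate it; on failure, step the
	 * requested bpc down by 2 (e.g. 10 -> 8 -> 6) and retry before
	 * giving up below 6 bpc.
	 */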
6141 	do {
6142 		stream = create_stream_for_sink(aconnector, drm_mode,
6143 						dm_state, old_stream,
6144 						requested_bpc);
6145 		if (stream == NULL) {
6146 			DRM_ERROR("Failed to create stream for sink!\n");
6147 			break;
6148 		}
6149 
6150 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6151 
6152 		if (dc_result != DC_OK) {
6153 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6154 				      drm_mode->hdisplay,
6155 				      drm_mode->vdisplay,
6156 				      drm_mode->clock,
6157 				      dc_result,
6158 				      dc_status_to_str(dc_result));
6159 
6160 			dc_stream_release(stream);
6161 			stream = NULL;
6162 			requested_bpc -= 2; /* lower bpc to retry validation */
6163 		}
6164 
6165 	} while (stream == NULL && requested_bpc >= 6);
6166 
6167 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6168 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6169 
6170 		aconnector->force_yuv420_output = true;
6171 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6172 						dm_state, old_stream);
6173 		aconnector->force_yuv420_output = false;
6174 	}
6175 
6176 	return stream;
6177 }
6178 
6179 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6180 				   struct drm_display_mode *mode)
6181 {
6182 	int result = MODE_ERROR;
6183 	struct dc_sink *dc_sink;
6184 	/* TODO: Unhardcode stream count */
6185 	struct dc_stream_state *stream;
6186 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6187 
6188 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6189 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6190 		return result;
6191 
6192 	/*
6193 	 * Only run this the first time mode_valid is called, to initialize
6194 	 * EDID management.
6195 	 */
6196 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6197 		!aconnector->dc_em_sink)
6198 		handle_edid_mgmt(aconnector);
6199 
6200 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6201 
6202 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6203 				aconnector->base.force != DRM_FORCE_ON) {
6204 		DRM_ERROR("dc_sink is NULL!\n");
6205 		goto fail;
6206 	}
6207 
6208 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6209 	if (stream) {
6210 		dc_stream_release(stream);
6211 		result = MODE_OK;
6212 	}
6213 
6214 fail:
6215 	/* TODO: error handling */
6216 	return result;
6217 }
6218 
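/*
 * Pack the connector's HDR static metadata (Dynamic Range and Mastering
 * infoframe) into a DC info packet. The 26-byte payload is the same for
 * HDMI and DP; only the header and checksum handling differ.
 */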
6219 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6220 				struct dc_info_packet *out)
6221 {
6222 	struct hdmi_drm_infoframe frame;
6223 	unsigned char buf[30]; /* 26 + 4 */
6224 	ssize_t len;
6225 	int ret, i;
6226 
6227 	memset(out, 0, sizeof(*out));
6228 
6229 	if (!state->hdr_output_metadata)
6230 		return 0;
6231 
6232 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6233 	if (ret)
6234 		return ret;
6235 
6236 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6237 	if (len < 0)
6238 		return (int)len;
6239 
6240 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6241 	if (len != 30)
6242 		return -EINVAL;
6243 
6244 	/* Prepare the infopacket for DC. */
6245 	switch (state->connector->connector_type) {
6246 	case DRM_MODE_CONNECTOR_HDMIA:
6247 		out->hb0 = 0x87; /* type */
6248 		out->hb1 = 0x01; /* version */
6249 		out->hb2 = 0x1A; /* length */
6250 		out->sb[0] = buf[3]; /* checksum */
6251 		i = 1;
6252 		break;
6253 
6254 	case DRM_MODE_CONNECTOR_DisplayPort:
6255 	case DRM_MODE_CONNECTOR_eDP:
6256 		out->hb0 = 0x00; /* sdp id, zero */
6257 		out->hb1 = 0x87; /* type */
6258 		out->hb2 = 0x1D; /* payload len - 1 */
6259 		out->hb3 = (0x13 << 2); /* sdp version */
6260 		out->sb[0] = 0x01; /* version */
6261 		out->sb[1] = 0x1A; /* length */
6262 		i = 2;
6263 		break;
6264 
6265 	default:
6266 		return -EINVAL;
6267 	}
6268 
6269 	memcpy(&out->sb[i], &buf[4], 26);
6270 	out->valid = true;
6271 
6272 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6273 		       sizeof(out->sb), false);
6274 
6275 	return 0;
6276 }
6277 
6278 static bool
6279 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6280 			  const struct drm_connector_state *new_state)
6281 {
6282 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6283 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6284 
6285 	if (old_blob != new_blob) {
6286 		if (old_blob && new_blob &&
6287 		    old_blob->length == new_blob->length)
6288 			return memcmp(old_blob->data, new_blob->data,
6289 				      old_blob->length);
6290 
6291 		return true;
6292 	}
6293 
6294 	return false;
6295 }
6296 
6297 static int
6298 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6299 				 struct drm_atomic_state *state)
6300 {
6301 	struct drm_connector_state *new_con_state =
6302 		drm_atomic_get_new_connector_state(state, conn);
6303 	struct drm_connector_state *old_con_state =
6304 		drm_atomic_get_old_connector_state(state, conn);
6305 	struct drm_crtc *crtc = new_con_state->crtc;
6306 	struct drm_crtc_state *new_crtc_state;
6307 	int ret;
6308 
6309 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6310 
6311 	if (!crtc)
6312 		return 0;
6313 
6314 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6315 		struct dc_info_packet hdr_infopacket;
6316 
6317 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6318 		if (ret)
6319 			return ret;
6320 
6321 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6322 		if (IS_ERR(new_crtc_state))
6323 			return PTR_ERR(new_crtc_state);
6324 
6325 		/*
6326 		 * DC considers the stream backends changed if the
6327 		 * static metadata changes. Forcing the modeset also
6328 		 * gives a simple way for userspace to switch from
6329 		 * 8bpc to 10bpc when setting the metadata to enter
6330 		 * or exit HDR.
6331 		 *
6332 		 * Changing the static metadata after it's been
6333 		 * set is permissible, however. So only force a
6334 		 * modeset if we're entering or exiting HDR.
6335 		 */
6336 		new_crtc_state->mode_changed =
6337 			!old_con_state->hdr_output_metadata ||
6338 			!new_con_state->hdr_output_metadata;
6339 	}
6340 
6341 	return 0;
6342 }
6343 
6344 static const struct drm_connector_helper_funcs
6345 amdgpu_dm_connector_helper_funcs = {
6346 	/*
6347 	 * When hotplugging a second, bigger display in fbcon mode, the bigger
6348 	 * resolution modes are filtered out by drm_mode_validate_size() and are
6349 	 * missing after the user starts lightdm. So we need to renew the mode
6350 	 * list in the get_modes callback, not just return the mode count.
6351 	 */
6352 	.get_modes = get_modes,
6353 	.mode_valid = amdgpu_dm_connector_mode_valid,
6354 	.atomic_check = amdgpu_dm_connector_atomic_check,
6355 };
6356 
6357 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6358 {
6359 }
6360 
6361 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6362 {
6363 	struct drm_atomic_state *state = new_crtc_state->state;
6364 	struct drm_plane *plane;
6365 	int num_active = 0;
6366 
6367 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6368 		struct drm_plane_state *new_plane_state;
6369 
6370 		/* Cursor planes are "fake". */
6371 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6372 			continue;
6373 
6374 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6375 
6376 		if (!new_plane_state) {
6377 			/*
6378 			 * The plane is enabled on the CRTC and hasn't changed
6379 			 * state. This means that it previously passed
6380 			 * validation and is therefore enabled.
6381 			 */
6382 			num_active += 1;
6383 			continue;
6384 		}
6385 
6386 		/* We need a framebuffer to be considered enabled. */
6387 		num_active += (new_plane_state->fb != NULL);
6388 	}
6389 
6390 	return num_active;
6391 }
6392 
6393 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6394 					 struct drm_crtc_state *new_crtc_state)
6395 {
6396 	struct dm_crtc_state *dm_new_crtc_state =
6397 		to_dm_crtc_state(new_crtc_state);
6398 
6399 	dm_new_crtc_state->active_planes = 0;
6400 
6401 	if (!dm_new_crtc_state->stream)
6402 		return;
6403 
6404 	dm_new_crtc_state->active_planes =
6405 		count_crtc_active_planes(new_crtc_state);
6406 }
6407 
6408 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6409 				       struct drm_atomic_state *state)
6410 {
6411 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6412 									  crtc);
6413 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6414 	struct dc *dc = adev->dm.dc;
6415 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6416 	int ret = -EINVAL;
6417 
6418 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6419 
6420 	dm_update_crtc_active_planes(crtc, crtc_state);
6421 
6422 	if (unlikely(!dm_crtc_state->stream &&
6423 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6424 		WARN_ON(1);
6425 		return ret;
6426 	}
6427 
6428 	/*
6429 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6430 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6431 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6432 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6433 	 */
6434 	if (crtc_state->enable &&
6435 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6436 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6437 		return -EINVAL;
6438 	}
6439 
6440 	/* In some use cases, like reset, no stream is attached */
6441 	if (!dm_crtc_state->stream)
6442 		return 0;
6443 
6444 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6445 		return 0;
6446 
6447 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6448 	return ret;
6449 }
6450 
6451 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6452 				      const struct drm_display_mode *mode,
6453 				      struct drm_display_mode *adjusted_mode)
6454 {
6455 	return true;
6456 }
6457 
6458 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6459 	.disable = dm_crtc_helper_disable,
6460 	.atomic_check = dm_crtc_helper_atomic_check,
6461 	.mode_fixup = dm_crtc_helper_mode_fixup,
6462 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6463 };
6464 
6465 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6466 {
6467 
6468 }
6469 
6470 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6471 {
6472 	switch (display_color_depth) {
6473 	case COLOR_DEPTH_666:
6474 		return 6;
6475 	case COLOR_DEPTH_888:
6476 		return 8;
6477 	case COLOR_DEPTH_101010:
6478 		return 10;
6479 	case COLOR_DEPTH_121212:
6480 		return 12;
6481 	case COLOR_DEPTH_141414:
6482 		return 14;
6483 	case COLOR_DEPTH_161616:
6484 		return 16;
6485 	default:
6486 		break;
6487 	}
6488 	return 0;
6489 }
6490 
6491 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6492 					  struct drm_crtc_state *crtc_state,
6493 					  struct drm_connector_state *conn_state)
6494 {
6495 	struct drm_atomic_state *state = crtc_state->state;
6496 	struct drm_connector *connector = conn_state->connector;
6497 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6498 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6499 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6500 	struct drm_dp_mst_topology_mgr *mst_mgr;
6501 	struct drm_dp_mst_port *mst_port;
6502 	enum dc_color_depth color_depth;
6503 	int clock, bpp = 0;
6504 	bool is_y420 = false;
6505 
6506 	if (!aconnector->port || !aconnector->dc_sink)
6507 		return 0;
6508 
6509 	mst_port = aconnector->port;
6510 	mst_mgr = &aconnector->mst_port->mst_mgr;
6511 
6512 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6513 		return 0;
6514 
6515 	if (!state->duplicated) {
6516 		int max_bpc = conn_state->max_requested_bpc;
6517 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6518 				aconnector->force_yuv420_output;
6519 		color_depth = convert_color_depth_from_display_info(connector,
6520 								    is_y420,
6521 								    max_bpc);
6522 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6523 		clock = adjusted_mode->clock;
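		/*
		 * PBN (Payload Bandwidth Number) is the DP MST bandwidth unit
		 * of 54/64 MBps. Here bpp is in whole bits per pixel; the
		 * final argument selects DSC's 1/16-bpp units and is false.
		 */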
6524 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6525 	}
6526 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6527 									   mst_mgr,
6528 									   mst_port,
6529 									   dm_new_connector_state->pbn,
6530 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6531 	if (dm_new_connector_state->vcpi_slots < 0) {
6532 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6533 		return dm_new_connector_state->vcpi_slots;
6534 	}
6535 	return 0;
6536 }
6537 
6538 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6539 	.disable = dm_encoder_helper_disable,
6540 	.atomic_check = dm_encoder_helper_atomic_check
6541 };
6542 
6543 #if defined(CONFIG_DRM_AMD_DC_DCN)
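/*
 * For DSC-enabled MST streams the PBN must be recomputed from the DSC
 * target rate: dsc_cfg.bits_per_pixel is DC's 1/16-bpp fixed-point value,
 * so drm_dp_calc_pbn_mode() is called with dsc = true. Streams without
 * DSC keep their previously computed PBN and simply get DSC disabled on
 * the port.
 */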
6544 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6545 					    struct dc_state *dc_state)
6546 {
6547 	struct dc_stream_state *stream = NULL;
6548 	struct drm_connector *connector;
6549 	struct drm_connector_state *new_con_state, *old_con_state;
6550 	struct amdgpu_dm_connector *aconnector;
6551 	struct dm_connector_state *dm_conn_state;
6552 	int i, j, clock, bpp;
6553 	int vcpi, pbn_div, pbn = 0;
6554 
6555 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6556 
6557 		aconnector = to_amdgpu_dm_connector(connector);
6558 
6559 		if (!aconnector->port)
6560 			continue;
6561 
6562 		if (!new_con_state || !new_con_state->crtc)
6563 			continue;
6564 
6565 		dm_conn_state = to_dm_connector_state(new_con_state);
6566 
6567 		for (j = 0; j < dc_state->stream_count; j++) {
6568 			stream = dc_state->streams[j];
6569 			if (!stream)
6570 				continue;
6571 
6572 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6573 				break;
6574 
6575 			stream = NULL;
6576 		}
6577 
6578 		if (!stream)
6579 			continue;
6580 
6581 		if (stream->timing.flags.DSC != 1) {
6582 			drm_dp_mst_atomic_enable_dsc(state,
6583 						     aconnector->port,
6584 						     dm_conn_state->pbn,
6585 						     0,
6586 						     false);
6587 			continue;
6588 		}
6589 
6590 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6591 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6592 		clock = stream->timing.pix_clk_100hz / 10;
6593 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6594 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6595 						    aconnector->port,
6596 						    pbn, pbn_div,
6597 						    true);
6598 		if (vcpi < 0)
6599 			return vcpi;
6600 
6601 		dm_conn_state->pbn = pbn;
6602 		dm_conn_state->vcpi_slots = vcpi;
6603 	}
6604 	return 0;
6605 }
6606 #endif
6607 
6608 static void dm_drm_plane_reset(struct drm_plane *plane)
6609 {
6610 	struct dm_plane_state *amdgpu_state = NULL;
6611 
6612 	if (plane->state)
6613 		plane->funcs->atomic_destroy_state(plane, plane->state);
6614 
6615 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6616 	WARN_ON(amdgpu_state == NULL);
6617 
6618 	if (amdgpu_state)
6619 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6620 }
6621 
6622 static struct drm_plane_state *
6623 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6624 {
6625 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6626 
6627 	old_dm_plane_state = to_dm_plane_state(plane->state);
6628 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6629 	if (!dm_plane_state)
6630 		return NULL;
6631 
6632 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6633 
6634 	if (old_dm_plane_state->dc_state) {
6635 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6636 		dc_plane_state_retain(dm_plane_state->dc_state);
6637 	}
6638 
6639 	return &dm_plane_state->base;
6640 }
6641 
6642 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6643 				struct drm_plane_state *state)
6644 {
6645 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6646 
6647 	if (dm_plane_state->dc_state)
6648 		dc_plane_state_release(dm_plane_state->dc_state);
6649 
6650 	drm_atomic_helper_plane_destroy_state(plane, state);
6651 }
6652 
6653 static const struct drm_plane_funcs dm_plane_funcs = {
6654 	.update_plane	= drm_atomic_helper_update_plane,
6655 	.disable_plane	= drm_atomic_helper_disable_plane,
6656 	.destroy	= drm_primary_helper_destroy,
6657 	.reset = dm_drm_plane_reset,
6658 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6659 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6660 	.format_mod_supported = dm_plane_format_mod_supported,
6661 };
6662 
6663 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6664 				      struct drm_plane_state *new_state)
6665 {
6666 	struct amdgpu_framebuffer *afb;
6667 	struct drm_gem_object *obj;
6668 	struct amdgpu_device *adev;
6669 	struct amdgpu_bo *rbo;
6670 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6671 	struct list_head list;
6672 	struct ttm_validate_buffer tv;
6673 	struct ww_acquire_ctx ticket;
6674 	uint32_t domain;
6675 	int r;
6676 
6677 	if (!new_state->fb) {
6678 		DRM_DEBUG_KMS("No FB bound\n");
6679 		return 0;
6680 	}
6681 
6682 	afb = to_amdgpu_framebuffer(new_state->fb);
6683 	obj = new_state->fb->obj[0];
6684 	rbo = gem_to_amdgpu_bo(obj);
6685 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6686 	INIT_LIST_HEAD(&list);
6687 
6688 	tv.bo = &rbo->tbo;
6689 	tv.num_shared = 1;
6690 	list_add(&tv.head, &list);
6691 
6692 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6693 	if (r) {
6694 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6695 		return r;
6696 	}
6697 
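	/*
	 * Cursor surfaces must be scanned out of VRAM; other plane types may
	 * also be placed in GTT where the display hardware supports it.
	 */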
6698 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6699 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6700 	else
6701 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6702 
6703 	r = amdgpu_bo_pin(rbo, domain);
6704 	if (unlikely(r != 0)) {
6705 		if (r != -ERESTARTSYS)
6706 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6707 		ttm_eu_backoff_reservation(&ticket, &list);
6708 		return r;
6709 	}
6710 
6711 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6712 	if (unlikely(r != 0)) {
6713 		amdgpu_bo_unpin(rbo);
6714 		ttm_eu_backoff_reservation(&ticket, &list);
6715 		DRM_ERROR("%p bind failed\n", rbo);
6716 		return r;
6717 	}
6718 
6719 	ttm_eu_backoff_reservation(&ticket, &list);
6720 
6721 	afb->address = amdgpu_bo_gpu_offset(rbo);
6722 
6723 	amdgpu_bo_ref(rbo);
6724 
6725 	/*
6726 	 * We don't do surface updates on planes that have been newly created,
6727 	 * but we also don't have the afb->address during atomic check.
6728 	 *
6729 	 * Fill in buffer attributes depending on the address here, but only on
6730 	 * newly created planes since they're not being used by DC yet and this
6731 	 * won't modify global state.
6732 	 */
6733 	dm_plane_state_old = to_dm_plane_state(plane->state);
6734 	dm_plane_state_new = to_dm_plane_state(new_state);
6735 
6736 	if (dm_plane_state_new->dc_state &&
6737 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6738 		struct dc_plane_state *plane_state =
6739 			dm_plane_state_new->dc_state;
6740 		bool force_disable_dcc = !plane_state->dcc.enable;
6741 
6742 		fill_plane_buffer_attributes(
6743 			adev, afb, plane_state->format, plane_state->rotation,
6744 			afb->tiling_flags,
6745 			&plane_state->tiling_info, &plane_state->plane_size,
6746 			&plane_state->dcc, &plane_state->address,
6747 			afb->tmz_surface, force_disable_dcc);
6748 	}
6749 
6750 	return 0;
6751 }
6752 
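/*
 * cleanup_fb hook: undo prepare_fb by unpinning the buffer object and
 * dropping the reference taken on it.
 */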
6753 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6754 				       struct drm_plane_state *old_state)
6755 {
6756 	struct amdgpu_bo *rbo;
6757 	int r;
6758 
6759 	if (!old_state->fb)
6760 		return;
6761 
6762 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6763 	r = amdgpu_bo_reserve(rbo, false);
6764 	if (unlikely(r)) {
6765 		DRM_ERROR("failed to reserve rbo before unpin\n");
6766 		return;
6767 	}
6768 
6769 	amdgpu_bo_unpin(rbo);
6770 	amdgpu_bo_unreserve(rbo);
6771 	amdgpu_bo_unref(&rbo);
6772 }
6773 
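/*
 * Validate a plane state against its CRTC: reject planes that land entirely
 * off screen or leave a viewport smaller than DC can handle, then defer to
 * the DRM helper with scaling limits taken from the DC plane caps.
 */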
6774 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6775 				       struct drm_crtc_state *new_crtc_state)
6776 {
6777 	struct drm_framebuffer *fb = state->fb;
6778 	int min_downscale, max_upscale;
6779 	int min_scale = 0;
6780 	int max_scale = INT_MAX;
6781 
6782 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6783 	if (fb && state->crtc) {
6784 		/* Validate viewport to cover the case when only the position changes */
6785 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6786 			int viewport_width = state->crtc_w;
6787 			int viewport_height = state->crtc_h;
6788 
6789 			if (state->crtc_x < 0)
6790 				viewport_width += state->crtc_x;
6791 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6792 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6793 
6794 			if (state->crtc_y < 0)
6795 				viewport_height += state->crtc_y;
6796 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6797 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6798 
6799 			if (viewport_width < 0 || viewport_height < 0) {
6800 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6801 				return -EINVAL;
6802 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* Width is doubled because of pipe-split. */
6803 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6804 				return -EINVAL;
6805 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
6806 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6807 				return -EINVAL;
6808 			}
6809 
6810 		}
6811 
6812 		/* Get min/max allowed scaling factors from plane caps. */
6813 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6814 					     &min_downscale, &max_upscale);
6815 		/*
6816 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6817 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6818 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6819 		 */
6820 		min_scale = (1000 << 16) / max_upscale;
6821 		max_scale = (1000 << 16) / min_downscale;
6822 	}
6823 
6824 	return drm_atomic_helper_check_plane_state(
6825 		state, new_crtc_state, min_scale, max_scale, true, true);
6826 }
6827 
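/*
 * atomic_check hook: validate the new plane state (viewport and scaling),
 * then ask DC whether the resulting dc_plane_state is supportable.
 */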
6828 static int dm_plane_atomic_check(struct drm_plane *plane,
6829 				 struct drm_atomic_state *state)
6830 {
6831 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6832 										 plane);
6833 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6834 	struct dc *dc = adev->dm.dc;
6835 	struct dm_plane_state *dm_plane_state;
6836 	struct dc_scaling_info scaling_info;
6837 	struct drm_crtc_state *new_crtc_state;
6838 	int ret;
6839 
6840 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6841 
6842 	dm_plane_state = to_dm_plane_state(new_plane_state);
6843 
6844 	if (!dm_plane_state->dc_state)
6845 		return 0;
6846 
6847 	new_crtc_state =
6848 		drm_atomic_get_new_crtc_state(state,
6849 					      new_plane_state->crtc);
6850 	if (!new_crtc_state)
6851 		return -EINVAL;
6852 
6853 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6854 	if (ret)
6855 		return ret;
6856 
6857 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6858 	if (ret)
6859 		return ret;
6860 
6861 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6862 		return 0;
6863 
6864 	return -EINVAL;
6865 }
6866 
6867 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6868 				       struct drm_atomic_state *state)
6869 {
6870 	/* Only support async updates on cursor planes. */
6871 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6872 		return -EINVAL;
6873 
6874 	return 0;
6875 }
6876 
6877 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6878 					 struct drm_atomic_state *state)
6879 {
6880 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6881 									   plane);
6882 	struct drm_plane_state *old_state =
6883 		drm_atomic_get_old_plane_state(state, plane);
6884 
6885 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6886 
6887 	swap(plane->state->fb, new_state->fb);
6888 
6889 	plane->state->src_x = new_state->src_x;
6890 	plane->state->src_y = new_state->src_y;
6891 	plane->state->src_w = new_state->src_w;
6892 	plane->state->src_h = new_state->src_h;
6893 	plane->state->crtc_x = new_state->crtc_x;
6894 	plane->state->crtc_y = new_state->crtc_y;
6895 	plane->state->crtc_w = new_state->crtc_w;
6896 	plane->state->crtc_h = new_state->crtc_h;
6897 
6898 	handle_cursor_update(plane, old_state);
6899 }
6900 
6901 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6902 	.prepare_fb = dm_plane_helper_prepare_fb,
6903 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6904 	.atomic_check = dm_plane_atomic_check,
6905 	.atomic_async_check = dm_plane_atomic_async_check,
6906 	.atomic_async_update = dm_plane_atomic_async_update
6907 };
6908 
6909 /*
6910  * TODO: these are currently initialized to rgb formats only.
6911  * For future use cases we should either initialize them dynamically based on
6912  * plane capabilities, or initialize this array to all formats, so the
6913  * internal DRM check will succeed, and let DC implement the proper check.
6914  */
6915 static const uint32_t rgb_formats[] = {
6916 	DRM_FORMAT_XRGB8888,
6917 	DRM_FORMAT_ARGB8888,
6918 	DRM_FORMAT_RGBA8888,
6919 	DRM_FORMAT_XRGB2101010,
6920 	DRM_FORMAT_XBGR2101010,
6921 	DRM_FORMAT_ARGB2101010,
6922 	DRM_FORMAT_ABGR2101010,
6923 	DRM_FORMAT_XBGR8888,
6924 	DRM_FORMAT_ABGR8888,
6925 	DRM_FORMAT_RGB565,
6926 };
6927 
6928 static const uint32_t overlay_formats[] = {
6929 	DRM_FORMAT_XRGB8888,
6930 	DRM_FORMAT_ARGB8888,
6931 	DRM_FORMAT_RGBA8888,
6932 	DRM_FORMAT_XBGR8888,
6933 	DRM_FORMAT_ABGR8888,
6934 	DRM_FORMAT_RGB565
6935 };
6936 
6937 static const u32 cursor_formats[] = {
6938 	DRM_FORMAT_ARGB8888
6939 };
6940 
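/*
 * Build the format list for a plane based on its type, appending the
 * optional YUV and FP16 formats when the DC plane caps advertise support.
 * Returns the number of formats written, capped at max_formats.
 */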
6941 static int get_plane_formats(const struct drm_plane *plane,
6942 			     const struct dc_plane_cap *plane_cap,
6943 			     uint32_t *formats, int max_formats)
6944 {
6945 	int i, num_formats = 0;
6946 
6947 	/*
6948 	 * TODO: Query support for each group of formats directly from
6949 	 * DC plane caps. This will require adding more formats to the
6950 	 * caps list.
6951 	 */
6952 
6953 	switch (plane->type) {
6954 	case DRM_PLANE_TYPE_PRIMARY:
6955 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6956 			if (num_formats >= max_formats)
6957 				break;
6958 
6959 			formats[num_formats++] = rgb_formats[i];
6960 		}
6961 
6962 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6963 			formats[num_formats++] = DRM_FORMAT_NV12;
6964 		if (plane_cap && plane_cap->pixel_format_support.p010)
6965 			formats[num_formats++] = DRM_FORMAT_P010;
6966 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6967 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6968 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6969 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6970 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6971 		}
6972 		break;
6973 
6974 	case DRM_PLANE_TYPE_OVERLAY:
6975 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6976 			if (num_formats >= max_formats)
6977 				break;
6978 
6979 			formats[num_formats++] = overlay_formats[i];
6980 		}
6981 		break;
6982 
6983 	case DRM_PLANE_TYPE_CURSOR:
6984 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6985 			if (num_formats >= max_formats)
6986 				break;
6987 
6988 			formats[num_formats++] = cursor_formats[i];
6989 		}
6990 		break;
6991 	}
6992 
6993 	return num_formats;
6994 }
6995 
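/*
 * Register a DRM plane for the given DC plane caps: query formats and
 * modifiers, initialize the universal plane, and attach the blending,
 * color and rotation properties the hardware supports.
 */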
6996 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6997 				struct drm_plane *plane,
6998 				unsigned long possible_crtcs,
6999 				const struct dc_plane_cap *plane_cap)
7000 {
7001 	uint32_t formats[32];
7002 	int num_formats;
7003 	int res = -EPERM;
7004 	unsigned int supported_rotations;
7005 	uint64_t *modifiers = NULL;
7006 
7007 	num_formats = get_plane_formats(plane, plane_cap, formats,
7008 					ARRAY_SIZE(formats));
7009 
7010 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7011 	if (res)
7012 		return res;
7013 
7014 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7015 				       &dm_plane_funcs, formats, num_formats,
7016 				       modifiers, plane->type, NULL);
7017 	kfree(modifiers);
7018 	if (res)
7019 		return res;
7020 
7021 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7022 	    plane_cap && plane_cap->per_pixel_alpha) {
7023 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7024 					  BIT(DRM_MODE_BLEND_PREMULTI);
7025 
7026 		drm_plane_create_alpha_property(plane);
7027 		drm_plane_create_blend_mode_property(plane, blend_caps);
7028 	}
7029 
7030 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7031 	    plane_cap &&
7032 	    (plane_cap->pixel_format_support.nv12 ||
7033 	     plane_cap->pixel_format_support.p010)) {
7034 		/* This only affects YUV formats. */
7035 		drm_plane_create_color_properties(
7036 			plane,
7037 			BIT(DRM_COLOR_YCBCR_BT601) |
7038 			BIT(DRM_COLOR_YCBCR_BT709) |
7039 			BIT(DRM_COLOR_YCBCR_BT2020),
7040 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7041 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7042 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7043 	}
7044 
7045 	supported_rotations =
7046 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7047 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7048 
7049 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7050 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7051 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7052 						   supported_rotations);
7053 
7054 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7055 
7056 	/* Create (reset) the plane state */
7057 	if (plane->funcs->reset)
7058 		plane->funcs->reset(plane);
7059 
7060 	return 0;
7061 }
7062 
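/*
 * Create a CRTC with its own cursor plane, wire up the DM CRTC funcs and
 * helpers, and initialize the cursor limits and color management state.
 */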
7063 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7064 			       struct drm_plane *plane,
7065 			       uint32_t crtc_index)
7066 {
7067 	struct amdgpu_crtc *acrtc = NULL;
7068 	struct drm_plane *cursor_plane;
7069 
7070 	int res = -ENOMEM;
7071 
7072 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7073 	if (!cursor_plane)
7074 		goto fail;
7075 
7076 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7077 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
7078 
7079 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7080 	if (!acrtc)
7081 		goto fail;
7082 
7083 	res = drm_crtc_init_with_planes(
7084 			dm->ddev,
7085 			&acrtc->base,
7086 			plane,
7087 			cursor_plane,
7088 			&amdgpu_dm_crtc_funcs, NULL);
7089 
7090 	if (res)
7091 		goto fail;
7092 
7093 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7094 
7095 	/* Create (reset) the CRTC state */
7096 	if (acrtc->base.funcs->reset)
7097 		acrtc->base.funcs->reset(&acrtc->base);
7098 
7099 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7100 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7101 
7102 	acrtc->crtc_id = crtc_index;
7103 	acrtc->base.enabled = false;
7104 	acrtc->otg_inst = -1;
7105 
7106 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7107 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7108 				   true, MAX_COLOR_LUT_ENTRIES);
7109 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7110 
7111 	return 0;
7112 
7113 fail:
7114 	kfree(acrtc);
7115 	kfree(cursor_plane);
7116 	return res;
7117 }
7118 
7119 
7120 static int to_drm_connector_type(enum signal_type st)
7121 {
7122 	switch (st) {
7123 	case SIGNAL_TYPE_HDMI_TYPE_A:
7124 		return DRM_MODE_CONNECTOR_HDMIA;
7125 	case SIGNAL_TYPE_EDP:
7126 		return DRM_MODE_CONNECTOR_eDP;
7127 	case SIGNAL_TYPE_LVDS:
7128 		return DRM_MODE_CONNECTOR_LVDS;
7129 	case SIGNAL_TYPE_RGB:
7130 		return DRM_MODE_CONNECTOR_VGA;
7131 	case SIGNAL_TYPE_DISPLAY_PORT:
7132 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7133 		return DRM_MODE_CONNECTOR_DisplayPort;
7134 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7135 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7136 		return DRM_MODE_CONNECTOR_DVID;
7137 	case SIGNAL_TYPE_VIRTUAL:
7138 		return DRM_MODE_CONNECTOR_VIRTUAL;
7139 
7140 	default:
7141 		return DRM_MODE_CONNECTOR_Unknown;
7142 	}
7143 }
7144 
7145 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7146 {
7147 	struct drm_encoder *encoder;
7148 
7149 	/* There is only one encoder per connector */
7150 	drm_connector_for_each_possible_encoder(connector, encoder)
7151 		return encoder;
7152 
7153 	return NULL;
7154 }
7155 
7156 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7157 {
7158 	struct drm_encoder *encoder;
7159 	struct amdgpu_encoder *amdgpu_encoder;
7160 
7161 	encoder = amdgpu_dm_connector_to_encoder(connector);
7162 
7163 	if (encoder == NULL)
7164 		return;
7165 
7166 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7167 
7168 	amdgpu_encoder->native_mode.clock = 0;
7169 
7170 	if (!list_empty(&connector->probed_modes)) {
7171 		struct drm_display_mode *preferred_mode = NULL;
7172 
7173 		list_for_each_entry(preferred_mode,
7174 				    &connector->probed_modes,
7175 				    head) {
7176 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7177 				amdgpu_encoder->native_mode = *preferred_mode;
7178 
7179 			break;
7180 		}
7181 
7182 	}
7183 }
7184 
7185 static struct drm_display_mode *
7186 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7187 			     char *name,
7188 			     int hdisplay, int vdisplay)
7189 {
7190 	struct drm_device *dev = encoder->dev;
7191 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7192 	struct drm_display_mode *mode = NULL;
7193 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7194 
7195 	mode = drm_mode_duplicate(dev, native_mode);
7196 
7197 	if (mode == NULL)
7198 		return NULL;
7199 
7200 	mode->hdisplay = hdisplay;
7201 	mode->vdisplay = vdisplay;
7202 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7203 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7204 
7205 	return mode;
7206 
7207 }
7208 
7209 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7210 						 struct drm_connector *connector)
7211 {
7212 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7213 	struct drm_display_mode *mode = NULL;
7214 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7215 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7216 				to_amdgpu_dm_connector(connector);
7217 	int i;
7218 	int n;
7219 	struct mode_size {
7220 		char name[DRM_DISPLAY_MODE_LEN];
7221 		int w;
7222 		int h;
7223 	} common_modes[] = {
7224 		{  "640x480",  640,  480},
7225 		{  "800x600",  800,  600},
7226 		{ "1024x768", 1024,  768},
7227 		{ "1280x720", 1280,  720},
7228 		{ "1280x800", 1280,  800},
7229 		{"1280x1024", 1280, 1024},
7230 		{ "1440x900", 1440,  900},
7231 		{"1680x1050", 1680, 1050},
7232 		{"1600x1200", 1600, 1200},
7233 		{"1920x1080", 1920, 1080},
7234 		{"1920x1200", 1920, 1200}
7235 	};
7236 
7237 	n = ARRAY_SIZE(common_modes);
7238 
7239 	for (i = 0; i < n; i++) {
7240 		struct drm_display_mode *curmode = NULL;
7241 		bool mode_existed = false;
7242 
7243 		if (common_modes[i].w > native_mode->hdisplay ||
7244 		    common_modes[i].h > native_mode->vdisplay ||
7245 		   (common_modes[i].w == native_mode->hdisplay &&
7246 		    common_modes[i].h == native_mode->vdisplay))
7247 			continue;
7248 
7249 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7250 			if (common_modes[i].w == curmode->hdisplay &&
7251 			    common_modes[i].h == curmode->vdisplay) {
7252 				mode_existed = true;
7253 				break;
7254 			}
7255 		}
7256 
7257 		if (mode_existed)
7258 			continue;
7259 
7260 		mode = amdgpu_dm_create_common_mode(encoder,
7261 				common_modes[i].name, common_modes[i].w,
7262 				common_modes[i].h);
		if (!mode)
			continue;
7263 		drm_mode_probed_add(connector, mode);
7264 		amdgpu_dm_connector->num_modes++;
7265 	}
7266 }
7267 
7268 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7269 					      struct edid *edid)
7270 {
7271 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7272 			to_amdgpu_dm_connector(connector);
7273 
7274 	if (edid) {
7275 		/* empty probed_modes */
7276 		INIT_LIST_HEAD(&connector->probed_modes);
7277 		amdgpu_dm_connector->num_modes =
7278 				drm_add_edid_modes(connector, edid);
7279 
7280 		/* Sort the probed modes before calling
7281 		 * amdgpu_dm_get_native_mode(), since an EDID can have
7282 		 * more than one preferred mode. Modes that appear
7283 		 * later in the probed mode list could have a higher
7284 		 * preferred resolution; for example, a 3840x2160
7285 		 * preferred timing in the base EDID and a 4096x2160
7286 		 * preferred resolution in a later DID extension block.
7287 		 */
7288 		drm_mode_sort(&connector->probed_modes);
7289 		amdgpu_dm_get_native_mode(connector);
7290 
7291 		/* Freesync capabilities are reset by calling
7292 		 * drm_add_edid_modes() and need to be
7293 		 * restored here.
7294 		 */
7295 		amdgpu_dm_update_freesync_caps(connector, edid);
7296 	} else {
7297 		amdgpu_dm_connector->num_modes = 0;
7298 	}
7299 }
7300 
7301 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7302 			      struct drm_display_mode *mode)
7303 {
7304 	struct drm_display_mode *m;
7305 
7306 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7307 		if (drm_mode_equal(m, mode))
7308 			return true;
7309 	}
7310 
7311 	return false;
7312 }
7313 
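/*
 * Synthesize additional fixed-refresh modes for freesync video: take the
 * highest-refresh probed mode and, for each standard rate that falls within
 * the connector's VRR range, duplicate it with a stretched vertical total
 * so the copy refreshes at that rate. Returns how many modes were added.
 */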
7314 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7315 {
7316 	const struct drm_display_mode *m;
7317 	struct drm_display_mode *new_mode;
7318 	uint i;
7319 	uint32_t new_modes_count = 0;
7320 
7321 	/* Standard FPS values
7322 	 *
7323 	 * 23.976   - TV/NTSC
7324 	 * 24 	    - Cinema
7325 	 * 25 	    - TV/PAL
7326 	 * 29.97    - TV/NTSC
7327 	 * 30 	    - TV/NTSC
7328 	 * 48 	    - Cinema HFR
7329 	 * 50 	    - TV/PAL
7330 	 * 60 	    - Commonly used
7331 	 * 48,72,96 - Multiples of 24
7332 	 */
7333 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7334 					 48000, 50000, 60000, 72000, 96000 };
7335 
7336 	/*
7337 	 * Find mode with highest refresh rate with the same resolution
7338 	 * as the preferred mode. Some monitors report a preferred mode
7339 	 * with lower resolution than the highest refresh rate supported.
7340 	 */
7341 
7342 	m = get_highest_refresh_rate_mode(aconnector, true);
7343 	if (!m)
7344 		return 0;
7345 
7346 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7347 		uint64_t target_vtotal, target_vtotal_diff;
7348 		uint64_t num, den;
7349 
7350 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7351 			continue;
7352 
7353 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7354 		    common_rates[i] > aconnector->max_vfreq * 1000)
7355 			continue;
7356 
7357 		num = (unsigned long long)m->clock * 1000 * 1000;
7358 		den = common_rates[i] * (unsigned long long)m->htotal;
7359 		target_vtotal = div_u64(num, den);
7360 		target_vtotal_diff = target_vtotal - m->vtotal;
7361 
7362 		/* Check for illegal modes */
7363 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7364 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7365 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7366 			continue;
7367 
7368 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7369 		if (!new_mode)
7370 			goto out;
7371 
7372 		new_mode->vtotal += (u16)target_vtotal_diff;
7373 		new_mode->vsync_start += (u16)target_vtotal_diff;
7374 		new_mode->vsync_end += (u16)target_vtotal_diff;
7375 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7376 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7377 
7378 		if (!is_duplicate_mode(aconnector, new_mode)) {
7379 			drm_mode_probed_add(&aconnector->base, new_mode);
7380 			new_modes_count += 1;
7381 		} else {
7382 			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
7383 	}
7384  out:
7385 	return new_modes_count;
7386 }
7387 
7388 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7389 						   struct edid *edid)
7390 {
7391 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7392 		to_amdgpu_dm_connector(connector);
7393 
7394 	if (!(amdgpu_freesync_vid_mode && edid))
7395 		return;
7396 
7397 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7398 		amdgpu_dm_connector->num_modes +=
7399 			add_fs_modes(amdgpu_dm_connector);
7400 }
7401 
7402 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7403 {
7404 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7405 			to_amdgpu_dm_connector(connector);
7406 	struct drm_encoder *encoder;
7407 	struct edid *edid = amdgpu_dm_connector->edid;
7408 
7409 	encoder = amdgpu_dm_connector_to_encoder(connector);
7410 
7411 	if (!drm_edid_is_valid(edid)) {
7412 		amdgpu_dm_connector->num_modes =
7413 				drm_add_modes_noedid(connector, 640, 480);
7414 	} else {
7415 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7416 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7417 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7418 	}
7419 	amdgpu_dm_fbc_init(connector);
7420 
7421 	return amdgpu_dm_connector->num_modes;
7422 }
7423 
7424 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7425 				     struct amdgpu_dm_connector *aconnector,
7426 				     int connector_type,
7427 				     struct dc_link *link,
7428 				     int link_index)
7429 {
7430 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7431 
7432 	/*
7433 	 * Some of the properties below require access to state, like bpc.
7434 	 * Allocate some default initial connector state with our reset helper.
7435 	 */
7436 	if (aconnector->base.funcs->reset)
7437 		aconnector->base.funcs->reset(&aconnector->base);
7438 
7439 	aconnector->connector_id = link_index;
7440 	aconnector->dc_link = link;
7441 	aconnector->base.interlace_allowed = false;
7442 	aconnector->base.doublescan_allowed = false;
7443 	aconnector->base.stereo_allowed = false;
7444 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7445 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7446 	aconnector->audio_inst = -1;
7447 	mutex_init(&aconnector->hpd_lock);
7448 
7449 	/*
7450 	 * Configure HPD hot plug support. The default value of
7451 	 * connector->polled is 0, which means hot plug is not supported.
7452 	 */
7453 	switch (connector_type) {
7454 	case DRM_MODE_CONNECTOR_HDMIA:
7455 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7456 		aconnector->base.ycbcr_420_allowed =
7457 			link->link_enc->features.hdmi_ycbcr420_supported;
7458 		break;
7459 	case DRM_MODE_CONNECTOR_DisplayPort:
7460 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7461 		aconnector->base.ycbcr_420_allowed =
7462 			link->link_enc->features.dp_ycbcr420_supported;
7463 		break;
7464 	case DRM_MODE_CONNECTOR_DVID:
7465 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7466 		break;
7467 	default:
7468 		break;
7469 	}
7470 
7471 	drm_object_attach_property(&aconnector->base.base,
7472 				dm->ddev->mode_config.scaling_mode_property,
7473 				DRM_MODE_SCALE_NONE);
7474 
7475 	drm_object_attach_property(&aconnector->base.base,
7476 				adev->mode_info.underscan_property,
7477 				UNDERSCAN_OFF);
7478 	drm_object_attach_property(&aconnector->base.base,
7479 				adev->mode_info.underscan_hborder_property,
7480 				0);
7481 	drm_object_attach_property(&aconnector->base.base,
7482 				adev->mode_info.underscan_vborder_property,
7483 				0);
7484 
7485 	if (!aconnector->mst_port)
7486 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7487 
7488 	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7489 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7490 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7491 
7492 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7493 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7494 		drm_object_attach_property(&aconnector->base.base,
7495 				adev->mode_info.abm_level_property, 0);
7496 	}
7497 
7498 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7499 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7500 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7501 		drm_object_attach_property(
7502 			&aconnector->base.base,
7503 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7504 
7505 		if (!aconnector->mst_port)
7506 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7507 
7508 #ifdef CONFIG_DRM_AMD_DC_HDCP
7509 		if (adev->dm.hdcp_workqueue)
7510 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7511 #endif
7512 	}
7513 }
7514 
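/*
 * i2c master_xfer hook: translate the i2c_msg array into a DC i2c_command
 * and submit it over the connector's DDC channel via dc_submit_i2c().
 */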
7515 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7516 			      struct i2c_msg *msgs, int num)
7517 {
7518 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7519 	struct ddc_service *ddc_service = i2c->ddc_service;
7520 	struct i2c_command cmd;
7521 	int i;
7522 	int result = -EIO;
7523 
7524 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7525 
7526 	if (!cmd.payloads)
7527 		return result;
7528 
7529 	cmd.number_of_payloads = num;
7530 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7531 	cmd.speed = 100;
7532 
7533 	for (i = 0; i < num; i++) {
7534 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7535 		cmd.payloads[i].address = msgs[i].addr;
7536 		cmd.payloads[i].length = msgs[i].len;
7537 		cmd.payloads[i].data = msgs[i].buf;
7538 	}
7539 
7540 	if (dc_submit_i2c(
7541 			ddc_service->ctx->dc,
7542 			ddc_service->ddc_pin->hw_info.ddc_channel,
7543 			&cmd))
7544 		result = num;
7545 
7546 	kfree(cmd.payloads);
7547 	return result;
7548 }
7549 
7550 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7551 {
7552 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7553 }
7554 
7555 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7556 	.master_xfer = amdgpu_dm_i2c_xfer,
7557 	.functionality = amdgpu_dm_i2c_func,
7558 };
7559 
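/*
 * Allocate and populate an i2c adapter backed by the link's DDC service so
 * the DRM connector can use the hardware i2c engine for DDC transactions
 * such as EDID reads.
 */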
7560 static struct amdgpu_i2c_adapter *
7561 create_i2c(struct ddc_service *ddc_service,
7562 	   int link_index,
7563 	   int *res)
7564 {
7565 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7566 	struct amdgpu_i2c_adapter *i2c;
7567 
7568 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7569 	if (!i2c)
7570 		return NULL;
7571 	i2c->base.owner = THIS_MODULE;
7572 	i2c->base.class = I2C_CLASS_DDC;
7573 	i2c->base.dev.parent = &adev->pdev->dev;
7574 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7575 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7576 	i2c_set_adapdata(&i2c->base, i2c);
7577 	i2c->ddc_service = ddc_service;
7578 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7579 
7580 	return i2c;
7581 }
7582 
7583 
7584 /*
7585  * Note: this function assumes that dc_link_detect() was called for the
7586  * dc_link which will be represented by this aconnector.
7587  */
7588 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7589 				    struct amdgpu_dm_connector *aconnector,
7590 				    uint32_t link_index,
7591 				    struct amdgpu_encoder *aencoder)
7592 {
7593 	int res = 0;
7594 	int connector_type;
7595 	struct dc *dc = dm->dc;
7596 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7597 	struct amdgpu_i2c_adapter *i2c;
7598 
7599 	link->priv = aconnector;
7600 
7601 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7602 
7603 	i2c = create_i2c(link->ddc, link->link_index, &res);
7604 	if (!i2c) {
7605 		DRM_ERROR("Failed to create i2c adapter data\n");
7606 		return -ENOMEM;
7607 	}
7608 
7609 	aconnector->i2c = i2c;
7610 	res = i2c_add_adapter(&i2c->base);
7611 
7612 	if (res) {
7613 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7614 		goto out_free;
7615 	}
7616 
7617 	connector_type = to_drm_connector_type(link->connector_signal);
7618 
7619 	res = drm_connector_init_with_ddc(
7620 			dm->ddev,
7621 			&aconnector->base,
7622 			&amdgpu_dm_connector_funcs,
7623 			connector_type,
7624 			&i2c->base);
7625 
7626 	if (res) {
7627 		DRM_ERROR("connector_init failed\n");
7628 		aconnector->connector_id = -1;
7629 		goto out_free;
7630 	}
7631 
7632 	drm_connector_helper_add(
7633 			&aconnector->base,
7634 			&amdgpu_dm_connector_helper_funcs);
7635 
7636 	amdgpu_dm_connector_init_helper(
7637 		dm,
7638 		aconnector,
7639 		connector_type,
7640 		link,
7641 		link_index);
7642 
7643 	drm_connector_attach_encoder(
7644 		&aconnector->base, &aencoder->base);
7645 
7646 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7647 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7648 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7649 
7650 out_free:
7651 	if (res) {
7652 		kfree(i2c);
7653 		aconnector->i2c = NULL;
7654 	}
7655 	return res;
7656 }
7657 
7658 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7659 {
7660 	switch (adev->mode_info.num_crtc) {
7661 	case 1:
7662 		return 0x1;
7663 	case 2:
7664 		return 0x3;
7665 	case 3:
7666 		return 0x7;
7667 	case 4:
7668 		return 0xf;
7669 	case 5:
7670 		return 0x1f;
7671 	case 6:
7672 	default:
7673 		return 0x3f;
7674 	}
7675 }
7676 
7677 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7678 				  struct amdgpu_encoder *aencoder,
7679 				  uint32_t link_index)
7680 {
7681 	struct amdgpu_device *adev = drm_to_adev(dev);
7682 
7683 	int res = drm_encoder_init(dev,
7684 				   &aencoder->base,
7685 				   &amdgpu_dm_encoder_funcs,
7686 				   DRM_MODE_ENCODER_TMDS,
7687 				   NULL);
7688 
7689 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7690 
7691 	if (!res)
7692 		aencoder->encoder_id = link_index;
7693 	else
7694 		aencoder->encoder_id = -1;
7695 
7696 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7697 
7698 	return res;
7699 }
7700 
7701 static void manage_dm_interrupts(struct amdgpu_device *adev,
7702 				 struct amdgpu_crtc *acrtc,
7703 				 bool enable)
7704 {
7705 	/*
7706 	 * We have no guarantee that the frontend index maps to the same
7707 	 * backend index - some even map to more than one.
7708 	 *
7709 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7710 	 */
7711 	int irq_type =
7712 		amdgpu_display_crtc_idx_to_irq_type(
7713 			adev,
7714 			acrtc->crtc_id);
7715 
7716 	if (enable) {
7717 		drm_crtc_vblank_on(&acrtc->base);
7718 		amdgpu_irq_get(
7719 			adev,
7720 			&adev->pageflip_irq,
7721 			irq_type);
7722 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7723 		amdgpu_irq_get(
7724 			adev,
7725 			&adev->vline0_irq,
7726 			irq_type);
7727 #endif
7728 	} else {
7729 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7730 		amdgpu_irq_put(
7731 			adev,
7732 			&adev->vline0_irq,
7733 			irq_type);
7734 #endif
7735 		amdgpu_irq_put(
7736 			adev,
7737 			&adev->pageflip_irq,
7738 			irq_type);
7739 		drm_crtc_vblank_off(&acrtc->base);
7740 	}
7741 }
7742 
7743 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7744 				      struct amdgpu_crtc *acrtc)
7745 {
7746 	int irq_type =
7747 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7748 
7749 	/*
7750 	 * This reads the current state for the IRQ and forcibly reapplies
7751 	 * the setting to hardware.
7752 	 */
7753 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7754 }
7755 
7756 static bool
7757 is_scaling_state_different(const struct dm_connector_state *dm_state,
7758 			   const struct dm_connector_state *old_dm_state)
7759 {
7760 	if (dm_state->scaling != old_dm_state->scaling)
7761 		return true;
7762 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7763 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7764 			return true;
7765 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7766 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7767 			return true;
7768 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7769 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7770 		return true;
7771 	return false;
7772 }
7773 
7774 #ifdef CONFIG_DRM_AMD_DC_HDCP
7775 static bool is_content_protection_different(struct drm_connector_state *state,
7776 					    const struct drm_connector_state *old_state,
7777 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7778 {
7779 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7780 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7781 
7782 	/* Handle: Type0/1 change */
7783 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7784 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7785 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7786 		return true;
7787 	}
7788 
7789 	/* CP is being re-enabled; ignore this.
7790 	 *
7791 	 * Handles:	ENABLED -> DESIRED
7792 	 */
7793 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7794 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7795 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7796 		return false;
7797 	}
7798 
7799 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7800 	 *
7801 	 * Handles:	UNDESIRED -> ENABLED
7802 	 */
7803 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7804 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7805 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7806 
7807 	/* Check if something is connected or enabled; otherwise we would start
7808 	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7809 	 *
7810 	 * Handles:	DESIRED -> DESIRED (Special case)
7811 	 */
7812 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7813 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7814 		dm_con_state->update_hdcp = false;
7815 		return true;
7816 	}
7817 
7818 	/*
7819 	 * Handles:	UNDESIRED -> UNDESIRED
7820 	 *		DESIRED -> DESIRED
7821 	 *		ENABLED -> ENABLED
7822 	 */
7823 	if (old_state->content_protection == state->content_protection)
7824 		return false;
7825 
7826 	/*
7827 	 * Handles:	UNDESIRED -> DESIRED
7828 	 *		DESIRED -> UNDESIRED
7829 	 *		ENABLED -> UNDESIRED
7830 	 */
7831 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7832 		return true;
7833 
7834 	/*
7835 	 * Handles:	DESIRED -> ENABLED
7836 	 */
7837 	return false;
7838 }
7839 
7840 #endif
7841 static void remove_stream(struct amdgpu_device *adev,
7842 			  struct amdgpu_crtc *acrtc,
7843 			  struct dc_stream_state *stream)
7844 {
7845 	/* This is the update mode case: the stream is being removed, so mark the CRTC disabled. */
7846 
7847 	acrtc->otg_inst = -1;
7848 	acrtc->enabled = false;
7849 }
7850 
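/*
 * Compute the DC cursor position for a cursor plane. Negative coordinates
 * are clamped to zero with the overhang folded into the hotspot, so the
 * cursor can be partially scrolled off the top/left edge of the screen.
 */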
7851 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7852 			       struct dc_cursor_position *position)
7853 {
7854 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7855 	int x, y;
7856 	int xorigin = 0, yorigin = 0;
7857 
7858 	if (!crtc || !plane->state->fb)
7859 		return 0;
7860 
7861 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7862 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7863 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7864 			  __func__,
7865 			  plane->state->crtc_w,
7866 			  plane->state->crtc_h);
7867 		return -EINVAL;
7868 	}
7869 
7870 	x = plane->state->crtc_x;
7871 	y = plane->state->crtc_y;
7872 
7873 	if (x <= -amdgpu_crtc->max_cursor_width ||
7874 	    y <= -amdgpu_crtc->max_cursor_height)
7875 		return 0;
7876 
7877 	if (x < 0) {
7878 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7879 		x = 0;
7880 	}
7881 	if (y < 0) {
7882 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7883 		y = 0;
7884 	}
7885 	position->enable = true;
7886 	position->translate_by_source = true;
7887 	position->x = x;
7888 	position->y = y;
7889 	position->x_hotspot = xorigin;
7890 	position->y_hotspot = yorigin;
7891 
7892 	return 0;
7893 }
7894 
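/*
 * Program the hardware cursor for a plane update: compute the new position
 * and, when the cursor is visible, push both the attributes (address, size,
 * format) and the position to DC under the dc_lock.
 */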
7895 static void handle_cursor_update(struct drm_plane *plane,
7896 				 struct drm_plane_state *old_plane_state)
7897 {
7898 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7899 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7900 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7901 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7902 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7903 	uint64_t address = afb ? afb->address : 0;
7904 	struct dc_cursor_position position = {0};
7905 	struct dc_cursor_attributes attributes;
7906 	int ret;
7907 
7908 	if (!plane->state->fb && !old_plane_state->fb)
7909 		return;
7910 
7911 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7912 		      __func__,
7913 		      amdgpu_crtc->crtc_id,
7914 		      plane->state->crtc_w,
7915 		      plane->state->crtc_h);
7916 
7917 	ret = get_cursor_position(plane, crtc, &position);
7918 	if (ret)
7919 		return;
7920 
7921 	if (!position.enable) {
7922 		/* turn off cursor */
7923 		if (crtc_state && crtc_state->stream) {
7924 			mutex_lock(&adev->dm.dc_lock);
7925 			dc_stream_set_cursor_position(crtc_state->stream,
7926 						      &position);
7927 			mutex_unlock(&adev->dm.dc_lock);
7928 		}
7929 		return;
7930 	}
7931 
7932 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7933 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7934 
7935 	memset(&attributes, 0, sizeof(attributes));
7936 	attributes.address.high_part = upper_32_bits(address);
7937 	attributes.address.low_part  = lower_32_bits(address);
7938 	attributes.width             = plane->state->crtc_w;
7939 	attributes.height            = plane->state->crtc_h;
7940 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7941 	attributes.rotation_angle    = 0;
7942 	attributes.attribute_flags.value = 0;
7943 
7944 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7945 
7946 	if (crtc_state->stream) {
7947 		mutex_lock(&adev->dm.dc_lock);
7948 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7949 							 &attributes))
7950 			DRM_ERROR("DC failed to set cursor attributes\n");
7951 
7952 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7953 						   &position))
7954 			DRM_ERROR("DC failed to set cursor position\n");
7955 		mutex_unlock(&adev->dm.dc_lock);
7956 	}
7957 }
7958 
7959 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7960 {
7961 
7962 	assert_spin_locked(&acrtc->base.dev->event_lock);
7963 	WARN_ON(acrtc->event);
7964 
7965 	acrtc->event = acrtc->base.state->event;
7966 
7967 	/* Set the flip status */
7968 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7969 
7970 	/* Mark this event as consumed */
7971 	acrtc->base.state->event = NULL;
7972 
7973 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7974 		     acrtc->crtc_id);
7975 }
7976 
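/*
 * Re-evaluate freesync state for a flip: run the flip through the freesync
 * module, rebuild the VRR infopacket, and record in the CRTC state whether
 * the timing or infopacket changed so the commit can program the stream.
 */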
7977 static void update_freesync_state_on_stream(
7978 	struct amdgpu_display_manager *dm,
7979 	struct dm_crtc_state *new_crtc_state,
7980 	struct dc_stream_state *new_stream,
7981 	struct dc_plane_state *surface,
7982 	u32 flip_timestamp_in_us)
7983 {
7984 	struct mod_vrr_params vrr_params;
7985 	struct dc_info_packet vrr_infopacket = {0};
7986 	struct amdgpu_device *adev = dm->adev;
7987 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7988 	unsigned long flags;
7989 	bool pack_sdp_v1_3 = false;
7990 
7991 	if (!new_stream)
7992 		return;
7993 
7994 	/*
7995 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7996 	 * For now it's sufficient to just guard against these conditions.
7997 	 */
7998 
7999 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8000 		return;
8001 
8002 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8003 	vrr_params = acrtc->dm_irq_params.vrr_params;
8004 
8005 	if (surface) {
8006 		mod_freesync_handle_preflip(
8007 			dm->freesync_module,
8008 			surface,
8009 			new_stream,
8010 			flip_timestamp_in_us,
8011 			&vrr_params);
8012 
8013 		if (adev->family < AMDGPU_FAMILY_AI &&
8014 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8015 			mod_freesync_handle_v_update(dm->freesync_module,
8016 						     new_stream, &vrr_params);
8017 
8018 			/* Need to call this before the frame ends. */
8019 			dc_stream_adjust_vmin_vmax(dm->dc,
8020 						   new_crtc_state->stream,
8021 						   &vrr_params.adjust);
8022 		}
8023 	}
8024 
8025 	mod_freesync_build_vrr_infopacket(
8026 		dm->freesync_module,
8027 		new_stream,
8028 		&vrr_params,
8029 		PACKET_TYPE_VRR,
8030 		TRANSFER_FUNC_UNKNOWN,
8031 		&vrr_infopacket,
8032 		pack_sdp_v1_3);
8033 
8034 	new_crtc_state->freesync_timing_changed |=
8035 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8036 			&vrr_params.adjust,
8037 			sizeof(vrr_params.adjust)) != 0);
8038 
8039 	new_crtc_state->freesync_vrr_info_changed |=
8040 		(memcmp(&new_crtc_state->vrr_infopacket,
8041 			&vrr_infopacket,
8042 			sizeof(vrr_infopacket)) != 0);
8043 
8044 	acrtc->dm_irq_params.vrr_params = vrr_params;
8045 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8046 
8047 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8048 	new_stream->vrr_infopacket = vrr_infopacket;
8049 
8050 	if (new_crtc_state->freesync_vrr_info_changed)
8051 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
8052 			      new_crtc_state->base.crtc->base.id,
8053 			      (int)new_crtc_state->base.vrr_enabled,
8054 			      (int)vrr_params.state);
8055 
8056 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8057 }
8058 
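/*
 * Recompute the VRR parameters for a stream from its freesync config and
 * mirror them into the CRTC's IRQ parameters so the vblank/vupdate
 * handlers see a consistent copy.
 */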
8059 static void update_stream_irq_parameters(
8060 	struct amdgpu_display_manager *dm,
8061 	struct dm_crtc_state *new_crtc_state)
8062 {
8063 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8064 	struct mod_vrr_params vrr_params;
8065 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8066 	struct amdgpu_device *adev = dm->adev;
8067 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8068 	unsigned long flags;
8069 
8070 	if (!new_stream)
8071 		return;
8072 
8073 	/*
8074 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8075 	 * For now it's sufficient to just guard against these conditions.
8076 	 */
8077 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8078 		return;
8079 
8080 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8081 	vrr_params = acrtc->dm_irq_params.vrr_params;
8082 
8083 	if (new_crtc_state->vrr_supported &&
8084 	    config.min_refresh_in_uhz &&
8085 	    config.max_refresh_in_uhz) {
8086 		/*
8087 		 * if freesync compatible mode was set, config.state will be set
8088 		 * If a freesync-compatible mode was set, config.state will have
8089 		 * been set in atomic check.
8090 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8091 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8092 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8093 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8094 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8095 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8096 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8097 		} else {
8098 			config.state = new_crtc_state->base.vrr_enabled ?
8099 						     VRR_STATE_ACTIVE_VARIABLE :
8100 						     VRR_STATE_INACTIVE;
8101 		}
8102 	} else {
8103 		config.state = VRR_STATE_UNSUPPORTED;
8104 	}
8105 
8106 	mod_freesync_build_vrr_params(dm->freesync_module,
8107 				      new_stream,
8108 				      &config, &vrr_params);
8109 
8110 	new_crtc_state->freesync_timing_changed |=
8111 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8112 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8113 
8114 	new_crtc_state->freesync_config = config;
8115 	/* Copy state for access from DM IRQ handler */
8116 	acrtc->dm_irq_params.freesync_config = config;
8117 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8118 	acrtc->dm_irq_params.vrr_params = vrr_params;
8119 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8120 }
8121 
8122 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8123 					    struct dm_crtc_state *new_state)
8124 {
8125 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8126 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8127 
8128 	if (!old_vrr_active && new_vrr_active) {
8129 		/* Transition VRR inactive -> active:
8130 		 * While VRR is active, we must not disable vblank irq, as a
8131 		 * re-enable after a disable would compute bogus vblank/pflip
8132 		 * timestamps if the re-enable happened inside the display front porch.
8133 		 *
8134 		 * We also need vupdate irq for the actual core vblank handling
8135 		 * at end of vblank.
8136 		 */
8137 		dm_set_vupdate_irq(new_state->base.crtc, true);
8138 		drm_crtc_vblank_get(new_state->base.crtc);
8139 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8140 				 __func__, new_state->base.crtc->base.id);
8141 	} else if (old_vrr_active && !new_vrr_active) {
8142 		/* Transition VRR active -> inactive:
8143 		 * Allow vblank irq disable again for fixed refresh rate.
8144 		 */
8145 		dm_set_vupdate_irq(new_state->base.crtc, false);
8146 		drm_crtc_vblank_put(new_state->base.crtc);
8147 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8148 				 __func__, new_state->base.crtc->base.id);
8149 	}
8150 }
8151 
8152 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8153 {
8154 	struct drm_plane *plane;
8155 	struct drm_plane_state *old_plane_state, *new_plane_state;
8156 	int i;
8157 
8158 	/*
8159 	 * TODO: Make this per-stream so we don't issue redundant updates for
8160 	 * commits with multiple streams.
8161 	 */
8162 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8163 				       new_plane_state, i)
8164 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8165 			handle_cursor_update(plane, old_plane_state);
8166 }
8167 
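/*
 * Commit all plane updates for one CRTC: build a dc_surface_update bundle
 * for every changed plane, wait for fences on flipped framebuffers,
 * throttle against the target vblank, arm the pageflip event, and hand the
 * whole bundle to DC in a single dc_commit_updates_for_stream() call.
 */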
8168 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8169 				    struct dc_state *dc_state,
8170 				    struct drm_device *dev,
8171 				    struct amdgpu_display_manager *dm,
8172 				    struct drm_crtc *pcrtc,
8173 				    bool wait_for_vblank)
8174 {
8175 	uint32_t i;
8176 	uint64_t timestamp_ns;
8177 	struct drm_plane *plane;
8178 	struct drm_plane_state *old_plane_state, *new_plane_state;
8179 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8180 	struct drm_crtc_state *new_pcrtc_state =
8181 			drm_atomic_get_new_crtc_state(state, pcrtc);
8182 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8183 	struct dm_crtc_state *dm_old_crtc_state =
8184 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8185 	int planes_count = 0, vpos, hpos;
8186 	long r;
8187 	unsigned long flags;
8188 	struct amdgpu_bo *abo;
8189 	uint32_t target_vblank, last_flip_vblank;
8190 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8191 	bool pflip_present = false;
8192 	struct {
8193 		struct dc_surface_update surface_updates[MAX_SURFACES];
8194 		struct dc_plane_info plane_infos[MAX_SURFACES];
8195 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8196 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8197 		struct dc_stream_update stream_update;
8198 	} *bundle;
8199 
8200 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8201 
8202 	if (!bundle) {
8203 		dm_error("Failed to allocate update bundle\n");
8204 		goto cleanup;
8205 	}
8206 
8207 	/*
8208 	 * Disable the cursor first if we're disabling all the planes.
8209 	 * It'll remain on the screen after the planes are re-enabled
8210 	 * if we don't.
8211 	 */
8212 	if (acrtc_state->active_planes == 0)
8213 		amdgpu_dm_commit_cursors(state);
8214 
8215 	/* update planes when needed */
8216 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8217 		struct drm_crtc *crtc = new_plane_state->crtc;
8218 		struct drm_crtc_state *new_crtc_state;
8219 		struct drm_framebuffer *fb = new_plane_state->fb;
8220 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8221 		bool plane_needs_flip;
8222 		struct dc_plane_state *dc_plane;
8223 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8224 
8225 		/* Cursor plane is handled after stream updates */
8226 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8227 			continue;
8228 
8229 		if (!fb || !crtc || pcrtc != crtc)
8230 			continue;
8231 
8232 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8233 		if (!new_crtc_state->active)
8234 			continue;
8235 
8236 		dc_plane = dm_new_plane_state->dc_state;
8237 
8238 		bundle->surface_updates[planes_count].surface = dc_plane;
8239 		if (new_pcrtc_state->color_mgmt_changed) {
8240 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8241 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8242 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8243 		}
8244 
8245 		fill_dc_scaling_info(new_plane_state,
8246 				     &bundle->scaling_infos[planes_count]);
8247 
8248 		bundle->surface_updates[planes_count].scaling_info =
8249 			&bundle->scaling_infos[planes_count];
8250 
8251 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8252 
8253 		pflip_present = pflip_present || plane_needs_flip;
8254 
8255 		if (!plane_needs_flip) {
8256 			planes_count += 1;
8257 			continue;
8258 		}
8259 
8260 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8261 
8262 		/*
8263 		 * Wait for all fences on this FB. Do limited wait to avoid
8264 		 * deadlock during GPU reset when this fence will not signal
8265 		 * but we hold reservation lock for the BO.
8266 		 */
8267 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8268 							false,
8269 							msecs_to_jiffies(5000));
8270 		if (unlikely(r <= 0))
8271 			DRM_ERROR("Waiting for fences timed out!\n");
8272 
8273 		fill_dc_plane_info_and_addr(
8274 			dm->adev, new_plane_state,
8275 			afb->tiling_flags,
8276 			&bundle->plane_infos[planes_count],
8277 			&bundle->flip_addrs[planes_count].address,
8278 			afb->tmz_surface, false);
8279 
8280 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8281 				 new_plane_state->plane->index,
8282 				 bundle->plane_infos[planes_count].dcc.enable);
8283 
8284 		bundle->surface_updates[planes_count].plane_info =
8285 			&bundle->plane_infos[planes_count];
8286 
8287 		/*
8288 		 * Only allow immediate flips for fast updates that don't
8289 		 * change FB pitch, DCC state, rotation or mirroring.
8290 		 */
8291 		bundle->flip_addrs[planes_count].flip_immediate =
8292 			crtc->state->async_flip &&
8293 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8294 
8295 		timestamp_ns = ktime_get_ns();
8296 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8297 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8298 		bundle->surface_updates[planes_count].surface = dc_plane;
8299 
8300 		if (!bundle->surface_updates[planes_count].surface) {
8301 			DRM_ERROR("No surface for CRTC: id=%d\n",
8302 					acrtc_attach->crtc_id);
8303 			continue;
8304 		}
8305 
8306 		if (plane == pcrtc->primary)
8307 			update_freesync_state_on_stream(
8308 				dm,
8309 				acrtc_state,
8310 				acrtc_state->stream,
8311 				dc_plane,
8312 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8313 
8314 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8315 				 __func__,
8316 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8317 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8318 
8319 		planes_count += 1;
8320 
8321 	}
8322 
8323 	if (pflip_present) {
8324 		if (!vrr_active) {
8325 			/* Use old throttling in non-vrr fixed refresh rate mode
8326 			 * to keep flip scheduling based on target vblank counts
8327 			 * working in a backwards compatible way, e.g., for
8328 			 * clients using the GLX_OML_sync_control extension or
8329 			 * DRI3/Present extension with defined target_msc.
8330 			 */
8331 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8332 		} else {
8334 			/* For variable refresh rate mode only:
8335 			 * Get vblank of last completed flip to avoid > 1 vrr
8336 			 * flips per video frame by use of throttling, but allow
8337 			 * flip programming anywhere in the possibly large
8338 			 * variable vrr vblank interval for fine-grained flip
8339 			 * timing control and more opportunity to avoid stutter
8340 			 * on late submission of flips.
8341 			 */
8342 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8343 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8344 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8345 		}
8346 
8347 		target_vblank = last_flip_vblank + wait_for_vblank;
8348 
8349 		/*
8350 		 * Wait until we're out of the vertical blank period before the one
8351 		 * targeted by the flip
8352 		 */
8353 		while ((acrtc_attach->enabled &&
8354 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8355 							    0, &vpos, &hpos, NULL,
8356 							    NULL, &pcrtc->hwmode)
8357 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8358 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8359 			(int)(target_vblank -
8360 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8361 			usleep_range(1000, 1100);
8362 		}
8363 
8364 		/*
8365 		 * Prepare the flip event for the pageflip interrupt to handle.
8366 		 *
8367 		 * This only works in the case where we've already turned on the
8368 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8369 		 * from 0 -> n planes we have to skip a hardware generated event
8370 		 * and rely on sending it from software.
8371 		 */
8372 		if (acrtc_attach->base.state->event &&
8373 		    acrtc_state->active_planes > 0) {
8374 			drm_crtc_vblank_get(pcrtc);
8375 
8376 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8377 
8378 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8379 			prepare_flip_isr(acrtc_attach);
8380 
8381 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8382 		}
8383 
8384 		if (acrtc_state->stream) {
8385 			if (acrtc_state->freesync_vrr_info_changed)
8386 				bundle->stream_update.vrr_infopacket =
8387 					&acrtc_state->stream->vrr_infopacket;
8388 		}
8389 	}
8390 
8391 	/* Update the planes if changed or disable if we don't have any. */
8392 	if ((planes_count || acrtc_state->active_planes == 0) &&
8393 		acrtc_state->stream) {
8394 		bundle->stream_update.stream = acrtc_state->stream;
8395 		if (new_pcrtc_state->mode_changed) {
8396 			bundle->stream_update.src = acrtc_state->stream->src;
8397 			bundle->stream_update.dst = acrtc_state->stream->dst;
8398 		}
8399 
8400 		if (new_pcrtc_state->color_mgmt_changed) {
8401 			/*
8402 			 * TODO: This isn't fully correct since we've actually
8403 			 * already modified the stream in place.
8404 			 */
8405 			bundle->stream_update.gamut_remap =
8406 				&acrtc_state->stream->gamut_remap_matrix;
8407 			bundle->stream_update.output_csc_transform =
8408 				&acrtc_state->stream->csc_color_matrix;
8409 			bundle->stream_update.out_transfer_func =
8410 				acrtc_state->stream->out_transfer_func;
8411 		}
8412 
8413 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8414 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8415 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8416 
8417 		/*
8418 		 * If FreeSync state on the stream has changed then we need to
8419 		 * re-adjust the min/max bounds now that DC doesn't handle this
8420 		 * as part of commit.
8421 		 */
8422 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8423 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8424 			dc_stream_adjust_vmin_vmax(
8425 				dm->dc, acrtc_state->stream,
8426 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8427 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8428 		}
8429 		mutex_lock(&dm->dc_lock);
8430 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8431 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8432 			amdgpu_dm_psr_disable(acrtc_state->stream);
8433 
8434 		dc_commit_updates_for_stream(dm->dc,
8435 						     bundle->surface_updates,
8436 						     planes_count,
8437 						     acrtc_state->stream,
8438 						     &bundle->stream_update,
8439 						     dc_state);
8440 
8441 		/*
8442 		 * Enable or disable the interrupts on the backend.
8443 		 *
8444 		 * Most pipes are put into power gating when unused.
8445 		 *
8446 		 * When power gating is enabled on a pipe we lose the
8447 		 * interrupt enablement state when power gating is disabled.
8448 		 *
8449 		 * So we need to update the IRQ control state in hardware
8450 		 * whenever the pipe turns on (since it could be previously
8451 		 * power gated) or off (since some pipes can't be power gated
8452 		 * on some ASICs).
8453 		 */
8454 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8455 			dm_update_pflip_irq_state(drm_to_adev(dev),
8456 						  acrtc_attach);
8457 
8458 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8459 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8460 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8461 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8462 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8463 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8464 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8465 			amdgpu_dm_psr_enable(acrtc_state->stream);
8466 		}
8467 
8468 		mutex_unlock(&dm->dc_lock);
8469 	}
8470 
8471 	/*
8472 	 * Update cursor state *after* programming all the planes.
8473 	 * This avoids redundant programming in the case where we're going
8474 	 * to be disabling a single plane, since those pipes are being disabled.
8475 	 */
8476 	if (acrtc_state->active_planes)
8477 		amdgpu_dm_commit_cursors(state);
8478 
8479 cleanup:
8480 	kfree(bundle);
8481 }
8482 
8483 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8484 				   struct drm_atomic_state *state)
8485 {
8486 	struct amdgpu_device *adev = drm_to_adev(dev);
8487 	struct amdgpu_dm_connector *aconnector;
8488 	struct drm_connector *connector;
8489 	struct drm_connector_state *old_con_state, *new_con_state;
8490 	struct drm_crtc_state *new_crtc_state;
8491 	struct dm_crtc_state *new_dm_crtc_state;
8492 	const struct dc_stream_status *status;
8493 	int i, inst;
8494 
8495 	/* Notify device removals. */
8496 	/* Notify audio device removals. */
8497 		if (old_con_state->crtc != new_con_state->crtc) {
8498 			/* CRTC changes require notification. */
8499 			goto notify;
8500 		}
8501 
8502 		if (!new_con_state->crtc)
8503 			continue;
8504 
8505 		new_crtc_state = drm_atomic_get_new_crtc_state(
8506 			state, new_con_state->crtc);
8507 
8508 		if (!new_crtc_state)
8509 			continue;
8510 
8511 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8512 			continue;
8513 
8514 	notify:
8515 		aconnector = to_amdgpu_dm_connector(connector);
8516 
8517 		mutex_lock(&adev->dm.audio_lock);
8518 		inst = aconnector->audio_inst;
8519 		aconnector->audio_inst = -1;
8520 		mutex_unlock(&adev->dm.audio_lock);
8521 
8522 		amdgpu_dm_audio_eld_notify(adev, inst);
8523 	}
8524 
8525 	/* Notify audio device additions. */
8526 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8527 		if (!new_con_state->crtc)
8528 			continue;
8529 
8530 		new_crtc_state = drm_atomic_get_new_crtc_state(
8531 			state, new_con_state->crtc);
8532 
8533 		if (!new_crtc_state)
8534 			continue;
8535 
8536 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8537 			continue;
8538 
8539 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8540 		if (!new_dm_crtc_state->stream)
8541 			continue;
8542 
8543 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8544 		if (!status)
8545 			continue;
8546 
8547 		aconnector = to_amdgpu_dm_connector(connector);
8548 
8549 		mutex_lock(&adev->dm.audio_lock);
8550 		inst = status->audio_inst;
8551 		aconnector->audio_inst = inst;
8552 		mutex_unlock(&adev->dm.audio_lock);
8553 
8554 		amdgpu_dm_audio_eld_notify(adev, inst);
8555 	}
8556 }
8557 
8558 /*
8559  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8560  * @crtc_state: the DRM CRTC state
8561  * @stream_state: the DC stream state.
8562  *
8563  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8564  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8565  */
8566 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8567 						struct dc_stream_state *stream_state)
8568 {
8569 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8570 }
8571 
8572 /**
8573  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8574  * @state: The atomic state to commit
8575  *
8576  * This will tell DC to commit the constructed DC state from atomic_check,
8577  * programming the hardware. Any failure here implies a hardware failure, since
8578  * atomic check should have filtered out anything non-kosher.
8579  */
8580 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8581 {
8582 	struct drm_device *dev = state->dev;
8583 	struct amdgpu_device *adev = drm_to_adev(dev);
8584 	struct amdgpu_display_manager *dm = &adev->dm;
8585 	struct dm_atomic_state *dm_state;
8586 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8587 	uint32_t i, j;
8588 	struct drm_crtc *crtc;
8589 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8590 	unsigned long flags;
8591 	bool wait_for_vblank = true;
8592 	struct drm_connector *connector;
8593 	struct drm_connector_state *old_con_state, *new_con_state;
8594 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8595 	int crtc_disable_count = 0;
8596 	bool mode_set_reset_required = false;
8597 
8598 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8599 
8600 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8601 
8602 	dm_state = dm_atomic_get_new_state(state);
8603 	if (dm_state && dm_state->context) {
8604 		dc_state = dm_state->context;
8605 	} else {
8606 		/* No state changes, retain current state. */
8607 		dc_state_temp = dc_create_state(dm->dc);
8608 		ASSERT(dc_state_temp);
8609 		dc_state = dc_state_temp;
8610 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8611 	}
8612 
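	/*
	 * Disable interrupts and drop the stream reference for every CRTC
	 * that is being disabled or undergoing a modeset, before any
	 * hardware programming happens below.
	 */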
8613 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8614 				       new_crtc_state, i) {
8615 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8616 
8617 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8618 
8619 		if (old_crtc_state->active &&
8620 		    (!new_crtc_state->active ||
8621 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8622 			manage_dm_interrupts(adev, acrtc, false);
8623 			dc_stream_release(dm_old_crtc_state->stream);
8624 		}
8625 	}
8626 
8627 	drm_atomic_helper_calc_timestamping_constants(state);
8628 
8629 	/* update changed items */
8630 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8631 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8632 
8633 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8634 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8635 
8636 		DRM_DEBUG_ATOMIC(
8637 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8638 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
8639 			"connectors_changed:%d\n",
8640 			acrtc->crtc_id,
8641 			new_crtc_state->enable,
8642 			new_crtc_state->active,
8643 			new_crtc_state->planes_changed,
8644 			new_crtc_state->mode_changed,
8645 			new_crtc_state->active_changed,
8646 			new_crtc_state->connectors_changed);
8647 
8648 		/* Disable cursor if disabling crtc */
8649 		if (old_crtc_state->active && !new_crtc_state->active) {
8650 			struct dc_cursor_position position;
8651 
8652 			memset(&position, 0, sizeof(position));
8653 			mutex_lock(&dm->dc_lock);
8654 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8655 			mutex_unlock(&dm->dc_lock);
8656 		}
8657 
8658 		/* Copy all transient state flags into dc state */
8659 		if (dm_new_crtc_state->stream) {
8660 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8661 							    dm_new_crtc_state->stream);
8662 		}
8663 
8664 		/* Handle the headless hotplug case, updating new_state and
8665 		 * aconnector as needed.
8666 		 */
8667 
8668 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8669 
8670 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8671 
8672 			if (!dm_new_crtc_state->stream) {
8673 				/*
8674 				 * This could happen because of issues with
8675 				 * the delivery of userspace notifications.
8676 				 * In this case userspace tries to set a mode
8677 				 * on a display which is in fact disconnected.
8678 				 * dc_sink is NULL on the aconnector in this case.
8679 				 * We expect a mode reset to come soon.
8680 				 *
8681 				 * This can also happen when an unplug occurs
8682 				 * during the resume sequence.
8683 				 *
8684 				 * In this case we want to pretend we still
8685 				 * have a sink to keep the pipe running so that
8686 				 * hw state is consistent with the sw state.
8687 				 */
8688 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8689 						__func__, acrtc->base.base.id);
8690 				continue;
8691 			}
8692 
8693 			if (dm_old_crtc_state->stream)
8694 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8695 
8696 			pm_runtime_get_noresume(dev->dev);
8697 
8698 			acrtc->enabled = true;
8699 			acrtc->hw_mode = new_crtc_state->mode;
8700 			crtc->hwmode = new_crtc_state->mode;
8701 			mode_set_reset_required = true;
8702 		} else if (modereset_required(new_crtc_state)) {
8703 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8704 			/* i.e. reset mode */
8705 			if (dm_old_crtc_state->stream)
8706 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8707 
8708 			mode_set_reset_required = true;
8709 		}
8710 	} /* for_each_crtc_in_state() */
8711 
8712 	if (dc_state) {
8713 		/* If there is a mode set or reset, disable eDP PSR */
8714 		if (mode_set_reset_required)
8715 			amdgpu_dm_psr_disable_all(dm);
8716 
8717 		dm_enable_per_frame_crtc_master_sync(dc_state);
8718 		mutex_lock(&dm->dc_lock);
8719 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8720 #if defined(CONFIG_DRM_AMD_DC_DCN)
8721 		/* Allow idle optimization when vblank count is 0 for display off */
8722 		if (dm->active_vblank_irq_count == 0)
8723 			dc_allow_idle_optimizations(dm->dc, true);
8724 #endif
8725 		mutex_unlock(&dm->dc_lock);
8726 	}
8727 
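	/* Cache the OTG instance that DC assigned to each active stream,
	 * so later interrupt handling can be mapped back to the CRTC.
	 */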
8728 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8729 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8730 
8731 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8732 
8733 		if (dm_new_crtc_state->stream != NULL) {
8734 			const struct dc_stream_status *status =
8735 					dc_stream_get_status(dm_new_crtc_state->stream);
8736 
8737 			if (!status)
8738 				status = dc_stream_get_status_from_state(dc_state,
8739 									 dm_new_crtc_state->stream);
8740 			if (!status)
8741 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8742 			else
8743 				acrtc->otg_inst = status->primary_otg_inst;
8744 		}
8745 	}
8746 #ifdef CONFIG_DRM_AMD_DC_HDCP
8747 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8748 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8749 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8750 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8751 
8752 		new_crtc_state = NULL;
8753 
8754 		if (acrtc)
8755 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8756 
8757 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8758 
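		/* If the stream was removed while content protection was
		 * enabled, reset HDCP on the link and downgrade the property
		 * to DESIRED so it can be re-enabled later.
		 */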
8759 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8760 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8761 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8762 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8763 			dm_new_con_state->update_hdcp = true;
8764 			continue;
8765 		}
8766 
8767 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8768 			hdcp_update_display(
8769 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8770 				new_con_state->hdcp_content_type,
8771 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8772 	}
8773 #endif
8774 
8775 	/* Handle connector state changes */
8776 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8777 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8778 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8779 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8780 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8781 		struct dc_stream_update stream_update;
8782 		struct dc_info_packet hdr_packet;
8783 		struct dc_stream_status *status = NULL;
8784 		bool abm_changed, hdr_changed, scaling_changed;
8785 
8786 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8787 		memset(&stream_update, 0, sizeof(stream_update));
8788 
8789 		if (acrtc) {
8790 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8791 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8792 		}
8793 
8794 		/* Skip any modesets/resets */
8795 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8796 			continue;
8797 
8798 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8799 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8800 
8801 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8802 							     dm_old_con_state);
8803 
8804 		abm_changed = dm_new_crtc_state->abm_level !=
8805 			      dm_old_crtc_state->abm_level;
8806 
8807 		hdr_changed =
8808 			is_hdr_metadata_different(old_con_state, new_con_state);
8809 
8810 		if (!scaling_changed && !abm_changed && !hdr_changed)
8811 			continue;
8812 
8813 		stream_update.stream = dm_new_crtc_state->stream;
8814 		if (scaling_changed) {
8815 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8816 					dm_new_con_state, dm_new_crtc_state->stream);
8817 
8818 			stream_update.src = dm_new_crtc_state->stream->src;
8819 			stream_update.dst = dm_new_crtc_state->stream->dst;
8820 		}
8821 
8822 		if (abm_changed) {
8823 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8824 
8825 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8826 		}
8827 
8828 		if (hdr_changed) {
8829 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8830 			stream_update.hdr_static_metadata = &hdr_packet;
8831 		}
8832 
8833 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8834 		WARN_ON(!status);
8835 		WARN_ON(!status->plane_count);
8836 
8837 		/*
8838 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8839 		 * Here we create an empty update on each plane.
8840 		 * To fix this, DC should permit updating only stream properties.
8841 		 */
8842 		for (j = 0; j < status->plane_count; j++)
8843 			dummy_updates[j].surface = status->plane_states[0];
8844 
8846 		mutex_lock(&dm->dc_lock);
8847 		dc_commit_updates_for_stream(dm->dc,
8848 						     dummy_updates,
8849 						     status->plane_count,
8850 						     dm_new_crtc_state->stream,
8851 						     &stream_update,
8852 						     dc_state);
8853 		mutex_unlock(&dm->dc_lock);
8854 	}
8855 
8856 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8857 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8858 				      new_crtc_state, i) {
8859 		if (old_crtc_state->active && !new_crtc_state->active)
8860 			crtc_disable_count++;
8861 
8862 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8863 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8864 
8865 		/* For freesync config update on crtc state and params for irq */
8866 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8867 
8868 		/* Handle vrr on->off / off->on transitions */
8869 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8870 						dm_new_crtc_state);
8871 	}
8872 
8873 	/*
8874 	 * Enable interrupts for CRTCs that are newly enabled or went through
8875 	 * a modeset. This is intentionally deferred until after the front end
8876 	 * state has been modified so that the OTG is on and the IRQ
8877 	 * handlers do not access stale or invalid state.
8878 	 */
8879 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8880 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8881 #ifdef CONFIG_DEBUG_FS
8882 		bool configure_crc = false;
8883 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8884 #endif
8885 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8886 
8887 		if (new_crtc_state->active &&
8888 		    (!old_crtc_state->active ||
8889 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8890 			dc_stream_retain(dm_new_crtc_state->stream);
8891 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8892 			manage_dm_interrupts(adev, acrtc, true);
8893 
8894 #ifdef CONFIG_DEBUG_FS
8895 			/*
8896 			 * The frontend may have changed, so reapply the CRC
8897 			 * capture settings for the stream.
8898 			 */
8899 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8900 			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8901 			cur_crc_src = acrtc->dm_irq_params.crc_src;
8902 			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8903 
8904 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8905 				configure_crc = true;
8906 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8907 				if (amdgpu_dm_crc_window_is_activated(crtc))
8908 					configure_crc = false;
8909 #endif
8910 			}
8911 
8912 			if (configure_crc)
8913 				amdgpu_dm_crtc_configure_crc_source(
8914 					crtc, dm_new_crtc_state, cur_crc_src);
8915 #endif
8916 		}
8917 	}
8918 
8919 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8920 		if (new_crtc_state->async_flip)
8921 			wait_for_vblank = false;
8922 
8923 	/* update planes when needed per crtc*/
8924 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8925 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8926 
8927 		if (dm_new_crtc_state->stream)
8928 			amdgpu_dm_commit_planes(state, dc_state, dev,
8929 						dm, crtc, wait_for_vblank);
8930 	}
8931 
8932 	/* Update audio instances for each connector. */
8933 	amdgpu_dm_commit_audio(dev, state);
8934 
8935 	/*
8936 	 * Send a vblank event for each event not handled in the flip path, and
8937 	 * mark the event consumed for drm_atomic_helper_commit_hw_done.
8938 	 */
8939 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8940 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8941 
8942 		if (new_crtc_state->event)
8943 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8944 
8945 		new_crtc_state->event = NULL;
8946 	}
8947 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8948 
8949 	/* Signal HW programming completion */
8950 	drm_atomic_helper_commit_hw_done(state);
8951 
8952 	if (wait_for_vblank)
8953 		drm_atomic_helper_wait_for_flip_done(dev, state);
8954 
8955 	drm_atomic_helper_cleanup_planes(dev, state);
8956 
8957 	/* Return the stolen VGA memory to VRAM */
8958 	if (!adev->mman.keep_stolen_vga_memory)
8959 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8960 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8961 
8962 	/*
8963 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8964 	 * so we can put the GPU into runtime suspend if we're not driving any
8965 	 * displays anymore
8966 	 */
8967 	for (i = 0; i < crtc_disable_count; i++)
8968 		pm_runtime_put_autosuspend(dev->dev);
8969 	pm_runtime_mark_last_busy(dev->dev);
8970 
8971 	if (dc_state_temp)
8972 		dc_release_state(dc_state_temp);
8973 }
8974 
8975 
8976 static int dm_force_atomic_commit(struct drm_connector *connector)
8977 {
8978 	int ret = 0;
8979 	struct drm_device *ddev = connector->dev;
8980 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8981 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8982 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8983 	struct drm_connector_state *conn_state;
8984 	struct drm_crtc_state *crtc_state;
8985 	struct drm_plane_state *plane_state;
8986 
8987 	if (!state)
8988 		return -ENOMEM;
8989 
8990 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8991 
8992 	/* Construct an atomic state to restore the previous display settings */
8993 
8994 	/*
8995 	 * Attach connectors to drm_atomic_state
8996 	 */
8997 	conn_state = drm_atomic_get_connector_state(state, connector);
8998 
8999 	ret = PTR_ERR_OR_ZERO(conn_state);
9000 	if (ret)
9001 		goto out;
9002 
9003 	/* Attach crtc to drm_atomic_state*/
9004 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9005 
9006 	ret = PTR_ERR_OR_ZERO(crtc_state);
9007 	if (ret)
9008 		goto out;
9009 
9010 	/* force a restore */
9011 	crtc_state->mode_changed = true;
9012 
9013 	/* Attach plane to drm_atomic_state */
9014 	plane_state = drm_atomic_get_plane_state(state, plane);
9015 
9016 	ret = PTR_ERR_OR_ZERO(plane_state);
9017 	if (ret)
9018 		goto out;
9019 
9020 	/* Call commit internally with the state we just constructed */
9021 	ret = drm_atomic_commit(state);
9022 
9023 out:
9024 	drm_atomic_state_put(state);
9025 	if (ret)
9026 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9027 
9028 	return ret;
9029 }
9030 
9031 /*
9032  * This function handles all cases when a set mode does not come upon hotplug.
9033  * This includes when a display is unplugged and then plugged back into the
9034  * same port, and when running without usermode desktop manager support.
9035  */
9036 void dm_restore_drm_connector_state(struct drm_device *dev,
9037 				    struct drm_connector *connector)
9038 {
9039 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9040 	struct amdgpu_crtc *disconnected_acrtc;
9041 	struct dm_crtc_state *acrtc_state;
9042 
9043 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9044 		return;
9045 
9046 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9047 	if (!disconnected_acrtc)
9048 		return;
9049 
9050 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9051 	if (!acrtc_state->stream)
9052 		return;
9053 
9054 	/*
9055 	 * If the previous sink is not released and is different from the
9056 	 * current one, we deduce we cannot rely on a usermode call to turn
9057 	 * on the display, so we do it here.
9058 	 */
9059 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9060 		dm_force_atomic_commit(&aconnector->base);
9061 }
9062 
9063 /*
9064  * Grabs all modesetting locks to serialize against any blocking commits and
9065  * waits for completion of all non-blocking commits.
9066  */
9067 static int do_aquire_global_lock(struct drm_device *dev,
9068 				 struct drm_atomic_state *state)
9069 {
9070 	struct drm_crtc *crtc;
9071 	struct drm_crtc_commit *commit;
9072 	long ret;
9073 
9074 	/*
9075 	 * Adding all modeset locks to acquire_ctx ensures that when
9076 	 * the framework releases it, the extra locks we are taking
9077 	 * here will get released too.
9078 	 */
9079 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9080 	if (ret)
9081 		return ret;
9082 
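	/* Take a reference on the first (most recent) commit on each CRTC's
	 * commit list, so it cannot be freed while we wait on its completions.
	 */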
9083 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9084 		spin_lock(&crtc->commit_lock);
9085 		commit = list_first_entry_or_null(&crtc->commit_list,
9086 				struct drm_crtc_commit, commit_entry);
9087 		if (commit)
9088 			drm_crtc_commit_get(commit);
9089 		spin_unlock(&crtc->commit_lock);
9090 
9091 		if (!commit)
9092 			continue;
9093 
9094 		/*
9095 		 * Make sure all pending HW programming has completed and
9096 		 * all page flips are done; each wait below times out after 10s.
9097 		 */
9098 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9099 
9100 		if (ret > 0)
9101 			ret = wait_for_completion_interruptible_timeout(
9102 					&commit->flip_done, 10*HZ);
9103 
9104 		if (ret == 0)
9105 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9106 				  crtc->base.id, crtc->name);
9107 
9108 		drm_crtc_commit_put(commit);
9109 	}
9110 
9111 	return ret < 0 ? ret : 0;
9112 }
9113 
9114 static void get_freesync_config_for_crtc(
9115 	struct dm_crtc_state *new_crtc_state,
9116 	struct dm_connector_state *new_con_state)
9117 {
9118 	struct mod_freesync_config config = {0};
9119 	struct amdgpu_dm_connector *aconnector =
9120 			to_amdgpu_dm_connector(new_con_state->base.connector);
9121 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9122 	int vrefresh = drm_mode_vrefresh(mode);
9123 	bool fs_vid_mode = false;
9124 
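
	/* VRR is usable only when the connector reports FreeSync support and
	 * the mode's nominal refresh rate falls within the panel's range.
	 */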
9125 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9126 					vrefresh >= aconnector->min_vfreq &&
9127 					vrefresh <= aconnector->max_vfreq;
9128 
9129 	if (new_crtc_state->vrr_supported) {
9130 		new_crtc_state->stream->ignore_msa_timing_param = true;
9131 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9132 
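		/* min_vfreq/max_vfreq are in Hz; mod_freesync expects micro-Hz. */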
9133 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9134 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9135 		config.vsif_supported = true;
9136 		config.btr = true;
9137 
9138 		if (fs_vid_mode) {
9139 			config.state = VRR_STATE_ACTIVE_FIXED;
9140 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9141 			goto out;
9142 		} else if (new_crtc_state->base.vrr_enabled) {
9143 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9144 		} else {
9145 			config.state = VRR_STATE_INACTIVE;
9146 		}
9147 	}
9148 out:
9149 	new_crtc_state->freesync_config = config;
9150 }
9151 
9152 static void reset_freesync_config_for_crtc(
9153 	struct dm_crtc_state *new_crtc_state)
9154 {
9155 	new_crtc_state->vrr_supported = false;
9156 
9157 	memset(&new_crtc_state->vrr_infopacket, 0,
9158 	       sizeof(new_crtc_state->vrr_infopacket));
9159 }
9160 
9161 static bool
9162 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9163 				 struct drm_crtc_state *new_crtc_state)
9164 {
9165 	struct drm_display_mode old_mode, new_mode;
9166 
9167 	if (!old_crtc_state || !new_crtc_state)
9168 		return false;
9169 
9170 	old_mode = old_crtc_state->mode;
9171 	new_mode = new_crtc_state->mode;
9172 
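	/* A freesync-compatible timing change touches only the vertical
	 * total and vsync placement (i.e. the front porch); the pixel clock,
	 * all horizontal timings, and the vsync pulse width must match.
	 */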
9173 	if (old_mode.clock       == new_mode.clock &&
9174 	    old_mode.hdisplay    == new_mode.hdisplay &&
9175 	    old_mode.vdisplay    == new_mode.vdisplay &&
9176 	    old_mode.htotal      == new_mode.htotal &&
9177 	    old_mode.vtotal      != new_mode.vtotal &&
9178 	    old_mode.hsync_start == new_mode.hsync_start &&
9179 	    old_mode.vsync_start != new_mode.vsync_start &&
9180 	    old_mode.hsync_end   == new_mode.hsync_end &&
9181 	    old_mode.vsync_end   != new_mode.vsync_end &&
9182 	    old_mode.hskew       == new_mode.hskew &&
9183 	    old_mode.vscan       == new_mode.vscan &&
9184 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9185 	    (new_mode.vsync_end - new_mode.vsync_start))
9186 		return true;
9187 
9188 	return false;
9189 }
9190 
9191 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9192 	uint64_t num, den, res;
9193 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9194 
9195 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9196 
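	/* mode.clock is in kHz: scale by 1000 to get Hz, then by 10^6 to
	 * express the fixed refresh rate in micro-Hz before dividing by
	 * the pixel count per frame (htotal * vtotal).
	 */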
9197 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9198 	den = (unsigned long long)new_crtc_state->mode.htotal *
9199 	      (unsigned long long)new_crtc_state->mode.vtotal;
9200 
9201 	res = div_u64(num, den);
9202 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9203 }
9204 
9205 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9206 				struct drm_atomic_state *state,
9207 				struct drm_crtc *crtc,
9208 				struct drm_crtc_state *old_crtc_state,
9209 				struct drm_crtc_state *new_crtc_state,
9210 				bool enable,
9211 				bool *lock_and_validation_needed)
9212 {
9213 	struct dm_atomic_state *dm_state = NULL;
9214 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9215 	struct dc_stream_state *new_stream;
9216 	int ret = 0;
9217 
9218 	/*
9219 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9220 	 * update changed items
9221 	 */
9222 	struct amdgpu_crtc *acrtc = NULL;
9223 	struct amdgpu_dm_connector *aconnector = NULL;
9224 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9225 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9226 
9227 	new_stream = NULL;
9228 
9229 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9230 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9231 	acrtc = to_amdgpu_crtc(crtc);
9232 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9233 
9234 	/* TODO This hack should go away */
9235 	if (aconnector && enable) {
9236 		/* Make sure fake sink is created in plug-in scenario */
9237 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9238 							    &aconnector->base);
9239 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9240 							    &aconnector->base);
9241 
9242 		if (IS_ERR(drm_new_conn_state)) {
9243 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9244 			goto fail;
9245 		}
9246 
9247 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9248 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9249 
9250 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9251 			goto skip_modeset;
9252 
9253 		new_stream = create_validate_stream_for_sink(aconnector,
9254 							     &new_crtc_state->mode,
9255 							     dm_new_conn_state,
9256 							     dm_old_crtc_state->stream);
9257 
9258 		/*
9259 		 * We can have no stream on ACTION_SET if a display
9260 		 * was disconnected during S3. In this case it is not an
9261 		 * error: the OS will be updated after detection and
9262 		 * will do the right thing on the next atomic commit.
9263 		 */
9264 
9265 		if (!new_stream) {
9266 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9267 					__func__, acrtc->base.base.id);
9268 			ret = -ENOMEM;
9269 			goto fail;
9270 		}
9271 
9272 		/*
9273 		 * TODO: Check VSDB bits to decide whether this should
9274 		 * be enabled or not.
9275 		 */
9276 		new_stream->triggered_crtc_reset.enabled =
9277 			dm->force_timing_sync;
9278 
9279 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9280 
9281 		ret = fill_hdr_info_packet(drm_new_conn_state,
9282 					   &new_stream->hdr_static_metadata);
9283 		if (ret)
9284 			goto fail;
9285 
9286 		/*
9287 		 * If we already removed the old stream from the context
9288 		 * (and set the new stream to NULL) then we can't reuse
9289 		 * the old stream even if the stream and scaling are unchanged.
9290 		 * We'll hit the BUG_ON and black screen.
9291 		 *
9292 		 * TODO: Refactor this function to allow this check to work
9293 		 * in all conditions.
9294 		 */
9295 		if (amdgpu_freesync_vid_mode &&
9296 		    dm_new_crtc_state->stream &&
9297 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9298 			goto skip_modeset;
9299 
9300 		if (dm_new_crtc_state->stream &&
9301 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9302 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9303 			new_crtc_state->mode_changed = false;
9304 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9305 					 new_crtc_state->mode_changed);
9306 		}
9307 	}
9308 
9309 	/* mode_changed flag may get updated above, need to check again */
9310 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9311 		goto skip_modeset;
9312 
9313 	DRM_DEBUG_ATOMIC(
9314 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9315 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9316 		"connectors_changed:%d\n",
9317 		acrtc->crtc_id,
9318 		new_crtc_state->enable,
9319 		new_crtc_state->active,
9320 		new_crtc_state->planes_changed,
9321 		new_crtc_state->mode_changed,
9322 		new_crtc_state->active_changed,
9323 		new_crtc_state->connectors_changed);
9324 
9325 	/* Remove stream for any changed/disabled CRTC */
9326 	if (!enable) {
9327 
9328 		if (!dm_old_crtc_state->stream)
9329 			goto skip_modeset;
9330 
9331 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9332 		    is_timing_unchanged_for_freesync(new_crtc_state,
9333 						     old_crtc_state)) {
9334 			new_crtc_state->mode_changed = false;
9335 			DRM_DEBUG_DRIVER(
9336 				"Mode change not required for front porch change, "
9337 				"setting mode_changed to %d",
9338 				new_crtc_state->mode_changed);
9339 
9340 			set_freesync_fixed_config(dm_new_crtc_state);
9341 
9342 			goto skip_modeset;
9343 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9344 			   is_freesync_video_mode(&new_crtc_state->mode,
9345 						  aconnector)) {
9346 			set_freesync_fixed_config(dm_new_crtc_state);
9347 		}
9348 
9349 		ret = dm_atomic_get_state(state, &dm_state);
9350 		if (ret)
9351 			goto fail;
9352 
9353 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9354 				crtc->base.id);
9355 
9356 		/* i.e. reset mode */
9357 		if (dc_remove_stream_from_ctx(
9358 				dm->dc,
9359 				dm_state->context,
9360 				dm_old_crtc_state->stream) != DC_OK) {
9361 			ret = -EINVAL;
9362 			goto fail;
9363 		}
9364 
9365 		dc_stream_release(dm_old_crtc_state->stream);
9366 		dm_new_crtc_state->stream = NULL;
9367 
9368 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9369 
9370 		*lock_and_validation_needed = true;
9371 
9372 	} else {/* Add stream for any updated/enabled CRTC */
9373 		/*
9374 		 * Quick fix to prevent a NULL pointer dereference on new_stream
9375 		 * when added MST connectors are not found in the existing
9376 		 * crtc_state in chained mode. TODO: dig out the root cause.
9377 		 */
9378 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9379 			goto skip_modeset;
9380 
9381 		if (modereset_required(new_crtc_state))
9382 			goto skip_modeset;
9383 
9384 		if (modeset_required(new_crtc_state, new_stream,
9385 				     dm_old_crtc_state->stream)) {
9386 
9387 			WARN_ON(dm_new_crtc_state->stream);
9388 
9389 			ret = dm_atomic_get_state(state, &dm_state);
9390 			if (ret)
9391 				goto fail;
9392 
9393 			dm_new_crtc_state->stream = new_stream;
9394 
9395 			dc_stream_retain(new_stream);
9396 
9397 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9398 					 crtc->base.id);
9399 
9400 			if (dc_add_stream_to_ctx(
9401 					dm->dc,
9402 					dm_state->context,
9403 					dm_new_crtc_state->stream) != DC_OK) {
9404 				ret = -EINVAL;
9405 				goto fail;
9406 			}
9407 
9408 			*lock_and_validation_needed = true;
9409 		}
9410 	}
9411 
9412 skip_modeset:
9413 	/* Release extra reference */
9414 	if (new_stream)
9415 		dc_stream_release(new_stream);
9416 
9417 	/*
9418 	 * We want to do dc stream updates that do not require a
9419 	 * full modeset below.
9420 	 */
9421 	if (!(enable && aconnector && new_crtc_state->active))
9422 		return 0;
9423 	/*
9424 	 * Given the above conditions, the dc state cannot be NULL because:
9425 	 * 1. We're in the process of enabling CRTCs (the stream has just been
9426 	 *    added to the dc context, or is already in it),
9427 	 * 2. the CRTC has a valid connector attached, and
9428 	 * 3. the CRTC is currently active and enabled.
9429 	 * => The dc stream state currently exists.
9430 	 */
9431 	BUG_ON(dm_new_crtc_state->stream == NULL);
9432 
9433 	/* Scaling or underscan settings */
9434 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9435 		update_stream_scaling_settings(
9436 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9437 
9438 	/* ABM settings */
9439 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9440 
9441 	/*
9442 	 * Color management settings. We also update color properties
9443 	 * when a modeset is needed, to ensure it gets reprogrammed.
9444 	 */
9445 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9446 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9447 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9448 		if (ret)
9449 			goto fail;
9450 	}
9451 
9452 	/* Update Freesync settings. */
9453 	get_freesync_config_for_crtc(dm_new_crtc_state,
9454 				     dm_new_conn_state);
9455 
9456 	return ret;
9457 
9458 fail:
9459 	if (new_stream)
9460 		dc_stream_release(new_stream);
9461 	return ret;
9462 }
9463 
9464 static bool should_reset_plane(struct drm_atomic_state *state,
9465 			       struct drm_plane *plane,
9466 			       struct drm_plane_state *old_plane_state,
9467 			       struct drm_plane_state *new_plane_state)
9468 {
9469 	struct drm_plane *other;
9470 	struct drm_plane_state *old_other_state, *new_other_state;
9471 	struct drm_crtc_state *new_crtc_state;
9472 	int i;
9473 
9474 	/*
9475 	 * TODO: Remove this hack once the checks below are sufficient
9476 	 * to determine when we need to reset all the planes on
9477 	 * the stream.
9478 	 */
9479 	if (state->allow_modeset)
9480 		return true;
9481 
9482 	/* Exit early if we know that we're adding or removing the plane. */
9483 	if (old_plane_state->crtc != new_plane_state->crtc)
9484 		return true;
9485 
9486 	/* old crtc == new_crtc == NULL, plane not in context. */
9487 	if (!new_plane_state->crtc)
9488 		return false;
9489 
9490 	new_crtc_state =
9491 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9492 
9493 	if (!new_crtc_state)
9494 		return true;
9495 
9496 	/* CRTC Degamma changes currently require us to recreate planes. */
9497 	if (new_crtc_state->color_mgmt_changed)
9498 		return true;
9499 
9500 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9501 		return true;
9502 
9503 	/*
9504 	 * If there are any new primary or overlay planes being added or
9505 	 * removed then the z-order can potentially change. To ensure
9506 	 * correct z-order and pipe acquisition the current DC architecture
9507 	 * requires us to remove and recreate all existing planes.
9508 	 *
9509 	 * TODO: Come up with a more elegant solution for this.
9510 	 */
9511 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9512 		struct amdgpu_framebuffer *old_afb, *new_afb;
9513 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9514 			continue;
9515 
9516 		if (old_other_state->crtc != new_plane_state->crtc &&
9517 		    new_other_state->crtc != new_plane_state->crtc)
9518 			continue;
9519 
9520 		if (old_other_state->crtc != new_other_state->crtc)
9521 			return true;
9522 
9523 		/* Src/dst size and scaling updates. */
9524 		if (old_other_state->src_w != new_other_state->src_w ||
9525 		    old_other_state->src_h != new_other_state->src_h ||
9526 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9527 		    old_other_state->crtc_h != new_other_state->crtc_h)
9528 			return true;
9529 
9530 		/* Rotation / mirroring updates. */
9531 		if (old_other_state->rotation != new_other_state->rotation)
9532 			return true;
9533 
9534 		/* Blending updates. */
9535 		if (old_other_state->pixel_blend_mode !=
9536 		    new_other_state->pixel_blend_mode)
9537 			return true;
9538 
9539 		/* Alpha updates. */
9540 		if (old_other_state->alpha != new_other_state->alpha)
9541 			return true;
9542 
9543 		/* Colorspace changes. */
9544 		if (old_other_state->color_range != new_other_state->color_range ||
9545 		    old_other_state->color_encoding != new_other_state->color_encoding)
9546 			return true;
9547 
9548 		/* Framebuffer checks fall at the end. */
9549 		if (!old_other_state->fb || !new_other_state->fb)
9550 			continue;
9551 
9552 		/* Pixel format changes can require bandwidth updates. */
9553 		if (old_other_state->fb->format != new_other_state->fb->format)
9554 			return true;
9555 
9556 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9557 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9558 
9559 		/* Tiling and DCC changes also require bandwidth updates. */
9560 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9561 		    old_afb->base.modifier != new_afb->base.modifier)
9562 			return true;
9563 	}
9564 
9565 	return false;
9566 }
9567 
9568 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9569 			      struct drm_plane_state *new_plane_state,
9570 			      struct drm_framebuffer *fb)
9571 {
9572 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9573 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9574 	unsigned int pitch;
9575 	bool linear;
9576 
9577 	if (fb->width > new_acrtc->max_cursor_width ||
9578 	    fb->height > new_acrtc->max_cursor_height) {
9579 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9580 				 new_plane_state->fb->width,
9581 				 new_plane_state->fb->height);
9582 		return -EINVAL;
9583 	}
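	/* src_{w,h} are 16.16 fixed point: the cursor FB must be scanned
	 * out unscaled, covering the full framebuffer.
	 */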
9584 	if (new_plane_state->src_w != fb->width << 16 ||
9585 	    new_plane_state->src_h != fb->height << 16) {
9586 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9587 		return -EINVAL;
9588 	}
9589 
9590 	/* Pitch in pixels */
9591 	pitch = fb->pitches[0] / fb->format->cpp[0];
9592 
9593 	if (fb->width != pitch) {
9594 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9595 				 fb->width, pitch);
9596 		return -EINVAL;
9597 	}
9598 
9599 	switch (pitch) {
9600 	case 64:
9601 	case 128:
9602 	case 256:
9603 		/* FB pitch is supported by cursor plane */
9604 		break;
9605 	default:
9606 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9607 		return -EINVAL;
9608 	}
9609 
9610 	/* Core DRM takes care of checking FB modifiers, so we only need to
9611 	 * check tiling flags when the FB doesn't have a modifier. */
9612 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9613 		if (adev->family < AMDGPU_FAMILY_AI) {
9614 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9615 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9616 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9617 		} else {
9618 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9619 		}
9620 		if (!linear) {
9621 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9622 			return -EINVAL;
9623 		}
9624 	}
9625 
9626 	return 0;
9627 }
9628 
9629 static int dm_update_plane_state(struct dc *dc,
9630 				 struct drm_atomic_state *state,
9631 				 struct drm_plane *plane,
9632 				 struct drm_plane_state *old_plane_state,
9633 				 struct drm_plane_state *new_plane_state,
9634 				 bool enable,
9635 				 bool *lock_and_validation_needed)
9636 {
9637 
9638 	struct dm_atomic_state *dm_state = NULL;
9639 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9640 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9641 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9642 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9643 	struct amdgpu_crtc *new_acrtc;
9644 	bool needs_reset;
9645 	int ret = 0;
9646 
9647 
9648 	new_plane_crtc = new_plane_state->crtc;
9649 	old_plane_crtc = old_plane_state->crtc;
9650 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9651 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9652 
9653 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
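		/*
		 * The cursor plane is handled through the DC stream cursor
		 * interface rather than as a DC plane, so only its FB needs
		 * validation here.
		 */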
9654 		if (!enable || !new_plane_crtc ||
9655 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9656 			return 0;
9657 
9658 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9659 
9660 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9661 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9662 			return -EINVAL;
9663 		}
9664 
9665 		if (new_plane_state->fb) {
9666 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9667 						 new_plane_state->fb);
9668 			if (ret)
9669 				return ret;
9670 		}
9671 
9672 		return 0;
9673 	}
9674 
9675 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9676 					 new_plane_state);
9677 
9678 	/* Remove any changed/removed planes */
9679 	if (!enable) {
9680 		if (!needs_reset)
9681 			return 0;
9682 
9683 		if (!old_plane_crtc)
9684 			return 0;
9685 
9686 		old_crtc_state = drm_atomic_get_old_crtc_state(
9687 				state, old_plane_crtc);
9688 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9689 
9690 		if (!dm_old_crtc_state->stream)
9691 			return 0;
9692 
9693 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9694 				plane->base.id, old_plane_crtc->base.id);
9695 
9696 		ret = dm_atomic_get_state(state, &dm_state);
9697 		if (ret)
9698 			return ret;
9699 
9700 		if (!dc_remove_plane_from_context(
9701 				dc,
9702 				dm_old_crtc_state->stream,
9703 				dm_old_plane_state->dc_state,
9704 				dm_state->context)) {
9705 
9706 			return -EINVAL;
9707 		}
9708 
9709 
9710 		dc_plane_state_release(dm_old_plane_state->dc_state);
9711 		dm_new_plane_state->dc_state = NULL;
9712 
9713 		*lock_and_validation_needed = true;
9714 
9715 	} else { /* Add new planes */
9716 		struct dc_plane_state *dc_new_plane_state;
9717 
9718 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9719 			return 0;
9720 
9721 		if (!new_plane_crtc)
9722 			return 0;
9723 
9724 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9725 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9726 
9727 		if (!dm_new_crtc_state->stream)
9728 			return 0;
9729 
9730 		if (!needs_reset)
9731 			return 0;
9732 
9733 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9734 		if (ret)
9735 			return ret;
9736 
9737 		WARN_ON(dm_new_plane_state->dc_state);
9738 
9739 		dc_new_plane_state = dc_create_plane_state(dc);
9740 		if (!dc_new_plane_state)
9741 			return -ENOMEM;
9742 
9743 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9744 				 plane->base.id, new_plane_crtc->base.id);
9745 
9746 		ret = fill_dc_plane_attributes(
9747 			drm_to_adev(new_plane_crtc->dev),
9748 			dc_new_plane_state,
9749 			new_plane_state,
9750 			new_crtc_state);
9751 		if (ret) {
9752 			dc_plane_state_release(dc_new_plane_state);
9753 			return ret;
9754 		}
9755 
9756 		ret = dm_atomic_get_state(state, &dm_state);
9757 		if (ret) {
9758 			dc_plane_state_release(dc_new_plane_state);
9759 			return ret;
9760 		}
9761 
9762 		/*
9763 		 * Any atomic check errors that occur after this will
9764 		 * not need a release. The plane state will be attached
9765 		 * to the stream, and therefore part of the atomic
9766 		 * state. It'll be released when the atomic state is
9767 		 * cleaned.
9768 		 */
9769 		if (!dc_add_plane_to_context(
9770 				dc,
9771 				dm_new_crtc_state->stream,
9772 				dc_new_plane_state,
9773 				dm_state->context)) {
9774 
9775 			dc_plane_state_release(dc_new_plane_state);
9776 			return -EINVAL;
9777 		}
9778 
9779 		dm_new_plane_state->dc_state = dc_new_plane_state;
9780 
9781 		/* Tell DC to do a full surface update every time there
9782 		 * is a plane change. Inefficient, but works for now.
9783 		 */
9784 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9785 
9786 		*lock_and_validation_needed = true;
9787 	}
9788 
9789 
9790 	return ret;
9791 }
9792 
9793 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9794 				struct drm_crtc *crtc,
9795 				struct drm_crtc_state *new_crtc_state)
9796 {
9797 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9798 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9799 
9800 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9801 	 * cursor per pipe but it's going to inherit the scaling and
9802 	 * positioning from the underlying pipe. Check the cursor plane's
9803 	 * scaling matches the primary plane's. */
9804 
9805 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9806 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9807 	if (!new_cursor_state || !new_primary_state ||
9808 	    !new_cursor_state->fb || !new_primary_state->fb) {
9809 		return 0;
9810 	}
9811 
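	/* Plane src coordinates are 16.16 fixed point; compare scale factors
	 * in units of 1/1000 to stay in integer math.
	 */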
9812 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9813 			 (new_cursor_state->src_w >> 16);
9814 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9815 			 (new_cursor_state->src_h >> 16);
9816 
9817 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9818 			 (new_primary_state->src_w >> 16);
9819 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9820 			 (new_primary_state->src_h >> 16);
9821 
9822 	if (cursor_scale_w != primary_scale_w ||
9823 	    cursor_scale_h != primary_scale_h) {
9824 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9825 		return -EINVAL;
9826 	}
9827 
9828 	return 0;
9829 }
9830 
9831 #if defined(CONFIG_DRM_AMD_DC_DCN)
9832 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9833 {
9834 	struct drm_connector *connector;
9835 	struct drm_connector_state *conn_state;
9836 	struct amdgpu_dm_connector *aconnector = NULL;
9837 	int i;
9838 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9839 		if (conn_state->crtc != crtc)
9840 			continue;
9841 
9842 		aconnector = to_amdgpu_dm_connector(connector);
9843 		if (!aconnector->port || !aconnector->mst_port)
9844 			aconnector = NULL;
9845 		else
9846 			break;
9847 	}
9848 
9849 	if (!aconnector)
9850 		return 0;
9851 
9852 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9853 }
9854 #endif
9855 
9856 /**
9857  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9858  * @dev: The DRM device
9859  * @state: The atomic state to commit
9860  *
9861  * Validate that the given atomic state is programmable by DC into hardware.
9862  * This involves constructing a &struct dc_state reflecting the new hardware
9863  * state we wish to commit, then querying DC to see if it is programmable. It's
9864  * important not to modify the existing DC state. Otherwise, atomic_check
9865  * may unexpectedly commit hardware changes.
9866  *
9867  * When validating the DC state, it's important that the right locks are
9868  * acquired. For full updates case which removes/adds/updates streams on one
9869  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9870  * that any such full update commit will wait for completion of any outstanding
9871  * flip using DRMs synchronization events.
9872  *
9873  * Note that DM adds the affected connectors for all CRTCs in state, when that
9874  * might not seem necessary. This is because DC stream creation requires the
9875  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9876  * be possible but non-trivial - a possible TODO item.
9877  *
9878  * Return: -Error code if validation failed.
9879  */
9880 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9881 				  struct drm_atomic_state *state)
9882 {
9883 	struct amdgpu_device *adev = drm_to_adev(dev);
9884 	struct dm_atomic_state *dm_state = NULL;
9885 	struct dc *dc = adev->dm.dc;
9886 	struct drm_connector *connector;
9887 	struct drm_connector_state *old_con_state, *new_con_state;
9888 	struct drm_crtc *crtc;
9889 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9890 	struct drm_plane *plane;
9891 	struct drm_plane_state *old_plane_state, *new_plane_state;
9892 	enum dc_status status;
9893 	int ret, i;
9894 	bool lock_and_validation_needed = false;
9895 	struct dm_crtc_state *dm_old_crtc_state;
9896 
9897 	trace_amdgpu_dm_atomic_check_begin(state);
9898 
9899 	ret = drm_atomic_helper_check_modeset(dev, state);
9900 	if (ret)
9901 		goto fail;
9902 
9903 	/* Check connector changes */
9904 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9905 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9906 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9907 
9908 		/* Skip connectors that are disabled or part of modeset already. */
9909 		if (!old_con_state->crtc && !new_con_state->crtc)
9910 			continue;
9911 
9912 		if (!new_con_state->crtc)
9913 			continue;
9914 
9915 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9916 		if (IS_ERR(new_crtc_state)) {
9917 			ret = PTR_ERR(new_crtc_state);
9918 			goto fail;
9919 		}
9920 
9921 		if (dm_old_con_state->abm_level !=
9922 		    dm_new_con_state->abm_level)
9923 			new_crtc_state->connectors_changed = true;
9924 	}
9925 
9926 #if defined(CONFIG_DRM_AMD_DC_DCN)
9927 	if (dc_resource_is_dsc_encoding_supported(dc)) {
9928 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9929 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9930 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9931 				if (ret)
9932 					goto fail;
9933 			}
9934 		}
9935 	}
9936 #endif
9937 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9938 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9939 
9940 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9941 		    !new_crtc_state->color_mgmt_changed &&
9942 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9943 			dm_old_crtc_state->dsc_force_changed == false)
9944 			continue;
9945 
9946 		if (!new_crtc_state->enable)
9947 			continue;
9948 
9949 		ret = drm_atomic_add_affected_connectors(state, crtc);
9950 		if (ret)
9951 			goto fail;
9952 
9953 		ret = drm_atomic_add_affected_planes(state, crtc);
9954 		if (ret)
9955 			goto fail;
9956 
9957 		if (dm_old_crtc_state->dsc_force_changed)
9958 			new_crtc_state->mode_changed = true;
9959 	}
9960 
9961 	/*
9962 	 * Add all primary and overlay planes on the CRTC to the state
9963 	 * whenever a plane is enabled to maintain correct z-ordering
9964 	 * and to enable fast surface updates.
9965 	 */
9966 	drm_for_each_crtc(crtc, dev) {
9967 		bool modified = false;
9968 
9969 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9970 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9971 				continue;
9972 
9973 			if (new_plane_state->crtc == crtc ||
9974 			    old_plane_state->crtc == crtc) {
9975 				modified = true;
9976 				break;
9977 			}
9978 		}
9979 
9980 		if (!modified)
9981 			continue;
9982 
9983 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9984 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9985 				continue;
9986 
9987 			new_plane_state =
9988 				drm_atomic_get_plane_state(state, plane);
9989 
9990 			if (IS_ERR(new_plane_state)) {
9991 				ret = PTR_ERR(new_plane_state);
9992 				goto fail;
9993 			}
9994 		}
9995 	}
9996 
9997 	/* Remove existing planes if they are modified */
9998 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9999 		ret = dm_update_plane_state(dc, state, plane,
10000 					    old_plane_state,
10001 					    new_plane_state,
10002 					    false,
10003 					    &lock_and_validation_needed);
10004 		if (ret)
10005 			goto fail;
10006 	}
10007 
10008 	/* Disable all CRTCs which require disabling */
10009 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10010 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10011 					   old_crtc_state,
10012 					   new_crtc_state,
10013 					   false,
10014 					   &lock_and_validation_needed);
10015 		if (ret)
10016 			goto fail;
10017 	}
10018 
10019 	/* Enable all CRTCs which require enabling */
10020 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10021 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10022 					   old_crtc_state,
10023 					   new_crtc_state,
10024 					   true,
10025 					   &lock_and_validation_needed);
10026 		if (ret)
10027 			goto fail;
10028 	}
10029 
10030 	/* Add new/modified planes */
10031 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10032 		ret = dm_update_plane_state(dc, state, plane,
10033 					    old_plane_state,
10034 					    new_plane_state,
10035 					    true,
10036 					    &lock_and_validation_needed);
10037 		if (ret)
10038 			goto fail;
10039 	}
10040 
10041 	/* Run this here since we want to validate the streams we created */
10042 	ret = drm_atomic_helper_check_planes(dev, state);
10043 	if (ret)
10044 		goto fail;
10045 
10046 	/* Check cursor planes scaling */
10047 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10048 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10049 		if (ret)
10050 			goto fail;
10051 	}
10052 
10053 	if (state->legacy_cursor_update) {
10054 		/*
10055 		 * This is a fast cursor update coming from the plane update
10056 		 * helper; check whether it can be done asynchronously for better
10057 		 * performance.
10058 		 */
10059 		state->async_update =
10060 			!drm_atomic_helper_async_check(dev, state);
10061 
10062 		/*
10063 		 * Skip the remaining global validation if this is an async
10064 		 * update. Cursor updates can be done without affecting
10065 		 * state or bandwidth calcs and this avoids the performance
10066 		 * penalty of locking the private state object and
10067 		 * allocating a new dc_state.
10068 		 */
10069 		if (state->async_update)
10070 			return 0;
10071 	}
10072 
10073 	/* Check scaling and underscan changes */
10074 	/* TODO: Scaling-changes validation was removed because a new stream
10075 	 * cannot be committed into the context w/o causing a full reset.
10076 	 * Decide how to handle this.
10077 	 */
10078 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10079 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10080 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10081 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10082 
10083 		/* Skip any modesets/resets */
10084 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10085 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10086 			continue;
10087 
10088 		/* Skip anything that is not a scaling or underscan change */
10089 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10090 			continue;
10091 
10092 		lock_and_validation_needed = true;
10093 	}
10094 
10095 	/*
10096 	 * Streams and planes are reset when there are changes that affect
10097 	 * bandwidth. Anything that affects bandwidth needs to go through
10098 	 * DC global validation to ensure that the configuration can be applied
10099 	 * to hardware.
10100 	 *
10101 	 * We have to currently stall out here in atomic_check for outstanding
10102 	 * commits to finish in this case because our IRQ handlers reference
10103 	 * DRM state directly - we can end up disabling interrupts too early
10104 	 * if we don't.
10105 	 *
10106 	 * TODO: Remove this stall and drop DM state private objects.
10107 	 */
10108 	if (lock_and_validation_needed) {
10109 		ret = dm_atomic_get_state(state, &dm_state);
10110 		if (ret)
10111 			goto fail;
10112 
10113 		ret = do_aquire_global_lock(dev, state);
10114 		if (ret)
10115 			goto fail;
10116 
10117 #if defined(CONFIG_DRM_AMD_DC_DCN)
10118 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
10119 			ret = -EINVAL;
			goto fail;
		}
10120 
10121 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10122 		if (ret)
10123 			goto fail;
10124 #endif
10125 
10126 		/*
10127 		 * Perform validation of MST topology in the state:
10128 		 * We need to perform MST atomic check before calling
10129 		 * dc_validate_global_state(), or there is a chance
10130 		 * to get stuck in an infinite loop and hang eventually.
10131 		 */
10132 		ret = drm_dp_mst_atomic_check(state);
10133 		if (ret)
10134 			goto fail;
10135 		status = dc_validate_global_state(dc, dm_state->context, false);
10136 		if (status != DC_OK) {
10137 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10138 				       dc_status_to_str(status), status);
10139 			ret = -EINVAL;
10140 			goto fail;
10141 		}
10142 	} else {
10143 		/*
10144 		 * The commit is a fast update. Fast updates shouldn't change
10145 		 * the DC context or affect global validation, and can have their
10146 		 * commit work done in parallel with other commits not touching
10147 		 * the same resource. If we have a new DC context as part of
10148 		 * the DM atomic state from validation we need to free it and
10149 		 * retain the existing one instead.
10150 		 *
10151 		 * Furthermore, since the DM atomic state only contains the DC
10152 		 * context and can safely be annulled, we can free the state
10153 		 * and clear the associated private object now to free
10154 		 * some memory and avoid a possible use-after-free later.
10155 		 */
10156 
10157 		for (i = 0; i < state->num_private_objs; i++) {
10158 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10159 
10160 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10161 				int j = state->num_private_objs - 1;
10162 
10163 				dm_atomic_destroy_state(obj,
10164 						state->private_objs[i].state);
10165 
10166 				/* If i is not at the end of the array then the
10167 				 * last element needs to be moved to where i was
10168 				 * before the array can safely be truncated.
10169 				 */
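				/* Example: with num_private_objs == 3 and
				 * i == 0, the object in slot 2 is copied into
				 * slot 0, slot 2 is cleared, and the count
				 * drops to 2.
				 */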
10170 				if (i != j)
10171 					state->private_objs[i] =
10172 						state->private_objs[j];
10173 
10174 				state->private_objs[j].ptr = NULL;
10175 				state->private_objs[j].state = NULL;
10176 				state->private_objs[j].old_state = NULL;
10177 				state->private_objs[j].new_state = NULL;
10178 
10179 				state->num_private_objs = j;
10180 				break;
10181 			}
10182 		}
10183 	}
10184 
10185 	/* Store the overall update type for use later in atomic check. */
10186 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10187 		struct dm_crtc_state *dm_new_crtc_state =
10188 			to_dm_crtc_state(new_crtc_state);
10189 
10190 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10191 							 UPDATE_TYPE_FULL :
10192 							 UPDATE_TYPE_FAST;
10193 	}
10194 
10195 	/* Must have succeeded by this point */
10196 	WARN_ON(ret);
10197 
10198 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10199 
10200 	return ret;
10201 
10202 fail:
10203 	if (ret == -EDEADLK)
10204 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10205 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10206 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10207 	else
10208 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10209 
10210 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10211 
10212 	return ret;
10213 }
10214 
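/*
 * Read DP_DOWN_STREAM_PORT_COUNT (DPCD 0x007) and test the
 * DP_MSA_TIMING_PAR_IGNORED bit: a sink that can ignore the MSA timing
 * parameters is what allows variable refresh over DP, which is why this is
 * used as the EDID-check gate for DP FreeSync below.
 */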
10215 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10216 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10217 {
10218 	uint8_t dpcd_data;
10219 	bool capable = false;
10220 
10221 	if (amdgpu_dm_connector->dc_link &&
10222 		dm_helpers_dp_read_dpcd(
10223 				NULL,
10224 				amdgpu_dm_connector->dc_link,
10225 				DP_DOWN_STREAM_PORT_COUNT,
10226 				&dpcd_data,
10227 				sizeof(dpcd_data))) {
10228 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10229 	}
10230 
10231 	return capable;
10232 }
10233 
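/*
 * Stream a CEA extension block to the DMCU firmware for parsing, 8 bytes
 * at a time. Every chunk except the last is acknowledged with an offset;
 * the final chunk yields the parse result, from which any AMD
 * vendor-specific data block (min/max refresh rate) is extracted.
 */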
10234 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10235 		uint8_t *edid_ext, int len,
10236 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10237 {
10238 	int i;
10239 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10240 	struct dc *dc = adev->dm.dc;
10241 
10242 	/* send extension block to DMCU for parsing */
10243 	for (i = 0; i < len; i += 8) {
10244 		bool res;
10245 		int offset;
10246 
10247 		/* send 8 bytes at a time */
10248 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10249 			return false;
10250 
10251 		if (i + 8 == len) {
10252 			/* entire EDID block sent; expect the parse result */
10253 			int version, min_rate, max_rate;
10254 
10255 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10256 			if (res) {
10257 				/* amd vsdb found */
10258 				vsdb_info->freesync_supported = 1;
10259 				vsdb_info->amd_vsdb_version = version;
10260 				vsdb_info->min_refresh_rate_hz = min_rate;
10261 				vsdb_info->max_refresh_rate_hz = max_rate;
10262 				return true;
10263 			}
10264 			/* not amd vsdb */
10265 			return false;
10266 		}
10267 
10268 		/* check for ack */
10269 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10270 		if (!res)
10271 			return false;
10272 	}
10273 
10274 	return false;
10275 }
10276 
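/*
 * Walk the EDID extension blocks (each EDID_LENGTH bytes, following the
 * base block) looking for the first CEA extension, then hand it to the
 * DMCU-based parser above. Returns the extension index on success and
 * -ENODEV otherwise.
 */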
10277 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10278 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10279 {
10280 	uint8_t *edid_ext = NULL;
10281 	int i;
10282 	bool valid_vsdb_found = false;
10283 
10284 	/*----- drm_find_cea_extension() -----*/
10285 	/* No EDID or EDID extensions */
10286 	if (edid == NULL || edid->extensions == 0)
10287 		return -ENODEV;
10288 
10289 	/* Find CEA extension */
10290 	for (i = 0; i < edid->extensions; i++) {
10291 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10292 		if (edid_ext[0] == CEA_EXT)
10293 			break;
10294 	}
10295 
10296 	if (i == edid->extensions)
10297 		return -ENODEV;
10298 
10299 	/*----- cea_db_offsets() -----*/
10300 	if (edid_ext[0] != CEA_EXT)
10301 		return -ENODEV;
10302 
10303 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10304 
10305 	return valid_vsdb_found ? i : -ENODEV;
10306 }
10307 
10308 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10309 					struct edid *edid)
10310 {
10311 	int i = 0;
10312 	struct detailed_timing *timing;
10313 	struct detailed_non_pixel *data;
10314 	struct detailed_data_monitor_range *range;
10315 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10316 			to_amdgpu_dm_connector(connector);
10317 	struct dm_connector_state *dm_con_state = NULL;
10318 
10319 	struct drm_device *dev = connector->dev;
10320 	struct amdgpu_device *adev = drm_to_adev(dev);
10321 	bool freesync_capable = false;
10322 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10323 
10324 	if (!connector->state) {
10325 		DRM_ERROR("%s - Connector has no state\n", __func__);
10326 		goto update;
10327 	}
10328 
10329 	if (!edid) {
10330 		dm_con_state = to_dm_connector_state(connector->state);
10331 
10332 		amdgpu_dm_connector->min_vfreq = 0;
10333 		amdgpu_dm_connector->max_vfreq = 0;
10334 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10335 
10336 		goto update;
10337 	}
10338 
10339 	dm_con_state = to_dm_connector_state(connector->state);
10340 
10341 	if (!amdgpu_dm_connector->dc_sink) {
10342 		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10343 		goto update;
10344 	}
10345 	if (!adev->dm.freesync_module)
10346 		goto update;
10347 
10348 
10349 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10350 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10351 		bool edid_check_required = false;
10352 
10353 		if (edid) {
10354 			edid_check_required = is_dp_capable_without_timing_msa(
10355 						adev->dm.dc,
10356 						amdgpu_dm_connector);
10357 		}
10358 
10359 		if (edid_check_required && (edid->version > 1 ||
10360 		   (edid->version == 1 && edid->revision > 1))) {
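			/*
			 * Scan the four detailed descriptors in the EDID
			 * base block for a monitor range limits descriptor
			 * (EDID_DETAIL_MONITOR_RANGE) to pick up the
			 * supported vertical refresh window.
			 */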
10361 			for (i = 0; i < 4; i++) {
10362 
10363 				timing	= &edid->detailed_timings[i];
10364 				data	= &timing->data.other_data;
10365 				range	= &data->data.range;
10366 				/*
10367 				 * Check if monitor has continuous frequency mode
10368 				 */
10369 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10370 					continue;
10371 				/*
10372 				 * Check for flag range limits only. If flag == 1 then
10373 				 * no additional timing information provided.
10374 				 * Default GTF, GTF Secondary curve and CVT are not
10375 				 * supported
10376 				 */
10377 				if (range->flags != 1)
10378 					continue;
10379 
10380 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10381 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10382 				amdgpu_dm_connector->pixel_clock_mhz =
10383 					range->pixel_clock_mhz * 10;
10384 
10385 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10386 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10387 
10388 				break;
10389 			}
10390 
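			/*
			 * Only report FreeSync support when the monitor
			 * exposes a usable VRR window: more than 10 Hz
			 * between its minimum and maximum refresh rate.
			 */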
10391 			if (amdgpu_dm_connector->max_vfreq -
10392 			    amdgpu_dm_connector->min_vfreq > 10) {
10393 
10394 				freesync_capable = true;
10395 			}
10396 		}
10397 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10398 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10399 		if (i >= 0 && vsdb_info.freesync_supported) {
10400 			timing  = &edid->detailed_timings[i];
10401 			data    = &timing->data.other_data;
10402 
10403 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10404 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10405 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10406 				freesync_capable = true;
10407 
10408 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10409 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10410 		}
10411 	}
10412 
10413 update:
10414 	if (dm_con_state)
10415 		dm_con_state->freesync_capable = freesync_capable;
10416 
10417 	if (connector->vrr_capable_property)
10418 		drm_connector_set_vrr_capable_property(connector,
10419 						       freesync_capable);
10420 }
10421 
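/*
 * Read the sink's PSR capability from the DP_PSR_SUPPORT DPCD register and
 * record whether PSR may be enabled on this eDP link.
 */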
10422 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10423 {
10424 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10425 
10426 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10427 		return;
10428 	if (link->type == dc_connection_none)
10429 		return;
10430 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10431 					dpcd_data, sizeof(dpcd_data))) {
10432 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10433 
10434 		if (dpcd_data[0] == 0) {
10435 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10436 			link->psr_settings.psr_feature_enabled = false;
10437 		} else {
10438 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10439 			link->psr_settings.psr_feature_enabled = true;
10440 		}
10441 
10442 		DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
10443 	}
10444 }
10445 
10446 /*
10447  * amdgpu_dm_link_setup_psr() - configure psr link
10448  * @stream: stream state
10449  *
10450  * Return: true on success
10451  */
10452 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10453 {
10454 	struct dc_link *link = NULL;
10455 	struct psr_config psr_config = {0};
10456 	struct psr_context psr_context = {0};
10457 	bool ret = false;
10458 
10459 	if (stream == NULL)
10460 		return false;
10461 
10462 	link = stream->link;
10463 
10464 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10465 
10466 	if (psr_config.psr_version > 0) {
10467 		psr_config.psr_exit_link_training_required = 0x1;
10468 		psr_config.psr_frame_capture_indication_req = 0;
10469 		psr_config.psr_rfb_setup_time = 0x37;
10470 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10471 		psr_config.allow_smu_optimizations = 0x0;
10472 
10473 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10474 
10475 	}
10476 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10477 
10478 	return ret;
10479 }
10480 
10481 /*
10482  * amdgpu_dm_psr_enable() - enable psr f/w
10483  * @stream: stream state
10484  *
10485  * Return: true on success
10486  */
10487 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10488 {
10489 	struct dc_link *link = stream->link;
10490 	unsigned int vsync_rate_hz = 0;
10491 	struct dc_static_screen_params params = {0};
10492 	/* Calculate the number of static frames before generating an
10493 	 * interrupt to enter PSR. Start from a failsafe default of
10494 	 * 2 static frames.
10495 	 */
10496 	unsigned int num_frames_static = 2;
10497 
10498 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10499 
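	/* vsync rate in Hz = pixel clock (Hz) / (h_total * v_total) */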
10500 	vsync_rate_hz = div64_u64(div64_u64((
10501 			stream->timing.pix_clk_100hz * 100),
10502 			stream->timing.v_total),
10503 			stream->timing.h_total);
10504 
10505 	/*
10506 	 * Round up: pick the number of frames such that at least 30 ms
10507 	 * of time has passed.
10508 	 */
10509 	if (vsync_rate_hz != 0) {
10510 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10511 		num_frames_static = (30000 / frame_time_microsec) + 1;
10512 	}
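	/* Example: at 60 Hz, frame_time_microsec == 16666 and
	 * num_frames_static == 30000 / 16666 + 1 == 2.
	 */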
10513 
10514 	params.triggers.cursor_update = true;
10515 	params.triggers.overlay_update = true;
10516 	params.triggers.surface_update = true;
10517 	params.num_frames = num_frames_static;
10518 
10519 	dc_stream_set_static_screen_params(link->ctx->dc,
10520 					   &stream, 1,
10521 					   &params);
10522 
10523 	return dc_link_set_psr_allow_active(link, true, false, false);
10524 }
10525 
10526 /*
10527  * amdgpu_dm_psr_disable() - disable psr f/w
10528  * @stream:  stream state
10529  *
10530  * Return: true on success
10531  */
10532 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10533 {
10534 
10535 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10536 
10537 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10538 }
10539 
10540 /*
10541  * amdgpu_dm_psr_disable_all() - disable psr f/w
10542  * if psr is enabled on any stream
10543  *
10544  * Return: true on success
10545  */
10546 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10547 {
10548 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10549 	return dc_set_psr_allow_active(dm->dc, false);
10550 }
10551 
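/*
 * Propagate the force_timing_sync setting to every stream in the current
 * DC state and retrigger CRTC timing synchronization under the dc_lock.
 */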
10552 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10553 {
10554 	struct amdgpu_device *adev = drm_to_adev(dev);
10555 	struct dc *dc = adev->dm.dc;
10556 	int i;
10557 
10558 	mutex_lock(&adev->dm.dc_lock);
10559 	if (dc->current_state) {
10560 		for (i = 0; i < dc->current_state->stream_count; ++i)
10561 			dc->current_state->streams[i]
10562 				->triggered_crtc_reset.enabled =
10563 				adev->dm.force_timing_sync;
10564 
10565 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10566 		dc_trigger_sync(dc, dc->current_state);
10567 	}
10568 	mutex_unlock(&adev->dm.dc_lock);
10569 }
10570 
10571 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10572 		       uint32_t value, const char *func_name)
10573 {
10574 #ifdef DM_CHECK_ADDR_0
10575 	if (address == 0) {
10576 		DC_ERR("invalid register write; address = 0\n");
10577 		return;
10578 	}
10579 #endif
10580 	cgs_write_register(ctx->cgs_device, address, value);
10581 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10582 }
10583 
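/*
 * Note: reads are not serviced while a DMUB register-access gather is in
 * progress (unless burst writes are permitted); such a read asserts and
 * returns 0.
 */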
10584 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10585 			  const char *func_name)
10586 {
10587 	uint32_t value;
10588 #ifdef DM_CHECK_ADDR_0
10589 	if (address == 0) {
10590 		DC_ERR("invalid register read; address = 0\n");
10591 		return 0;
10592 	}
10593 #endif
10594 
10595 	if (ctx->dmub_srv &&
10596 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10597 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10598 		ASSERT(false);
10599 		return 0;
10600 	}
10601 
10602 	value = cgs_read_register(ctx->cgs_device, address);
10603 
10604 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10605 
10606 	return value;
10607 }
10608