1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38 
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50 
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58 
59 #include "ivsrcid/ivsrcid_vislands30.h"
60 
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/version.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69 
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 #include <drm/drm_hdcp.h>
80 
81 #if defined(CONFIG_DRM_AMD_DC_DCN)
82 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 
84 #include "dcn/dcn_1_0_offset.h"
85 #include "dcn/dcn_1_0_sh_mask.h"
86 #include "soc15_hw_ip.h"
87 #include "vega10_ip_offset.h"
88 
89 #include "soc15_common.h"
90 #endif
91 
92 #include "modules/inc/mod_freesync.h"
93 #include "modules/power/power_helpers.h"
94 #include "modules/inc/mod_info_packet.h"
95 
96 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
97 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
104 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
106 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 
109 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 
112 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 
115 /* Number of bytes in PSP header for firmware. */
116 #define PSP_HEADER_BYTES 0x100
117 
118 /* Number of bytes in PSP footer for firmware. */
119 #define PSP_FOOTER_BYTES 0x100
120 
121 /**
122  * DOC: overview
123  *
124  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
125  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
126  * requests into DC requests, and DC responses into DRM responses.
127  *
128  * The root control structure is &struct amdgpu_display_manager.
129  */
130 
131 /* basic init/fini API */
132 static int amdgpu_dm_init(struct amdgpu_device *adev);
133 static void amdgpu_dm_fini(struct amdgpu_device *adev);
134 
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137 	switch (link->dpcd_caps.dongle_type) {
138 	case DISPLAY_DONGLE_NONE:
139 		return DRM_MODE_SUBCONNECTOR_Native;
140 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 		return DRM_MODE_SUBCONNECTOR_VGA;
142 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 		return DRM_MODE_SUBCONNECTOR_DVID;
145 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 		return DRM_MODE_SUBCONNECTOR_HDMIA;
148 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 	default:
150 		return DRM_MODE_SUBCONNECTOR_Unknown;
151 	}
152 }
153 
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156 	struct dc_link *link = aconnector->dc_link;
157 	struct drm_connector *connector = &aconnector->base;
158 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 
160 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161 		return;
162 
163 	if (aconnector->dc_sink)
164 		subconnector = get_subconnector_type(link);
165 
166 	drm_object_property_set_value(&connector->base,
167 			connector->dev->mode_config.dp_subconnector_property,
168 			subconnector);
169 }
170 
171 /*
172  * initializes drm_device display related structures, based on the information
173  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
174  * drm_encoder, drm_mode_config
175  *
176  * Returns 0 on success
177  */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 				struct drm_plane *plane,
184 				unsigned long possible_crtcs,
185 				const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 			       struct drm_plane *plane,
188 			       uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
191 				    uint32_t link_index,
192 				    struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 				  struct amdgpu_encoder *aencoder,
195 				  uint32_t link_index);
196 
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 
199 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
200 				   struct drm_atomic_state *state,
201 				   bool nonblock);
202 
203 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
204 
205 static int amdgpu_dm_atomic_check(struct drm_device *dev,
206 				  struct drm_atomic_state *state);
207 
208 static void handle_cursor_update(struct drm_plane *plane,
209 				 struct drm_plane_state *old_plane_state);
210 
211 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
212 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
213 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
214 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
215 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
216 
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 
220 /*
221  * dm_vblank_get_counter
222  *
223  * @brief
224  * Get counter for number of vertical blanks
225  *
226  * @param
227  * struct amdgpu_device *adev - [in] desired amdgpu device
228  * int disp_idx - [in] which CRTC to get the counter from
229  *
230  * @return
231  * Counter for vertical blanks
232  */
233 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
234 {
235 	if (crtc >= adev->mode_info.num_crtc)
236 		return 0;
237 	else {
238 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
239 
240 		if (acrtc->dm_irq_params.stream == NULL) {
241 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
242 				  crtc);
243 			return 0;
244 		}
245 
246 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
247 	}
248 }
249 
250 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
251 				  u32 *vbl, u32 *position)
252 {
253 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
254 
255 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
256 		return -EINVAL;
257 	else {
258 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
259 
260 		if (acrtc->dm_irq_params.stream ==  NULL) {
261 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
262 				  crtc);
263 			return 0;
264 		}
265 
266 		/*
267 		 * TODO rework base driver to use values directly.
268 		 * for now parse it back into reg-format
269 		 */
270 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
271 					 &v_blank_start,
272 					 &v_blank_end,
273 					 &h_position,
274 					 &v_position);
275 
276 		*position = v_position | (h_position << 16);
277 		*vbl = v_blank_start | (v_blank_end << 16);
278 	}
279 
280 	return 0;
281 }
282 
283 static bool dm_is_idle(void *handle)
284 {
285 	/* XXX todo */
286 	return true;
287 }
288 
289 static int dm_wait_for_idle(void *handle)
290 {
291 	/* XXX todo */
292 	return 0;
293 }
294 
295 static bool dm_check_soft_reset(void *handle)
296 {
297 	return false;
298 }
299 
300 static int dm_soft_reset(void *handle)
301 {
302 	/* XXX todo */
303 	return 0;
304 }
305 
306 static struct amdgpu_crtc *
307 get_crtc_by_otg_inst(struct amdgpu_device *adev,
308 		     int otg_inst)
309 {
310 	struct drm_device *dev = adev_to_drm(adev);
311 	struct drm_crtc *crtc;
312 	struct amdgpu_crtc *amdgpu_crtc;
313 
314 	if (otg_inst == -1) {
315 		WARN_ON(1);
316 		return adev->mode_info.crtcs[0];
317 	}
318 
319 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
320 		amdgpu_crtc = to_amdgpu_crtc(crtc);
321 
322 		if (amdgpu_crtc->otg_inst == otg_inst)
323 			return amdgpu_crtc;
324 	}
325 
326 	return NULL;
327 }
328 
329 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
330 {
331 	return acrtc->dm_irq_params.freesync_config.state ==
332 		       VRR_STATE_ACTIVE_VARIABLE ||
333 	       acrtc->dm_irq_params.freesync_config.state ==
334 		       VRR_STATE_ACTIVE_FIXED;
335 }
336 
337 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
338 {
339 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
340 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
341 }
342 
343 /**
344  * dm_pflip_high_irq() - Handle pageflip interrupt
345  * @interrupt_params: ignored
346  *
347  * Handles the pageflip interrupt by notifying all interested parties
348  * that the pageflip has been completed.
349  */
350 static void dm_pflip_high_irq(void *interrupt_params)
351 {
352 	struct amdgpu_crtc *amdgpu_crtc;
353 	struct common_irq_params *irq_params = interrupt_params;
354 	struct amdgpu_device *adev = irq_params->adev;
355 	unsigned long flags;
356 	struct drm_pending_vblank_event *e;
357 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
358 	bool vrr_active;
359 
360 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
361 
362 	/* IRQ could occur when in initial stage */
363 	/* TODO work and BO cleanup */
364 	if (amdgpu_crtc == NULL) {
365 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
366 		return;
367 	}
368 
369 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
370 
371 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
372 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
373 						 amdgpu_crtc->pflip_status,
374 						 AMDGPU_FLIP_SUBMITTED,
375 						 amdgpu_crtc->crtc_id,
376 						 amdgpu_crtc);
377 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
378 		return;
379 	}
380 
381 	/* page flip completed. */
382 	e = amdgpu_crtc->event;
383 	amdgpu_crtc->event = NULL;
384 
385 	if (!e)
386 		WARN_ON(1);
387 
388 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
389 
390 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
391 	if (!vrr_active ||
392 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
393 				      &v_blank_end, &hpos, &vpos) ||
394 	    (vpos < v_blank_start)) {
395 		/* Update to correct count and vblank timestamp if racing with
396 		 * vblank irq. This also updates to the correct vblank timestamp
397 		 * even in VRR mode, as scanout is past the front-porch atm.
398 		 */
399 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
400 
401 		/* Wake up userspace by sending the pageflip event with proper
402 		 * count and timestamp of vblank of flip completion.
403 		 */
404 		if (e) {
405 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
406 
407 			/* Event sent, so done with vblank for this flip */
408 			drm_crtc_vblank_put(&amdgpu_crtc->base);
409 		}
410 	} else if (e) {
411 		/* VRR active and inside front-porch: vblank count and
412 		 * timestamp for pageflip event will only be up to date after
413 		 * drm_crtc_handle_vblank() has been executed from late vblank
414 		 * irq handler after start of back-porch (vline 0). We queue the
415 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
416 		 * updated timestamp and count, once it runs after us.
417 		 *
418 		 * We need to open-code this instead of using the helper
419 		 * drm_crtc_arm_vblank_event(), as that helper would
420 		 * call drm_crtc_accurate_vblank_count(), which we must
421 		 * not call in VRR mode while we are in front-porch!
422 		 */
423 
424 		/* sequence will be replaced by real count during send-out. */
425 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
426 		e->pipe = amdgpu_crtc->crtc_id;
427 
428 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
429 		e = NULL;
430 	}
431 
432 	/* Keep track of vblank of this flip for flip throttling. We use the
433 	 * cooked hw counter, as that one incremented at start of this vblank
434 	 * of pageflip completion, so last_flip_vblank is the forbidden count
435 	 * for queueing new pageflips if vsync + VRR is enabled.
436 	 */
437 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
438 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
439 
440 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
441 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
442 
443 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
444 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
445 			 vrr_active, (int) !e);
446 }
447 
448 static void dm_vupdate_high_irq(void *interrupt_params)
449 {
450 	struct common_irq_params *irq_params = interrupt_params;
451 	struct amdgpu_device *adev = irq_params->adev;
452 	struct amdgpu_crtc *acrtc;
453 	unsigned long flags;
454 	int vrr_active;
455 
456 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
457 
458 	if (acrtc) {
459 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
460 
461 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
462 			      acrtc->crtc_id,
463 			      vrr_active);
464 
465 		/* Core vblank handling is done here after end of front-porch in
466 		 * vrr mode, as vblank timestamping will give valid results
467 		 * while now done after front-porch. This will also deliver
468 		 * page-flip completion events that have been queued to us
469 		 * if a pageflip happened inside front-porch.
470 		 */
471 		if (vrr_active) {
472 			drm_crtc_handle_vblank(&acrtc->base);
473 
474 			/* BTR processing for pre-DCE12 ASICs */
475 			if (acrtc->dm_irq_params.stream &&
476 			    adev->family < AMDGPU_FAMILY_AI) {
477 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
478 				mod_freesync_handle_v_update(
479 				    adev->dm.freesync_module,
480 				    acrtc->dm_irq_params.stream,
481 				    &acrtc->dm_irq_params.vrr_params);
482 
483 				dc_stream_adjust_vmin_vmax(
484 				    adev->dm.dc,
485 				    acrtc->dm_irq_params.stream,
486 				    &acrtc->dm_irq_params.vrr_params.adjust);
487 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
488 			}
489 		}
490 	}
491 }
492 
493 /**
494  * dm_crtc_high_irq() - Handles CRTC interrupt
495  * @interrupt_params: used for determining the CRTC instance
496  *
497  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
498  * event handler.
499  */
500 static void dm_crtc_high_irq(void *interrupt_params)
501 {
502 	struct common_irq_params *irq_params = interrupt_params;
503 	struct amdgpu_device *adev = irq_params->adev;
504 	struct amdgpu_crtc *acrtc;
505 	unsigned long flags;
506 	int vrr_active;
507 
508 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
509 	if (!acrtc)
510 		return;
511 
512 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
513 
514 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
515 		      vrr_active, acrtc->dm_irq_params.active_planes);
516 
517 	/**
518 	 * Core vblank handling at start of front-porch is only possible
519 	 * in non-vrr mode, as only there vblank timestamping will give
520 	 * valid results while done in front-porch. Otherwise defer it
521 	 * to dm_vupdate_high_irq after end of front-porch.
522 	 */
523 	if (!vrr_active)
524 		drm_crtc_handle_vblank(&acrtc->base);
525 
526 	/**
527 	 * Following stuff must happen at start of vblank, for crc
528 	 * computation and below-the-range btr support in vrr mode.
529 	 */
530 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
531 
532 	/* BTR updates need to happen before VUPDATE on Vega and above. */
533 	if (adev->family < AMDGPU_FAMILY_AI)
534 		return;
535 
536 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
537 
538 	if (acrtc->dm_irq_params.stream &&
539 	    acrtc->dm_irq_params.vrr_params.supported &&
540 	    acrtc->dm_irq_params.freesync_config.state ==
541 		    VRR_STATE_ACTIVE_VARIABLE) {
542 		mod_freesync_handle_v_update(adev->dm.freesync_module,
543 					     acrtc->dm_irq_params.stream,
544 					     &acrtc->dm_irq_params.vrr_params);
545 
546 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
547 					   &acrtc->dm_irq_params.vrr_params.adjust);
548 	}
549 
550 	/*
551 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
552 	 * In that case, pageflip completion interrupts won't fire and pageflip
553 	 * completion events won't get delivered. Prevent this by sending
554 	 * pending pageflip events from here if a flip is still pending.
555 	 *
556 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
557 	 * avoid race conditions between flip programming and completion,
558 	 * which could cause too early flip completion events.
559 	 */
560 	if (adev->family >= AMDGPU_FAMILY_RV &&
561 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
562 	    acrtc->dm_irq_params.active_planes == 0) {
563 		if (acrtc->event) {
564 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
565 			acrtc->event = NULL;
566 			drm_crtc_vblank_put(&acrtc->base);
567 		}
568 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
569 	}
570 
571 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
572 }
573 
574 static int dm_set_clockgating_state(void *handle,
575 		  enum amd_clockgating_state state)
576 {
577 	return 0;
578 }
579 
580 static int dm_set_powergating_state(void *handle,
581 		  enum amd_powergating_state state)
582 {
583 	return 0;
584 }
585 
586 /* Prototypes of private functions */
587 static int dm_early_init(void* handle);
588 
589 /* Allocate memory for FBC compressed data  */
590 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
591 {
592 	struct drm_device *dev = connector->dev;
593 	struct amdgpu_device *adev = drm_to_adev(dev);
594 	struct dm_compressor_info *compressor = &adev->dm.compressor;
595 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
596 	struct drm_display_mode *mode;
597 	unsigned long max_size = 0;
598 
599 	if (adev->dm.dc->fbc_compressor == NULL)
600 		return;
601 
602 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
603 		return;
604 
605 	if (compressor->bo_ptr)
606 		return;
607 
608 
609 	list_for_each_entry(mode, &connector->modes, head) {
610 		if (max_size < mode->htotal * mode->vtotal)
611 			max_size = mode->htotal * mode->vtotal;
612 	}
613 
614 	if (max_size) {
615 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
616 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
617 			    &compressor->gpu_addr, &compressor->cpu_addr);
618 
619 		if (r)
620 			DRM_ERROR("DM: Failed to initialize FBC\n");
621 		else {
622 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
623 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
624 		}
625 
626 	}
627 
628 }
629 
630 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
631 					  int pipe, bool *enabled,
632 					  unsigned char *buf, int max_bytes)
633 {
634 	struct drm_device *dev = dev_get_drvdata(kdev);
635 	struct amdgpu_device *adev = drm_to_adev(dev);
636 	struct drm_connector *connector;
637 	struct drm_connector_list_iter conn_iter;
638 	struct amdgpu_dm_connector *aconnector;
639 	int ret = 0;
640 
641 	*enabled = false;
642 
643 	mutex_lock(&adev->dm.audio_lock);
644 
645 	drm_connector_list_iter_begin(dev, &conn_iter);
646 	drm_for_each_connector_iter(connector, &conn_iter) {
647 		aconnector = to_amdgpu_dm_connector(connector);
648 		if (aconnector->audio_inst != port)
649 			continue;
650 
651 		*enabled = true;
652 		ret = drm_eld_size(connector->eld);
653 		memcpy(buf, connector->eld, min(max_bytes, ret));
654 
655 		break;
656 	}
657 	drm_connector_list_iter_end(&conn_iter);
658 
659 	mutex_unlock(&adev->dm.audio_lock);
660 
661 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
662 
663 	return ret;
664 }
665 
666 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
667 	.get_eld = amdgpu_dm_audio_component_get_eld,
668 };
669 
670 static int amdgpu_dm_audio_component_bind(struct device *kdev,
671 				       struct device *hda_kdev, void *data)
672 {
673 	struct drm_device *dev = dev_get_drvdata(kdev);
674 	struct amdgpu_device *adev = drm_to_adev(dev);
675 	struct drm_audio_component *acomp = data;
676 
677 	acomp->ops = &amdgpu_dm_audio_component_ops;
678 	acomp->dev = kdev;
679 	adev->dm.audio_component = acomp;
680 
681 	return 0;
682 }
683 
684 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
685 					  struct device *hda_kdev, void *data)
686 {
687 	struct drm_device *dev = dev_get_drvdata(kdev);
688 	struct amdgpu_device *adev = drm_to_adev(dev);
689 	struct drm_audio_component *acomp = data;
690 
691 	acomp->ops = NULL;
692 	acomp->dev = NULL;
693 	adev->dm.audio_component = NULL;
694 }
695 
696 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
697 	.bind	= amdgpu_dm_audio_component_bind,
698 	.unbind	= amdgpu_dm_audio_component_unbind,
699 };
700 
701 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
702 {
703 	int i, ret;
704 
705 	if (!amdgpu_audio)
706 		return 0;
707 
708 	adev->mode_info.audio.enabled = true;
709 
710 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
711 
712 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
713 		adev->mode_info.audio.pin[i].channels = -1;
714 		adev->mode_info.audio.pin[i].rate = -1;
715 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
716 		adev->mode_info.audio.pin[i].status_bits = 0;
717 		adev->mode_info.audio.pin[i].category_code = 0;
718 		adev->mode_info.audio.pin[i].connected = false;
719 		adev->mode_info.audio.pin[i].id =
720 			adev->dm.dc->res_pool->audios[i]->inst;
721 		adev->mode_info.audio.pin[i].offset = 0;
722 	}
723 
724 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
725 	if (ret < 0)
726 		return ret;
727 
728 	adev->dm.audio_registered = true;
729 
730 	return 0;
731 }
732 
733 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
734 {
735 	if (!amdgpu_audio)
736 		return;
737 
738 	if (!adev->mode_info.audio.enabled)
739 		return;
740 
741 	if (adev->dm.audio_registered) {
742 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
743 		adev->dm.audio_registered = false;
744 	}
745 
746 	/* TODO: Disable audio? */
747 
748 	adev->mode_info.audio.enabled = false;
749 }
750 
751 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
752 {
753 	struct drm_audio_component *acomp = adev->dm.audio_component;
754 
755 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
756 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
757 
758 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
759 						 pin, -1);
760 	}
761 }
762 
763 static int dm_dmub_hw_init(struct amdgpu_device *adev)
764 {
765 	const struct dmcub_firmware_header_v1_0 *hdr;
766 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
767 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
768 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
769 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
770 	struct abm *abm = adev->dm.dc->res_pool->abm;
771 	struct dmub_srv_hw_params hw_params;
772 	enum dmub_status status;
773 	const unsigned char *fw_inst_const, *fw_bss_data;
774 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
775 	bool has_hw_support;
776 
777 	if (!dmub_srv)
778 		/* DMUB isn't supported on the ASIC. */
779 		return 0;
780 
781 	if (!fb_info) {
782 		DRM_ERROR("No framebuffer info for DMUB service.\n");
783 		return -EINVAL;
784 	}
785 
786 	if (!dmub_fw) {
787 		/* Firmware required for DMUB support. */
788 		DRM_ERROR("No firmware provided for DMUB.\n");
789 		return -EINVAL;
790 	}
791 
792 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
793 	if (status != DMUB_STATUS_OK) {
794 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
795 		return -EINVAL;
796 	}
797 
798 	if (!has_hw_support) {
799 		DRM_INFO("DMUB unsupported on ASIC\n");
800 		return 0;
801 	}
802 
803 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
804 
805 	fw_inst_const = dmub_fw->data +
806 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
807 			PSP_HEADER_BYTES;
808 
809 	fw_bss_data = dmub_fw->data +
810 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
811 		      le32_to_cpu(hdr->inst_const_bytes);
812 
813 	/* Copy firmware and bios info into FB memory. */
814 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
815 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
816 
817 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
818 
819 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
820 	 * amdgpu_ucode_init_single_fw will load dmub firmware
821 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
822 	 * will be done by dm_dmub_hw_init
823 	 */
824 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
825 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
826 				fw_inst_const_size);
827 	}
828 
829 	if (fw_bss_data_size)
830 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
831 		       fw_bss_data, fw_bss_data_size);
832 
833 	/* Copy firmware bios info into FB memory. */
834 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
835 	       adev->bios_size);
836 
837 	/* Reset regions that need to be reset. */
838 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
839 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
840 
841 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
842 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
843 
844 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
845 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
846 
847 	/* Initialize hardware. */
848 	memset(&hw_params, 0, sizeof(hw_params));
849 	hw_params.fb_base = adev->gmc.fb_start;
850 	hw_params.fb_offset = adev->gmc.aper_base;
851 
852 	/* backdoor load firmware and trigger dmub running */
853 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
854 		hw_params.load_inst_const = true;
855 
856 	if (dmcu)
857 		hw_params.psp_version = dmcu->psp_version;
858 
859 	for (i = 0; i < fb_info->num_fb; ++i)
860 		hw_params.fb[i] = &fb_info->fb[i];
861 
862 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
863 	if (status != DMUB_STATUS_OK) {
864 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
865 		return -EINVAL;
866 	}
867 
868 	/* Wait for firmware load to finish. */
869 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
870 	if (status != DMUB_STATUS_OK)
871 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
872 
873 	/* Init DMCU and ABM if available. */
874 	if (dmcu && abm) {
875 		dmcu->funcs->dmcu_init(dmcu);
876 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
877 	}
878 
879 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
880 	if (!adev->dm.dc->ctx->dmub_srv) {
881 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
882 		return -ENOMEM;
883 	}
884 
885 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
886 		 adev->dm.dmcub_fw_version);
887 
888 	return 0;
889 }
890 
891 #if defined(CONFIG_DRM_AMD_DC_DCN)
892 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
893 {
894 	uint64_t pt_base;
895 	uint32_t logical_addr_low;
896 	uint32_t logical_addr_high;
897 	uint32_t agp_base, agp_bot, agp_top;
898 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
899 
900 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
901 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
902 
903 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
904 		/*
905 		 * Raven2 has a HW issue that it is unable to use the vram which
906 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
907 		 * workaround that increase system aperture high address (add 1)
908 		 * to get rid of the VM fault and hardware hang.
909 		 */
910 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
911 	else
912 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
913 
914 	agp_base = 0;
915 	agp_bot = adev->gmc.agp_start >> 24;
916 	agp_top = adev->gmc.agp_end >> 24;
917 
918 
919 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
920 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
921 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
922 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
923 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
924 	page_table_base.low_part = lower_32_bits(pt_base);
925 
926 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
927 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
928 
929 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
930 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
931 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
932 
933 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
934 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
935 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
936 
937 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
938 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
939 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
940 
941 	pa_config->is_hvm_enabled = 0;
942 
943 }
944 #endif
945 
946 #ifdef CONFIG_DEBUG_FS
947 static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
948 {
949 	dm->crc_win_x_start_property =
950 		drm_property_create_range(adev_to_drm(dm->adev),
951 					  DRM_MODE_PROP_ATOMIC,
952 					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
953 	if (!dm->crc_win_x_start_property)
954 		return -ENOMEM;
955 
956 	dm->crc_win_y_start_property =
957 		drm_property_create_range(adev_to_drm(dm->adev),
958 					  DRM_MODE_PROP_ATOMIC,
959 					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
960 	if (!dm->crc_win_y_start_property)
961 		return -ENOMEM;
962 
963 	dm->crc_win_x_end_property =
964 		drm_property_create_range(adev_to_drm(dm->adev),
965 					  DRM_MODE_PROP_ATOMIC,
966 					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
967 	if (!dm->crc_win_x_end_property)
968 		return -ENOMEM;
969 
970 	dm->crc_win_y_end_property =
971 		drm_property_create_range(adev_to_drm(dm->adev),
972 					  DRM_MODE_PROP_ATOMIC,
973 					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
974 	if (!dm->crc_win_y_end_property)
975 		return -ENOMEM;
976 
977 	return 0;
978 }
979 #endif
980 
981 static int amdgpu_dm_init(struct amdgpu_device *adev)
982 {
983 	struct dc_init_data init_data;
984 #ifdef CONFIG_DRM_AMD_DC_HDCP
985 	struct dc_callback_init init_params;
986 #endif
987 	int r;
988 
989 	adev->dm.ddev = adev_to_drm(adev);
990 	adev->dm.adev = adev;
991 
992 	/* Zero all the fields */
993 	memset(&init_data, 0, sizeof(init_data));
994 #ifdef CONFIG_DRM_AMD_DC_HDCP
995 	memset(&init_params, 0, sizeof(init_params));
996 #endif
997 
998 	mutex_init(&adev->dm.dc_lock);
999 	mutex_init(&adev->dm.audio_lock);
1000 
1001 	if(amdgpu_dm_irq_init(adev)) {
1002 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1003 		goto error;
1004 	}
1005 
1006 	init_data.asic_id.chip_family = adev->family;
1007 
1008 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1009 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1010 
1011 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1012 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1013 	init_data.asic_id.atombios_base_address =
1014 		adev->mode_info.atom_context->bios;
1015 
1016 	init_data.driver = adev;
1017 
1018 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1019 
1020 	if (!adev->dm.cgs_device) {
1021 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1022 		goto error;
1023 	}
1024 
1025 	init_data.cgs_device = adev->dm.cgs_device;
1026 
1027 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1028 
1029 	switch (adev->asic_type) {
1030 	case CHIP_CARRIZO:
1031 	case CHIP_STONEY:
1032 	case CHIP_RAVEN:
1033 	case CHIP_RENOIR:
1034 		init_data.flags.gpu_vm_support = true;
1035 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1036 			init_data.flags.disable_dmcu = true;
1037 		break;
1038 	default:
1039 		break;
1040 	}
1041 
1042 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1043 		init_data.flags.fbc_support = true;
1044 
1045 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1046 		init_data.flags.multi_mon_pp_mclk_switch = true;
1047 
1048 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1049 		init_data.flags.disable_fractional_pwm = true;
1050 
1051 	init_data.flags.power_down_display_on_boot = true;
1052 
1053 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1054 
1055 	/* Display Core create. */
1056 	adev->dm.dc = dc_create(&init_data);
1057 
1058 	if (adev->dm.dc) {
1059 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1060 	} else {
1061 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1062 		goto error;
1063 	}
1064 
1065 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1066 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1067 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1068 	}
1069 
1070 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1071 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1072 
1073 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1074 		adev->dm.dc->debug.disable_stutter = true;
1075 
1076 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1077 		adev->dm.dc->debug.disable_dsc = true;
1078 
1079 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1080 		adev->dm.dc->debug.disable_clock_gate = true;
1081 
1082 	r = dm_dmub_hw_init(adev);
1083 	if (r) {
1084 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1085 		goto error;
1086 	}
1087 
1088 	dc_hardware_init(adev->dm.dc);
1089 
1090 #if defined(CONFIG_DRM_AMD_DC_DCN)
1091 	if (adev->asic_type == CHIP_RENOIR) {
1092 		struct dc_phy_addr_space_config pa_config;
1093 
1094 		mmhub_read_system_context(adev, &pa_config);
1095 
1096 		// Call the DC init_memory func
1097 		dc_setup_system_context(adev->dm.dc, &pa_config);
1098 	}
1099 #endif
1100 
1101 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1102 	if (!adev->dm.freesync_module) {
1103 		DRM_ERROR(
1104 		"amdgpu: failed to initialize freesync_module.\n");
1105 	} else
1106 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1107 				adev->dm.freesync_module);
1108 
1109 	amdgpu_dm_init_color_mod();
1110 
1111 #ifdef CONFIG_DRM_AMD_DC_HDCP
1112 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1113 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1114 
1115 		if (!adev->dm.hdcp_workqueue)
1116 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1117 		else
1118 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1119 
1120 		dc_init_callbacks(adev->dm.dc, &init_params);
1121 	}
1122 #endif
1123 #ifdef CONFIG_DEBUG_FS
1124 	if (create_crtc_crc_properties(&adev->dm))
1125 		DRM_ERROR("amdgpu: failed to create crc property.\n");
1126 #endif
1127 	if (amdgpu_dm_initialize_drm_device(adev)) {
1128 		DRM_ERROR(
1129 		"amdgpu: failed to initialize sw for display support.\n");
1130 		goto error;
1131 	}
1132 
1133 	/* Update the actual used number of crtc */
1134 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1135 
1136 	/* create fake encoders for MST */
1137 	dm_dp_create_fake_mst_encoders(adev);
1138 
1139 	/* TODO: Add_display_info? */
1140 
1141 	/* TODO use dynamic cursor width */
1142 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1143 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1144 
1145 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1146 		DRM_ERROR(
1147 		"amdgpu: failed to initialize sw for display support.\n");
1148 		goto error;
1149 	}
1150 
1151 
1152 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1153 
1154 	return 0;
1155 error:
1156 	amdgpu_dm_fini(adev);
1157 
1158 	return -EINVAL;
1159 }
1160 
1161 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1162 {
1163 	int i;
1164 
1165 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1166 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1167 	}
1168 
1169 	amdgpu_dm_audio_fini(adev);
1170 
1171 	amdgpu_dm_destroy_drm_device(&adev->dm);
1172 
1173 #ifdef CONFIG_DRM_AMD_DC_HDCP
1174 	if (adev->dm.hdcp_workqueue) {
1175 		hdcp_destroy(adev->dm.hdcp_workqueue);
1176 		adev->dm.hdcp_workqueue = NULL;
1177 	}
1178 
1179 	if (adev->dm.dc)
1180 		dc_deinit_callbacks(adev->dm.dc);
1181 #endif
1182 	if (adev->dm.dc->ctx->dmub_srv) {
1183 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1184 		adev->dm.dc->ctx->dmub_srv = NULL;
1185 	}
1186 
1187 	if (adev->dm.dmub_bo)
1188 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1189 				      &adev->dm.dmub_bo_gpu_addr,
1190 				      &adev->dm.dmub_bo_cpu_addr);
1191 
1192 	/* DC Destroy TODO: Replace destroy DAL */
1193 	if (adev->dm.dc)
1194 		dc_destroy(&adev->dm.dc);
1195 	/*
1196 	 * TODO: pageflip, vlank interrupt
1197 	 *
1198 	 * amdgpu_dm_irq_fini(adev);
1199 	 */
1200 
1201 	if (adev->dm.cgs_device) {
1202 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1203 		adev->dm.cgs_device = NULL;
1204 	}
1205 	if (adev->dm.freesync_module) {
1206 		mod_freesync_destroy(adev->dm.freesync_module);
1207 		adev->dm.freesync_module = NULL;
1208 	}
1209 
1210 	mutex_destroy(&adev->dm.audio_lock);
1211 	mutex_destroy(&adev->dm.dc_lock);
1212 
1213 	return;
1214 }
1215 
1216 static int load_dmcu_fw(struct amdgpu_device *adev)
1217 {
1218 	const char *fw_name_dmcu = NULL;
1219 	int r;
1220 	const struct dmcu_firmware_header_v1_0 *hdr;
1221 
1222 	switch(adev->asic_type) {
1223 #if defined(CONFIG_DRM_AMD_DC_SI)
1224 	case CHIP_TAHITI:
1225 	case CHIP_PITCAIRN:
1226 	case CHIP_VERDE:
1227 	case CHIP_OLAND:
1228 #endif
1229 	case CHIP_BONAIRE:
1230 	case CHIP_HAWAII:
1231 	case CHIP_KAVERI:
1232 	case CHIP_KABINI:
1233 	case CHIP_MULLINS:
1234 	case CHIP_TONGA:
1235 	case CHIP_FIJI:
1236 	case CHIP_CARRIZO:
1237 	case CHIP_STONEY:
1238 	case CHIP_POLARIS11:
1239 	case CHIP_POLARIS10:
1240 	case CHIP_POLARIS12:
1241 	case CHIP_VEGAM:
1242 	case CHIP_VEGA10:
1243 	case CHIP_VEGA12:
1244 	case CHIP_VEGA20:
1245 	case CHIP_NAVI10:
1246 	case CHIP_NAVI14:
1247 	case CHIP_RENOIR:
1248 	case CHIP_SIENNA_CICHLID:
1249 	case CHIP_NAVY_FLOUNDER:
1250 	case CHIP_DIMGREY_CAVEFISH:
1251 	case CHIP_VANGOGH:
1252 		return 0;
1253 	case CHIP_NAVI12:
1254 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1255 		break;
1256 	case CHIP_RAVEN:
1257 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1258 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1259 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1260 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1261 		else
1262 			return 0;
1263 		break;
1264 	default:
1265 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1266 		return -EINVAL;
1267 	}
1268 
1269 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1270 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1271 		return 0;
1272 	}
1273 
1274 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1275 	if (r == -ENOENT) {
1276 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1277 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1278 		adev->dm.fw_dmcu = NULL;
1279 		return 0;
1280 	}
1281 	if (r) {
1282 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1283 			fw_name_dmcu);
1284 		return r;
1285 	}
1286 
1287 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1288 	if (r) {
1289 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1290 			fw_name_dmcu);
1291 		release_firmware(adev->dm.fw_dmcu);
1292 		adev->dm.fw_dmcu = NULL;
1293 		return r;
1294 	}
1295 
1296 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1297 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1298 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1299 	adev->firmware.fw_size +=
1300 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1301 
1302 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1303 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1304 	adev->firmware.fw_size +=
1305 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1306 
1307 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1308 
1309 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1310 
1311 	return 0;
1312 }
1313 
1314 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1315 {
1316 	struct amdgpu_device *adev = ctx;
1317 
1318 	return dm_read_reg(adev->dm.dc->ctx, address);
1319 }
1320 
1321 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1322 				     uint32_t value)
1323 {
1324 	struct amdgpu_device *adev = ctx;
1325 
1326 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1327 }
1328 
1329 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1330 {
1331 	struct dmub_srv_create_params create_params;
1332 	struct dmub_srv_region_params region_params;
1333 	struct dmub_srv_region_info region_info;
1334 	struct dmub_srv_fb_params fb_params;
1335 	struct dmub_srv_fb_info *fb_info;
1336 	struct dmub_srv *dmub_srv;
1337 	const struct dmcub_firmware_header_v1_0 *hdr;
1338 	const char *fw_name_dmub;
1339 	enum dmub_asic dmub_asic;
1340 	enum dmub_status status;
1341 	int r;
1342 
1343 	switch (adev->asic_type) {
1344 	case CHIP_RENOIR:
1345 		dmub_asic = DMUB_ASIC_DCN21;
1346 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1347 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1348 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1349 		break;
1350 	case CHIP_SIENNA_CICHLID:
1351 		dmub_asic = DMUB_ASIC_DCN30;
1352 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1353 		break;
1354 	case CHIP_NAVY_FLOUNDER:
1355 		dmub_asic = DMUB_ASIC_DCN30;
1356 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1357 		break;
1358 	case CHIP_VANGOGH:
1359 		dmub_asic = DMUB_ASIC_DCN301;
1360 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1361 		break;
1362 	case CHIP_DIMGREY_CAVEFISH:
1363 		dmub_asic = DMUB_ASIC_DCN302;
1364 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1365 		break;
1366 
1367 	default:
1368 		/* ASIC doesn't support DMUB. */
1369 		return 0;
1370 	}
1371 
1372 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1373 	if (r) {
1374 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1375 		return 0;
1376 	}
1377 
1378 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1379 	if (r) {
1380 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1381 		return 0;
1382 	}
1383 
1384 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1385 
1386 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1387 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1388 			AMDGPU_UCODE_ID_DMCUB;
1389 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1390 			adev->dm.dmub_fw;
1391 		adev->firmware.fw_size +=
1392 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1393 
1394 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1395 			 adev->dm.dmcub_fw_version);
1396 	}
1397 
1398 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1399 
1400 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1401 	dmub_srv = adev->dm.dmub_srv;
1402 
1403 	if (!dmub_srv) {
1404 		DRM_ERROR("Failed to allocate DMUB service!\n");
1405 		return -ENOMEM;
1406 	}
1407 
1408 	memset(&create_params, 0, sizeof(create_params));
1409 	create_params.user_ctx = adev;
1410 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1411 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1412 	create_params.asic = dmub_asic;
1413 
1414 	/* Create the DMUB service. */
1415 	status = dmub_srv_create(dmub_srv, &create_params);
1416 	if (status != DMUB_STATUS_OK) {
1417 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1418 		return -EINVAL;
1419 	}
1420 
1421 	/* Calculate the size of all the regions for the DMUB service. */
1422 	memset(&region_params, 0, sizeof(region_params));
1423 
1424 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1425 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1426 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1427 	region_params.vbios_size = adev->bios_size;
1428 	region_params.fw_bss_data = region_params.bss_data_size ?
1429 		adev->dm.dmub_fw->data +
1430 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1431 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1432 	region_params.fw_inst_const =
1433 		adev->dm.dmub_fw->data +
1434 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1435 		PSP_HEADER_BYTES;
1436 
1437 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1438 					   &region_info);
1439 
1440 	if (status != DMUB_STATUS_OK) {
1441 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1442 		return -EINVAL;
1443 	}
1444 
1445 	/*
1446 	 * Allocate a framebuffer based on the total size of all the regions.
1447 	 * TODO: Move this into GART.
1448 	 */
1449 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1450 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1451 				    &adev->dm.dmub_bo_gpu_addr,
1452 				    &adev->dm.dmub_bo_cpu_addr);
1453 	if (r)
1454 		return r;
1455 
1456 	/* Rebase the regions on the framebuffer address. */
1457 	memset(&fb_params, 0, sizeof(fb_params));
1458 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1459 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1460 	fb_params.region_info = &region_info;
1461 
1462 	adev->dm.dmub_fb_info =
1463 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1464 	fb_info = adev->dm.dmub_fb_info;
1465 
1466 	if (!fb_info) {
1467 		DRM_ERROR(
1468 			"Failed to allocate framebuffer info for DMUB service!\n");
1469 		return -ENOMEM;
1470 	}
1471 
1472 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1473 	if (status != DMUB_STATUS_OK) {
1474 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1475 		return -EINVAL;
1476 	}
1477 
1478 	return 0;
1479 }
1480 
1481 static int dm_sw_init(void *handle)
1482 {
1483 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1484 	int r;
1485 
1486 	r = dm_dmub_sw_init(adev);
1487 	if (r)
1488 		return r;
1489 
1490 	return load_dmcu_fw(adev);
1491 }
1492 
1493 static int dm_sw_fini(void *handle)
1494 {
1495 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1496 
1497 	kfree(adev->dm.dmub_fb_info);
1498 	adev->dm.dmub_fb_info = NULL;
1499 
1500 	if (adev->dm.dmub_srv) {
1501 		dmub_srv_destroy(adev->dm.dmub_srv);
1502 		adev->dm.dmub_srv = NULL;
1503 	}
1504 
1505 	release_firmware(adev->dm.dmub_fw);
1506 	adev->dm.dmub_fw = NULL;
1507 
1508 	release_firmware(adev->dm.fw_dmcu);
1509 	adev->dm.fw_dmcu = NULL;
1510 
1511 	return 0;
1512 }
1513 
1514 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1515 {
1516 	struct amdgpu_dm_connector *aconnector;
1517 	struct drm_connector *connector;
1518 	struct drm_connector_list_iter iter;
1519 	int ret = 0;
1520 
1521 	drm_connector_list_iter_begin(dev, &iter);
1522 	drm_for_each_connector_iter(connector, &iter) {
1523 		aconnector = to_amdgpu_dm_connector(connector);
1524 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1525 		    aconnector->mst_mgr.aux) {
1526 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1527 					 aconnector,
1528 					 aconnector->base.base.id);
1529 
1530 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1531 			if (ret < 0) {
1532 				DRM_ERROR("DM_MST: Failed to start MST\n");
1533 				aconnector->dc_link->type =
1534 					dc_connection_single;
1535 				break;
1536 			}
1537 		}
1538 	}
1539 	drm_connector_list_iter_end(&iter);
1540 
1541 	return ret;
1542 }
1543 
1544 static int dm_late_init(void *handle)
1545 {
1546 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1547 
1548 	struct dmcu_iram_parameters params;
1549 	unsigned int linear_lut[16];
1550 	int i;
1551 	struct dmcu *dmcu = NULL;
1552 	bool ret = true;
1553 
1554 	dmcu = adev->dm.dc->res_pool->dmcu;
1555 
1556 	for (i = 0; i < 16; i++)
1557 		linear_lut[i] = 0xFFFF * i / 15;
1558 
1559 	params.set = 0;
1560 	params.backlight_ramping_start = 0xCCCC;
1561 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1562 	params.backlight_lut_array_size = 16;
1563 	params.backlight_lut_array = linear_lut;
1564 
1565 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1566 	 * 0xFFFF x 0.01 = 0x28F
1567 	 */
1568 	params.min_abm_backlight = 0x28F;
1569 
1570 	/* In the case where abm is implemented on dmcub,
1571 	 * dmcu object will be null.
1572 	 * ABM 2.4 and up are implemented on dmcub.
1573 	 */
1574 	if (dmcu)
1575 		ret = dmcu_load_iram(dmcu, params);
1576 	else if (adev->dm.dc->ctx->dmub_srv)
1577 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1578 
1579 	if (!ret)
1580 		return -EINVAL;
1581 
1582 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1583 }
1584 
1585 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1586 {
1587 	struct amdgpu_dm_connector *aconnector;
1588 	struct drm_connector *connector;
1589 	struct drm_connector_list_iter iter;
1590 	struct drm_dp_mst_topology_mgr *mgr;
1591 	int ret;
1592 	bool need_hotplug = false;
1593 
1594 	drm_connector_list_iter_begin(dev, &iter);
1595 	drm_for_each_connector_iter(connector, &iter) {
1596 		aconnector = to_amdgpu_dm_connector(connector);
1597 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1598 		    aconnector->mst_port)
1599 			continue;
1600 
1601 		mgr = &aconnector->mst_mgr;
1602 
1603 		if (suspend) {
1604 			drm_dp_mst_topology_mgr_suspend(mgr);
1605 		} else {
1606 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1607 			if (ret < 0) {
1608 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1609 				need_hotplug = true;
1610 			}
1611 		}
1612 	}
1613 	drm_connector_list_iter_end(&iter);
1614 
1615 	if (need_hotplug)
1616 		drm_kms_helper_hotplug_event(dev);
1617 }
1618 
1619 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1620 {
1621 	struct smu_context *smu = &adev->smu;
1622 	int ret = 0;
1623 
1624 	if (!is_support_sw_smu(adev))
1625 		return 0;
1626 
1627 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1628 	 * on window driver dc implementation.
1629 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1630 	 * should be passed to smu during boot up and resume from s3.
1631 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1632 	 * dcn20_resource_construct
1633 	 * then call pplib functions below to pass the settings to smu:
1634 	 * smu_set_watermarks_for_clock_ranges
1635 	 * smu_set_watermarks_table
1636 	 * navi10_set_watermarks_table
1637 	 * smu_write_watermarks_table
1638 	 *
1639 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1640 	 * dc has implemented different flow for window driver:
1641 	 * dc_hardware_init / dc_set_power_state
1642 	 * dcn10_init_hw
1643 	 * notify_wm_ranges
1644 	 * set_wm_ranges
1645 	 * -- Linux
1646 	 * smu_set_watermarks_for_clock_ranges
1647 	 * renoir_set_watermarks_table
1648 	 * smu_write_watermarks_table
1649 	 *
1650 	 * For Linux,
1651 	 * dc_hardware_init -> amdgpu_dm_init
1652 	 * dc_set_power_state --> dm_resume
1653 	 *
1654 	 * therefore, this function apply to navi10/12/14 but not Renoir
1655 	 * *
1656 	 */
1657 	switch(adev->asic_type) {
1658 	case CHIP_NAVI10:
1659 	case CHIP_NAVI14:
1660 	case CHIP_NAVI12:
1661 		break;
1662 	default:
1663 		return 0;
1664 	}
1665 
1666 	ret = smu_write_watermarks_table(smu);
1667 	if (ret) {
1668 		DRM_ERROR("Failed to update WMTABLE!\n");
1669 		return ret;
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 /**
1676  * dm_hw_init() - Initialize DC device
1677  * @handle: The base driver device containing the amdgpu_dm device.
1678  *
1679  * Initialize the &struct amdgpu_display_manager device. This involves calling
1680  * the initializers of each DM component, then populating the struct with them.
1681  *
1682  * Although the function implies hardware initialization, both hardware and
1683  * software are initialized here. Splitting them out to their relevant init
1684  * hooks is a future TODO item.
1685  *
1686  * Some notable things that are initialized here:
1687  *
1688  * - Display Core, both software and hardware
1689  * - DC modules that we need (freesync and color management)
1690  * - DRM software states
1691  * - Interrupt sources and handlers
1692  * - Vblank support
1693  * - Debug FS entries, if enabled
1694  */
1695 static int dm_hw_init(void *handle)
1696 {
1697 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1698 	/* Create DAL display manager */
1699 	amdgpu_dm_init(adev);
1700 	amdgpu_dm_hpd_init(adev);
1701 
1702 	return 0;
1703 }
1704 
1705 /**
1706  * dm_hw_fini() - Teardown DC device
1707  * @handle: The base driver device containing the amdgpu_dm device.
1708  *
1709  * Teardown components within &struct amdgpu_display_manager that require
1710  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1711  * were loaded. Also flush IRQ workqueues and disable them.
1712  */
1713 static int dm_hw_fini(void *handle)
1714 {
1715 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1716 
1717 	amdgpu_dm_hpd_fini(adev);
1718 
1719 	amdgpu_dm_irq_fini(adev);
1720 	amdgpu_dm_fini(adev);
1721 	return 0;
1722 }
1723 
1724 
1725 static int dm_enable_vblank(struct drm_crtc *crtc);
1726 static void dm_disable_vblank(struct drm_crtc *crtc);
1727 
1728 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1729 				 struct dc_state *state, bool enable)
1730 {
1731 	enum dc_irq_source irq_source;
1732 	struct amdgpu_crtc *acrtc;
1733 	int rc = -EBUSY;
1734 	int i = 0;
1735 
1736 	for (i = 0; i < state->stream_count; i++) {
1737 		acrtc = get_crtc_by_otg_inst(
1738 				adev, state->stream_status[i].primary_otg_inst);
1739 
1740 		if (acrtc && state->stream_status[i].plane_count != 0) {
1741 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1742 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1743 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1744 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1745 			if (rc)
1746 				DRM_WARN("Failed to %s pflip interrupts\n",
1747 					 enable ? "enable" : "disable");
1748 
1749 			if (enable) {
1750 				rc = dm_enable_vblank(&acrtc->base);
1751 				if (rc)
1752 					DRM_WARN("Failed to enable vblank interrupts\n");
1753 			} else {
1754 				dm_disable_vblank(&acrtc->base);
1755 			}
1756 
1757 		}
1758 	}
1759 
1760 }
1761 
1762 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1763 {
1764 	struct dc_state *context = NULL;
1765 	enum dc_status res = DC_ERROR_UNEXPECTED;
1766 	int i;
1767 	struct dc_stream_state *del_streams[MAX_PIPES];
1768 	int del_streams_count = 0;
1769 
1770 	memset(del_streams, 0, sizeof(del_streams));
1771 
1772 	context = dc_create_state(dc);
1773 	if (context == NULL)
1774 		goto context_alloc_fail;
1775 
1776 	dc_resource_state_copy_construct_current(dc, context);
1777 
1778 	/* First remove from context all streams */
1779 	for (i = 0; i < context->stream_count; i++) {
1780 		struct dc_stream_state *stream = context->streams[i];
1781 
1782 		del_streams[del_streams_count++] = stream;
1783 	}
1784 
1785 	/* Remove all planes for removed streams and then remove the streams */
1786 	for (i = 0; i < del_streams_count; i++) {
1787 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1788 			res = DC_FAIL_DETACH_SURFACES;
1789 			goto fail;
1790 		}
1791 
1792 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1793 		if (res != DC_OK)
1794 			goto fail;
1795 	}
1796 
1797 
1798 	res = dc_validate_global_state(dc, context, false);
1799 
1800 	if (res != DC_OK) {
1801 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1802 		goto fail;
1803 	}
1804 
1805 	res = dc_commit_state(dc, context);
1806 
1807 fail:
1808 	dc_release_state(context);
1809 
1810 context_alloc_fail:
1811 	return res;
1812 }
1813 
1814 static int dm_suspend(void *handle)
1815 {
1816 	struct amdgpu_device *adev = handle;
1817 	struct amdgpu_display_manager *dm = &adev->dm;
1818 	int ret = 0;
1819 
1820 	if (amdgpu_in_reset(adev)) {
1821 		mutex_lock(&dm->dc_lock);
1822 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1823 
1824 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1825 
1826 		amdgpu_dm_commit_zero_streams(dm->dc);
1827 
1828 		amdgpu_dm_irq_suspend(adev);
1829 
1830 		return ret;
1831 	}
1832 
1833 	WARN_ON(adev->dm.cached_state);
1834 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1835 
1836 	s3_handle_mst(adev_to_drm(adev), true);
1837 
1838 	amdgpu_dm_irq_suspend(adev);
1839 
1840 
1841 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1842 
1843 	return 0;
1844 }
1845 
1846 static struct amdgpu_dm_connector *
1847 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1848 					     struct drm_crtc *crtc)
1849 {
1850 	uint32_t i;
1851 	struct drm_connector_state *new_con_state;
1852 	struct drm_connector *connector;
1853 	struct drm_crtc *crtc_from_state;
1854 
1855 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1856 		crtc_from_state = new_con_state->crtc;
1857 
1858 		if (crtc_from_state == crtc)
1859 			return to_amdgpu_dm_connector(connector);
1860 	}
1861 
1862 	return NULL;
1863 }
1864 
1865 static void emulated_link_detect(struct dc_link *link)
1866 {
1867 	struct dc_sink_init_data sink_init_data = { 0 };
1868 	struct display_sink_capability sink_caps = { 0 };
1869 	enum dc_edid_status edid_status;
1870 	struct dc_context *dc_ctx = link->ctx;
1871 	struct dc_sink *sink = NULL;
1872 	struct dc_sink *prev_sink = NULL;
1873 
1874 	link->type = dc_connection_none;
1875 	prev_sink = link->local_sink;
1876 
1877 	if (prev_sink != NULL)
1878 		dc_sink_retain(prev_sink);
1879 
1880 	switch (link->connector_signal) {
1881 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1882 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1883 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1884 		break;
1885 	}
1886 
1887 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1888 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1889 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1890 		break;
1891 	}
1892 
1893 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1894 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1895 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1896 		break;
1897 	}
1898 
1899 	case SIGNAL_TYPE_LVDS: {
1900 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1901 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1902 		break;
1903 	}
1904 
1905 	case SIGNAL_TYPE_EDP: {
1906 		sink_caps.transaction_type =
1907 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1908 		sink_caps.signal = SIGNAL_TYPE_EDP;
1909 		break;
1910 	}
1911 
1912 	case SIGNAL_TYPE_DISPLAY_PORT: {
1913 		sink_caps.transaction_type =
1914 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1915 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1916 		break;
1917 	}
1918 
1919 	default:
1920 		DC_ERROR("Invalid connector type! signal:%d\n",
1921 			link->connector_signal);
1922 		return;
1923 	}
1924 
1925 	sink_init_data.link = link;
1926 	sink_init_data.sink_signal = sink_caps.signal;
1927 
1928 	sink = dc_sink_create(&sink_init_data);
1929 	if (!sink) {
1930 		DC_ERROR("Failed to create sink!\n");
1931 		return;
1932 	}
1933 
1934 	/* dc_sink_create returns a new reference */
1935 	link->local_sink = sink;
1936 
1937 	edid_status = dm_helpers_read_local_edid(
1938 			link->ctx,
1939 			link,
1940 			sink);
1941 
1942 	if (edid_status != EDID_OK)
1943 		DC_ERROR("Failed to read EDID");
1944 
1945 }
1946 
1947 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1948 				     struct amdgpu_display_manager *dm)
1949 {
1950 	struct {
1951 		struct dc_surface_update surface_updates[MAX_SURFACES];
1952 		struct dc_plane_info plane_infos[MAX_SURFACES];
1953 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1954 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1955 		struct dc_stream_update stream_update;
1956 	} * bundle;
1957 	int k, m;
1958 
1959 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1960 
1961 	if (!bundle) {
1962 		dm_error("Failed to allocate update bundle\n");
1963 		goto cleanup;
1964 	}
1965 
1966 	for (k = 0; k < dc_state->stream_count; k++) {
1967 		bundle->stream_update.stream = dc_state->streams[k];
1968 
1969 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1970 			bundle->surface_updates[m].surface =
1971 				dc_state->stream_status->plane_states[m];
1972 			bundle->surface_updates[m].surface->force_full_update =
1973 				true;
1974 		}
1975 		dc_commit_updates_for_stream(
1976 			dm->dc, bundle->surface_updates,
1977 			dc_state->stream_status->plane_count,
1978 			dc_state->streams[k], &bundle->stream_update, dc_state);
1979 	}
1980 
1981 cleanup:
1982 	kfree(bundle);
1983 
1984 	return;
1985 }
1986 
1987 static void dm_set_dpms_off(struct dc_link *link)
1988 {
1989 	struct dc_stream_state *stream_state;
1990 	struct amdgpu_dm_connector *aconnector = link->priv;
1991 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1992 	struct dc_stream_update stream_update;
1993 	bool dpms_off = true;
1994 
1995 	memset(&stream_update, 0, sizeof(stream_update));
1996 	stream_update.dpms_off = &dpms_off;
1997 
1998 	mutex_lock(&adev->dm.dc_lock);
1999 	stream_state = dc_stream_find_from_link(link);
2000 
2001 	if (stream_state == NULL) {
2002 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2003 		mutex_unlock(&adev->dm.dc_lock);
2004 		return;
2005 	}
2006 
2007 	stream_update.stream = stream_state;
2008 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2009 				     stream_state, &stream_update,
2010 				     stream_state->ctx->dc->current_state);
2011 	mutex_unlock(&adev->dm.dc_lock);
2012 }
2013 
2014 static int dm_resume(void *handle)
2015 {
2016 	struct amdgpu_device *adev = handle;
2017 	struct drm_device *ddev = adev_to_drm(adev);
2018 	struct amdgpu_display_manager *dm = &adev->dm;
2019 	struct amdgpu_dm_connector *aconnector;
2020 	struct drm_connector *connector;
2021 	struct drm_connector_list_iter iter;
2022 	struct drm_crtc *crtc;
2023 	struct drm_crtc_state *new_crtc_state;
2024 	struct dm_crtc_state *dm_new_crtc_state;
2025 	struct drm_plane *plane;
2026 	struct drm_plane_state *new_plane_state;
2027 	struct dm_plane_state *dm_new_plane_state;
2028 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2029 	enum dc_connection_type new_connection_type = dc_connection_none;
2030 	struct dc_state *dc_state;
2031 	int i, r, j;
2032 
2033 	if (amdgpu_in_reset(adev)) {
2034 		dc_state = dm->cached_dc_state;
2035 
2036 		r = dm_dmub_hw_init(adev);
2037 		if (r)
2038 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2039 
2040 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2041 		dc_resume(dm->dc);
2042 
2043 		amdgpu_dm_irq_resume_early(adev);
2044 
2045 		for (i = 0; i < dc_state->stream_count; i++) {
2046 			dc_state->streams[i]->mode_changed = true;
2047 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2048 				dc_state->stream_status->plane_states[j]->update_flags.raw
2049 					= 0xffffffff;
2050 			}
2051 		}
2052 
2053 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2054 
2055 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2056 
2057 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2058 
2059 		dc_release_state(dm->cached_dc_state);
2060 		dm->cached_dc_state = NULL;
2061 
2062 		amdgpu_dm_irq_resume_late(adev);
2063 
2064 		mutex_unlock(&dm->dc_lock);
2065 
2066 		return 0;
2067 	}
2068 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2069 	dc_release_state(dm_state->context);
2070 	dm_state->context = dc_create_state(dm->dc);
2071 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2072 	dc_resource_state_construct(dm->dc, dm_state->context);
2073 
2074 	/* Before powering on DC we need to re-initialize DMUB. */
2075 	r = dm_dmub_hw_init(adev);
2076 	if (r)
2077 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2078 
2079 	/* power on hardware */
2080 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2081 
2082 	/* program HPD filter */
2083 	dc_resume(dm->dc);
2084 
2085 	/*
2086 	 * early enable HPD Rx IRQ, should be done before set mode as short
2087 	 * pulse interrupts are used for MST
2088 	 */
2089 	amdgpu_dm_irq_resume_early(adev);
2090 
2091 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2092 	s3_handle_mst(ddev, false);
2093 
2094 	/* Do detection*/
2095 	drm_connector_list_iter_begin(ddev, &iter);
2096 	drm_for_each_connector_iter(connector, &iter) {
2097 		aconnector = to_amdgpu_dm_connector(connector);
2098 
2099 		/*
2100 		 * this is the case when traversing through already created
2101 		 * MST connectors, should be skipped
2102 		 */
2103 		if (aconnector->mst_port)
2104 			continue;
2105 
2106 		mutex_lock(&aconnector->hpd_lock);
2107 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2108 			DRM_ERROR("KMS: Failed to detect connector\n");
2109 
2110 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2111 			emulated_link_detect(aconnector->dc_link);
2112 		else
2113 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2114 
2115 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2116 			aconnector->fake_enable = false;
2117 
2118 		if (aconnector->dc_sink)
2119 			dc_sink_release(aconnector->dc_sink);
2120 		aconnector->dc_sink = NULL;
2121 		amdgpu_dm_update_connector_after_detect(aconnector);
2122 		mutex_unlock(&aconnector->hpd_lock);
2123 	}
2124 	drm_connector_list_iter_end(&iter);
2125 
2126 	/* Force mode set in atomic commit */
2127 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2128 		new_crtc_state->active_changed = true;
2129 
2130 	/*
2131 	 * atomic_check is expected to create the dc states. We need to release
2132 	 * them here, since they were duplicated as part of the suspend
2133 	 * procedure.
2134 	 */
2135 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2136 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2137 		if (dm_new_crtc_state->stream) {
2138 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2139 			dc_stream_release(dm_new_crtc_state->stream);
2140 			dm_new_crtc_state->stream = NULL;
2141 		}
2142 	}
2143 
2144 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2145 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2146 		if (dm_new_plane_state->dc_state) {
2147 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2148 			dc_plane_state_release(dm_new_plane_state->dc_state);
2149 			dm_new_plane_state->dc_state = NULL;
2150 		}
2151 	}
2152 
2153 	drm_atomic_helper_resume(ddev, dm->cached_state);
2154 
2155 	dm->cached_state = NULL;
2156 
2157 	amdgpu_dm_irq_resume_late(adev);
2158 
2159 	amdgpu_dm_smu_write_watermarks_table(adev);
2160 
2161 	return 0;
2162 }
2163 
2164 /**
2165  * DOC: DM Lifecycle
2166  *
2167  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2168  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2169  * the base driver's device list to be initialized and torn down accordingly.
2170  *
2171  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2172  */
2173 
2174 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2175 	.name = "dm",
2176 	.early_init = dm_early_init,
2177 	.late_init = dm_late_init,
2178 	.sw_init = dm_sw_init,
2179 	.sw_fini = dm_sw_fini,
2180 	.hw_init = dm_hw_init,
2181 	.hw_fini = dm_hw_fini,
2182 	.suspend = dm_suspend,
2183 	.resume = dm_resume,
2184 	.is_idle = dm_is_idle,
2185 	.wait_for_idle = dm_wait_for_idle,
2186 	.check_soft_reset = dm_check_soft_reset,
2187 	.soft_reset = dm_soft_reset,
2188 	.set_clockgating_state = dm_set_clockgating_state,
2189 	.set_powergating_state = dm_set_powergating_state,
2190 };
2191 
2192 const struct amdgpu_ip_block_version dm_ip_block =
2193 {
2194 	.type = AMD_IP_BLOCK_TYPE_DCE,
2195 	.major = 1,
2196 	.minor = 0,
2197 	.rev = 0,
2198 	.funcs = &amdgpu_dm_funcs,
2199 };
2200 
2201 
2202 /**
2203  * DOC: atomic
2204  *
2205  * *WIP*
2206  */
2207 
2208 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2209 	.fb_create = amdgpu_display_user_framebuffer_create,
2210 	.get_format_info = amd_get_format_info,
2211 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2212 	.atomic_check = amdgpu_dm_atomic_check,
2213 	.atomic_commit = amdgpu_dm_atomic_commit,
2214 };
2215 
2216 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2217 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2218 };
2219 
2220 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2221 {
2222 	u32 max_cll, min_cll, max, min, q, r;
2223 	struct amdgpu_dm_backlight_caps *caps;
2224 	struct amdgpu_display_manager *dm;
2225 	struct drm_connector *conn_base;
2226 	struct amdgpu_device *adev;
2227 	struct dc_link *link = NULL;
2228 	static const u8 pre_computed_values[] = {
2229 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2230 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2231 
2232 	if (!aconnector || !aconnector->dc_link)
2233 		return;
2234 
2235 	link = aconnector->dc_link;
2236 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2237 		return;
2238 
2239 	conn_base = &aconnector->base;
2240 	adev = drm_to_adev(conn_base->dev);
2241 	dm = &adev->dm;
2242 	caps = &dm->backlight_caps;
2243 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2244 	caps->aux_support = false;
2245 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2246 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2247 
2248 	if (caps->ext_caps->bits.oled == 1 ||
2249 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2250 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2251 		caps->aux_support = true;
2252 
2253 	/* From the specification (CTA-861-G), for calculating the maximum
2254 	 * luminance we need to use:
2255 	 *	Luminance = 50*2**(CV/32)
2256 	 * Where CV is a one-byte value.
2257 	 * For calculating this expression we may need float point precision;
2258 	 * to avoid this complexity level, we take advantage that CV is divided
2259 	 * by a constant. From the Euclids division algorithm, we know that CV
2260 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2261 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2262 	 * need to pre-compute the value of r/32. For pre-computing the values
2263 	 * We just used the following Ruby line:
2264 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2265 	 * The results of the above expressions can be verified at
2266 	 * pre_computed_values.
2267 	 */
2268 	q = max_cll >> 5;
2269 	r = max_cll % 32;
2270 	max = (1 << q) * pre_computed_values[r];
2271 
2272 	// min luminance: maxLum * (CV/255)^2 / 100
2273 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2274 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2275 
2276 	caps->aux_max_input_signal = max;
2277 	caps->aux_min_input_signal = min;
2278 }
2279 
2280 void amdgpu_dm_update_connector_after_detect(
2281 		struct amdgpu_dm_connector *aconnector)
2282 {
2283 	struct drm_connector *connector = &aconnector->base;
2284 	struct drm_device *dev = connector->dev;
2285 	struct dc_sink *sink;
2286 
2287 	/* MST handled by drm_mst framework */
2288 	if (aconnector->mst_mgr.mst_state == true)
2289 		return;
2290 
2291 	sink = aconnector->dc_link->local_sink;
2292 	if (sink)
2293 		dc_sink_retain(sink);
2294 
2295 	/*
2296 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2297 	 * the connector sink is set to either fake or physical sink depends on link status.
2298 	 * Skip if already done during boot.
2299 	 */
2300 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2301 			&& aconnector->dc_em_sink) {
2302 
2303 		/*
2304 		 * For S3 resume with headless use eml_sink to fake stream
2305 		 * because on resume connector->sink is set to NULL
2306 		 */
2307 		mutex_lock(&dev->mode_config.mutex);
2308 
2309 		if (sink) {
2310 			if (aconnector->dc_sink) {
2311 				amdgpu_dm_update_freesync_caps(connector, NULL);
2312 				/*
2313 				 * retain and release below are used to
2314 				 * bump up refcount for sink because the link doesn't point
2315 				 * to it anymore after disconnect, so on next crtc to connector
2316 				 * reshuffle by UMD we will get into unwanted dc_sink release
2317 				 */
2318 				dc_sink_release(aconnector->dc_sink);
2319 			}
2320 			aconnector->dc_sink = sink;
2321 			dc_sink_retain(aconnector->dc_sink);
2322 			amdgpu_dm_update_freesync_caps(connector,
2323 					aconnector->edid);
2324 		} else {
2325 			amdgpu_dm_update_freesync_caps(connector, NULL);
2326 			if (!aconnector->dc_sink) {
2327 				aconnector->dc_sink = aconnector->dc_em_sink;
2328 				dc_sink_retain(aconnector->dc_sink);
2329 			}
2330 		}
2331 
2332 		mutex_unlock(&dev->mode_config.mutex);
2333 
2334 		if (sink)
2335 			dc_sink_release(sink);
2336 		return;
2337 	}
2338 
2339 	/*
2340 	 * TODO: temporary guard to look for proper fix
2341 	 * if this sink is MST sink, we should not do anything
2342 	 */
2343 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2344 		dc_sink_release(sink);
2345 		return;
2346 	}
2347 
2348 	if (aconnector->dc_sink == sink) {
2349 		/*
2350 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2351 		 * Do nothing!!
2352 		 */
2353 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2354 				aconnector->connector_id);
2355 		if (sink)
2356 			dc_sink_release(sink);
2357 		return;
2358 	}
2359 
2360 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2361 		aconnector->connector_id, aconnector->dc_sink, sink);
2362 
2363 	mutex_lock(&dev->mode_config.mutex);
2364 
2365 	/*
2366 	 * 1. Update status of the drm connector
2367 	 * 2. Send an event and let userspace tell us what to do
2368 	 */
2369 	if (sink) {
2370 		/*
2371 		 * TODO: check if we still need the S3 mode update workaround.
2372 		 * If yes, put it here.
2373 		 */
2374 		if (aconnector->dc_sink)
2375 			amdgpu_dm_update_freesync_caps(connector, NULL);
2376 
2377 		aconnector->dc_sink = sink;
2378 		dc_sink_retain(aconnector->dc_sink);
2379 		if (sink->dc_edid.length == 0) {
2380 			aconnector->edid = NULL;
2381 			if (aconnector->dc_link->aux_mode) {
2382 				drm_dp_cec_unset_edid(
2383 					&aconnector->dm_dp_aux.aux);
2384 			}
2385 		} else {
2386 			aconnector->edid =
2387 				(struct edid *)sink->dc_edid.raw_edid;
2388 
2389 			drm_connector_update_edid_property(connector,
2390 							   aconnector->edid);
2391 			drm_add_edid_modes(connector, aconnector->edid);
2392 
2393 			if (aconnector->dc_link->aux_mode)
2394 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2395 						    aconnector->edid);
2396 		}
2397 
2398 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2399 		update_connector_ext_caps(aconnector);
2400 	} else {
2401 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2402 		amdgpu_dm_update_freesync_caps(connector, NULL);
2403 		drm_connector_update_edid_property(connector, NULL);
2404 		aconnector->num_modes = 0;
2405 		dc_sink_release(aconnector->dc_sink);
2406 		aconnector->dc_sink = NULL;
2407 		aconnector->edid = NULL;
2408 #ifdef CONFIG_DRM_AMD_DC_HDCP
2409 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2410 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2411 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2412 #endif
2413 	}
2414 
2415 	mutex_unlock(&dev->mode_config.mutex);
2416 
2417 	update_subconnector_property(aconnector);
2418 
2419 	if (sink)
2420 		dc_sink_release(sink);
2421 }
2422 
2423 static void handle_hpd_irq(void *param)
2424 {
2425 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2426 	struct drm_connector *connector = &aconnector->base;
2427 	struct drm_device *dev = connector->dev;
2428 	enum dc_connection_type new_connection_type = dc_connection_none;
2429 #ifdef CONFIG_DRM_AMD_DC_HDCP
2430 	struct amdgpu_device *adev = drm_to_adev(dev);
2431 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2432 #endif
2433 
2434 	/*
2435 	 * In case of failure or MST no need to update connector status or notify the OS
2436 	 * since (for MST case) MST does this in its own context.
2437 	 */
2438 	mutex_lock(&aconnector->hpd_lock);
2439 
2440 #ifdef CONFIG_DRM_AMD_DC_HDCP
2441 	if (adev->dm.hdcp_workqueue) {
2442 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2443 		dm_con_state->update_hdcp = true;
2444 	}
2445 #endif
2446 	if (aconnector->fake_enable)
2447 		aconnector->fake_enable = false;
2448 
2449 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2450 		DRM_ERROR("KMS: Failed to detect connector\n");
2451 
2452 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2453 		emulated_link_detect(aconnector->dc_link);
2454 
2455 
2456 		drm_modeset_lock_all(dev);
2457 		dm_restore_drm_connector_state(dev, connector);
2458 		drm_modeset_unlock_all(dev);
2459 
2460 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2461 			drm_kms_helper_hotplug_event(dev);
2462 
2463 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2464 		if (new_connection_type == dc_connection_none &&
2465 		    aconnector->dc_link->type == dc_connection_none)
2466 			dm_set_dpms_off(aconnector->dc_link);
2467 
2468 		amdgpu_dm_update_connector_after_detect(aconnector);
2469 
2470 		drm_modeset_lock_all(dev);
2471 		dm_restore_drm_connector_state(dev, connector);
2472 		drm_modeset_unlock_all(dev);
2473 
2474 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2475 			drm_kms_helper_hotplug_event(dev);
2476 	}
2477 	mutex_unlock(&aconnector->hpd_lock);
2478 
2479 }
2480 
2481 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2482 {
2483 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2484 	uint8_t dret;
2485 	bool new_irq_handled = false;
2486 	int dpcd_addr;
2487 	int dpcd_bytes_to_read;
2488 
2489 	const int max_process_count = 30;
2490 	int process_count = 0;
2491 
2492 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2493 
2494 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2495 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2496 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2497 		dpcd_addr = DP_SINK_COUNT;
2498 	} else {
2499 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2500 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2501 		dpcd_addr = DP_SINK_COUNT_ESI;
2502 	}
2503 
2504 	dret = drm_dp_dpcd_read(
2505 		&aconnector->dm_dp_aux.aux,
2506 		dpcd_addr,
2507 		esi,
2508 		dpcd_bytes_to_read);
2509 
2510 	while (dret == dpcd_bytes_to_read &&
2511 		process_count < max_process_count) {
2512 		uint8_t retry;
2513 		dret = 0;
2514 
2515 		process_count++;
2516 
2517 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2518 		/* handle HPD short pulse irq */
2519 		if (aconnector->mst_mgr.mst_state)
2520 			drm_dp_mst_hpd_irq(
2521 				&aconnector->mst_mgr,
2522 				esi,
2523 				&new_irq_handled);
2524 
2525 		if (new_irq_handled) {
2526 			/* ACK at DPCD to notify down stream */
2527 			const int ack_dpcd_bytes_to_write =
2528 				dpcd_bytes_to_read - 1;
2529 
2530 			for (retry = 0; retry < 3; retry++) {
2531 				uint8_t wret;
2532 
2533 				wret = drm_dp_dpcd_write(
2534 					&aconnector->dm_dp_aux.aux,
2535 					dpcd_addr + 1,
2536 					&esi[1],
2537 					ack_dpcd_bytes_to_write);
2538 				if (wret == ack_dpcd_bytes_to_write)
2539 					break;
2540 			}
2541 
2542 			/* check if there is new irq to be handled */
2543 			dret = drm_dp_dpcd_read(
2544 				&aconnector->dm_dp_aux.aux,
2545 				dpcd_addr,
2546 				esi,
2547 				dpcd_bytes_to_read);
2548 
2549 			new_irq_handled = false;
2550 		} else {
2551 			break;
2552 		}
2553 	}
2554 
2555 	if (process_count == max_process_count)
2556 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2557 }
2558 
2559 static void handle_hpd_rx_irq(void *param)
2560 {
2561 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2562 	struct drm_connector *connector = &aconnector->base;
2563 	struct drm_device *dev = connector->dev;
2564 	struct dc_link *dc_link = aconnector->dc_link;
2565 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2566 	enum dc_connection_type new_connection_type = dc_connection_none;
2567 #ifdef CONFIG_DRM_AMD_DC_HDCP
2568 	union hpd_irq_data hpd_irq_data;
2569 	struct amdgpu_device *adev = drm_to_adev(dev);
2570 
2571 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2572 #endif
2573 
2574 	/*
2575 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2576 	 * conflict, after implement i2c helper, this mutex should be
2577 	 * retired.
2578 	 */
2579 	if (dc_link->type != dc_connection_mst_branch)
2580 		mutex_lock(&aconnector->hpd_lock);
2581 
2582 
2583 #ifdef CONFIG_DRM_AMD_DC_HDCP
2584 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2585 #else
2586 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2587 #endif
2588 			!is_mst_root_connector) {
2589 		/* Downstream Port status changed. */
2590 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2591 			DRM_ERROR("KMS: Failed to detect connector\n");
2592 
2593 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2594 			emulated_link_detect(dc_link);
2595 
2596 			if (aconnector->fake_enable)
2597 				aconnector->fake_enable = false;
2598 
2599 			amdgpu_dm_update_connector_after_detect(aconnector);
2600 
2601 
2602 			drm_modeset_lock_all(dev);
2603 			dm_restore_drm_connector_state(dev, connector);
2604 			drm_modeset_unlock_all(dev);
2605 
2606 			drm_kms_helper_hotplug_event(dev);
2607 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2608 
2609 			if (aconnector->fake_enable)
2610 				aconnector->fake_enable = false;
2611 
2612 			amdgpu_dm_update_connector_after_detect(aconnector);
2613 
2614 
2615 			drm_modeset_lock_all(dev);
2616 			dm_restore_drm_connector_state(dev, connector);
2617 			drm_modeset_unlock_all(dev);
2618 
2619 			drm_kms_helper_hotplug_event(dev);
2620 		}
2621 	}
2622 #ifdef CONFIG_DRM_AMD_DC_HDCP
2623 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2624 		if (adev->dm.hdcp_workqueue)
2625 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2626 	}
2627 #endif
2628 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2629 	    (dc_link->type == dc_connection_mst_branch))
2630 		dm_handle_hpd_rx_irq(aconnector);
2631 
2632 	if (dc_link->type != dc_connection_mst_branch) {
2633 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2634 		mutex_unlock(&aconnector->hpd_lock);
2635 	}
2636 }
2637 
2638 static void register_hpd_handlers(struct amdgpu_device *adev)
2639 {
2640 	struct drm_device *dev = adev_to_drm(adev);
2641 	struct drm_connector *connector;
2642 	struct amdgpu_dm_connector *aconnector;
2643 	const struct dc_link *dc_link;
2644 	struct dc_interrupt_params int_params = {0};
2645 
2646 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2647 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2648 
2649 	list_for_each_entry(connector,
2650 			&dev->mode_config.connector_list, head)	{
2651 
2652 		aconnector = to_amdgpu_dm_connector(connector);
2653 		dc_link = aconnector->dc_link;
2654 
2655 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2656 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2657 			int_params.irq_source = dc_link->irq_source_hpd;
2658 
2659 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2660 					handle_hpd_irq,
2661 					(void *) aconnector);
2662 		}
2663 
2664 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2665 
2666 			/* Also register for DP short pulse (hpd_rx). */
2667 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2668 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2669 
2670 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2671 					handle_hpd_rx_irq,
2672 					(void *) aconnector);
2673 		}
2674 	}
2675 }
2676 
2677 #if defined(CONFIG_DRM_AMD_DC_SI)
2678 /* Register IRQ sources and initialize IRQ callbacks */
2679 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2680 {
2681 	struct dc *dc = adev->dm.dc;
2682 	struct common_irq_params *c_irq_params;
2683 	struct dc_interrupt_params int_params = {0};
2684 	int r;
2685 	int i;
2686 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2687 
2688 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2689 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2690 
2691 	/*
2692 	 * Actions of amdgpu_irq_add_id():
2693 	 * 1. Register a set() function with base driver.
2694 	 *    Base driver will call set() function to enable/disable an
2695 	 *    interrupt in DC hardware.
2696 	 * 2. Register amdgpu_dm_irq_handler().
2697 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2698 	 *    coming from DC hardware.
2699 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2700 	 *    for acknowledging and handling. */
2701 
2702 	/* Use VBLANK interrupt */
2703 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2704 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2705 		if (r) {
2706 			DRM_ERROR("Failed to add crtc irq id!\n");
2707 			return r;
2708 		}
2709 
2710 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2711 		int_params.irq_source =
2712 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2713 
2714 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2715 
2716 		c_irq_params->adev = adev;
2717 		c_irq_params->irq_src = int_params.irq_source;
2718 
2719 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2720 				dm_crtc_high_irq, c_irq_params);
2721 	}
2722 
2723 	/* Use GRPH_PFLIP interrupt */
2724 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2725 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2726 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2727 		if (r) {
2728 			DRM_ERROR("Failed to add page flip irq id!\n");
2729 			return r;
2730 		}
2731 
2732 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2733 		int_params.irq_source =
2734 			dc_interrupt_to_irq_source(dc, i, 0);
2735 
2736 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2737 
2738 		c_irq_params->adev = adev;
2739 		c_irq_params->irq_src = int_params.irq_source;
2740 
2741 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2742 				dm_pflip_high_irq, c_irq_params);
2743 
2744 	}
2745 
2746 	/* HPD */
2747 	r = amdgpu_irq_add_id(adev, client_id,
2748 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2749 	if (r) {
2750 		DRM_ERROR("Failed to add hpd irq id!\n");
2751 		return r;
2752 	}
2753 
2754 	register_hpd_handlers(adev);
2755 
2756 	return 0;
2757 }
2758 #endif
2759 
2760 /* Register IRQ sources and initialize IRQ callbacks */
2761 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2762 {
2763 	struct dc *dc = adev->dm.dc;
2764 	struct common_irq_params *c_irq_params;
2765 	struct dc_interrupt_params int_params = {0};
2766 	int r;
2767 	int i;
2768 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2769 
2770 	if (adev->asic_type >= CHIP_VEGA10)
2771 		client_id = SOC15_IH_CLIENTID_DCE;
2772 
2773 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2774 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2775 
2776 	/*
2777 	 * Actions of amdgpu_irq_add_id():
2778 	 * 1. Register a set() function with base driver.
2779 	 *    Base driver will call set() function to enable/disable an
2780 	 *    interrupt in DC hardware.
2781 	 * 2. Register amdgpu_dm_irq_handler().
2782 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2783 	 *    coming from DC hardware.
2784 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2785 	 *    for acknowledging and handling. */
2786 
2787 	/* Use VBLANK interrupt */
2788 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2789 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2790 		if (r) {
2791 			DRM_ERROR("Failed to add crtc irq id!\n");
2792 			return r;
2793 		}
2794 
2795 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2796 		int_params.irq_source =
2797 			dc_interrupt_to_irq_source(dc, i, 0);
2798 
2799 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2800 
2801 		c_irq_params->adev = adev;
2802 		c_irq_params->irq_src = int_params.irq_source;
2803 
2804 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2805 				dm_crtc_high_irq, c_irq_params);
2806 	}
2807 
2808 	/* Use VUPDATE interrupt */
2809 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2810 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2811 		if (r) {
2812 			DRM_ERROR("Failed to add vupdate irq id!\n");
2813 			return r;
2814 		}
2815 
2816 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2817 		int_params.irq_source =
2818 			dc_interrupt_to_irq_source(dc, i, 0);
2819 
2820 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2821 
2822 		c_irq_params->adev = adev;
2823 		c_irq_params->irq_src = int_params.irq_source;
2824 
2825 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2826 				dm_vupdate_high_irq, c_irq_params);
2827 	}
2828 
2829 	/* Use GRPH_PFLIP interrupt */
2830 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2831 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2832 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2833 		if (r) {
2834 			DRM_ERROR("Failed to add page flip irq id!\n");
2835 			return r;
2836 		}
2837 
2838 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2839 		int_params.irq_source =
2840 			dc_interrupt_to_irq_source(dc, i, 0);
2841 
2842 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2843 
2844 		c_irq_params->adev = adev;
2845 		c_irq_params->irq_src = int_params.irq_source;
2846 
2847 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2848 				dm_pflip_high_irq, c_irq_params);
2849 
2850 	}
2851 
2852 	/* HPD */
2853 	r = amdgpu_irq_add_id(adev, client_id,
2854 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2855 	if (r) {
2856 		DRM_ERROR("Failed to add hpd irq id!\n");
2857 		return r;
2858 	}
2859 
2860 	register_hpd_handlers(adev);
2861 
2862 	return 0;
2863 }
2864 
2865 #if defined(CONFIG_DRM_AMD_DC_DCN)
2866 /* Register IRQ sources and initialize IRQ callbacks */
2867 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2868 {
2869 	struct dc *dc = adev->dm.dc;
2870 	struct common_irq_params *c_irq_params;
2871 	struct dc_interrupt_params int_params = {0};
2872 	int r;
2873 	int i;
2874 
2875 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2876 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2877 
2878 	/*
2879 	 * Actions of amdgpu_irq_add_id():
2880 	 * 1. Register a set() function with base driver.
2881 	 *    Base driver will call set() function to enable/disable an
2882 	 *    interrupt in DC hardware.
2883 	 * 2. Register amdgpu_dm_irq_handler().
2884 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2885 	 *    coming from DC hardware.
2886 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2887 	 *    for acknowledging and handling.
2888 	 */
2889 
2890 	/* Use VSTARTUP interrupt */
2891 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2892 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2893 			i++) {
2894 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2895 
2896 		if (r) {
2897 			DRM_ERROR("Failed to add crtc irq id!\n");
2898 			return r;
2899 		}
2900 
2901 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2902 		int_params.irq_source =
2903 			dc_interrupt_to_irq_source(dc, i, 0);
2904 
2905 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2906 
2907 		c_irq_params->adev = adev;
2908 		c_irq_params->irq_src = int_params.irq_source;
2909 
2910 		amdgpu_dm_irq_register_interrupt(
2911 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2912 	}
2913 
2914 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2915 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2916 	 * to trigger at end of each vblank, regardless of state of the lock,
2917 	 * matching DCE behaviour.
2918 	 */
2919 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2920 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2921 	     i++) {
2922 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2923 
2924 		if (r) {
2925 			DRM_ERROR("Failed to add vupdate irq id!\n");
2926 			return r;
2927 		}
2928 
2929 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2930 		int_params.irq_source =
2931 			dc_interrupt_to_irq_source(dc, i, 0);
2932 
2933 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2934 
2935 		c_irq_params->adev = adev;
2936 		c_irq_params->irq_src = int_params.irq_source;
2937 
2938 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2939 				dm_vupdate_high_irq, c_irq_params);
2940 	}
2941 
2942 	/* Use GRPH_PFLIP interrupt */
2943 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2944 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2945 			i++) {
2946 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2947 		if (r) {
2948 			DRM_ERROR("Failed to add page flip irq id!\n");
2949 			return r;
2950 		}
2951 
2952 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2953 		int_params.irq_source =
2954 			dc_interrupt_to_irq_source(dc, i, 0);
2955 
2956 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2957 
2958 		c_irq_params->adev = adev;
2959 		c_irq_params->irq_src = int_params.irq_source;
2960 
2961 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2962 				dm_pflip_high_irq, c_irq_params);
2963 
2964 	}
2965 
2966 	/* HPD */
2967 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2968 			&adev->hpd_irq);
2969 	if (r) {
2970 		DRM_ERROR("Failed to add hpd irq id!\n");
2971 		return r;
2972 	}
2973 
2974 	register_hpd_handlers(adev);
2975 
2976 	return 0;
2977 }
2978 #endif
2979 
2980 /*
2981  * Acquires the lock for the atomic state object and returns
2982  * the new atomic state.
2983  *
2984  * This should only be called during atomic check.
2985  */
2986 static int dm_atomic_get_state(struct drm_atomic_state *state,
2987 			       struct dm_atomic_state **dm_state)
2988 {
2989 	struct drm_device *dev = state->dev;
2990 	struct amdgpu_device *adev = drm_to_adev(dev);
2991 	struct amdgpu_display_manager *dm = &adev->dm;
2992 	struct drm_private_state *priv_state;
2993 
2994 	if (*dm_state)
2995 		return 0;
2996 
2997 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2998 	if (IS_ERR(priv_state))
2999 		return PTR_ERR(priv_state);
3000 
3001 	*dm_state = to_dm_atomic_state(priv_state);
3002 
3003 	return 0;
3004 }
3005 
3006 static struct dm_atomic_state *
3007 dm_atomic_get_new_state(struct drm_atomic_state *state)
3008 {
3009 	struct drm_device *dev = state->dev;
3010 	struct amdgpu_device *adev = drm_to_adev(dev);
3011 	struct amdgpu_display_manager *dm = &adev->dm;
3012 	struct drm_private_obj *obj;
3013 	struct drm_private_state *new_obj_state;
3014 	int i;
3015 
3016 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3017 		if (obj->funcs == dm->atomic_obj.funcs)
3018 			return to_dm_atomic_state(new_obj_state);
3019 	}
3020 
3021 	return NULL;
3022 }
3023 
3024 static struct drm_private_state *
3025 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3026 {
3027 	struct dm_atomic_state *old_state, *new_state;
3028 
3029 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3030 	if (!new_state)
3031 		return NULL;
3032 
3033 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3034 
3035 	old_state = to_dm_atomic_state(obj->state);
3036 
3037 	if (old_state && old_state->context)
3038 		new_state->context = dc_copy_state(old_state->context);
3039 
3040 	if (!new_state->context) {
3041 		kfree(new_state);
3042 		return NULL;
3043 	}
3044 
3045 	return &new_state->base;
3046 }
3047 
3048 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3049 				    struct drm_private_state *state)
3050 {
3051 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3052 
3053 	if (dm_state && dm_state->context)
3054 		dc_release_state(dm_state->context);
3055 
3056 	kfree(dm_state);
3057 }
3058 
3059 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3060 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3061 	.atomic_destroy_state = dm_atomic_destroy_state,
3062 };
3063 
3064 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3065 {
3066 	struct dm_atomic_state *state;
3067 	int r;
3068 
3069 	adev->mode_info.mode_config_initialized = true;
3070 
3071 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3072 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3073 
3074 	adev_to_drm(adev)->mode_config.max_width = 16384;
3075 	adev_to_drm(adev)->mode_config.max_height = 16384;
3076 
3077 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3078 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3079 	/* indicates support for immediate flip */
3080 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3081 
3082 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3083 
3084 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3085 	if (!state)
3086 		return -ENOMEM;
3087 
3088 	state->context = dc_create_state(adev->dm.dc);
3089 	if (!state->context) {
3090 		kfree(state);
3091 		return -ENOMEM;
3092 	}
3093 
3094 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3095 
3096 	drm_atomic_private_obj_init(adev_to_drm(adev),
3097 				    &adev->dm.atomic_obj,
3098 				    &state->base,
3099 				    &dm_atomic_state_funcs);
3100 
3101 	r = amdgpu_display_modeset_create_props(adev);
3102 	if (r) {
3103 		dc_release_state(state->context);
3104 		kfree(state);
3105 		return r;
3106 	}
3107 
3108 	r = amdgpu_dm_audio_init(adev);
3109 	if (r) {
3110 		dc_release_state(state->context);
3111 		kfree(state);
3112 		return r;
3113 	}
3114 
3115 	return 0;
3116 }
3117 
3118 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3119 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3120 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3121 
3122 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3123 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3124 
3125 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3126 {
3127 #if defined(CONFIG_ACPI)
3128 	struct amdgpu_dm_backlight_caps caps;
3129 
3130 	memset(&caps, 0, sizeof(caps));
3131 
3132 	if (dm->backlight_caps.caps_valid)
3133 		return;
3134 
3135 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3136 	if (caps.caps_valid) {
3137 		dm->backlight_caps.caps_valid = true;
3138 		if (caps.aux_support)
3139 			return;
3140 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3141 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3142 	} else {
3143 		dm->backlight_caps.min_input_signal =
3144 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3145 		dm->backlight_caps.max_input_signal =
3146 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3147 	}
3148 #else
3149 	if (dm->backlight_caps.aux_support)
3150 		return;
3151 
3152 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3153 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3154 #endif
3155 }
3156 
3157 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3158 {
3159 	bool rc;
3160 
3161 	if (!link)
3162 		return 1;
3163 
3164 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3165 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3166 
3167 	return rc ? 0 : 1;
3168 }
3169 
3170 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3171 				unsigned *min, unsigned *max)
3172 {
3173 	if (!caps)
3174 		return 0;
3175 
3176 	if (caps->aux_support) {
3177 		// Firmware limits are in nits, DC API wants millinits.
3178 		*max = 1000 * caps->aux_max_input_signal;
3179 		*min = 1000 * caps->aux_min_input_signal;
3180 	} else {
3181 		// Firmware limits are 8-bit, PWM control is 16-bit.
3182 		*max = 0x101 * caps->max_input_signal;
3183 		*min = 0x101 * caps->min_input_signal;
3184 	}
3185 	return 1;
3186 }
3187 
3188 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3189 					uint32_t brightness)
3190 {
3191 	unsigned min, max;
3192 
3193 	if (!get_brightness_range(caps, &min, &max))
3194 		return brightness;
3195 
3196 	// Rescale 0..255 to min..max
3197 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3198 				       AMDGPU_MAX_BL_LEVEL);
3199 }
3200 
3201 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3202 				      uint32_t brightness)
3203 {
3204 	unsigned min, max;
3205 
3206 	if (!get_brightness_range(caps, &min, &max))
3207 		return brightness;
3208 
3209 	if (brightness < min)
3210 		return 0;
3211 	// Rescale min..max to 0..255
3212 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3213 				 max - min);
3214 }
3215 
3216 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3217 {
3218 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3219 	struct amdgpu_dm_backlight_caps caps;
3220 	struct dc_link *link = NULL;
3221 	u32 brightness;
3222 	bool rc;
3223 
3224 	amdgpu_dm_update_backlight_caps(dm);
3225 	caps = dm->backlight_caps;
3226 
3227 	link = (struct dc_link *)dm->backlight_link;
3228 
3229 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3230 	// Change brightness based on AUX property
3231 	if (caps.aux_support)
3232 		return set_backlight_via_aux(link, brightness);
3233 
3234 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3235 
3236 	return rc ? 0 : 1;
3237 }
3238 
3239 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3240 {
3241 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3242 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3243 
3244 	if (ret == DC_ERROR_UNEXPECTED)
3245 		return bd->props.brightness;
3246 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3247 }
3248 
3249 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3250 	.options = BL_CORE_SUSPENDRESUME,
3251 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3252 	.update_status	= amdgpu_dm_backlight_update_status,
3253 };
3254 
3255 static void
3256 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3257 {
3258 	char bl_name[16];
3259 	struct backlight_properties props = { 0 };
3260 
3261 	amdgpu_dm_update_backlight_caps(dm);
3262 
3263 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3264 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3265 	props.type = BACKLIGHT_RAW;
3266 
3267 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3268 		 adev_to_drm(dm->adev)->primary->index);
3269 
3270 	dm->backlight_dev = backlight_device_register(bl_name,
3271 						      adev_to_drm(dm->adev)->dev,
3272 						      dm,
3273 						      &amdgpu_dm_backlight_ops,
3274 						      &props);
3275 
3276 	if (IS_ERR(dm->backlight_dev))
3277 		DRM_ERROR("DM: Backlight registration failed!\n");
3278 	else
3279 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3280 }
3281 
3282 #endif
3283 
3284 static int initialize_plane(struct amdgpu_display_manager *dm,
3285 			    struct amdgpu_mode_info *mode_info, int plane_id,
3286 			    enum drm_plane_type plane_type,
3287 			    const struct dc_plane_cap *plane_cap)
3288 {
3289 	struct drm_plane *plane;
3290 	unsigned long possible_crtcs;
3291 	int ret = 0;
3292 
3293 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3294 	if (!plane) {
3295 		DRM_ERROR("KMS: Failed to allocate plane\n");
3296 		return -ENOMEM;
3297 	}
3298 	plane->type = plane_type;
3299 
3300 	/*
3301 	 * HACK: IGT tests expect that the primary plane for a CRTC
3302 	 * can only have one possible CRTC. Only expose support for
3303 	 * any CRTC if they're not going to be used as a primary plane
3304 	 * for a CRTC - like overlay or underlay planes.
3305 	 */
3306 	possible_crtcs = 1 << plane_id;
3307 	if (plane_id >= dm->dc->caps.max_streams)
3308 		possible_crtcs = 0xff;
3309 
3310 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3311 
3312 	if (ret) {
3313 		DRM_ERROR("KMS: Failed to initialize plane\n");
3314 		kfree(plane);
3315 		return ret;
3316 	}
3317 
3318 	if (mode_info)
3319 		mode_info->planes[plane_id] = plane;
3320 
3321 	return ret;
3322 }
3323 
3324 
3325 static void register_backlight_device(struct amdgpu_display_manager *dm,
3326 				      struct dc_link *link)
3327 {
3328 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3329 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3330 
3331 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3332 	    link->type != dc_connection_none) {
3333 		/*
3334 		 * Event if registration failed, we should continue with
3335 		 * DM initialization because not having a backlight control
3336 		 * is better then a black screen.
3337 		 */
3338 		amdgpu_dm_register_backlight_device(dm);
3339 
3340 		if (dm->backlight_dev)
3341 			dm->backlight_link = link;
3342 	}
3343 #endif
3344 }
3345 
3346 
3347 /*
3348  * In this architecture, the association
3349  * connector -> encoder -> crtc
3350  * id not really requried. The crtc and connector will hold the
3351  * display_index as an abstraction to use with DAL component
3352  *
3353  * Returns 0 on success
3354  */
3355 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3356 {
3357 	struct amdgpu_display_manager *dm = &adev->dm;
3358 	int32_t i;
3359 	struct amdgpu_dm_connector *aconnector = NULL;
3360 	struct amdgpu_encoder *aencoder = NULL;
3361 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3362 	uint32_t link_cnt;
3363 	int32_t primary_planes;
3364 	enum dc_connection_type new_connection_type = dc_connection_none;
3365 	const struct dc_plane_cap *plane;
3366 
3367 	link_cnt = dm->dc->caps.max_links;
3368 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3369 		DRM_ERROR("DM: Failed to initialize mode config\n");
3370 		return -EINVAL;
3371 	}
3372 
3373 	/* There is one primary plane per CRTC */
3374 	primary_planes = dm->dc->caps.max_streams;
3375 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3376 
3377 	/*
3378 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3379 	 * Order is reversed to match iteration order in atomic check.
3380 	 */
3381 	for (i = (primary_planes - 1); i >= 0; i--) {
3382 		plane = &dm->dc->caps.planes[i];
3383 
3384 		if (initialize_plane(dm, mode_info, i,
3385 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3386 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3387 			goto fail;
3388 		}
3389 	}
3390 
3391 	/*
3392 	 * Initialize overlay planes, index starting after primary planes.
3393 	 * These planes have a higher DRM index than the primary planes since
3394 	 * they should be considered as having a higher z-order.
3395 	 * Order is reversed to match iteration order in atomic check.
3396 	 *
3397 	 * Only support DCN for now, and only expose one so we don't encourage
3398 	 * userspace to use up all the pipes.
3399 	 */
3400 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3401 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3402 
3403 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3404 			continue;
3405 
3406 		if (!plane->blends_with_above || !plane->blends_with_below)
3407 			continue;
3408 
3409 		if (!plane->pixel_format_support.argb8888)
3410 			continue;
3411 
3412 		if (initialize_plane(dm, NULL, primary_planes + i,
3413 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3414 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3415 			goto fail;
3416 		}
3417 
3418 		/* Only create one overlay plane. */
3419 		break;
3420 	}
3421 
3422 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3423 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3424 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3425 			goto fail;
3426 		}
3427 
3428 	dm->display_indexes_num = dm->dc->caps.max_streams;
3429 
3430 	/* loops over all connectors on the board */
3431 	for (i = 0; i < link_cnt; i++) {
3432 		struct dc_link *link = NULL;
3433 
3434 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3435 			DRM_ERROR(
3436 				"KMS: Cannot support more than %d display indexes\n",
3437 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3438 			continue;
3439 		}
3440 
3441 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3442 		if (!aconnector)
3443 			goto fail;
3444 
3445 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3446 		if (!aencoder)
3447 			goto fail;
3448 
3449 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3450 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3451 			goto fail;
3452 		}
3453 
3454 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3455 			DRM_ERROR("KMS: Failed to initialize connector\n");
3456 			goto fail;
3457 		}
3458 
3459 		link = dc_get_link_at_index(dm->dc, i);
3460 
3461 		if (!dc_link_detect_sink(link, &new_connection_type))
3462 			DRM_ERROR("KMS: Failed to detect connector\n");
3463 
3464 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3465 			emulated_link_detect(link);
3466 			amdgpu_dm_update_connector_after_detect(aconnector);
3467 
3468 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3469 			amdgpu_dm_update_connector_after_detect(aconnector);
3470 			register_backlight_device(dm, link);
3471 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3472 				amdgpu_dm_set_psr_caps(link);
3473 		}
3474 
3475 
3476 	}
3477 
3478 	/* Software is initialized. Now we can register interrupt handlers. */
3479 	switch (adev->asic_type) {
3480 #if defined(CONFIG_DRM_AMD_DC_SI)
3481 	case CHIP_TAHITI:
3482 	case CHIP_PITCAIRN:
3483 	case CHIP_VERDE:
3484 	case CHIP_OLAND:
3485 		if (dce60_register_irq_handlers(dm->adev)) {
3486 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3487 			goto fail;
3488 		}
3489 		break;
3490 #endif
3491 	case CHIP_BONAIRE:
3492 	case CHIP_HAWAII:
3493 	case CHIP_KAVERI:
3494 	case CHIP_KABINI:
3495 	case CHIP_MULLINS:
3496 	case CHIP_TONGA:
3497 	case CHIP_FIJI:
3498 	case CHIP_CARRIZO:
3499 	case CHIP_STONEY:
3500 	case CHIP_POLARIS11:
3501 	case CHIP_POLARIS10:
3502 	case CHIP_POLARIS12:
3503 	case CHIP_VEGAM:
3504 	case CHIP_VEGA10:
3505 	case CHIP_VEGA12:
3506 	case CHIP_VEGA20:
3507 		if (dce110_register_irq_handlers(dm->adev)) {
3508 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3509 			goto fail;
3510 		}
3511 		break;
3512 #if defined(CONFIG_DRM_AMD_DC_DCN)
3513 	case CHIP_RAVEN:
3514 	case CHIP_NAVI12:
3515 	case CHIP_NAVI10:
3516 	case CHIP_NAVI14:
3517 	case CHIP_RENOIR:
3518 	case CHIP_SIENNA_CICHLID:
3519 	case CHIP_NAVY_FLOUNDER:
3520 	case CHIP_DIMGREY_CAVEFISH:
3521 	case CHIP_VANGOGH:
3522 		if (dcn10_register_irq_handlers(dm->adev)) {
3523 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3524 			goto fail;
3525 		}
3526 		break;
3527 #endif
3528 	default:
3529 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3530 		goto fail;
3531 	}
3532 
3533 	return 0;
3534 fail:
3535 	kfree(aencoder);
3536 	kfree(aconnector);
3537 
3538 	return -EINVAL;
3539 }
3540 
3541 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3542 {
3543 	drm_mode_config_cleanup(dm->ddev);
3544 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3545 	return;
3546 }
3547 
3548 /******************************************************************************
3549  * amdgpu_display_funcs functions
3550  *****************************************************************************/
3551 
3552 /*
3553  * dm_bandwidth_update - program display watermarks
3554  *
3555  * @adev: amdgpu_device pointer
3556  *
3557  * Calculate and program the display watermarks and line buffer allocation.
3558  */
3559 static void dm_bandwidth_update(struct amdgpu_device *adev)
3560 {
3561 	/* TODO: implement later */
3562 }
3563 
3564 static const struct amdgpu_display_funcs dm_display_funcs = {
3565 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3566 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3567 	.backlight_set_level = NULL, /* never called for DC */
3568 	.backlight_get_level = NULL, /* never called for DC */
3569 	.hpd_sense = NULL,/* called unconditionally */
3570 	.hpd_set_polarity = NULL, /* called unconditionally */
3571 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3572 	.page_flip_get_scanoutpos =
3573 		dm_crtc_get_scanoutpos,/* called unconditionally */
3574 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3575 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3576 };
3577 
3578 #if defined(CONFIG_DEBUG_KERNEL_DC)
3579 
3580 static ssize_t s3_debug_store(struct device *device,
3581 			      struct device_attribute *attr,
3582 			      const char *buf,
3583 			      size_t count)
3584 {
3585 	int ret;
3586 	int s3_state;
3587 	struct drm_device *drm_dev = dev_get_drvdata(device);
3588 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3589 
3590 	ret = kstrtoint(buf, 0, &s3_state);
3591 
3592 	if (ret == 0) {
3593 		if (s3_state) {
3594 			dm_resume(adev);
3595 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3596 		} else
3597 			dm_suspend(adev);
3598 	}
3599 
3600 	return ret == 0 ? count : 0;
3601 }
3602 
3603 DEVICE_ATTR_WO(s3_debug);
3604 
3605 #endif
3606 
3607 static int dm_early_init(void *handle)
3608 {
3609 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3610 
3611 	switch (adev->asic_type) {
3612 #if defined(CONFIG_DRM_AMD_DC_SI)
3613 	case CHIP_TAHITI:
3614 	case CHIP_PITCAIRN:
3615 	case CHIP_VERDE:
3616 		adev->mode_info.num_crtc = 6;
3617 		adev->mode_info.num_hpd = 6;
3618 		adev->mode_info.num_dig = 6;
3619 		break;
3620 	case CHIP_OLAND:
3621 		adev->mode_info.num_crtc = 2;
3622 		adev->mode_info.num_hpd = 2;
3623 		adev->mode_info.num_dig = 2;
3624 		break;
3625 #endif
3626 	case CHIP_BONAIRE:
3627 	case CHIP_HAWAII:
3628 		adev->mode_info.num_crtc = 6;
3629 		adev->mode_info.num_hpd = 6;
3630 		adev->mode_info.num_dig = 6;
3631 		break;
3632 	case CHIP_KAVERI:
3633 		adev->mode_info.num_crtc = 4;
3634 		adev->mode_info.num_hpd = 6;
3635 		adev->mode_info.num_dig = 7;
3636 		break;
3637 	case CHIP_KABINI:
3638 	case CHIP_MULLINS:
3639 		adev->mode_info.num_crtc = 2;
3640 		adev->mode_info.num_hpd = 6;
3641 		adev->mode_info.num_dig = 6;
3642 		break;
3643 	case CHIP_FIJI:
3644 	case CHIP_TONGA:
3645 		adev->mode_info.num_crtc = 6;
3646 		adev->mode_info.num_hpd = 6;
3647 		adev->mode_info.num_dig = 7;
3648 		break;
3649 	case CHIP_CARRIZO:
3650 		adev->mode_info.num_crtc = 3;
3651 		adev->mode_info.num_hpd = 6;
3652 		adev->mode_info.num_dig = 9;
3653 		break;
3654 	case CHIP_STONEY:
3655 		adev->mode_info.num_crtc = 2;
3656 		adev->mode_info.num_hpd = 6;
3657 		adev->mode_info.num_dig = 9;
3658 		break;
3659 	case CHIP_POLARIS11:
3660 	case CHIP_POLARIS12:
3661 		adev->mode_info.num_crtc = 5;
3662 		adev->mode_info.num_hpd = 5;
3663 		adev->mode_info.num_dig = 5;
3664 		break;
3665 	case CHIP_POLARIS10:
3666 	case CHIP_VEGAM:
3667 		adev->mode_info.num_crtc = 6;
3668 		adev->mode_info.num_hpd = 6;
3669 		adev->mode_info.num_dig = 6;
3670 		break;
3671 	case CHIP_VEGA10:
3672 	case CHIP_VEGA12:
3673 	case CHIP_VEGA20:
3674 		adev->mode_info.num_crtc = 6;
3675 		adev->mode_info.num_hpd = 6;
3676 		adev->mode_info.num_dig = 6;
3677 		break;
3678 #if defined(CONFIG_DRM_AMD_DC_DCN)
3679 	case CHIP_RAVEN:
3680 	case CHIP_RENOIR:
3681 	case CHIP_VANGOGH:
3682 		adev->mode_info.num_crtc = 4;
3683 		adev->mode_info.num_hpd = 4;
3684 		adev->mode_info.num_dig = 4;
3685 		break;
3686 	case CHIP_NAVI10:
3687 	case CHIP_NAVI12:
3688 	case CHIP_SIENNA_CICHLID:
3689 	case CHIP_NAVY_FLOUNDER:
3690 		adev->mode_info.num_crtc = 6;
3691 		adev->mode_info.num_hpd = 6;
3692 		adev->mode_info.num_dig = 6;
3693 		break;
3694 	case CHIP_NAVI14:
3695 	case CHIP_DIMGREY_CAVEFISH:
3696 		adev->mode_info.num_crtc = 5;
3697 		adev->mode_info.num_hpd = 5;
3698 		adev->mode_info.num_dig = 5;
3699 		break;
3700 #endif
3701 	default:
3702 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3703 		return -EINVAL;
3704 	}
3705 
3706 	amdgpu_dm_set_irq_funcs(adev);
3707 
3708 	if (adev->mode_info.funcs == NULL)
3709 		adev->mode_info.funcs = &dm_display_funcs;
3710 
3711 	/*
3712 	 * Note: Do NOT change adev->audio_endpt_rreg and
3713 	 * adev->audio_endpt_wreg because they are initialised in
3714 	 * amdgpu_device_init()
3715 	 */
3716 #if defined(CONFIG_DEBUG_KERNEL_DC)
3717 	device_create_file(
3718 		adev_to_drm(adev)->dev,
3719 		&dev_attr_s3_debug);
3720 #endif
3721 
3722 	return 0;
3723 }
3724 
3725 static bool modeset_required(struct drm_crtc_state *crtc_state,
3726 			     struct dc_stream_state *new_stream,
3727 			     struct dc_stream_state *old_stream)
3728 {
3729 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3730 }
3731 
3732 static bool modereset_required(struct drm_crtc_state *crtc_state)
3733 {
3734 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3735 }
3736 
3737 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3738 {
3739 	drm_encoder_cleanup(encoder);
3740 	kfree(encoder);
3741 }
3742 
3743 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3744 	.destroy = amdgpu_dm_encoder_destroy,
3745 };
3746 
3747 
3748 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3749 				struct dc_scaling_info *scaling_info)
3750 {
3751 	int scale_w, scale_h;
3752 
3753 	memset(scaling_info, 0, sizeof(*scaling_info));
3754 
3755 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3756 	scaling_info->src_rect.x = state->src_x >> 16;
3757 	scaling_info->src_rect.y = state->src_y >> 16;
3758 
3759 	scaling_info->src_rect.width = state->src_w >> 16;
3760 	if (scaling_info->src_rect.width == 0)
3761 		return -EINVAL;
3762 
3763 	scaling_info->src_rect.height = state->src_h >> 16;
3764 	if (scaling_info->src_rect.height == 0)
3765 		return -EINVAL;
3766 
3767 	scaling_info->dst_rect.x = state->crtc_x;
3768 	scaling_info->dst_rect.y = state->crtc_y;
3769 
3770 	if (state->crtc_w == 0)
3771 		return -EINVAL;
3772 
3773 	scaling_info->dst_rect.width = state->crtc_w;
3774 
3775 	if (state->crtc_h == 0)
3776 		return -EINVAL;
3777 
3778 	scaling_info->dst_rect.height = state->crtc_h;
3779 
3780 	/* DRM doesn't specify clipping on destination output. */
3781 	scaling_info->clip_rect = scaling_info->dst_rect;
3782 
3783 	/* TODO: Validate scaling per-format with DC plane caps */
3784 	scale_w = scaling_info->dst_rect.width * 1000 /
3785 		  scaling_info->src_rect.width;
3786 
3787 	if (scale_w < 250 || scale_w > 16000)
3788 		return -EINVAL;
3789 
3790 	scale_h = scaling_info->dst_rect.height * 1000 /
3791 		  scaling_info->src_rect.height;
3792 
3793 	if (scale_h < 250 || scale_h > 16000)
3794 		return -EINVAL;
3795 
3796 	/*
3797 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3798 	 * assume reasonable defaults based on the format.
3799 	 */
3800 
3801 	return 0;
3802 }
3803 
3804 static void
3805 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3806 				 uint64_t tiling_flags)
3807 {
3808 	/* Fill GFX8 params */
3809 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3810 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3811 
3812 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3813 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3814 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3815 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3816 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3817 
3818 		/* XXX fix me for VI */
3819 		tiling_info->gfx8.num_banks = num_banks;
3820 		tiling_info->gfx8.array_mode =
3821 				DC_ARRAY_2D_TILED_THIN1;
3822 		tiling_info->gfx8.tile_split = tile_split;
3823 		tiling_info->gfx8.bank_width = bankw;
3824 		tiling_info->gfx8.bank_height = bankh;
3825 		tiling_info->gfx8.tile_aspect = mtaspect;
3826 		tiling_info->gfx8.tile_mode =
3827 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3828 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3829 			== DC_ARRAY_1D_TILED_THIN1) {
3830 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3831 	}
3832 
3833 	tiling_info->gfx8.pipe_config =
3834 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3835 }
3836 
3837 static void
3838 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3839 				  union dc_tiling_info *tiling_info)
3840 {
3841 	tiling_info->gfx9.num_pipes =
3842 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3843 	tiling_info->gfx9.num_banks =
3844 		adev->gfx.config.gb_addr_config_fields.num_banks;
3845 	tiling_info->gfx9.pipe_interleave =
3846 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3847 	tiling_info->gfx9.num_shader_engines =
3848 		adev->gfx.config.gb_addr_config_fields.num_se;
3849 	tiling_info->gfx9.max_compressed_frags =
3850 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3851 	tiling_info->gfx9.num_rb_per_se =
3852 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3853 	tiling_info->gfx9.shaderEnable = 1;
3854 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3855 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3856 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3857 	    adev->asic_type == CHIP_VANGOGH)
3858 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3859 }
3860 
3861 static int
3862 validate_dcc(struct amdgpu_device *adev,
3863 	     const enum surface_pixel_format format,
3864 	     const enum dc_rotation_angle rotation,
3865 	     const union dc_tiling_info *tiling_info,
3866 	     const struct dc_plane_dcc_param *dcc,
3867 	     const struct dc_plane_address *address,
3868 	     const struct plane_size *plane_size)
3869 {
3870 	struct dc *dc = adev->dm.dc;
3871 	struct dc_dcc_surface_param input;
3872 	struct dc_surface_dcc_cap output;
3873 
3874 	memset(&input, 0, sizeof(input));
3875 	memset(&output, 0, sizeof(output));
3876 
3877 	if (!dcc->enable)
3878 		return 0;
3879 
3880 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3881 	    !dc->cap_funcs.get_dcc_compression_cap)
3882 		return -EINVAL;
3883 
3884 	input.format = format;
3885 	input.surface_size.width = plane_size->surface_size.width;
3886 	input.surface_size.height = plane_size->surface_size.height;
3887 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3888 
3889 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3890 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3891 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3892 		input.scan = SCAN_DIRECTION_VERTICAL;
3893 
3894 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3895 		return -EINVAL;
3896 
3897 	if (!output.capable)
3898 		return -EINVAL;
3899 
3900 	if (dcc->independent_64b_blks == 0 &&
3901 	    output.grph.rgb.independent_64b_blks != 0)
3902 		return -EINVAL;
3903 
3904 	return 0;
3905 }
3906 
3907 static bool
3908 modifier_has_dcc(uint64_t modifier)
3909 {
3910 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3911 }
3912 
3913 static unsigned
3914 modifier_gfx9_swizzle_mode(uint64_t modifier)
3915 {
3916 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3917 		return 0;
3918 
3919 	return AMD_FMT_MOD_GET(TILE, modifier);
3920 }
3921 
3922 static const struct drm_format_info *
3923 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3924 {
3925 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3926 }
3927 
3928 static void
3929 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3930 				    union dc_tiling_info *tiling_info,
3931 				    uint64_t modifier)
3932 {
3933 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3934 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3935 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3936 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3937 
3938 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
3939 
3940 	if (!IS_AMD_FMT_MOD(modifier))
3941 		return;
3942 
3943 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3944 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3945 
3946 	if (adev->family >= AMDGPU_FAMILY_NV) {
3947 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3948 	} else {
3949 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3950 
3951 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3952 	}
3953 }
3954 
3955 enum dm_micro_swizzle {
3956 	MICRO_SWIZZLE_Z = 0,
3957 	MICRO_SWIZZLE_S = 1,
3958 	MICRO_SWIZZLE_D = 2,
3959 	MICRO_SWIZZLE_R = 3
3960 };
3961 
3962 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3963 					  uint32_t format,
3964 					  uint64_t modifier)
3965 {
3966 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
3967 	const struct drm_format_info *info = drm_format_info(format);
3968 
3969 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3970 
3971 	if (!info)
3972 		return false;
3973 
3974 	/*
3975 	 * We always have to allow this modifier, because core DRM still
3976 	 * checks LINEAR support if userspace does not provide modifers.
3977 	 */
3978 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3979 		return true;
3980 
3981 	/*
3982 	 * The arbitrary tiling support for multiplane formats has not been hooked
3983 	 * up.
3984 	 */
3985 	if (info->num_planes > 1)
3986 		return false;
3987 
3988 	/*
3989 	 * For D swizzle the canonical modifier depends on the bpp, so check
3990 	 * it here.
3991 	 */
3992 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
3993 	    adev->family >= AMDGPU_FAMILY_NV) {
3994 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
3995 			return false;
3996 	}
3997 
3998 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
3999 	    info->cpp[0] < 8)
4000 		return false;
4001 
4002 	if (modifier_has_dcc(modifier)) {
4003 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4004 		if (info->cpp[0] != 4)
4005 			return false;
4006 	}
4007 
4008 	return true;
4009 }
4010 
4011 static void
4012 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4013 {
4014 	if (!*mods)
4015 		return;
4016 
4017 	if (*cap - *size < 1) {
4018 		uint64_t new_cap = *cap * 2;
4019 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4020 
4021 		if (!new_mods) {
4022 			kfree(*mods);
4023 			*mods = NULL;
4024 			return;
4025 		}
4026 
4027 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4028 		kfree(*mods);
4029 		*mods = new_mods;
4030 		*cap = new_cap;
4031 	}
4032 
4033 	(*mods)[*size] = mod;
4034 	*size += 1;
4035 }
4036 
4037 static void
4038 add_gfx9_modifiers(const struct amdgpu_device *adev,
4039 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4040 {
4041 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4042 	int pipe_xor_bits = min(8, pipes +
4043 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4044 	int bank_xor_bits = min(8 - pipe_xor_bits,
4045 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4046 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4047 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4048 
4049 
4050 	if (adev->family == AMDGPU_FAMILY_RV) {
4051 		/* Raven2 and later */
4052 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4053 
4054 		/*
4055 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4056 		 * doesn't support _D on DCN
4057 		 */
4058 
4059 		if (has_constant_encode) {
4060 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4061 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4062 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4063 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4064 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4065 				    AMD_FMT_MOD_SET(DCC, 1) |
4066 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4067 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4068 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4069 		}
4070 
4071 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4072 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4073 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4074 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4075 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4076 			    AMD_FMT_MOD_SET(DCC, 1) |
4077 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4078 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4079 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4080 
4081 		if (has_constant_encode) {
4082 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4083 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4084 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4085 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4086 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4087 				    AMD_FMT_MOD_SET(DCC, 1) |
4088 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4089 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4090 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4091 
4092 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4093 				    AMD_FMT_MOD_SET(RB, rb) |
4094 				    AMD_FMT_MOD_SET(PIPE, pipes));
4095 		}
4096 
4097 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4098 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4099 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4100 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4101 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4102 			    AMD_FMT_MOD_SET(DCC, 1) |
4103 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4104 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4105 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4106 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4107 			    AMD_FMT_MOD_SET(RB, rb) |
4108 			    AMD_FMT_MOD_SET(PIPE, pipes));
4109 	}
4110 
4111 	/*
4112 	 * Only supported for 64bpp on Raven, will be filtered on format in
4113 	 * dm_plane_format_mod_supported.
4114 	 */
4115 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4116 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4117 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4118 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4119 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4120 
4121 	if (adev->family == AMDGPU_FAMILY_RV) {
4122 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4123 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4124 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4125 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4126 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4127 	}
4128 
4129 	/*
4130 	 * Only supported for 64bpp on Raven, will be filtered on format in
4131 	 * dm_plane_format_mod_supported.
4132 	 */
4133 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4134 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4135 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4136 
4137 	if (adev->family == AMDGPU_FAMILY_RV) {
4138 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4139 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4140 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4141 	}
4142 }
4143 
4144 static void
4145 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4146 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4147 {
4148 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4149 
4150 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4151 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4152 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4153 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4154 		    AMD_FMT_MOD_SET(DCC, 1) |
4155 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4156 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4157 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4158 
4159 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4160 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4161 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4162 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4163 		    AMD_FMT_MOD_SET(DCC, 1) |
4164 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4165 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4166 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4167 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4168 
4169 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4170 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4171 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4172 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4173 
4174 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4175 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4176 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4177 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4178 
4179 
4180 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4181 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4182 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4183 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4184 
4185 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4186 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4187 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4188 }
4189 
4190 static void
4191 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4192 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4193 {
4194 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4195 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4196 
4197 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4198 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4199 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4200 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4201 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4202 		    AMD_FMT_MOD_SET(DCC, 1) |
4203 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4204 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4205 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4206 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4207 
4208 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4210 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4211 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4212 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4213 		    AMD_FMT_MOD_SET(DCC, 1) |
4214 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4215 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4216 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4217 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4218 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4219 
4220 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4221 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4222 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4223 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4224 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4225 
4226 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4228 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4229 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4230 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4231 
4232 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4233 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4234 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4235 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4236 
4237 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4238 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4239 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4240 }
4241 
4242 static int
4243 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4244 {
4245 	uint64_t size = 0, capacity = 128;
4246 	*mods = NULL;
4247 
4248 	/* We have not hooked up any pre-GFX9 modifiers. */
4249 	if (adev->family < AMDGPU_FAMILY_AI)
4250 		return 0;
4251 
4252 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4253 
4254 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4255 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4256 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4257 		return *mods ? 0 : -ENOMEM;
4258 	}
4259 
4260 	switch (adev->family) {
4261 	case AMDGPU_FAMILY_AI:
4262 	case AMDGPU_FAMILY_RV:
4263 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4264 		break;
4265 	case AMDGPU_FAMILY_NV:
4266 	case AMDGPU_FAMILY_VGH:
4267 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4268 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4269 		else
4270 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4271 		break;
4272 	}
4273 
4274 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4275 
4276 	/* INVALID marks the end of the list. */
4277 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4278 
4279 	if (!*mods)
4280 		return -ENOMEM;
4281 
4282 	return 0;
4283 }
4284 
4285 static int
4286 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4287 					  const struct amdgpu_framebuffer *afb,
4288 					  const enum surface_pixel_format format,
4289 					  const enum dc_rotation_angle rotation,
4290 					  const struct plane_size *plane_size,
4291 					  union dc_tiling_info *tiling_info,
4292 					  struct dc_plane_dcc_param *dcc,
4293 					  struct dc_plane_address *address,
4294 					  const bool force_disable_dcc)
4295 {
4296 	const uint64_t modifier = afb->base.modifier;
4297 	int ret;
4298 
4299 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4300 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4301 
4302 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4303 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4304 
4305 		dcc->enable = 1;
4306 		dcc->meta_pitch = afb->base.pitches[1];
4307 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4308 
4309 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4310 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4311 	}
4312 
4313 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4314 	if (ret)
4315 		return ret;
4316 
4317 	return 0;
4318 }
4319 
4320 static int
4321 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4322 			     const struct amdgpu_framebuffer *afb,
4323 			     const enum surface_pixel_format format,
4324 			     const enum dc_rotation_angle rotation,
4325 			     const uint64_t tiling_flags,
4326 			     union dc_tiling_info *tiling_info,
4327 			     struct plane_size *plane_size,
4328 			     struct dc_plane_dcc_param *dcc,
4329 			     struct dc_plane_address *address,
4330 			     bool tmz_surface,
4331 			     bool force_disable_dcc)
4332 {
4333 	const struct drm_framebuffer *fb = &afb->base;
4334 	int ret;
4335 
4336 	memset(tiling_info, 0, sizeof(*tiling_info));
4337 	memset(plane_size, 0, sizeof(*plane_size));
4338 	memset(dcc, 0, sizeof(*dcc));
4339 	memset(address, 0, sizeof(*address));
4340 
4341 	address->tmz_surface = tmz_surface;
4342 
4343 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4344 		uint64_t addr = afb->address + fb->offsets[0];
4345 
4346 		plane_size->surface_size.x = 0;
4347 		plane_size->surface_size.y = 0;
4348 		plane_size->surface_size.width = fb->width;
4349 		plane_size->surface_size.height = fb->height;
4350 		plane_size->surface_pitch =
4351 			fb->pitches[0] / fb->format->cpp[0];
4352 
4353 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4354 		address->grph.addr.low_part = lower_32_bits(addr);
4355 		address->grph.addr.high_part = upper_32_bits(addr);
4356 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4357 		uint64_t luma_addr = afb->address + fb->offsets[0];
4358 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4359 
4360 		plane_size->surface_size.x = 0;
4361 		plane_size->surface_size.y = 0;
4362 		plane_size->surface_size.width = fb->width;
4363 		plane_size->surface_size.height = fb->height;
4364 		plane_size->surface_pitch =
4365 			fb->pitches[0] / fb->format->cpp[0];
4366 
4367 		plane_size->chroma_size.x = 0;
4368 		plane_size->chroma_size.y = 0;
4369 		/* TODO: set these based on surface format */
4370 		plane_size->chroma_size.width = fb->width / 2;
4371 		plane_size->chroma_size.height = fb->height / 2;
4372 
4373 		plane_size->chroma_pitch =
4374 			fb->pitches[1] / fb->format->cpp[1];
4375 
4376 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4377 		address->video_progressive.luma_addr.low_part =
4378 			lower_32_bits(luma_addr);
4379 		address->video_progressive.luma_addr.high_part =
4380 			upper_32_bits(luma_addr);
4381 		address->video_progressive.chroma_addr.low_part =
4382 			lower_32_bits(chroma_addr);
4383 		address->video_progressive.chroma_addr.high_part =
4384 			upper_32_bits(chroma_addr);
4385 	}
4386 
4387 	if (adev->family >= AMDGPU_FAMILY_AI) {
4388 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4389 								rotation, plane_size,
4390 								tiling_info, dcc,
4391 								address,
4392 								force_disable_dcc);
4393 		if (ret)
4394 			return ret;
4395 	} else {
4396 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4397 	}
4398 
4399 	return 0;
4400 }
4401 
4402 static void
4403 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4404 			       bool *per_pixel_alpha, bool *global_alpha,
4405 			       int *global_alpha_value)
4406 {
4407 	*per_pixel_alpha = false;
4408 	*global_alpha = false;
4409 	*global_alpha_value = 0xff;
4410 
4411 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4412 		return;
4413 
4414 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4415 		static const uint32_t alpha_formats[] = {
4416 			DRM_FORMAT_ARGB8888,
4417 			DRM_FORMAT_RGBA8888,
4418 			DRM_FORMAT_ABGR8888,
4419 		};
4420 		uint32_t format = plane_state->fb->format->format;
4421 		unsigned int i;
4422 
4423 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4424 			if (format == alpha_formats[i]) {
4425 				*per_pixel_alpha = true;
4426 				break;
4427 			}
4428 		}
4429 	}
4430 
4431 	if (plane_state->alpha < 0xffff) {
4432 		*global_alpha = true;
4433 		*global_alpha_value = plane_state->alpha >> 8;
4434 	}
4435 }
4436 
4437 static int
4438 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4439 			    const enum surface_pixel_format format,
4440 			    enum dc_color_space *color_space)
4441 {
4442 	bool full_range;
4443 
4444 	*color_space = COLOR_SPACE_SRGB;
4445 
4446 	/* DRM color properties only affect non-RGB formats. */
4447 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4448 		return 0;
4449 
4450 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4451 
4452 	switch (plane_state->color_encoding) {
4453 	case DRM_COLOR_YCBCR_BT601:
4454 		if (full_range)
4455 			*color_space = COLOR_SPACE_YCBCR601;
4456 		else
4457 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4458 		break;
4459 
4460 	case DRM_COLOR_YCBCR_BT709:
4461 		if (full_range)
4462 			*color_space = COLOR_SPACE_YCBCR709;
4463 		else
4464 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4465 		break;
4466 
4467 	case DRM_COLOR_YCBCR_BT2020:
4468 		if (full_range)
4469 			*color_space = COLOR_SPACE_2020_YCBCR;
4470 		else
4471 			return -EINVAL;
4472 		break;
4473 
4474 	default:
4475 		return -EINVAL;
4476 	}
4477 
4478 	return 0;
4479 }
4480 
4481 static int
4482 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4483 			    const struct drm_plane_state *plane_state,
4484 			    const uint64_t tiling_flags,
4485 			    struct dc_plane_info *plane_info,
4486 			    struct dc_plane_address *address,
4487 			    bool tmz_surface,
4488 			    bool force_disable_dcc)
4489 {
4490 	const struct drm_framebuffer *fb = plane_state->fb;
4491 	const struct amdgpu_framebuffer *afb =
4492 		to_amdgpu_framebuffer(plane_state->fb);
4493 	struct drm_format_name_buf format_name;
4494 	int ret;
4495 
4496 	memset(plane_info, 0, sizeof(*plane_info));
4497 
4498 	switch (fb->format->format) {
4499 	case DRM_FORMAT_C8:
4500 		plane_info->format =
4501 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4502 		break;
4503 	case DRM_FORMAT_RGB565:
4504 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4505 		break;
4506 	case DRM_FORMAT_XRGB8888:
4507 	case DRM_FORMAT_ARGB8888:
4508 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4509 		break;
4510 	case DRM_FORMAT_XRGB2101010:
4511 	case DRM_FORMAT_ARGB2101010:
4512 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4513 		break;
4514 	case DRM_FORMAT_XBGR2101010:
4515 	case DRM_FORMAT_ABGR2101010:
4516 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4517 		break;
4518 	case DRM_FORMAT_XBGR8888:
4519 	case DRM_FORMAT_ABGR8888:
4520 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4521 		break;
4522 	case DRM_FORMAT_NV21:
4523 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4524 		break;
4525 	case DRM_FORMAT_NV12:
4526 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4527 		break;
4528 	case DRM_FORMAT_P010:
4529 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4530 		break;
4531 	case DRM_FORMAT_XRGB16161616F:
4532 	case DRM_FORMAT_ARGB16161616F:
4533 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4534 		break;
4535 	case DRM_FORMAT_XBGR16161616F:
4536 	case DRM_FORMAT_ABGR16161616F:
4537 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4538 		break;
4539 	default:
4540 		DRM_ERROR(
4541 			"Unsupported screen format %s\n",
4542 			drm_get_format_name(fb->format->format, &format_name));
4543 		return -EINVAL;
4544 	}
4545 
4546 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4547 	case DRM_MODE_ROTATE_0:
4548 		plane_info->rotation = ROTATION_ANGLE_0;
4549 		break;
4550 	case DRM_MODE_ROTATE_90:
4551 		plane_info->rotation = ROTATION_ANGLE_90;
4552 		break;
4553 	case DRM_MODE_ROTATE_180:
4554 		plane_info->rotation = ROTATION_ANGLE_180;
4555 		break;
4556 	case DRM_MODE_ROTATE_270:
4557 		plane_info->rotation = ROTATION_ANGLE_270;
4558 		break;
4559 	default:
4560 		plane_info->rotation = ROTATION_ANGLE_0;
4561 		break;
4562 	}
4563 
4564 	plane_info->visible = true;
4565 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4566 
4567 	plane_info->layer_index = 0;
4568 
4569 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4570 					  &plane_info->color_space);
4571 	if (ret)
4572 		return ret;
4573 
4574 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4575 					   plane_info->rotation, tiling_flags,
4576 					   &plane_info->tiling_info,
4577 					   &plane_info->plane_size,
4578 					   &plane_info->dcc, address, tmz_surface,
4579 					   force_disable_dcc);
4580 	if (ret)
4581 		return ret;
4582 
4583 	fill_blending_from_plane_state(
4584 		plane_state, &plane_info->per_pixel_alpha,
4585 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4586 
4587 	return 0;
4588 }
4589 
4590 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4591 				    struct dc_plane_state *dc_plane_state,
4592 				    struct drm_plane_state *plane_state,
4593 				    struct drm_crtc_state *crtc_state)
4594 {
4595 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4596 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4597 	struct dc_scaling_info scaling_info;
4598 	struct dc_plane_info plane_info;
4599 	int ret;
4600 	bool force_disable_dcc = false;
4601 
4602 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4603 	if (ret)
4604 		return ret;
4605 
4606 	dc_plane_state->src_rect = scaling_info.src_rect;
4607 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4608 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4609 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4610 
4611 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4612 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4613 					  afb->tiling_flags,
4614 					  &plane_info,
4615 					  &dc_plane_state->address,
4616 					  afb->tmz_surface,
4617 					  force_disable_dcc);
4618 	if (ret)
4619 		return ret;
4620 
4621 	dc_plane_state->format = plane_info.format;
4622 	dc_plane_state->color_space = plane_info.color_space;
4623 	dc_plane_state->format = plane_info.format;
4624 	dc_plane_state->plane_size = plane_info.plane_size;
4625 	dc_plane_state->rotation = plane_info.rotation;
4626 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4627 	dc_plane_state->stereo_format = plane_info.stereo_format;
4628 	dc_plane_state->tiling_info = plane_info.tiling_info;
4629 	dc_plane_state->visible = plane_info.visible;
4630 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4631 	dc_plane_state->global_alpha = plane_info.global_alpha;
4632 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4633 	dc_plane_state->dcc = plane_info.dcc;
4634 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4635 
4636 	/*
4637 	 * Always set input transfer function, since plane state is refreshed
4638 	 * every time.
4639 	 */
4640 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4641 	if (ret)
4642 		return ret;
4643 
4644 	return 0;
4645 }
4646 
4647 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4648 					   const struct dm_connector_state *dm_state,
4649 					   struct dc_stream_state *stream)
4650 {
4651 	enum amdgpu_rmx_type rmx_type;
4652 
4653 	struct rect src = { 0 }; /* viewport in composition space*/
4654 	struct rect dst = { 0 }; /* stream addressable area */
4655 
4656 	/* no mode. nothing to be done */
4657 	if (!mode)
4658 		return;
4659 
4660 	/* Full screen scaling by default */
4661 	src.width = mode->hdisplay;
4662 	src.height = mode->vdisplay;
4663 	dst.width = stream->timing.h_addressable;
4664 	dst.height = stream->timing.v_addressable;
4665 
4666 	if (dm_state) {
4667 		rmx_type = dm_state->scaling;
4668 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4669 			if (src.width * dst.height <
4670 					src.height * dst.width) {
4671 				/* height needs less upscaling/more downscaling */
4672 				dst.width = src.width *
4673 						dst.height / src.height;
4674 			} else {
4675 				/* width needs less upscaling/more downscaling */
4676 				dst.height = src.height *
4677 						dst.width / src.width;
4678 			}
4679 		} else if (rmx_type == RMX_CENTER) {
4680 			dst = src;
4681 		}
4682 
4683 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4684 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4685 
4686 		if (dm_state->underscan_enable) {
4687 			dst.x += dm_state->underscan_hborder / 2;
4688 			dst.y += dm_state->underscan_vborder / 2;
4689 			dst.width -= dm_state->underscan_hborder;
4690 			dst.height -= dm_state->underscan_vborder;
4691 		}
4692 	}
4693 
4694 	stream->src = src;
4695 	stream->dst = dst;
4696 
4697 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4698 			dst.x, dst.y, dst.width, dst.height);
4699 
4700 }
4701 
4702 static enum dc_color_depth
4703 convert_color_depth_from_display_info(const struct drm_connector *connector,
4704 				      bool is_y420, int requested_bpc)
4705 {
4706 	uint8_t bpc;
4707 
4708 	if (is_y420) {
4709 		bpc = 8;
4710 
4711 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4712 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4713 			bpc = 16;
4714 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4715 			bpc = 12;
4716 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4717 			bpc = 10;
4718 	} else {
4719 		bpc = (uint8_t)connector->display_info.bpc;
4720 		/* Assume 8 bpc by default if no bpc is specified. */
4721 		bpc = bpc ? bpc : 8;
4722 	}
4723 
4724 	if (requested_bpc > 0) {
4725 		/*
4726 		 * Cap display bpc based on the user requested value.
4727 		 *
4728 		 * The value for state->max_bpc may not correctly updated
4729 		 * depending on when the connector gets added to the state
4730 		 * or if this was called outside of atomic check, so it
4731 		 * can't be used directly.
4732 		 */
4733 		bpc = min_t(u8, bpc, requested_bpc);
4734 
4735 		/* Round down to the nearest even number. */
4736 		bpc = bpc - (bpc & 1);
4737 	}
4738 
4739 	switch (bpc) {
4740 	case 0:
4741 		/*
4742 		 * Temporary Work around, DRM doesn't parse color depth for
4743 		 * EDID revision before 1.4
4744 		 * TODO: Fix edid parsing
4745 		 */
4746 		return COLOR_DEPTH_888;
4747 	case 6:
4748 		return COLOR_DEPTH_666;
4749 	case 8:
4750 		return COLOR_DEPTH_888;
4751 	case 10:
4752 		return COLOR_DEPTH_101010;
4753 	case 12:
4754 		return COLOR_DEPTH_121212;
4755 	case 14:
4756 		return COLOR_DEPTH_141414;
4757 	case 16:
4758 		return COLOR_DEPTH_161616;
4759 	default:
4760 		return COLOR_DEPTH_UNDEFINED;
4761 	}
4762 }
4763 
4764 static enum dc_aspect_ratio
4765 get_aspect_ratio(const struct drm_display_mode *mode_in)
4766 {
4767 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4768 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4769 }
4770 
4771 static enum dc_color_space
4772 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4773 {
4774 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4775 
4776 	switch (dc_crtc_timing->pixel_encoding)	{
4777 	case PIXEL_ENCODING_YCBCR422:
4778 	case PIXEL_ENCODING_YCBCR444:
4779 	case PIXEL_ENCODING_YCBCR420:
4780 	{
4781 		/*
4782 		 * 27030khz is the separation point between HDTV and SDTV
4783 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4784 		 * respectively
4785 		 */
4786 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4787 			if (dc_crtc_timing->flags.Y_ONLY)
4788 				color_space =
4789 					COLOR_SPACE_YCBCR709_LIMITED;
4790 			else
4791 				color_space = COLOR_SPACE_YCBCR709;
4792 		} else {
4793 			if (dc_crtc_timing->flags.Y_ONLY)
4794 				color_space =
4795 					COLOR_SPACE_YCBCR601_LIMITED;
4796 			else
4797 				color_space = COLOR_SPACE_YCBCR601;
4798 		}
4799 
4800 	}
4801 	break;
4802 	case PIXEL_ENCODING_RGB:
4803 		color_space = COLOR_SPACE_SRGB;
4804 		break;
4805 
4806 	default:
4807 		WARN_ON(1);
4808 		break;
4809 	}
4810 
4811 	return color_space;
4812 }
4813 
4814 static bool adjust_colour_depth_from_display_info(
4815 	struct dc_crtc_timing *timing_out,
4816 	const struct drm_display_info *info)
4817 {
4818 	enum dc_color_depth depth = timing_out->display_color_depth;
4819 	int normalized_clk;
4820 	do {
4821 		normalized_clk = timing_out->pix_clk_100hz / 10;
4822 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4823 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4824 			normalized_clk /= 2;
4825 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4826 		switch (depth) {
4827 		case COLOR_DEPTH_888:
4828 			break;
4829 		case COLOR_DEPTH_101010:
4830 			normalized_clk = (normalized_clk * 30) / 24;
4831 			break;
4832 		case COLOR_DEPTH_121212:
4833 			normalized_clk = (normalized_clk * 36) / 24;
4834 			break;
4835 		case COLOR_DEPTH_161616:
4836 			normalized_clk = (normalized_clk * 48) / 24;
4837 			break;
4838 		default:
4839 			/* The above depths are the only ones valid for HDMI. */
4840 			return false;
4841 		}
4842 		if (normalized_clk <= info->max_tmds_clock) {
4843 			timing_out->display_color_depth = depth;
4844 			return true;
4845 		}
4846 	} while (--depth > COLOR_DEPTH_666);
4847 	return false;
4848 }
4849 
4850 static void fill_stream_properties_from_drm_display_mode(
4851 	struct dc_stream_state *stream,
4852 	const struct drm_display_mode *mode_in,
4853 	const struct drm_connector *connector,
4854 	const struct drm_connector_state *connector_state,
4855 	const struct dc_stream_state *old_stream,
4856 	int requested_bpc)
4857 {
4858 	struct dc_crtc_timing *timing_out = &stream->timing;
4859 	const struct drm_display_info *info = &connector->display_info;
4860 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4861 	struct hdmi_vendor_infoframe hv_frame;
4862 	struct hdmi_avi_infoframe avi_frame;
4863 
4864 	memset(&hv_frame, 0, sizeof(hv_frame));
4865 	memset(&avi_frame, 0, sizeof(avi_frame));
4866 
4867 	timing_out->h_border_left = 0;
4868 	timing_out->h_border_right = 0;
4869 	timing_out->v_border_top = 0;
4870 	timing_out->v_border_bottom = 0;
4871 	/* TODO: un-hardcode */
4872 	if (drm_mode_is_420_only(info, mode_in)
4873 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4874 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4875 	else if (drm_mode_is_420_also(info, mode_in)
4876 			&& aconnector->force_yuv420_output)
4877 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4878 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4879 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4880 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4881 	else
4882 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4883 
4884 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4885 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4886 		connector,
4887 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4888 		requested_bpc);
4889 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4890 	timing_out->hdmi_vic = 0;
4891 
4892 	if(old_stream) {
4893 		timing_out->vic = old_stream->timing.vic;
4894 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4895 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4896 	} else {
4897 		timing_out->vic = drm_match_cea_mode(mode_in);
4898 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4899 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4900 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4901 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4902 	}
4903 
4904 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4905 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4906 		timing_out->vic = avi_frame.video_code;
4907 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4908 		timing_out->hdmi_vic = hv_frame.vic;
4909 	}
4910 
4911 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4912 	timing_out->h_total = mode_in->crtc_htotal;
4913 	timing_out->h_sync_width =
4914 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4915 	timing_out->h_front_porch =
4916 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4917 	timing_out->v_total = mode_in->crtc_vtotal;
4918 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4919 	timing_out->v_front_porch =
4920 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4921 	timing_out->v_sync_width =
4922 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4923 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4924 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4925 
4926 	stream->output_color_space = get_output_color_space(timing_out);
4927 
4928 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4929 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4930 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4931 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4932 		    drm_mode_is_420_also(info, mode_in) &&
4933 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4934 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4935 			adjust_colour_depth_from_display_info(timing_out, info);
4936 		}
4937 	}
4938 }
4939 
4940 static void fill_audio_info(struct audio_info *audio_info,
4941 			    const struct drm_connector *drm_connector,
4942 			    const struct dc_sink *dc_sink)
4943 {
4944 	int i = 0;
4945 	int cea_revision = 0;
4946 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4947 
4948 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4949 	audio_info->product_id = edid_caps->product_id;
4950 
4951 	cea_revision = drm_connector->display_info.cea_rev;
4952 
4953 	strscpy(audio_info->display_name,
4954 		edid_caps->display_name,
4955 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4956 
4957 	if (cea_revision >= 3) {
4958 		audio_info->mode_count = edid_caps->audio_mode_count;
4959 
4960 		for (i = 0; i < audio_info->mode_count; ++i) {
4961 			audio_info->modes[i].format_code =
4962 					(enum audio_format_code)
4963 					(edid_caps->audio_modes[i].format_code);
4964 			audio_info->modes[i].channel_count =
4965 					edid_caps->audio_modes[i].channel_count;
4966 			audio_info->modes[i].sample_rates.all =
4967 					edid_caps->audio_modes[i].sample_rate;
4968 			audio_info->modes[i].sample_size =
4969 					edid_caps->audio_modes[i].sample_size;
4970 		}
4971 	}
4972 
4973 	audio_info->flags.all = edid_caps->speaker_flags;
4974 
4975 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4976 	if (drm_connector->latency_present[0]) {
4977 		audio_info->video_latency = drm_connector->video_latency[0];
4978 		audio_info->audio_latency = drm_connector->audio_latency[0];
4979 	}
4980 
4981 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4982 
4983 }
4984 
4985 static void
4986 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4987 				      struct drm_display_mode *dst_mode)
4988 {
4989 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4990 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4991 	dst_mode->crtc_clock = src_mode->crtc_clock;
4992 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4993 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4994 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4995 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4996 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4997 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4998 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4999 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5000 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5001 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5002 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5003 }
5004 
5005 static void
5006 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5007 					const struct drm_display_mode *native_mode,
5008 					bool scale_enabled)
5009 {
5010 	if (scale_enabled) {
5011 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5012 	} else if (native_mode->clock == drm_mode->clock &&
5013 			native_mode->htotal == drm_mode->htotal &&
5014 			native_mode->vtotal == drm_mode->vtotal) {
5015 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5016 	} else {
5017 		/* no scaling nor amdgpu inserted, no need to patch */
5018 	}
5019 }
5020 
5021 static struct dc_sink *
5022 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5023 {
5024 	struct dc_sink_init_data sink_init_data = { 0 };
5025 	struct dc_sink *sink = NULL;
5026 	sink_init_data.link = aconnector->dc_link;
5027 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5028 
5029 	sink = dc_sink_create(&sink_init_data);
5030 	if (!sink) {
5031 		DRM_ERROR("Failed to create sink!\n");
5032 		return NULL;
5033 	}
5034 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5035 
5036 	return sink;
5037 }
5038 
5039 static void set_multisync_trigger_params(
5040 		struct dc_stream_state *stream)
5041 {
5042 	if (stream->triggered_crtc_reset.enabled) {
5043 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5044 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5045 	}
5046 }
5047 
5048 static void set_master_stream(struct dc_stream_state *stream_set[],
5049 			      int stream_count)
5050 {
5051 	int j, highest_rfr = 0, master_stream = 0;
5052 
5053 	for (j = 0;  j < stream_count; j++) {
5054 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5055 			int refresh_rate = 0;
5056 
5057 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5058 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5059 			if (refresh_rate > highest_rfr) {
5060 				highest_rfr = refresh_rate;
5061 				master_stream = j;
5062 			}
5063 		}
5064 	}
5065 	for (j = 0;  j < stream_count; j++) {
5066 		if (stream_set[j])
5067 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5068 	}
5069 }
5070 
5071 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5072 {
5073 	int i = 0;
5074 
5075 	if (context->stream_count < 2)
5076 		return;
5077 	for (i = 0; i < context->stream_count ; i++) {
5078 		if (!context->streams[i])
5079 			continue;
5080 		/*
5081 		 * TODO: add a function to read AMD VSDB bits and set
5082 		 * crtc_sync_master.multi_sync_enabled flag
5083 		 * For now it's set to false
5084 		 */
5085 		set_multisync_trigger_params(context->streams[i]);
5086 	}
5087 	set_master_stream(context->streams, context->stream_count);
5088 }
5089 
5090 static struct dc_stream_state *
5091 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5092 		       const struct drm_display_mode *drm_mode,
5093 		       const struct dm_connector_state *dm_state,
5094 		       const struct dc_stream_state *old_stream,
5095 		       int requested_bpc)
5096 {
5097 	struct drm_display_mode *preferred_mode = NULL;
5098 	struct drm_connector *drm_connector;
5099 	const struct drm_connector_state *con_state =
5100 		dm_state ? &dm_state->base : NULL;
5101 	struct dc_stream_state *stream = NULL;
5102 	struct drm_display_mode mode = *drm_mode;
5103 	bool native_mode_found = false;
5104 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5105 	int mode_refresh;
5106 	int preferred_refresh = 0;
5107 #if defined(CONFIG_DRM_AMD_DC_DCN)
5108 	struct dsc_dec_dpcd_caps dsc_caps;
5109 #endif
5110 	uint32_t link_bandwidth_kbps;
5111 
5112 	struct dc_sink *sink = NULL;
5113 	if (aconnector == NULL) {
5114 		DRM_ERROR("aconnector is NULL!\n");
5115 		return stream;
5116 	}
5117 
5118 	drm_connector = &aconnector->base;
5119 
5120 	if (!aconnector->dc_sink) {
5121 		sink = create_fake_sink(aconnector);
5122 		if (!sink)
5123 			return stream;
5124 	} else {
5125 		sink = aconnector->dc_sink;
5126 		dc_sink_retain(sink);
5127 	}
5128 
5129 	stream = dc_create_stream_for_sink(sink);
5130 
5131 	if (stream == NULL) {
5132 		DRM_ERROR("Failed to create stream for sink!\n");
5133 		goto finish;
5134 	}
5135 
5136 	stream->dm_stream_context = aconnector;
5137 
5138 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5139 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5140 
5141 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5142 		/* Search for preferred mode */
5143 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5144 			native_mode_found = true;
5145 			break;
5146 		}
5147 	}
5148 	if (!native_mode_found)
5149 		preferred_mode = list_first_entry_or_null(
5150 				&aconnector->base.modes,
5151 				struct drm_display_mode,
5152 				head);
5153 
5154 	mode_refresh = drm_mode_vrefresh(&mode);
5155 
5156 	if (preferred_mode == NULL) {
5157 		/*
5158 		 * This may not be an error, the use case is when we have no
5159 		 * usermode calls to reset and set mode upon hotplug. In this
5160 		 * case, we call set mode ourselves to restore the previous mode
5161 		 * and the modelist may not be filled in in time.
5162 		 */
5163 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5164 	} else {
5165 		decide_crtc_timing_for_drm_display_mode(
5166 				&mode, preferred_mode,
5167 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5168 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5169 	}
5170 
5171 	if (!dm_state)
5172 		drm_mode_set_crtcinfo(&mode, 0);
5173 
5174 	/*
5175 	* If scaling is enabled and refresh rate didn't change
5176 	* we copy the vic and polarities of the old timings
5177 	*/
5178 	if (!scale || mode_refresh != preferred_refresh)
5179 		fill_stream_properties_from_drm_display_mode(stream,
5180 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5181 	else
5182 		fill_stream_properties_from_drm_display_mode(stream,
5183 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5184 
5185 	stream->timing.flags.DSC = 0;
5186 
5187 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5188 #if defined(CONFIG_DRM_AMD_DC_DCN)
5189 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5190 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5191 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5192 				      &dsc_caps);
5193 #endif
5194 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5195 							     dc_link_get_link_cap(aconnector->dc_link));
5196 
5197 #if defined(CONFIG_DRM_AMD_DC_DCN)
5198 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5199 			/* Set DSC policy according to dsc_clock_en */
5200 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5201 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5202 
5203 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5204 						  &dsc_caps,
5205 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5206 						  0,
5207 						  link_bandwidth_kbps,
5208 						  &stream->timing,
5209 						  &stream->timing.dsc_cfg))
5210 				stream->timing.flags.DSC = 1;
5211 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5212 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5213 				stream->timing.flags.DSC = 1;
5214 
5215 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5216 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5217 
5218 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5219 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5220 
5221 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5222 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5223 		}
5224 #endif
5225 	}
5226 
5227 	update_stream_scaling_settings(&mode, dm_state, stream);
5228 
5229 	fill_audio_info(
5230 		&stream->audio_info,
5231 		drm_connector,
5232 		sink);
5233 
5234 	update_stream_signal(stream, sink);
5235 
5236 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5237 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5238 
5239 	if (stream->link->psr_settings.psr_feature_enabled) {
5240 		//
5241 		// should decide stream support vsc sdp colorimetry capability
5242 		// before building vsc info packet
5243 		//
5244 		stream->use_vsc_sdp_for_colorimetry = false;
5245 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5246 			stream->use_vsc_sdp_for_colorimetry =
5247 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5248 		} else {
5249 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5250 				stream->use_vsc_sdp_for_colorimetry = true;
5251 		}
5252 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5253 	}
5254 finish:
5255 	dc_sink_release(sink);
5256 
5257 	return stream;
5258 }
5259 
5260 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5261 {
5262 	drm_crtc_cleanup(crtc);
5263 	kfree(crtc);
5264 }
5265 
5266 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5267 				  struct drm_crtc_state *state)
5268 {
5269 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5270 
5271 	/* TODO Destroy dc_stream objects are stream object is flattened */
5272 	if (cur->stream)
5273 		dc_stream_release(cur->stream);
5274 
5275 
5276 	__drm_atomic_helper_crtc_destroy_state(state);
5277 
5278 
5279 	kfree(state);
5280 }
5281 
5282 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5283 {
5284 	struct dm_crtc_state *state;
5285 
5286 	if (crtc->state)
5287 		dm_crtc_destroy_state(crtc, crtc->state);
5288 
5289 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5290 	if (WARN_ON(!state))
5291 		return;
5292 
5293 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5294 }
5295 
5296 static struct drm_crtc_state *
5297 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5298 {
5299 	struct dm_crtc_state *state, *cur;
5300 
5301 	cur = to_dm_crtc_state(crtc->state);
5302 
5303 	if (WARN_ON(!crtc->state))
5304 		return NULL;
5305 
5306 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5307 	if (!state)
5308 		return NULL;
5309 
5310 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5311 
5312 	if (cur->stream) {
5313 		state->stream = cur->stream;
5314 		dc_stream_retain(state->stream);
5315 	}
5316 
5317 	state->active_planes = cur->active_planes;
5318 	state->vrr_infopacket = cur->vrr_infopacket;
5319 	state->abm_level = cur->abm_level;
5320 	state->vrr_supported = cur->vrr_supported;
5321 	state->freesync_config = cur->freesync_config;
5322 	state->crc_src = cur->crc_src;
5323 	state->cm_has_degamma = cur->cm_has_degamma;
5324 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5325 #ifdef CONFIG_DEBUG_FS
5326 	state->crc_window = cur->crc_window;
5327 #endif
5328 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5329 
5330 	return &state->base;
5331 }
5332 
5333 #ifdef CONFIG_DEBUG_FS
5334 int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5335 					    struct drm_crtc_state *crtc_state,
5336 					    struct drm_property *property,
5337 					    uint64_t val)
5338 {
5339 	struct drm_device *dev = crtc->dev;
5340 	struct amdgpu_device *adev = drm_to_adev(dev);
5341 	struct dm_crtc_state *dm_new_state =
5342 		to_dm_crtc_state(crtc_state);
5343 
5344 	if (property == adev->dm.crc_win_x_start_property)
5345 		dm_new_state->crc_window.x_start = val;
5346 	else if (property == adev->dm.crc_win_y_start_property)
5347 		dm_new_state->crc_window.y_start = val;
5348 	else if (property == adev->dm.crc_win_x_end_property)
5349 		dm_new_state->crc_window.x_end = val;
5350 	else if (property == adev->dm.crc_win_y_end_property)
5351 		dm_new_state->crc_window.y_end = val;
5352 	else
5353 		return -EINVAL;
5354 
5355 	return 0;
5356 }
5357 
5358 int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5359 					    const struct drm_crtc_state *state,
5360 					    struct drm_property *property,
5361 					    uint64_t *val)
5362 {
5363 	struct drm_device *dev = crtc->dev;
5364 	struct amdgpu_device *adev = drm_to_adev(dev);
5365 	struct dm_crtc_state *dm_state =
5366 		to_dm_crtc_state(state);
5367 
5368 	if (property == adev->dm.crc_win_x_start_property)
5369 		*val = dm_state->crc_window.x_start;
5370 	else if (property == adev->dm.crc_win_y_start_property)
5371 		*val = dm_state->crc_window.y_start;
5372 	else if (property == adev->dm.crc_win_x_end_property)
5373 		*val = dm_state->crc_window.x_end;
5374 	else if (property == adev->dm.crc_win_y_end_property)
5375 		*val = dm_state->crc_window.y_end;
5376 	else
5377 		return -EINVAL;
5378 
5379 	return 0;
5380 }
5381 #endif
5382 
5383 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5384 {
5385 	enum dc_irq_source irq_source;
5386 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5387 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5388 	int rc;
5389 
5390 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5391 
5392 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5393 
5394 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5395 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5396 	return rc;
5397 }
5398 
5399 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5400 {
5401 	enum dc_irq_source irq_source;
5402 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5403 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5404 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5405 	int rc = 0;
5406 
5407 	if (enable) {
5408 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5409 		if (amdgpu_dm_vrr_active(acrtc_state))
5410 			rc = dm_set_vupdate_irq(crtc, true);
5411 	} else {
5412 		/* vblank irq off -> vupdate irq off */
5413 		rc = dm_set_vupdate_irq(crtc, false);
5414 	}
5415 
5416 	if (rc)
5417 		return rc;
5418 
5419 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5420 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5421 }
5422 
5423 static int dm_enable_vblank(struct drm_crtc *crtc)
5424 {
5425 	return dm_set_vblank(crtc, true);
5426 }
5427 
5428 static void dm_disable_vblank(struct drm_crtc *crtc)
5429 {
5430 	dm_set_vblank(crtc, false);
5431 }
5432 
5433 /* Implemented only the options currently availible for the driver */
5434 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5435 	.reset = dm_crtc_reset_state,
5436 	.destroy = amdgpu_dm_crtc_destroy,
5437 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
5438 	.set_config = drm_atomic_helper_set_config,
5439 	.page_flip = drm_atomic_helper_page_flip,
5440 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5441 	.atomic_destroy_state = dm_crtc_destroy_state,
5442 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5443 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5444 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5445 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5446 	.enable_vblank = dm_enable_vblank,
5447 	.disable_vblank = dm_disable_vblank,
5448 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5449 #ifdef CONFIG_DEBUG_FS
5450 	.atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5451 	.atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5452 #endif
5453 };
5454 
5455 static enum drm_connector_status
5456 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5457 {
5458 	bool connected;
5459 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5460 
5461 	/*
5462 	 * Notes:
5463 	 * 1. This interface is NOT called in context of HPD irq.
5464 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5465 	 * makes it a bad place for *any* MST-related activity.
5466 	 */
5467 
5468 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5469 	    !aconnector->fake_enable)
5470 		connected = (aconnector->dc_sink != NULL);
5471 	else
5472 		connected = (aconnector->base.force == DRM_FORCE_ON);
5473 
5474 	update_subconnector_property(aconnector);
5475 
5476 	return (connected ? connector_status_connected :
5477 			connector_status_disconnected);
5478 }
5479 
5480 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5481 					    struct drm_connector_state *connector_state,
5482 					    struct drm_property *property,
5483 					    uint64_t val)
5484 {
5485 	struct drm_device *dev = connector->dev;
5486 	struct amdgpu_device *adev = drm_to_adev(dev);
5487 	struct dm_connector_state *dm_old_state =
5488 		to_dm_connector_state(connector->state);
5489 	struct dm_connector_state *dm_new_state =
5490 		to_dm_connector_state(connector_state);
5491 
5492 	int ret = -EINVAL;
5493 
5494 	if (property == dev->mode_config.scaling_mode_property) {
5495 		enum amdgpu_rmx_type rmx_type;
5496 
5497 		switch (val) {
5498 		case DRM_MODE_SCALE_CENTER:
5499 			rmx_type = RMX_CENTER;
5500 			break;
5501 		case DRM_MODE_SCALE_ASPECT:
5502 			rmx_type = RMX_ASPECT;
5503 			break;
5504 		case DRM_MODE_SCALE_FULLSCREEN:
5505 			rmx_type = RMX_FULL;
5506 			break;
5507 		case DRM_MODE_SCALE_NONE:
5508 		default:
5509 			rmx_type = RMX_OFF;
5510 			break;
5511 		}
5512 
5513 		if (dm_old_state->scaling == rmx_type)
5514 			return 0;
5515 
5516 		dm_new_state->scaling = rmx_type;
5517 		ret = 0;
5518 	} else if (property == adev->mode_info.underscan_hborder_property) {
5519 		dm_new_state->underscan_hborder = val;
5520 		ret = 0;
5521 	} else if (property == adev->mode_info.underscan_vborder_property) {
5522 		dm_new_state->underscan_vborder = val;
5523 		ret = 0;
5524 	} else if (property == adev->mode_info.underscan_property) {
5525 		dm_new_state->underscan_enable = val;
5526 		ret = 0;
5527 	} else if (property == adev->mode_info.abm_level_property) {
5528 		dm_new_state->abm_level = val;
5529 		ret = 0;
5530 	}
5531 
5532 	return ret;
5533 }
5534 
5535 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5536 					    const struct drm_connector_state *state,
5537 					    struct drm_property *property,
5538 					    uint64_t *val)
5539 {
5540 	struct drm_device *dev = connector->dev;
5541 	struct amdgpu_device *adev = drm_to_adev(dev);
5542 	struct dm_connector_state *dm_state =
5543 		to_dm_connector_state(state);
5544 	int ret = -EINVAL;
5545 
5546 	if (property == dev->mode_config.scaling_mode_property) {
5547 		switch (dm_state->scaling) {
5548 		case RMX_CENTER:
5549 			*val = DRM_MODE_SCALE_CENTER;
5550 			break;
5551 		case RMX_ASPECT:
5552 			*val = DRM_MODE_SCALE_ASPECT;
5553 			break;
5554 		case RMX_FULL:
5555 			*val = DRM_MODE_SCALE_FULLSCREEN;
5556 			break;
5557 		case RMX_OFF:
5558 		default:
5559 			*val = DRM_MODE_SCALE_NONE;
5560 			break;
5561 		}
5562 		ret = 0;
5563 	} else if (property == adev->mode_info.underscan_hborder_property) {
5564 		*val = dm_state->underscan_hborder;
5565 		ret = 0;
5566 	} else if (property == adev->mode_info.underscan_vborder_property) {
5567 		*val = dm_state->underscan_vborder;
5568 		ret = 0;
5569 	} else if (property == adev->mode_info.underscan_property) {
5570 		*val = dm_state->underscan_enable;
5571 		ret = 0;
5572 	} else if (property == adev->mode_info.abm_level_property) {
5573 		*val = dm_state->abm_level;
5574 		ret = 0;
5575 	}
5576 
5577 	return ret;
5578 }
5579 
5580 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5581 {
5582 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5583 
5584 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5585 }
5586 
5587 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5588 {
5589 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5590 	const struct dc_link *link = aconnector->dc_link;
5591 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5592 	struct amdgpu_display_manager *dm = &adev->dm;
5593 
5594 	/*
5595 	 * Call only if mst_mgr was iniitalized before since it's not done
5596 	 * for all connector types.
5597 	 */
5598 	if (aconnector->mst_mgr.dev)
5599 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5600 
5601 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5602 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5603 
5604 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5605 	    link->type != dc_connection_none &&
5606 	    dm->backlight_dev) {
5607 		backlight_device_unregister(dm->backlight_dev);
5608 		dm->backlight_dev = NULL;
5609 	}
5610 #endif
5611 
5612 	if (aconnector->dc_em_sink)
5613 		dc_sink_release(aconnector->dc_em_sink);
5614 	aconnector->dc_em_sink = NULL;
5615 	if (aconnector->dc_sink)
5616 		dc_sink_release(aconnector->dc_sink);
5617 	aconnector->dc_sink = NULL;
5618 
5619 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5620 	drm_connector_unregister(connector);
5621 	drm_connector_cleanup(connector);
5622 	if (aconnector->i2c) {
5623 		i2c_del_adapter(&aconnector->i2c->base);
5624 		kfree(aconnector->i2c);
5625 	}
5626 	kfree(aconnector->dm_dp_aux.aux.name);
5627 
5628 	kfree(connector);
5629 }
5630 
5631 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5632 {
5633 	struct dm_connector_state *state =
5634 		to_dm_connector_state(connector->state);
5635 
5636 	if (connector->state)
5637 		__drm_atomic_helper_connector_destroy_state(connector->state);
5638 
5639 	kfree(state);
5640 
5641 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5642 
5643 	if (state) {
5644 		state->scaling = RMX_OFF;
5645 		state->underscan_enable = false;
5646 		state->underscan_hborder = 0;
5647 		state->underscan_vborder = 0;
5648 		state->base.max_requested_bpc = 8;
5649 		state->vcpi_slots = 0;
5650 		state->pbn = 0;
5651 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5652 			state->abm_level = amdgpu_dm_abm_level;
5653 
5654 		__drm_atomic_helper_connector_reset(connector, &state->base);
5655 	}
5656 }
5657 
5658 struct drm_connector_state *
5659 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5660 {
5661 	struct dm_connector_state *state =
5662 		to_dm_connector_state(connector->state);
5663 
5664 	struct dm_connector_state *new_state =
5665 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5666 
5667 	if (!new_state)
5668 		return NULL;
5669 
5670 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5671 
5672 	new_state->freesync_capable = state->freesync_capable;
5673 	new_state->abm_level = state->abm_level;
5674 	new_state->scaling = state->scaling;
5675 	new_state->underscan_enable = state->underscan_enable;
5676 	new_state->underscan_hborder = state->underscan_hborder;
5677 	new_state->underscan_vborder = state->underscan_vborder;
5678 	new_state->vcpi_slots = state->vcpi_slots;
5679 	new_state->pbn = state->pbn;
5680 	return &new_state->base;
5681 }
5682 
5683 static int
5684 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5685 {
5686 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5687 		to_amdgpu_dm_connector(connector);
5688 	int r;
5689 
5690 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5691 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5692 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5693 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5694 		if (r)
5695 			return r;
5696 	}
5697 
5698 #if defined(CONFIG_DEBUG_FS)
5699 	connector_debugfs_init(amdgpu_dm_connector);
5700 #endif
5701 
5702 	return 0;
5703 }
5704 
5705 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5706 	.reset = amdgpu_dm_connector_funcs_reset,
5707 	.detect = amdgpu_dm_connector_detect,
5708 	.fill_modes = drm_helper_probe_single_connector_modes,
5709 	.destroy = amdgpu_dm_connector_destroy,
5710 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5711 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5712 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5713 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5714 	.late_register = amdgpu_dm_connector_late_register,
5715 	.early_unregister = amdgpu_dm_connector_unregister
5716 };
5717 
5718 static int get_modes(struct drm_connector *connector)
5719 {
5720 	return amdgpu_dm_connector_get_modes(connector);
5721 }
5722 
5723 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5724 {
5725 	struct dc_sink_init_data init_params = {
5726 			.link = aconnector->dc_link,
5727 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5728 	};
5729 	struct edid *edid;
5730 
5731 	if (!aconnector->base.edid_blob_ptr) {
5732 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5733 				aconnector->base.name);
5734 
5735 		aconnector->base.force = DRM_FORCE_OFF;
5736 		aconnector->base.override_edid = false;
5737 		return;
5738 	}
5739 
5740 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5741 
5742 	aconnector->edid = edid;
5743 
5744 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5745 		aconnector->dc_link,
5746 		(uint8_t *)edid,
5747 		(edid->extensions + 1) * EDID_LENGTH,
5748 		&init_params);
5749 
5750 	if (aconnector->base.force == DRM_FORCE_ON) {
5751 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5752 		aconnector->dc_link->local_sink :
5753 		aconnector->dc_em_sink;
5754 		dc_sink_retain(aconnector->dc_sink);
5755 	}
5756 }
5757 
5758 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5759 {
5760 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5761 
5762 	/*
5763 	 * In case of headless boot with force on for DP managed connector
5764 	 * Those settings have to be != 0 to get initial modeset
5765 	 */
5766 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5767 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5768 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5769 	}
5770 
5771 
5772 	aconnector->base.override_edid = true;
5773 	create_eml_sink(aconnector);
5774 }
5775 
5776 static struct dc_stream_state *
5777 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5778 				const struct drm_display_mode *drm_mode,
5779 				const struct dm_connector_state *dm_state,
5780 				const struct dc_stream_state *old_stream)
5781 {
5782 	struct drm_connector *connector = &aconnector->base;
5783 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5784 	struct dc_stream_state *stream;
5785 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5786 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5787 	enum dc_status dc_result = DC_OK;
5788 
5789 	do {
5790 		stream = create_stream_for_sink(aconnector, drm_mode,
5791 						dm_state, old_stream,
5792 						requested_bpc);
5793 		if (stream == NULL) {
5794 			DRM_ERROR("Failed to create stream for sink!\n");
5795 			break;
5796 		}
5797 
5798 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5799 
5800 		if (dc_result != DC_OK) {
5801 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5802 				      drm_mode->hdisplay,
5803 				      drm_mode->vdisplay,
5804 				      drm_mode->clock,
5805 				      dc_result,
5806 				      dc_status_to_str(dc_result));
5807 
5808 			dc_stream_release(stream);
5809 			stream = NULL;
5810 			requested_bpc -= 2; /* lower bpc to retry validation */
5811 		}
5812 
5813 	} while (stream == NULL && requested_bpc >= 6);
5814 
5815 	return stream;
5816 }
5817 
5818 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5819 				   struct drm_display_mode *mode)
5820 {
5821 	int result = MODE_ERROR;
5822 	struct dc_sink *dc_sink;
5823 	/* TODO: Unhardcode stream count */
5824 	struct dc_stream_state *stream;
5825 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5826 
5827 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5828 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5829 		return result;
5830 
5831 	/*
5832 	 * Only run this the first time mode_valid is called to initilialize
5833 	 * EDID mgmt
5834 	 */
5835 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5836 		!aconnector->dc_em_sink)
5837 		handle_edid_mgmt(aconnector);
5838 
5839 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5840 
5841 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5842 				aconnector->base.force != DRM_FORCE_ON) {
5843 		DRM_ERROR("dc_sink is NULL!\n");
5844 		goto fail;
5845 	}
5846 
5847 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5848 	if (stream) {
5849 		dc_stream_release(stream);
5850 		result = MODE_OK;
5851 	}
5852 
5853 fail:
5854 	/* TODO: error handling*/
5855 	return result;
5856 }
5857 
5858 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5859 				struct dc_info_packet *out)
5860 {
5861 	struct hdmi_drm_infoframe frame;
5862 	unsigned char buf[30]; /* 26 + 4 */
5863 	ssize_t len;
5864 	int ret, i;
5865 
5866 	memset(out, 0, sizeof(*out));
5867 
5868 	if (!state->hdr_output_metadata)
5869 		return 0;
5870 
5871 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5872 	if (ret)
5873 		return ret;
5874 
5875 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5876 	if (len < 0)
5877 		return (int)len;
5878 
5879 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5880 	if (len != 30)
5881 		return -EINVAL;
5882 
5883 	/* Prepare the infopacket for DC. */
5884 	switch (state->connector->connector_type) {
5885 	case DRM_MODE_CONNECTOR_HDMIA:
5886 		out->hb0 = 0x87; /* type */
5887 		out->hb1 = 0x01; /* version */
5888 		out->hb2 = 0x1A; /* length */
5889 		out->sb[0] = buf[3]; /* checksum */
5890 		i = 1;
5891 		break;
5892 
5893 	case DRM_MODE_CONNECTOR_DisplayPort:
5894 	case DRM_MODE_CONNECTOR_eDP:
5895 		out->hb0 = 0x00; /* sdp id, zero */
5896 		out->hb1 = 0x87; /* type */
5897 		out->hb2 = 0x1D; /* payload len - 1 */
5898 		out->hb3 = (0x13 << 2); /* sdp version */
5899 		out->sb[0] = 0x01; /* version */
5900 		out->sb[1] = 0x1A; /* length */
5901 		i = 2;
5902 		break;
5903 
5904 	default:
5905 		return -EINVAL;
5906 	}
5907 
5908 	memcpy(&out->sb[i], &buf[4], 26);
5909 	out->valid = true;
5910 
5911 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5912 		       sizeof(out->sb), false);
5913 
5914 	return 0;
5915 }
5916 
5917 static bool
5918 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5919 			  const struct drm_connector_state *new_state)
5920 {
5921 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5922 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5923 
5924 	if (old_blob != new_blob) {
5925 		if (old_blob && new_blob &&
5926 		    old_blob->length == new_blob->length)
5927 			return memcmp(old_blob->data, new_blob->data,
5928 				      old_blob->length);
5929 
5930 		return true;
5931 	}
5932 
5933 	return false;
5934 }
5935 
5936 static int
5937 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5938 				 struct drm_atomic_state *state)
5939 {
5940 	struct drm_connector_state *new_con_state =
5941 		drm_atomic_get_new_connector_state(state, conn);
5942 	struct drm_connector_state *old_con_state =
5943 		drm_atomic_get_old_connector_state(state, conn);
5944 	struct drm_crtc *crtc = new_con_state->crtc;
5945 	struct drm_crtc_state *new_crtc_state;
5946 	int ret;
5947 
5948 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5949 
5950 	if (!crtc)
5951 		return 0;
5952 
5953 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5954 		struct dc_info_packet hdr_infopacket;
5955 
5956 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5957 		if (ret)
5958 			return ret;
5959 
5960 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5961 		if (IS_ERR(new_crtc_state))
5962 			return PTR_ERR(new_crtc_state);
5963 
5964 		/*
5965 		 * DC considers the stream backends changed if the
5966 		 * static metadata changes. Forcing the modeset also
5967 		 * gives a simple way for userspace to switch from
5968 		 * 8bpc to 10bpc when setting the metadata to enter
5969 		 * or exit HDR.
5970 		 *
5971 		 * Changing the static metadata after it's been
5972 		 * set is permissible, however. So only force a
5973 		 * modeset if we're entering or exiting HDR.
5974 		 */
5975 		new_crtc_state->mode_changed =
5976 			!old_con_state->hdr_output_metadata ||
5977 			!new_con_state->hdr_output_metadata;
5978 	}
5979 
5980 	return 0;
5981 }
5982 
5983 static const struct drm_connector_helper_funcs
5984 amdgpu_dm_connector_helper_funcs = {
5985 	/*
5986 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5987 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5988 	 * are missing after user start lightdm. So we need to renew modes list.
5989 	 * in get_modes call back, not just return the modes count
5990 	 */
5991 	.get_modes = get_modes,
5992 	.mode_valid = amdgpu_dm_connector_mode_valid,
5993 	.atomic_check = amdgpu_dm_connector_atomic_check,
5994 };
5995 
5996 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5997 {
5998 }
5999 
6000 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6001 {
6002 	struct drm_atomic_state *state = new_crtc_state->state;
6003 	struct drm_plane *plane;
6004 	int num_active = 0;
6005 
6006 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6007 		struct drm_plane_state *new_plane_state;
6008 
6009 		/* Cursor planes are "fake". */
6010 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6011 			continue;
6012 
6013 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6014 
6015 		if (!new_plane_state) {
6016 			/*
6017 			 * The plane is enable on the CRTC and hasn't changed
6018 			 * state. This means that it previously passed
6019 			 * validation and is therefore enabled.
6020 			 */
6021 			num_active += 1;
6022 			continue;
6023 		}
6024 
6025 		/* We need a framebuffer to be considered enabled. */
6026 		num_active += (new_plane_state->fb != NULL);
6027 	}
6028 
6029 	return num_active;
6030 }
6031 
6032 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6033 					 struct drm_crtc_state *new_crtc_state)
6034 {
6035 	struct dm_crtc_state *dm_new_crtc_state =
6036 		to_dm_crtc_state(new_crtc_state);
6037 
6038 	dm_new_crtc_state->active_planes = 0;
6039 
6040 	if (!dm_new_crtc_state->stream)
6041 		return;
6042 
6043 	dm_new_crtc_state->active_planes =
6044 		count_crtc_active_planes(new_crtc_state);
6045 }
6046 
6047 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6048 				       struct drm_atomic_state *state)
6049 {
6050 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6051 									  crtc);
6052 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6053 	struct dc *dc = adev->dm.dc;
6054 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6055 	int ret = -EINVAL;
6056 
6057 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6058 
6059 	dm_update_crtc_active_planes(crtc, crtc_state);
6060 
6061 	if (unlikely(!dm_crtc_state->stream &&
6062 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6063 		WARN_ON(1);
6064 		return ret;
6065 	}
6066 
6067 	/*
6068 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6069 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6070 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6071 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6072 	 */
6073 	if (crtc_state->enable &&
6074 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary)))
6075 		return -EINVAL;
6076 
6077 	/* In some use cases, like reset, no stream is attached */
6078 	if (!dm_crtc_state->stream)
6079 		return 0;
6080 
6081 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6082 		return 0;
6083 
6084 	return ret;
6085 }
6086 
6087 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6088 				      const struct drm_display_mode *mode,
6089 				      struct drm_display_mode *adjusted_mode)
6090 {
6091 	return true;
6092 }
6093 
6094 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6095 	.disable = dm_crtc_helper_disable,
6096 	.atomic_check = dm_crtc_helper_atomic_check,
6097 	.mode_fixup = dm_crtc_helper_mode_fixup,
6098 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6099 };
6100 
6101 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6102 {
6103 
6104 }
6105 
6106 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6107 {
6108 	switch (display_color_depth) {
6109 		case COLOR_DEPTH_666:
6110 			return 6;
6111 		case COLOR_DEPTH_888:
6112 			return 8;
6113 		case COLOR_DEPTH_101010:
6114 			return 10;
6115 		case COLOR_DEPTH_121212:
6116 			return 12;
6117 		case COLOR_DEPTH_141414:
6118 			return 14;
6119 		case COLOR_DEPTH_161616:
6120 			return 16;
6121 		default:
6122 			break;
6123 		}
6124 	return 0;
6125 }
6126 
6127 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6128 					  struct drm_crtc_state *crtc_state,
6129 					  struct drm_connector_state *conn_state)
6130 {
6131 	struct drm_atomic_state *state = crtc_state->state;
6132 	struct drm_connector *connector = conn_state->connector;
6133 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6134 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6135 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6136 	struct drm_dp_mst_topology_mgr *mst_mgr;
6137 	struct drm_dp_mst_port *mst_port;
6138 	enum dc_color_depth color_depth;
6139 	int clock, bpp = 0;
6140 	bool is_y420 = false;
6141 
6142 	if (!aconnector->port || !aconnector->dc_sink)
6143 		return 0;
6144 
6145 	mst_port = aconnector->port;
6146 	mst_mgr = &aconnector->mst_port->mst_mgr;
6147 
6148 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6149 		return 0;
6150 
6151 	if (!state->duplicated) {
6152 		int max_bpc = conn_state->max_requested_bpc;
6153 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6154 				aconnector->force_yuv420_output;
6155 		color_depth = convert_color_depth_from_display_info(connector,
6156 								    is_y420,
6157 								    max_bpc);
6158 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6159 		clock = adjusted_mode->clock;
6160 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6161 	}
6162 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6163 									   mst_mgr,
6164 									   mst_port,
6165 									   dm_new_connector_state->pbn,
6166 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6167 	if (dm_new_connector_state->vcpi_slots < 0) {
6168 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6169 		return dm_new_connector_state->vcpi_slots;
6170 	}
6171 	return 0;
6172 }
6173 
6174 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6175 	.disable = dm_encoder_helper_disable,
6176 	.atomic_check = dm_encoder_helper_atomic_check
6177 };
6178 
6179 #if defined(CONFIG_DRM_AMD_DC_DCN)
6180 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6181 					    struct dc_state *dc_state)
6182 {
6183 	struct dc_stream_state *stream = NULL;
6184 	struct drm_connector *connector;
6185 	struct drm_connector_state *new_con_state, *old_con_state;
6186 	struct amdgpu_dm_connector *aconnector;
6187 	struct dm_connector_state *dm_conn_state;
6188 	int i, j, clock, bpp;
6189 	int vcpi, pbn_div, pbn = 0;
6190 
6191 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6192 
6193 		aconnector = to_amdgpu_dm_connector(connector);
6194 
6195 		if (!aconnector->port)
6196 			continue;
6197 
6198 		if (!new_con_state || !new_con_state->crtc)
6199 			continue;
6200 
6201 		dm_conn_state = to_dm_connector_state(new_con_state);
6202 
6203 		for (j = 0; j < dc_state->stream_count; j++) {
6204 			stream = dc_state->streams[j];
6205 			if (!stream)
6206 				continue;
6207 
6208 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6209 				break;
6210 
6211 			stream = NULL;
6212 		}
6213 
6214 		if (!stream)
6215 			continue;
6216 
6217 		if (stream->timing.flags.DSC != 1) {
6218 			drm_dp_mst_atomic_enable_dsc(state,
6219 						     aconnector->port,
6220 						     dm_conn_state->pbn,
6221 						     0,
6222 						     false);
6223 			continue;
6224 		}
6225 
6226 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6227 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6228 		clock = stream->timing.pix_clk_100hz / 10;
6229 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6230 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6231 						    aconnector->port,
6232 						    pbn, pbn_div,
6233 						    true);
6234 		if (vcpi < 0)
6235 			return vcpi;
6236 
6237 		dm_conn_state->pbn = pbn;
6238 		dm_conn_state->vcpi_slots = vcpi;
6239 	}
6240 	return 0;
6241 }
6242 #endif
6243 
6244 static void dm_drm_plane_reset(struct drm_plane *plane)
6245 {
6246 	struct dm_plane_state *amdgpu_state = NULL;
6247 
6248 	if (plane->state)
6249 		plane->funcs->atomic_destroy_state(plane, plane->state);
6250 
6251 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6252 	WARN_ON(amdgpu_state == NULL);
6253 
6254 	if (amdgpu_state)
6255 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6256 }
6257 
6258 static struct drm_plane_state *
6259 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6260 {
6261 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6262 
6263 	old_dm_plane_state = to_dm_plane_state(plane->state);
6264 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6265 	if (!dm_plane_state)
6266 		return NULL;
6267 
6268 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6269 
6270 	if (old_dm_plane_state->dc_state) {
6271 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6272 		dc_plane_state_retain(dm_plane_state->dc_state);
6273 	}
6274 
6275 	return &dm_plane_state->base;
6276 }
6277 
6278 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6279 				struct drm_plane_state *state)
6280 {
6281 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6282 
6283 	if (dm_plane_state->dc_state)
6284 		dc_plane_state_release(dm_plane_state->dc_state);
6285 
6286 	drm_atomic_helper_plane_destroy_state(plane, state);
6287 }
6288 
6289 static const struct drm_plane_funcs dm_plane_funcs = {
6290 	.update_plane	= drm_atomic_helper_update_plane,
6291 	.disable_plane	= drm_atomic_helper_disable_plane,
6292 	.destroy	= drm_primary_helper_destroy,
6293 	.reset = dm_drm_plane_reset,
6294 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6295 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6296 	.format_mod_supported = dm_plane_format_mod_supported,
6297 };
6298 
6299 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6300 				      struct drm_plane_state *new_state)
6301 {
6302 	struct amdgpu_framebuffer *afb;
6303 	struct drm_gem_object *obj;
6304 	struct amdgpu_device *adev;
6305 	struct amdgpu_bo *rbo;
6306 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6307 	struct list_head list;
6308 	struct ttm_validate_buffer tv;
6309 	struct ww_acquire_ctx ticket;
6310 	uint32_t domain;
6311 	int r;
6312 
6313 	if (!new_state->fb) {
6314 		DRM_DEBUG_DRIVER("No FB bound\n");
6315 		return 0;
6316 	}
6317 
6318 	afb = to_amdgpu_framebuffer(new_state->fb);
6319 	obj = new_state->fb->obj[0];
6320 	rbo = gem_to_amdgpu_bo(obj);
6321 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6322 	INIT_LIST_HEAD(&list);
6323 
6324 	tv.bo = &rbo->tbo;
6325 	tv.num_shared = 1;
6326 	list_add(&tv.head, &list);
6327 
6328 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6329 	if (r) {
6330 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6331 		return r;
6332 	}
6333 
6334 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6335 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6336 	else
6337 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6338 
6339 	r = amdgpu_bo_pin(rbo, domain);
6340 	if (unlikely(r != 0)) {
6341 		if (r != -ERESTARTSYS)
6342 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6343 		ttm_eu_backoff_reservation(&ticket, &list);
6344 		return r;
6345 	}
6346 
6347 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6348 	if (unlikely(r != 0)) {
6349 		amdgpu_bo_unpin(rbo);
6350 		ttm_eu_backoff_reservation(&ticket, &list);
6351 		DRM_ERROR("%p bind failed\n", rbo);
6352 		return r;
6353 	}
6354 
6355 	ttm_eu_backoff_reservation(&ticket, &list);
6356 
6357 	afb->address = amdgpu_bo_gpu_offset(rbo);
6358 
6359 	amdgpu_bo_ref(rbo);
6360 
6361 	/**
6362 	 * We don't do surface updates on planes that have been newly created,
6363 	 * but we also don't have the afb->address during atomic check.
6364 	 *
6365 	 * Fill in buffer attributes depending on the address here, but only on
6366 	 * newly created planes since they're not being used by DC yet and this
6367 	 * won't modify global state.
6368 	 */
6369 	dm_plane_state_old = to_dm_plane_state(plane->state);
6370 	dm_plane_state_new = to_dm_plane_state(new_state);
6371 
6372 	if (dm_plane_state_new->dc_state &&
6373 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6374 		struct dc_plane_state *plane_state =
6375 			dm_plane_state_new->dc_state;
6376 		bool force_disable_dcc = !plane_state->dcc.enable;
6377 
6378 		fill_plane_buffer_attributes(
6379 			adev, afb, plane_state->format, plane_state->rotation,
6380 			afb->tiling_flags,
6381 			&plane_state->tiling_info, &plane_state->plane_size,
6382 			&plane_state->dcc, &plane_state->address,
6383 			afb->tmz_surface, force_disable_dcc);
6384 	}
6385 
6386 	return 0;
6387 }
6388 
6389 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6390 				       struct drm_plane_state *old_state)
6391 {
6392 	struct amdgpu_bo *rbo;
6393 	int r;
6394 
6395 	if (!old_state->fb)
6396 		return;
6397 
6398 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6399 	r = amdgpu_bo_reserve(rbo, false);
6400 	if (unlikely(r)) {
6401 		DRM_ERROR("failed to reserve rbo before unpin\n");
6402 		return;
6403 	}
6404 
6405 	amdgpu_bo_unpin(rbo);
6406 	amdgpu_bo_unreserve(rbo);
6407 	amdgpu_bo_unref(&rbo);
6408 }
6409 
6410 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6411 				       struct drm_crtc_state *new_crtc_state)
6412 {
6413 	int max_downscale = 0;
6414 	int max_upscale = INT_MAX;
6415 
6416 	/* TODO: These should be checked against DC plane caps */
6417 	return drm_atomic_helper_check_plane_state(
6418 		state, new_crtc_state, max_downscale, max_upscale, true, true);
6419 }
6420 
6421 static int dm_plane_atomic_check(struct drm_plane *plane,
6422 				 struct drm_plane_state *state)
6423 {
6424 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6425 	struct dc *dc = adev->dm.dc;
6426 	struct dm_plane_state *dm_plane_state;
6427 	struct dc_scaling_info scaling_info;
6428 	struct drm_crtc_state *new_crtc_state;
6429 	int ret;
6430 
6431 	trace_amdgpu_dm_plane_atomic_check(state);
6432 
6433 	dm_plane_state = to_dm_plane_state(state);
6434 
6435 	if (!dm_plane_state->dc_state)
6436 		return 0;
6437 
6438 	new_crtc_state =
6439 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6440 	if (!new_crtc_state)
6441 		return -EINVAL;
6442 
6443 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6444 	if (ret)
6445 		return ret;
6446 
6447 	ret = fill_dc_scaling_info(state, &scaling_info);
6448 	if (ret)
6449 		return ret;
6450 
6451 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6452 		return 0;
6453 
6454 	return -EINVAL;
6455 }
6456 
6457 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6458 				       struct drm_plane_state *new_plane_state)
6459 {
6460 	/* Only support async updates on cursor planes. */
6461 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6462 		return -EINVAL;
6463 
6464 	return 0;
6465 }
6466 
6467 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6468 					 struct drm_plane_state *new_state)
6469 {
6470 	struct drm_plane_state *old_state =
6471 		drm_atomic_get_old_plane_state(new_state->state, plane);
6472 
6473 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6474 
6475 	swap(plane->state->fb, new_state->fb);
6476 
6477 	plane->state->src_x = new_state->src_x;
6478 	plane->state->src_y = new_state->src_y;
6479 	plane->state->src_w = new_state->src_w;
6480 	plane->state->src_h = new_state->src_h;
6481 	plane->state->crtc_x = new_state->crtc_x;
6482 	plane->state->crtc_y = new_state->crtc_y;
6483 	plane->state->crtc_w = new_state->crtc_w;
6484 	plane->state->crtc_h = new_state->crtc_h;
6485 
6486 	handle_cursor_update(plane, old_state);
6487 }
6488 
6489 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6490 	.prepare_fb = dm_plane_helper_prepare_fb,
6491 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6492 	.atomic_check = dm_plane_atomic_check,
6493 	.atomic_async_check = dm_plane_atomic_async_check,
6494 	.atomic_async_update = dm_plane_atomic_async_update
6495 };
6496 
6497 /*
6498  * TODO: these are currently initialized to rgb formats only.
6499  * For future use cases we should either initialize them dynamically based on
6500  * plane capabilities, or initialize this array to all formats, so internal drm
6501  * check will succeed, and let DC implement proper check
6502  */
6503 static const uint32_t rgb_formats[] = {
6504 	DRM_FORMAT_XRGB8888,
6505 	DRM_FORMAT_ARGB8888,
6506 	DRM_FORMAT_RGBA8888,
6507 	DRM_FORMAT_XRGB2101010,
6508 	DRM_FORMAT_XBGR2101010,
6509 	DRM_FORMAT_ARGB2101010,
6510 	DRM_FORMAT_ABGR2101010,
6511 	DRM_FORMAT_XBGR8888,
6512 	DRM_FORMAT_ABGR8888,
6513 	DRM_FORMAT_RGB565,
6514 };
6515 
6516 static const uint32_t overlay_formats[] = {
6517 	DRM_FORMAT_XRGB8888,
6518 	DRM_FORMAT_ARGB8888,
6519 	DRM_FORMAT_RGBA8888,
6520 	DRM_FORMAT_XBGR8888,
6521 	DRM_FORMAT_ABGR8888,
6522 	DRM_FORMAT_RGB565
6523 };
6524 
6525 static const u32 cursor_formats[] = {
6526 	DRM_FORMAT_ARGB8888
6527 };
6528 
6529 static int get_plane_formats(const struct drm_plane *plane,
6530 			     const struct dc_plane_cap *plane_cap,
6531 			     uint32_t *formats, int max_formats)
6532 {
6533 	int i, num_formats = 0;
6534 
6535 	/*
6536 	 * TODO: Query support for each group of formats directly from
6537 	 * DC plane caps. This will require adding more formats to the
6538 	 * caps list.
6539 	 */
6540 
6541 	switch (plane->type) {
6542 	case DRM_PLANE_TYPE_PRIMARY:
6543 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6544 			if (num_formats >= max_formats)
6545 				break;
6546 
6547 			formats[num_formats++] = rgb_formats[i];
6548 		}
6549 
6550 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6551 			formats[num_formats++] = DRM_FORMAT_NV12;
6552 		if (plane_cap && plane_cap->pixel_format_support.p010)
6553 			formats[num_formats++] = DRM_FORMAT_P010;
6554 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6555 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6556 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6557 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6558 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6559 		}
6560 		break;
6561 
6562 	case DRM_PLANE_TYPE_OVERLAY:
6563 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6564 			if (num_formats >= max_formats)
6565 				break;
6566 
6567 			formats[num_formats++] = overlay_formats[i];
6568 		}
6569 		break;
6570 
6571 	case DRM_PLANE_TYPE_CURSOR:
6572 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6573 			if (num_formats >= max_formats)
6574 				break;
6575 
6576 			formats[num_formats++] = cursor_formats[i];
6577 		}
6578 		break;
6579 	}
6580 
6581 	return num_formats;
6582 }
6583 
6584 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6585 				struct drm_plane *plane,
6586 				unsigned long possible_crtcs,
6587 				const struct dc_plane_cap *plane_cap)
6588 {
6589 	uint32_t formats[32];
6590 	int num_formats;
6591 	int res = -EPERM;
6592 	unsigned int supported_rotations;
6593 	uint64_t *modifiers = NULL;
6594 
6595 	num_formats = get_plane_formats(plane, plane_cap, formats,
6596 					ARRAY_SIZE(formats));
6597 
6598 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6599 	if (res)
6600 		return res;
6601 
6602 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6603 				       &dm_plane_funcs, formats, num_formats,
6604 				       modifiers, plane->type, NULL);
6605 	kfree(modifiers);
6606 	if (res)
6607 		return res;
6608 
6609 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6610 	    plane_cap && plane_cap->per_pixel_alpha) {
6611 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6612 					  BIT(DRM_MODE_BLEND_PREMULTI);
6613 
6614 		drm_plane_create_alpha_property(plane);
6615 		drm_plane_create_blend_mode_property(plane, blend_caps);
6616 	}
6617 
6618 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6619 	    plane_cap &&
6620 	    (plane_cap->pixel_format_support.nv12 ||
6621 	     plane_cap->pixel_format_support.p010)) {
6622 		/* This only affects YUV formats. */
6623 		drm_plane_create_color_properties(
6624 			plane,
6625 			BIT(DRM_COLOR_YCBCR_BT601) |
6626 			BIT(DRM_COLOR_YCBCR_BT709) |
6627 			BIT(DRM_COLOR_YCBCR_BT2020),
6628 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6629 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6630 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6631 	}
6632 
6633 	supported_rotations =
6634 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6635 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6636 
6637 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6638 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6639 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6640 						   supported_rotations);
6641 
6642 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6643 
6644 	/* Create (reset) the plane state */
6645 	if (plane->funcs->reset)
6646 		plane->funcs->reset(plane);
6647 
6648 	return 0;
6649 }
6650 
6651 #ifdef CONFIG_DEBUG_FS
6652 static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
6653 				struct amdgpu_crtc *acrtc)
6654 {
6655 	drm_object_attach_property(&acrtc->base.base,
6656 				   dm->crc_win_x_start_property,
6657 				   0);
6658 	drm_object_attach_property(&acrtc->base.base,
6659 				   dm->crc_win_y_start_property,
6660 				   0);
6661 	drm_object_attach_property(&acrtc->base.base,
6662 				   dm->crc_win_x_end_property,
6663 				   0);
6664 	drm_object_attach_property(&acrtc->base.base,
6665 				   dm->crc_win_y_end_property,
6666 				   0);
6667 }
6668 #endif
6669 
6670 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6671 			       struct drm_plane *plane,
6672 			       uint32_t crtc_index)
6673 {
6674 	struct amdgpu_crtc *acrtc = NULL;
6675 	struct drm_plane *cursor_plane;
6676 
6677 	int res = -ENOMEM;
6678 
6679 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6680 	if (!cursor_plane)
6681 		goto fail;
6682 
6683 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6684 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6685 
6686 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6687 	if (!acrtc)
6688 		goto fail;
6689 
6690 	res = drm_crtc_init_with_planes(
6691 			dm->ddev,
6692 			&acrtc->base,
6693 			plane,
6694 			cursor_plane,
6695 			&amdgpu_dm_crtc_funcs, NULL);
6696 
6697 	if (res)
6698 		goto fail;
6699 
6700 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6701 
6702 	/* Create (reset) the plane state */
6703 	if (acrtc->base.funcs->reset)
6704 		acrtc->base.funcs->reset(&acrtc->base);
6705 
6706 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6707 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6708 
6709 	acrtc->crtc_id = crtc_index;
6710 	acrtc->base.enabled = false;
6711 	acrtc->otg_inst = -1;
6712 
6713 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6714 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6715 				   true, MAX_COLOR_LUT_ENTRIES);
6716 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6717 #ifdef CONFIG_DEBUG_FS
6718 	attach_crtc_crc_properties(dm, acrtc);
6719 #endif
6720 	return 0;
6721 
6722 fail:
6723 	kfree(acrtc);
6724 	kfree(cursor_plane);
6725 	return res;
6726 }
6727 
6728 
6729 static int to_drm_connector_type(enum signal_type st)
6730 {
6731 	switch (st) {
6732 	case SIGNAL_TYPE_HDMI_TYPE_A:
6733 		return DRM_MODE_CONNECTOR_HDMIA;
6734 	case SIGNAL_TYPE_EDP:
6735 		return DRM_MODE_CONNECTOR_eDP;
6736 	case SIGNAL_TYPE_LVDS:
6737 		return DRM_MODE_CONNECTOR_LVDS;
6738 	case SIGNAL_TYPE_RGB:
6739 		return DRM_MODE_CONNECTOR_VGA;
6740 	case SIGNAL_TYPE_DISPLAY_PORT:
6741 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6742 		return DRM_MODE_CONNECTOR_DisplayPort;
6743 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6744 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6745 		return DRM_MODE_CONNECTOR_DVID;
6746 	case SIGNAL_TYPE_VIRTUAL:
6747 		return DRM_MODE_CONNECTOR_VIRTUAL;
6748 
6749 	default:
6750 		return DRM_MODE_CONNECTOR_Unknown;
6751 	}
6752 }
6753 
6754 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6755 {
6756 	struct drm_encoder *encoder;
6757 
6758 	/* There is only one encoder per connector */
6759 	drm_connector_for_each_possible_encoder(connector, encoder)
6760 		return encoder;
6761 
6762 	return NULL;
6763 }
6764 
6765 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6766 {
6767 	struct drm_encoder *encoder;
6768 	struct amdgpu_encoder *amdgpu_encoder;
6769 
6770 	encoder = amdgpu_dm_connector_to_encoder(connector);
6771 
6772 	if (encoder == NULL)
6773 		return;
6774 
6775 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6776 
6777 	amdgpu_encoder->native_mode.clock = 0;
6778 
6779 	if (!list_empty(&connector->probed_modes)) {
6780 		struct drm_display_mode *preferred_mode = NULL;
6781 
6782 		list_for_each_entry(preferred_mode,
6783 				    &connector->probed_modes,
6784 				    head) {
6785 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6786 				amdgpu_encoder->native_mode = *preferred_mode;
6787 
6788 			break;
6789 		}
6790 
6791 	}
6792 }
6793 
6794 static struct drm_display_mode *
6795 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6796 			     char *name,
6797 			     int hdisplay, int vdisplay)
6798 {
6799 	struct drm_device *dev = encoder->dev;
6800 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6801 	struct drm_display_mode *mode = NULL;
6802 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6803 
6804 	mode = drm_mode_duplicate(dev, native_mode);
6805 
6806 	if (mode == NULL)
6807 		return NULL;
6808 
6809 	mode->hdisplay = hdisplay;
6810 	mode->vdisplay = vdisplay;
6811 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6812 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6813 
6814 	return mode;
6815 
6816 }
6817 
6818 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6819 						 struct drm_connector *connector)
6820 {
6821 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6822 	struct drm_display_mode *mode = NULL;
6823 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6824 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6825 				to_amdgpu_dm_connector(connector);
6826 	int i;
6827 	int n;
6828 	struct mode_size {
6829 		char name[DRM_DISPLAY_MODE_LEN];
6830 		int w;
6831 		int h;
6832 	} common_modes[] = {
6833 		{  "640x480",  640,  480},
6834 		{  "800x600",  800,  600},
6835 		{ "1024x768", 1024,  768},
6836 		{ "1280x720", 1280,  720},
6837 		{ "1280x800", 1280,  800},
6838 		{"1280x1024", 1280, 1024},
6839 		{ "1440x900", 1440,  900},
6840 		{"1680x1050", 1680, 1050},
6841 		{"1600x1200", 1600, 1200},
6842 		{"1920x1080", 1920, 1080},
6843 		{"1920x1200", 1920, 1200}
6844 	};
6845 
6846 	n = ARRAY_SIZE(common_modes);
6847 
6848 	for (i = 0; i < n; i++) {
6849 		struct drm_display_mode *curmode = NULL;
6850 		bool mode_existed = false;
6851 
6852 		if (common_modes[i].w > native_mode->hdisplay ||
6853 		    common_modes[i].h > native_mode->vdisplay ||
6854 		   (common_modes[i].w == native_mode->hdisplay &&
6855 		    common_modes[i].h == native_mode->vdisplay))
6856 			continue;
6857 
6858 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6859 			if (common_modes[i].w == curmode->hdisplay &&
6860 			    common_modes[i].h == curmode->vdisplay) {
6861 				mode_existed = true;
6862 				break;
6863 			}
6864 		}
6865 
6866 		if (mode_existed)
6867 			continue;
6868 
6869 		mode = amdgpu_dm_create_common_mode(encoder,
6870 				common_modes[i].name, common_modes[i].w,
6871 				common_modes[i].h);
6872 		drm_mode_probed_add(connector, mode);
6873 		amdgpu_dm_connector->num_modes++;
6874 	}
6875 }
6876 
6877 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6878 					      struct edid *edid)
6879 {
6880 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6881 			to_amdgpu_dm_connector(connector);
6882 
6883 	if (edid) {
6884 		/* empty probed_modes */
6885 		INIT_LIST_HEAD(&connector->probed_modes);
6886 		amdgpu_dm_connector->num_modes =
6887 				drm_add_edid_modes(connector, edid);
6888 
6889 		/* sorting the probed modes before calling function
6890 		 * amdgpu_dm_get_native_mode() since EDID can have
6891 		 * more than one preferred mode. The modes that are
6892 		 * later in the probed mode list could be of higher
6893 		 * and preferred resolution. For example, 3840x2160
6894 		 * resolution in base EDID preferred timing and 4096x2160
6895 		 * preferred resolution in DID extension block later.
6896 		 */
6897 		drm_mode_sort(&connector->probed_modes);
6898 		amdgpu_dm_get_native_mode(connector);
6899 	} else {
6900 		amdgpu_dm_connector->num_modes = 0;
6901 	}
6902 }
6903 
6904 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6905 {
6906 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6907 			to_amdgpu_dm_connector(connector);
6908 	struct drm_encoder *encoder;
6909 	struct edid *edid = amdgpu_dm_connector->edid;
6910 
6911 	encoder = amdgpu_dm_connector_to_encoder(connector);
6912 
6913 	if (!drm_edid_is_valid(edid)) {
6914 		amdgpu_dm_connector->num_modes =
6915 				drm_add_modes_noedid(connector, 640, 480);
6916 	} else {
6917 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6918 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6919 	}
6920 	amdgpu_dm_fbc_init(connector);
6921 
6922 	return amdgpu_dm_connector->num_modes;
6923 }
6924 
6925 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6926 				     struct amdgpu_dm_connector *aconnector,
6927 				     int connector_type,
6928 				     struct dc_link *link,
6929 				     int link_index)
6930 {
6931 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6932 
6933 	/*
6934 	 * Some of the properties below require access to state, like bpc.
6935 	 * Allocate some default initial connector state with our reset helper.
6936 	 */
6937 	if (aconnector->base.funcs->reset)
6938 		aconnector->base.funcs->reset(&aconnector->base);
6939 
6940 	aconnector->connector_id = link_index;
6941 	aconnector->dc_link = link;
6942 	aconnector->base.interlace_allowed = false;
6943 	aconnector->base.doublescan_allowed = false;
6944 	aconnector->base.stereo_allowed = false;
6945 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6946 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6947 	aconnector->audio_inst = -1;
6948 	mutex_init(&aconnector->hpd_lock);
6949 
6950 	/*
6951 	 * configure support HPD hot plug connector_>polled default value is 0
6952 	 * which means HPD hot plug not supported
6953 	 */
6954 	switch (connector_type) {
6955 	case DRM_MODE_CONNECTOR_HDMIA:
6956 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6957 		aconnector->base.ycbcr_420_allowed =
6958 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6959 		break;
6960 	case DRM_MODE_CONNECTOR_DisplayPort:
6961 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6962 		aconnector->base.ycbcr_420_allowed =
6963 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6964 		break;
6965 	case DRM_MODE_CONNECTOR_DVID:
6966 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6967 		break;
6968 	default:
6969 		break;
6970 	}
6971 
6972 	drm_object_attach_property(&aconnector->base.base,
6973 				dm->ddev->mode_config.scaling_mode_property,
6974 				DRM_MODE_SCALE_NONE);
6975 
6976 	drm_object_attach_property(&aconnector->base.base,
6977 				adev->mode_info.underscan_property,
6978 				UNDERSCAN_OFF);
6979 	drm_object_attach_property(&aconnector->base.base,
6980 				adev->mode_info.underscan_hborder_property,
6981 				0);
6982 	drm_object_attach_property(&aconnector->base.base,
6983 				adev->mode_info.underscan_vborder_property,
6984 				0);
6985 
6986 	if (!aconnector->mst_port)
6987 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6988 
6989 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6990 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6991 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6992 
6993 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6994 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6995 		drm_object_attach_property(&aconnector->base.base,
6996 				adev->mode_info.abm_level_property, 0);
6997 	}
6998 
6999 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7000 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7001 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7002 		drm_object_attach_property(
7003 			&aconnector->base.base,
7004 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7005 
7006 		if (!aconnector->mst_port)
7007 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7008 
7009 #ifdef CONFIG_DRM_AMD_DC_HDCP
7010 		if (adev->dm.hdcp_workqueue)
7011 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7012 #endif
7013 	}
7014 }
7015 
7016 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7017 			      struct i2c_msg *msgs, int num)
7018 {
7019 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7020 	struct ddc_service *ddc_service = i2c->ddc_service;
7021 	struct i2c_command cmd;
7022 	int i;
7023 	int result = -EIO;
7024 
7025 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7026 
7027 	if (!cmd.payloads)
7028 		return result;
7029 
7030 	cmd.number_of_payloads = num;
7031 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7032 	cmd.speed = 100;
7033 
7034 	for (i = 0; i < num; i++) {
7035 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7036 		cmd.payloads[i].address = msgs[i].addr;
7037 		cmd.payloads[i].length = msgs[i].len;
7038 		cmd.payloads[i].data = msgs[i].buf;
7039 	}
7040 
7041 	if (dc_submit_i2c(
7042 			ddc_service->ctx->dc,
7043 			ddc_service->ddc_pin->hw_info.ddc_channel,
7044 			&cmd))
7045 		result = num;
7046 
7047 	kfree(cmd.payloads);
7048 	return result;
7049 }
7050 
7051 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7052 {
7053 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7054 }
7055 
7056 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7057 	.master_xfer = amdgpu_dm_i2c_xfer,
7058 	.functionality = amdgpu_dm_i2c_func,
7059 };
7060 
7061 static struct amdgpu_i2c_adapter *
7062 create_i2c(struct ddc_service *ddc_service,
7063 	   int link_index,
7064 	   int *res)
7065 {
7066 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7067 	struct amdgpu_i2c_adapter *i2c;
7068 
7069 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7070 	if (!i2c)
7071 		return NULL;
7072 	i2c->base.owner = THIS_MODULE;
7073 	i2c->base.class = I2C_CLASS_DDC;
7074 	i2c->base.dev.parent = &adev->pdev->dev;
7075 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7076 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7077 	i2c_set_adapdata(&i2c->base, i2c);
7078 	i2c->ddc_service = ddc_service;
7079 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7080 
7081 	return i2c;
7082 }
7083 
7084 
7085 /*
7086  * Note: this function assumes that dc_link_detect() was called for the
7087  * dc_link which will be represented by this aconnector.
7088  */
7089 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7090 				    struct amdgpu_dm_connector *aconnector,
7091 				    uint32_t link_index,
7092 				    struct amdgpu_encoder *aencoder)
7093 {
7094 	int res = 0;
7095 	int connector_type;
7096 	struct dc *dc = dm->dc;
7097 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7098 	struct amdgpu_i2c_adapter *i2c;
7099 
7100 	link->priv = aconnector;
7101 
7102 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7103 
7104 	i2c = create_i2c(link->ddc, link->link_index, &res);
7105 	if (!i2c) {
7106 		DRM_ERROR("Failed to create i2c adapter data\n");
7107 		return -ENOMEM;
7108 	}
7109 
7110 	aconnector->i2c = i2c;
7111 	res = i2c_add_adapter(&i2c->base);
7112 
7113 	if (res) {
7114 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7115 		goto out_free;
7116 	}
7117 
7118 	connector_type = to_drm_connector_type(link->connector_signal);
7119 
7120 	res = drm_connector_init_with_ddc(
7121 			dm->ddev,
7122 			&aconnector->base,
7123 			&amdgpu_dm_connector_funcs,
7124 			connector_type,
7125 			&i2c->base);
7126 
7127 	if (res) {
7128 		DRM_ERROR("connector_init failed\n");
7129 		aconnector->connector_id = -1;
7130 		goto out_free;
7131 	}
7132 
7133 	drm_connector_helper_add(
7134 			&aconnector->base,
7135 			&amdgpu_dm_connector_helper_funcs);
7136 
7137 	amdgpu_dm_connector_init_helper(
7138 		dm,
7139 		aconnector,
7140 		connector_type,
7141 		link,
7142 		link_index);
7143 
7144 	drm_connector_attach_encoder(
7145 		&aconnector->base, &aencoder->base);
7146 
7147 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7148 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7149 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7150 
7151 out_free:
7152 	if (res) {
7153 		kfree(i2c);
7154 		aconnector->i2c = NULL;
7155 	}
7156 	return res;
7157 }
7158 
7159 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7160 {
7161 	switch (adev->mode_info.num_crtc) {
7162 	case 1:
7163 		return 0x1;
7164 	case 2:
7165 		return 0x3;
7166 	case 3:
7167 		return 0x7;
7168 	case 4:
7169 		return 0xf;
7170 	case 5:
7171 		return 0x1f;
7172 	case 6:
7173 	default:
7174 		return 0x3f;
7175 	}
7176 }
7177 
7178 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7179 				  struct amdgpu_encoder *aencoder,
7180 				  uint32_t link_index)
7181 {
7182 	struct amdgpu_device *adev = drm_to_adev(dev);
7183 
7184 	int res = drm_encoder_init(dev,
7185 				   &aencoder->base,
7186 				   &amdgpu_dm_encoder_funcs,
7187 				   DRM_MODE_ENCODER_TMDS,
7188 				   NULL);
7189 
7190 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7191 
7192 	if (!res)
7193 		aencoder->encoder_id = link_index;
7194 	else
7195 		aencoder->encoder_id = -1;
7196 
7197 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7198 
7199 	return res;
7200 }
7201 
7202 static void manage_dm_interrupts(struct amdgpu_device *adev,
7203 				 struct amdgpu_crtc *acrtc,
7204 				 bool enable)
7205 {
7206 	/*
7207 	 * We have no guarantee that the frontend index maps to the same
7208 	 * backend index - some even map to more than one.
7209 	 *
7210 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7211 	 */
7212 	int irq_type =
7213 		amdgpu_display_crtc_idx_to_irq_type(
7214 			adev,
7215 			acrtc->crtc_id);
7216 
7217 	if (enable) {
7218 		drm_crtc_vblank_on(&acrtc->base);
7219 		amdgpu_irq_get(
7220 			adev,
7221 			&adev->pageflip_irq,
7222 			irq_type);
7223 	} else {
7224 
7225 		amdgpu_irq_put(
7226 			adev,
7227 			&adev->pageflip_irq,
7228 			irq_type);
7229 		drm_crtc_vblank_off(&acrtc->base);
7230 	}
7231 }
7232 
7233 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7234 				      struct amdgpu_crtc *acrtc)
7235 {
7236 	int irq_type =
7237 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7238 
7239 	/**
7240 	 * This reads the current state for the IRQ and force reapplies
7241 	 * the setting to hardware.
7242 	 */
7243 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7244 }
7245 
7246 static bool
7247 is_scaling_state_different(const struct dm_connector_state *dm_state,
7248 			   const struct dm_connector_state *old_dm_state)
7249 {
7250 	if (dm_state->scaling != old_dm_state->scaling)
7251 		return true;
7252 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7253 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7254 			return true;
7255 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7256 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7257 			return true;
7258 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7259 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7260 		return true;
7261 	return false;
7262 }
7263 
7264 #ifdef CONFIG_DRM_AMD_DC_HDCP
7265 static bool is_content_protection_different(struct drm_connector_state *state,
7266 					    const struct drm_connector_state *old_state,
7267 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7268 {
7269 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7270 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7271 
7272 	/* Handle: Type0/1 change */
7273 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7274 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7275 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7276 		return true;
7277 	}
7278 
7279 	/* CP is being re enabled, ignore this
7280 	 *
7281 	 * Handles:	ENABLED -> DESIRED
7282 	 */
7283 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7284 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7285 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7286 		return false;
7287 	}
7288 
7289 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7290 	 *
7291 	 * Handles:	UNDESIRED -> ENABLED
7292 	 */
7293 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7294 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7295 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7296 
7297 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7298 	 * hot-plug, headless s3, dpms
7299 	 *
7300 	 * Handles:	DESIRED -> DESIRED (Special case)
7301 	 */
7302 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7303 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7304 		dm_con_state->update_hdcp = false;
7305 		return true;
7306 	}
7307 
7308 	/*
7309 	 * Handles:	UNDESIRED -> UNDESIRED
7310 	 *		DESIRED -> DESIRED
7311 	 *		ENABLED -> ENABLED
7312 	 */
7313 	if (old_state->content_protection == state->content_protection)
7314 		return false;
7315 
7316 	/*
7317 	 * Handles:	UNDESIRED -> DESIRED
7318 	 *		DESIRED -> UNDESIRED
7319 	 *		ENABLED -> UNDESIRED
7320 	 */
7321 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7322 		return true;
7323 
7324 	/*
7325 	 * Handles:	DESIRED -> ENABLED
7326 	 */
7327 	return false;
7328 }
7329 
7330 #endif
7331 static void remove_stream(struct amdgpu_device *adev,
7332 			  struct amdgpu_crtc *acrtc,
7333 			  struct dc_stream_state *stream)
7334 {
7335 	/* this is the update mode case */
7336 
7337 	acrtc->otg_inst = -1;
7338 	acrtc->enabled = false;
7339 }
7340 
7341 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7342 			       struct dc_cursor_position *position)
7343 {
7344 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7345 	int x, y;
7346 	int xorigin = 0, yorigin = 0;
7347 
7348 	position->enable = false;
7349 	position->x = 0;
7350 	position->y = 0;
7351 
7352 	if (!crtc || !plane->state->fb)
7353 		return 0;
7354 
7355 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7356 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7357 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7358 			  __func__,
7359 			  plane->state->crtc_w,
7360 			  plane->state->crtc_h);
7361 		return -EINVAL;
7362 	}
7363 
7364 	x = plane->state->crtc_x;
7365 	y = plane->state->crtc_y;
7366 
7367 	if (x <= -amdgpu_crtc->max_cursor_width ||
7368 	    y <= -amdgpu_crtc->max_cursor_height)
7369 		return 0;
7370 
7371 	if (x < 0) {
7372 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7373 		x = 0;
7374 	}
7375 	if (y < 0) {
7376 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7377 		y = 0;
7378 	}
7379 	position->enable = true;
7380 	position->translate_by_source = true;
7381 	position->x = x;
7382 	position->y = y;
7383 	position->x_hotspot = xorigin;
7384 	position->y_hotspot = yorigin;
7385 
7386 	return 0;
7387 }
7388 
7389 static void handle_cursor_update(struct drm_plane *plane,
7390 				 struct drm_plane_state *old_plane_state)
7391 {
7392 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7393 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7394 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7395 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7396 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7397 	uint64_t address = afb ? afb->address : 0;
7398 	struct dc_cursor_position position;
7399 	struct dc_cursor_attributes attributes;
7400 	int ret;
7401 
7402 	if (!plane->state->fb && !old_plane_state->fb)
7403 		return;
7404 
7405 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7406 			 __func__,
7407 			 amdgpu_crtc->crtc_id,
7408 			 plane->state->crtc_w,
7409 			 plane->state->crtc_h);
7410 
7411 	ret = get_cursor_position(plane, crtc, &position);
7412 	if (ret)
7413 		return;
7414 
7415 	if (!position.enable) {
7416 		/* turn off cursor */
7417 		if (crtc_state && crtc_state->stream) {
7418 			mutex_lock(&adev->dm.dc_lock);
7419 			dc_stream_set_cursor_position(crtc_state->stream,
7420 						      &position);
7421 			mutex_unlock(&adev->dm.dc_lock);
7422 		}
7423 		return;
7424 	}
7425 
7426 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7427 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7428 
7429 	memset(&attributes, 0, sizeof(attributes));
7430 	attributes.address.high_part = upper_32_bits(address);
7431 	attributes.address.low_part  = lower_32_bits(address);
7432 	attributes.width             = plane->state->crtc_w;
7433 	attributes.height            = plane->state->crtc_h;
7434 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7435 	attributes.rotation_angle    = 0;
7436 	attributes.attribute_flags.value = 0;
7437 
7438 	attributes.pitch = attributes.width;
7439 
7440 	if (crtc_state->stream) {
7441 		mutex_lock(&adev->dm.dc_lock);
7442 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7443 							 &attributes))
7444 			DRM_ERROR("DC failed to set cursor attributes\n");
7445 
7446 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7447 						   &position))
7448 			DRM_ERROR("DC failed to set cursor position\n");
7449 		mutex_unlock(&adev->dm.dc_lock);
7450 	}
7451 }
7452 
7453 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7454 {
7455 
7456 	assert_spin_locked(&acrtc->base.dev->event_lock);
7457 	WARN_ON(acrtc->event);
7458 
7459 	acrtc->event = acrtc->base.state->event;
7460 
7461 	/* Set the flip status */
7462 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7463 
7464 	/* Mark this event as consumed */
7465 	acrtc->base.state->event = NULL;
7466 
7467 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7468 						 acrtc->crtc_id);
7469 }
7470 
7471 static void update_freesync_state_on_stream(
7472 	struct amdgpu_display_manager *dm,
7473 	struct dm_crtc_state *new_crtc_state,
7474 	struct dc_stream_state *new_stream,
7475 	struct dc_plane_state *surface,
7476 	u32 flip_timestamp_in_us)
7477 {
7478 	struct mod_vrr_params vrr_params;
7479 	struct dc_info_packet vrr_infopacket = {0};
7480 	struct amdgpu_device *adev = dm->adev;
7481 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7482 	unsigned long flags;
7483 
7484 	if (!new_stream)
7485 		return;
7486 
7487 	/*
7488 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7489 	 * For now it's sufficient to just guard against these conditions.
7490 	 */
7491 
7492 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7493 		return;
7494 
7495 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7496         vrr_params = acrtc->dm_irq_params.vrr_params;
7497 
7498 	if (surface) {
7499 		mod_freesync_handle_preflip(
7500 			dm->freesync_module,
7501 			surface,
7502 			new_stream,
7503 			flip_timestamp_in_us,
7504 			&vrr_params);
7505 
7506 		if (adev->family < AMDGPU_FAMILY_AI &&
7507 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7508 			mod_freesync_handle_v_update(dm->freesync_module,
7509 						     new_stream, &vrr_params);
7510 
7511 			/* Need to call this before the frame ends. */
7512 			dc_stream_adjust_vmin_vmax(dm->dc,
7513 						   new_crtc_state->stream,
7514 						   &vrr_params.adjust);
7515 		}
7516 	}
7517 
7518 	mod_freesync_build_vrr_infopacket(
7519 		dm->freesync_module,
7520 		new_stream,
7521 		&vrr_params,
7522 		PACKET_TYPE_VRR,
7523 		TRANSFER_FUNC_UNKNOWN,
7524 		&vrr_infopacket);
7525 
7526 	new_crtc_state->freesync_timing_changed |=
7527 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7528 			&vrr_params.adjust,
7529 			sizeof(vrr_params.adjust)) != 0);
7530 
7531 	new_crtc_state->freesync_vrr_info_changed |=
7532 		(memcmp(&new_crtc_state->vrr_infopacket,
7533 			&vrr_infopacket,
7534 			sizeof(vrr_infopacket)) != 0);
7535 
7536 	acrtc->dm_irq_params.vrr_params = vrr_params;
7537 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7538 
7539 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7540 	new_stream->vrr_infopacket = vrr_infopacket;
7541 
7542 	if (new_crtc_state->freesync_vrr_info_changed)
7543 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7544 			      new_crtc_state->base.crtc->base.id,
7545 			      (int)new_crtc_state->base.vrr_enabled,
7546 			      (int)vrr_params.state);
7547 
7548 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7549 }
7550 
7551 static void update_stream_irq_parameters(
7552 	struct amdgpu_display_manager *dm,
7553 	struct dm_crtc_state *new_crtc_state)
7554 {
7555 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7556 	struct mod_vrr_params vrr_params;
7557 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7558 	struct amdgpu_device *adev = dm->adev;
7559 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7560 	unsigned long flags;
7561 
7562 	if (!new_stream)
7563 		return;
7564 
7565 	/*
7566 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7567 	 * For now it's sufficient to just guard against these conditions.
7568 	 */
7569 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7570 		return;
7571 
7572 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7573 	vrr_params = acrtc->dm_irq_params.vrr_params;
7574 
7575 	if (new_crtc_state->vrr_supported &&
7576 	    config.min_refresh_in_uhz &&
7577 	    config.max_refresh_in_uhz) {
7578 		config.state = new_crtc_state->base.vrr_enabled ?
7579 			VRR_STATE_ACTIVE_VARIABLE :
7580 			VRR_STATE_INACTIVE;
7581 	} else {
7582 		config.state = VRR_STATE_UNSUPPORTED;
7583 	}
7584 
7585 	mod_freesync_build_vrr_params(dm->freesync_module,
7586 				      new_stream,
7587 				      &config, &vrr_params);
7588 
7589 	new_crtc_state->freesync_timing_changed |=
7590 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7591 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7592 
7593 	new_crtc_state->freesync_config = config;
7594 	/* Copy state for access from DM IRQ handler */
7595 	acrtc->dm_irq_params.freesync_config = config;
7596 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7597 	acrtc->dm_irq_params.vrr_params = vrr_params;
7598 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7599 }
7600 
7601 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7602 					    struct dm_crtc_state *new_state)
7603 {
7604 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7605 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7606 
7607 	if (!old_vrr_active && new_vrr_active) {
7608 		/* Transition VRR inactive -> active:
7609 		 * While VRR is active, we must not disable vblank irq, as a
7610 		 * reenable after disable would compute bogus vblank/pflip
7611 		 * timestamps if it likely happened inside display front-porch.
7612 		 *
7613 		 * We also need vupdate irq for the actual core vblank handling
7614 		 * at end of vblank.
7615 		 */
7616 		dm_set_vupdate_irq(new_state->base.crtc, true);
7617 		drm_crtc_vblank_get(new_state->base.crtc);
7618 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7619 				 __func__, new_state->base.crtc->base.id);
7620 	} else if (old_vrr_active && !new_vrr_active) {
7621 		/* Transition VRR active -> inactive:
7622 		 * Allow vblank irq disable again for fixed refresh rate.
7623 		 */
7624 		dm_set_vupdate_irq(new_state->base.crtc, false);
7625 		drm_crtc_vblank_put(new_state->base.crtc);
7626 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7627 				 __func__, new_state->base.crtc->base.id);
7628 	}
7629 }
7630 
7631 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7632 {
7633 	struct drm_plane *plane;
7634 	struct drm_plane_state *old_plane_state, *new_plane_state;
7635 	int i;
7636 
7637 	/*
7638 	 * TODO: Make this per-stream so we don't issue redundant updates for
7639 	 * commits with multiple streams.
7640 	 */
7641 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7642 				       new_plane_state, i)
7643 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7644 			handle_cursor_update(plane, old_plane_state);
7645 }
7646 
7647 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7648 				    struct dc_state *dc_state,
7649 				    struct drm_device *dev,
7650 				    struct amdgpu_display_manager *dm,
7651 				    struct drm_crtc *pcrtc,
7652 				    bool wait_for_vblank)
7653 {
7654 	uint32_t i;
7655 	uint64_t timestamp_ns;
7656 	struct drm_plane *plane;
7657 	struct drm_plane_state *old_plane_state, *new_plane_state;
7658 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7659 	struct drm_crtc_state *new_pcrtc_state =
7660 			drm_atomic_get_new_crtc_state(state, pcrtc);
7661 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7662 	struct dm_crtc_state *dm_old_crtc_state =
7663 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7664 	int planes_count = 0, vpos, hpos;
7665 	long r;
7666 	unsigned long flags;
7667 	struct amdgpu_bo *abo;
7668 	uint32_t target_vblank, last_flip_vblank;
7669 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7670 	bool pflip_present = false;
7671 	struct {
7672 		struct dc_surface_update surface_updates[MAX_SURFACES];
7673 		struct dc_plane_info plane_infos[MAX_SURFACES];
7674 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7675 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7676 		struct dc_stream_update stream_update;
7677 	} *bundle;
7678 
7679 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7680 
7681 	if (!bundle) {
7682 		dm_error("Failed to allocate update bundle\n");
7683 		goto cleanup;
7684 	}
7685 
7686 	/*
7687 	 * Disable the cursor first if we're disabling all the planes.
7688 	 * It'll remain on the screen after the planes are re-enabled
7689 	 * if we don't.
7690 	 */
7691 	if (acrtc_state->active_planes == 0)
7692 		amdgpu_dm_commit_cursors(state);
7693 
7694 	/* update planes when needed */
7695 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7696 		struct drm_crtc *crtc = new_plane_state->crtc;
7697 		struct drm_crtc_state *new_crtc_state;
7698 		struct drm_framebuffer *fb = new_plane_state->fb;
7699 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7700 		bool plane_needs_flip;
7701 		struct dc_plane_state *dc_plane;
7702 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7703 
7704 		/* Cursor plane is handled after stream updates */
7705 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7706 			continue;
7707 
7708 		if (!fb || !crtc || pcrtc != crtc)
7709 			continue;
7710 
7711 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7712 		if (!new_crtc_state->active)
7713 			continue;
7714 
7715 		dc_plane = dm_new_plane_state->dc_state;
7716 
7717 		bundle->surface_updates[planes_count].surface = dc_plane;
7718 		if (new_pcrtc_state->color_mgmt_changed) {
7719 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7720 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7721 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7722 		}
7723 
7724 		fill_dc_scaling_info(new_plane_state,
7725 				     &bundle->scaling_infos[planes_count]);
7726 
7727 		bundle->surface_updates[planes_count].scaling_info =
7728 			&bundle->scaling_infos[planes_count];
7729 
7730 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7731 
7732 		pflip_present = pflip_present || plane_needs_flip;
7733 
7734 		if (!plane_needs_flip) {
7735 			planes_count += 1;
7736 			continue;
7737 		}
7738 
7739 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7740 
7741 		/*
7742 		 * Wait for all fences on this FB. Do limited wait to avoid
7743 		 * deadlock during GPU reset when this fence will not signal
7744 		 * but we hold reservation lock for the BO.
7745 		 */
7746 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7747 							false,
7748 							msecs_to_jiffies(5000));
7749 		if (unlikely(r <= 0))
7750 			DRM_ERROR("Waiting for fences timed out!");
7751 
7752 		fill_dc_plane_info_and_addr(
7753 			dm->adev, new_plane_state,
7754 			afb->tiling_flags,
7755 			&bundle->plane_infos[planes_count],
7756 			&bundle->flip_addrs[planes_count].address,
7757 			afb->tmz_surface, false);
7758 
7759 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7760 				 new_plane_state->plane->index,
7761 				 bundle->plane_infos[planes_count].dcc.enable);
7762 
7763 		bundle->surface_updates[planes_count].plane_info =
7764 			&bundle->plane_infos[planes_count];
7765 
7766 		/*
7767 		 * Only allow immediate flips for fast updates that don't
7768 		 * change FB pitch, DCC state, rotation or mirroing.
7769 		 */
7770 		bundle->flip_addrs[planes_count].flip_immediate =
7771 			crtc->state->async_flip &&
7772 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7773 
7774 		timestamp_ns = ktime_get_ns();
7775 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7776 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7777 		bundle->surface_updates[planes_count].surface = dc_plane;
7778 
7779 		if (!bundle->surface_updates[planes_count].surface) {
7780 			DRM_ERROR("No surface for CRTC: id=%d\n",
7781 					acrtc_attach->crtc_id);
7782 			continue;
7783 		}
7784 
7785 		if (plane == pcrtc->primary)
7786 			update_freesync_state_on_stream(
7787 				dm,
7788 				acrtc_state,
7789 				acrtc_state->stream,
7790 				dc_plane,
7791 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7792 
7793 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7794 				 __func__,
7795 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7796 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7797 
7798 		planes_count += 1;
7799 
7800 	}
7801 
7802 	if (pflip_present) {
7803 		if (!vrr_active) {
7804 			/* Use old throttling in non-vrr fixed refresh rate mode
7805 			 * to keep flip scheduling based on target vblank counts
7806 			 * working in a backwards compatible way, e.g., for
7807 			 * clients using the GLX_OML_sync_control extension or
7808 			 * DRI3/Present extension with defined target_msc.
7809 			 */
7810 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7811 		}
7812 		else {
7813 			/* For variable refresh rate mode only:
7814 			 * Get vblank of last completed flip to avoid > 1 vrr
7815 			 * flips per video frame by use of throttling, but allow
7816 			 * flip programming anywhere in the possibly large
7817 			 * variable vrr vblank interval for fine-grained flip
7818 			 * timing control and more opportunity to avoid stutter
7819 			 * on late submission of flips.
7820 			 */
7821 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7822 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7823 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7824 		}
7825 
7826 		target_vblank = last_flip_vblank + wait_for_vblank;
7827 
7828 		/*
7829 		 * Wait until we're out of the vertical blank period before the one
7830 		 * targeted by the flip
7831 		 */
7832 		while ((acrtc_attach->enabled &&
7833 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7834 							    0, &vpos, &hpos, NULL,
7835 							    NULL, &pcrtc->hwmode)
7836 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7837 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7838 			(int)(target_vblank -
7839 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7840 			usleep_range(1000, 1100);
7841 		}
7842 
7843 		/**
7844 		 * Prepare the flip event for the pageflip interrupt to handle.
7845 		 *
7846 		 * This only works in the case where we've already turned on the
7847 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7848 		 * from 0 -> n planes we have to skip a hardware generated event
7849 		 * and rely on sending it from software.
7850 		 */
7851 		if (acrtc_attach->base.state->event &&
7852 		    acrtc_state->active_planes > 0) {
7853 			drm_crtc_vblank_get(pcrtc);
7854 
7855 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7856 
7857 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7858 			prepare_flip_isr(acrtc_attach);
7859 
7860 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7861 		}
7862 
7863 		if (acrtc_state->stream) {
7864 			if (acrtc_state->freesync_vrr_info_changed)
7865 				bundle->stream_update.vrr_infopacket =
7866 					&acrtc_state->stream->vrr_infopacket;
7867 		}
7868 	}
7869 
7870 	/* Update the planes if changed or disable if we don't have any. */
7871 	if ((planes_count || acrtc_state->active_planes == 0) &&
7872 		acrtc_state->stream) {
7873 		bundle->stream_update.stream = acrtc_state->stream;
7874 		if (new_pcrtc_state->mode_changed) {
7875 			bundle->stream_update.src = acrtc_state->stream->src;
7876 			bundle->stream_update.dst = acrtc_state->stream->dst;
7877 		}
7878 
7879 		if (new_pcrtc_state->color_mgmt_changed) {
7880 			/*
7881 			 * TODO: This isn't fully correct since we've actually
7882 			 * already modified the stream in place.
7883 			 */
7884 			bundle->stream_update.gamut_remap =
7885 				&acrtc_state->stream->gamut_remap_matrix;
7886 			bundle->stream_update.output_csc_transform =
7887 				&acrtc_state->stream->csc_color_matrix;
7888 			bundle->stream_update.out_transfer_func =
7889 				acrtc_state->stream->out_transfer_func;
7890 		}
7891 
7892 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7893 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7894 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7895 
7896 		/*
7897 		 * If FreeSync state on the stream has changed then we need to
7898 		 * re-adjust the min/max bounds now that DC doesn't handle this
7899 		 * as part of commit.
7900 		 */
7901 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7902 		    amdgpu_dm_vrr_active(acrtc_state)) {
7903 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7904 			dc_stream_adjust_vmin_vmax(
7905 				dm->dc, acrtc_state->stream,
7906 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7907 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7908 		}
7909 		mutex_lock(&dm->dc_lock);
7910 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7911 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7912 			amdgpu_dm_psr_disable(acrtc_state->stream);
7913 
7914 		dc_commit_updates_for_stream(dm->dc,
7915 						     bundle->surface_updates,
7916 						     planes_count,
7917 						     acrtc_state->stream,
7918 						     &bundle->stream_update,
7919 						     dc_state);
7920 
7921 		/**
7922 		 * Enable or disable the interrupts on the backend.
7923 		 *
7924 		 * Most pipes are put into power gating when unused.
7925 		 *
7926 		 * When power gating is enabled on a pipe we lose the
7927 		 * interrupt enablement state when power gating is disabled.
7928 		 *
7929 		 * So we need to update the IRQ control state in hardware
7930 		 * whenever the pipe turns on (since it could be previously
7931 		 * power gated) or off (since some pipes can't be power gated
7932 		 * on some ASICs).
7933 		 */
7934 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7935 			dm_update_pflip_irq_state(drm_to_adev(dev),
7936 						  acrtc_attach);
7937 
7938 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7939 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7940 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7941 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7942 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7943 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7944 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7945 			amdgpu_dm_psr_enable(acrtc_state->stream);
7946 		}
7947 
7948 		mutex_unlock(&dm->dc_lock);
7949 	}
7950 
7951 	/*
7952 	 * Update cursor state *after* programming all the planes.
7953 	 * This avoids redundant programming in the case where we're going
7954 	 * to be disabling a single plane - those pipes are being disabled.
7955 	 */
7956 	if (acrtc_state->active_planes)
7957 		amdgpu_dm_commit_cursors(state);
7958 
7959 cleanup:
7960 	kfree(bundle);
7961 }
7962 
7963 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7964 				   struct drm_atomic_state *state)
7965 {
7966 	struct amdgpu_device *adev = drm_to_adev(dev);
7967 	struct amdgpu_dm_connector *aconnector;
7968 	struct drm_connector *connector;
7969 	struct drm_connector_state *old_con_state, *new_con_state;
7970 	struct drm_crtc_state *new_crtc_state;
7971 	struct dm_crtc_state *new_dm_crtc_state;
7972 	const struct dc_stream_status *status;
7973 	int i, inst;
7974 
7975 	/* Notify device removals. */
7976 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7977 		if (old_con_state->crtc != new_con_state->crtc) {
7978 			/* CRTC changes require notification. */
7979 			goto notify;
7980 		}
7981 
7982 		if (!new_con_state->crtc)
7983 			continue;
7984 
7985 		new_crtc_state = drm_atomic_get_new_crtc_state(
7986 			state, new_con_state->crtc);
7987 
7988 		if (!new_crtc_state)
7989 			continue;
7990 
7991 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7992 			continue;
7993 
7994 	notify:
7995 		aconnector = to_amdgpu_dm_connector(connector);
7996 
7997 		mutex_lock(&adev->dm.audio_lock);
7998 		inst = aconnector->audio_inst;
7999 		aconnector->audio_inst = -1;
8000 		mutex_unlock(&adev->dm.audio_lock);
8001 
8002 		amdgpu_dm_audio_eld_notify(adev, inst);
8003 	}
8004 
8005 	/* Notify audio device additions. */
8006 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8007 		if (!new_con_state->crtc)
8008 			continue;
8009 
8010 		new_crtc_state = drm_atomic_get_new_crtc_state(
8011 			state, new_con_state->crtc);
8012 
8013 		if (!new_crtc_state)
8014 			continue;
8015 
8016 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8017 			continue;
8018 
8019 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8020 		if (!new_dm_crtc_state->stream)
8021 			continue;
8022 
8023 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8024 		if (!status)
8025 			continue;
8026 
8027 		aconnector = to_amdgpu_dm_connector(connector);
8028 
8029 		mutex_lock(&adev->dm.audio_lock);
8030 		inst = status->audio_inst;
8031 		aconnector->audio_inst = inst;
8032 		mutex_unlock(&adev->dm.audio_lock);
8033 
8034 		amdgpu_dm_audio_eld_notify(adev, inst);
8035 	}
8036 }
8037 
8038 /*
8039  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8040  * @crtc_state: the DRM CRTC state
8041  * @stream_state: the DC stream state.
8042  *
8043  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8044  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8045  */
8046 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8047 						struct dc_stream_state *stream_state)
8048 {
8049 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8050 }
8051 
8052 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
8053 				   struct drm_atomic_state *state,
8054 				   bool nonblock)
8055 {
8056 	/*
8057 	 * Add check here for SoC's that support hardware cursor plane, to
8058 	 * unset legacy_cursor_update
8059 	 */
8060 
8061 	return drm_atomic_helper_commit(dev, state, nonblock);
8062 
8063 	/*TODO Handle EINTR, reenable IRQ*/
8064 }
8065 
8066 /**
8067  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8068  * @state: The atomic state to commit
8069  *
8070  * This will tell DC to commit the constructed DC state from atomic_check,
8071  * programming the hardware. Any failures here implies a hardware failure, since
8072  * atomic check should have filtered anything non-kosher.
8073  */
8074 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8075 {
8076 	struct drm_device *dev = state->dev;
8077 	struct amdgpu_device *adev = drm_to_adev(dev);
8078 	struct amdgpu_display_manager *dm = &adev->dm;
8079 	struct dm_atomic_state *dm_state;
8080 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8081 	uint32_t i, j;
8082 	struct drm_crtc *crtc;
8083 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8084 	unsigned long flags;
8085 	bool wait_for_vblank = true;
8086 	struct drm_connector *connector;
8087 	struct drm_connector_state *old_con_state, *new_con_state;
8088 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8089 	int crtc_disable_count = 0;
8090 	bool mode_set_reset_required = false;
8091 
8092 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8093 
8094 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8095 
8096 	dm_state = dm_atomic_get_new_state(state);
8097 	if (dm_state && dm_state->context) {
8098 		dc_state = dm_state->context;
8099 	} else {
8100 		/* No state changes, retain current state. */
8101 		dc_state_temp = dc_create_state(dm->dc);
8102 		ASSERT(dc_state_temp);
8103 		dc_state = dc_state_temp;
8104 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8105 	}
8106 
8107 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8108 				       new_crtc_state, i) {
8109 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8110 
8111 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8112 
8113 		if (old_crtc_state->active &&
8114 		    (!new_crtc_state->active ||
8115 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8116 			manage_dm_interrupts(adev, acrtc, false);
8117 			dc_stream_release(dm_old_crtc_state->stream);
8118 		}
8119 	}
8120 
8121 	drm_atomic_helper_calc_timestamping_constants(state);
8122 
8123 	/* update changed items */
8124 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8125 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8126 
8127 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8128 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8129 
8130 		DRM_DEBUG_DRIVER(
8131 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8132 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8133 			"connectors_changed:%d\n",
8134 			acrtc->crtc_id,
8135 			new_crtc_state->enable,
8136 			new_crtc_state->active,
8137 			new_crtc_state->planes_changed,
8138 			new_crtc_state->mode_changed,
8139 			new_crtc_state->active_changed,
8140 			new_crtc_state->connectors_changed);
8141 
8142 		/* Disable cursor if disabling crtc */
8143 		if (old_crtc_state->active && !new_crtc_state->active) {
8144 			struct dc_cursor_position position;
8145 
8146 			memset(&position, 0, sizeof(position));
8147 			mutex_lock(&dm->dc_lock);
8148 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8149 			mutex_unlock(&dm->dc_lock);
8150 		}
8151 
8152 		/* Copy all transient state flags into dc state */
8153 		if (dm_new_crtc_state->stream) {
8154 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8155 							    dm_new_crtc_state->stream);
8156 		}
8157 
8158 		/* handles headless hotplug case, updating new_state and
8159 		 * aconnector as needed
8160 		 */
8161 
8162 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8163 
8164 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8165 
8166 			if (!dm_new_crtc_state->stream) {
8167 				/*
8168 				 * this could happen because of issues with
8169 				 * userspace notifications delivery.
8170 				 * In this case userspace tries to set mode on
8171 				 * display which is disconnected in fact.
8172 				 * dc_sink is NULL in this case on aconnector.
8173 				 * We expect reset mode will come soon.
8174 				 *
8175 				 * This can also happen when unplug is done
8176 				 * during resume sequence ended
8177 				 *
8178 				 * In this case, we want to pretend we still
8179 				 * have a sink to keep the pipe running so that
8180 				 * hw state is consistent with the sw state
8181 				 */
8182 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8183 						__func__, acrtc->base.base.id);
8184 				continue;
8185 			}
8186 
8187 			if (dm_old_crtc_state->stream)
8188 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8189 
8190 			pm_runtime_get_noresume(dev->dev);
8191 
8192 			acrtc->enabled = true;
8193 			acrtc->hw_mode = new_crtc_state->mode;
8194 			crtc->hwmode = new_crtc_state->mode;
8195 			mode_set_reset_required = true;
8196 		} else if (modereset_required(new_crtc_state)) {
8197 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8198 			/* i.e. reset mode */
8199 			if (dm_old_crtc_state->stream)
8200 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8201 			mode_set_reset_required = true;
8202 		}
8203 	} /* for_each_crtc_in_state() */
8204 
8205 	if (dc_state) {
8206 		/* if there mode set or reset, disable eDP PSR */
8207 		if (mode_set_reset_required)
8208 			amdgpu_dm_psr_disable_all(dm);
8209 
8210 		dm_enable_per_frame_crtc_master_sync(dc_state);
8211 		mutex_lock(&dm->dc_lock);
8212 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8213 		mutex_unlock(&dm->dc_lock);
8214 	}
8215 
8216 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8217 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8218 
8219 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8220 
8221 		if (dm_new_crtc_state->stream != NULL) {
8222 			const struct dc_stream_status *status =
8223 					dc_stream_get_status(dm_new_crtc_state->stream);
8224 
8225 			if (!status)
8226 				status = dc_stream_get_status_from_state(dc_state,
8227 									 dm_new_crtc_state->stream);
8228 			if (!status)
8229 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8230 			else
8231 				acrtc->otg_inst = status->primary_otg_inst;
8232 		}
8233 	}
8234 #ifdef CONFIG_DRM_AMD_DC_HDCP
8235 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8236 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8237 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8238 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8239 
8240 		new_crtc_state = NULL;
8241 
8242 		if (acrtc)
8243 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8244 
8245 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8246 
8247 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8248 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8249 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8250 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8251 			dm_new_con_state->update_hdcp = true;
8252 			continue;
8253 		}
8254 
8255 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8256 			hdcp_update_display(
8257 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8258 				new_con_state->hdcp_content_type,
8259 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8260 													 : false);
8261 	}
8262 #endif
8263 
8264 	/* Handle connector state changes */
8265 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8266 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8267 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8268 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8269 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8270 		struct dc_stream_update stream_update;
8271 		struct dc_info_packet hdr_packet;
8272 		struct dc_stream_status *status = NULL;
8273 		bool abm_changed, hdr_changed, scaling_changed;
8274 
8275 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8276 		memset(&stream_update, 0, sizeof(stream_update));
8277 
8278 		if (acrtc) {
8279 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8280 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8281 		}
8282 
8283 		/* Skip any modesets/resets */
8284 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8285 			continue;
8286 
8287 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8288 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8289 
8290 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8291 							     dm_old_con_state);
8292 
8293 		abm_changed = dm_new_crtc_state->abm_level !=
8294 			      dm_old_crtc_state->abm_level;
8295 
8296 		hdr_changed =
8297 			is_hdr_metadata_different(old_con_state, new_con_state);
8298 
8299 		if (!scaling_changed && !abm_changed && !hdr_changed)
8300 			continue;
8301 
8302 		stream_update.stream = dm_new_crtc_state->stream;
8303 		if (scaling_changed) {
8304 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8305 					dm_new_con_state, dm_new_crtc_state->stream);
8306 
8307 			stream_update.src = dm_new_crtc_state->stream->src;
8308 			stream_update.dst = dm_new_crtc_state->stream->dst;
8309 		}
8310 
8311 		if (abm_changed) {
8312 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8313 
8314 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8315 		}
8316 
8317 		if (hdr_changed) {
8318 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8319 			stream_update.hdr_static_metadata = &hdr_packet;
8320 		}
8321 
8322 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8323 		WARN_ON(!status);
8324 		WARN_ON(!status->plane_count);
8325 
8326 		/*
8327 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8328 		 * Here we create an empty update on each plane.
8329 		 * To fix this, DC should permit updating only stream properties.
8330 		 */
8331 		for (j = 0; j < status->plane_count; j++)
8332 			dummy_updates[j].surface = status->plane_states[0];
8333 
8334 
8335 		mutex_lock(&dm->dc_lock);
8336 		dc_commit_updates_for_stream(dm->dc,
8337 						     dummy_updates,
8338 						     status->plane_count,
8339 						     dm_new_crtc_state->stream,
8340 						     &stream_update,
8341 						     dc_state);
8342 		mutex_unlock(&dm->dc_lock);
8343 	}
8344 
8345 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8346 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8347 				      new_crtc_state, i) {
8348 		if (old_crtc_state->active && !new_crtc_state->active)
8349 			crtc_disable_count++;
8350 
8351 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8352 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8353 
8354 		/* For freesync config update on crtc state and params for irq */
8355 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8356 
8357 		/* Handle vrr on->off / off->on transitions */
8358 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8359 						dm_new_crtc_state);
8360 	}
8361 
8362 	/**
8363 	 * Enable interrupts for CRTCs that are newly enabled or went through
8364 	 * a modeset. It was intentionally deferred until after the front end
8365 	 * state was modified to wait until the OTG was on and so the IRQ
8366 	 * handlers didn't access stale or invalid state.
8367 	 */
8368 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8369 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8370 		bool configure_crc = false;
8371 
8372 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8373 
8374 		if (new_crtc_state->active &&
8375 		    (!old_crtc_state->active ||
8376 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8377 			dc_stream_retain(dm_new_crtc_state->stream);
8378 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8379 			manage_dm_interrupts(adev, acrtc, true);
8380 		}
8381 #ifdef CONFIG_DEBUG_FS
8382 		if (new_crtc_state->active &&
8383 			amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8384 			/**
8385 			 * Frontend may have changed so reapply the CRC capture
8386 			 * settings for the stream.
8387 			 */
8388 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8389 			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8390 
8391 			if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8392 				if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8393 					configure_crc = true;
8394 			} else {
8395 				if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8396 					configure_crc = true;
8397 			}
8398 
8399 			if (configure_crc)
8400 				amdgpu_dm_crtc_configure_crc_source(
8401 					crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8402 		}
8403 #endif
8404 	}
8405 
8406 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8407 		if (new_crtc_state->async_flip)
8408 			wait_for_vblank = false;
8409 
8410 	/* update planes when needed per crtc*/
8411 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8412 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8413 
8414 		if (dm_new_crtc_state->stream)
8415 			amdgpu_dm_commit_planes(state, dc_state, dev,
8416 						dm, crtc, wait_for_vblank);
8417 	}
8418 
8419 	/* Update audio instances for each connector. */
8420 	amdgpu_dm_commit_audio(dev, state);
8421 
8422 	/*
8423 	 * send vblank event on all events not handled in flip and
8424 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8425 	 */
8426 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8427 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8428 
8429 		if (new_crtc_state->event)
8430 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8431 
8432 		new_crtc_state->event = NULL;
8433 	}
8434 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8435 
8436 	/* Signal HW programming completion */
8437 	drm_atomic_helper_commit_hw_done(state);
8438 
8439 	if (wait_for_vblank)
8440 		drm_atomic_helper_wait_for_flip_done(dev, state);
8441 
8442 	drm_atomic_helper_cleanup_planes(dev, state);
8443 
8444 	/*
8445 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8446 	 * so we can put the GPU into runtime suspend if we're not driving any
8447 	 * displays anymore
8448 	 */
8449 	for (i = 0; i < crtc_disable_count; i++)
8450 		pm_runtime_put_autosuspend(dev->dev);
8451 	pm_runtime_mark_last_busy(dev->dev);
8452 
8453 	if (dc_state_temp)
8454 		dc_release_state(dc_state_temp);
8455 }
8456 
8457 
8458 static int dm_force_atomic_commit(struct drm_connector *connector)
8459 {
8460 	int ret = 0;
8461 	struct drm_device *ddev = connector->dev;
8462 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8463 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8464 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8465 	struct drm_connector_state *conn_state;
8466 	struct drm_crtc_state *crtc_state;
8467 	struct drm_plane_state *plane_state;
8468 
8469 	if (!state)
8470 		return -ENOMEM;
8471 
8472 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8473 
8474 	/* Construct an atomic state to restore previous display setting */
8475 
8476 	/*
8477 	 * Attach connectors to drm_atomic_state
8478 	 */
8479 	conn_state = drm_atomic_get_connector_state(state, connector);
8480 
8481 	ret = PTR_ERR_OR_ZERO(conn_state);
8482 	if (ret)
8483 		goto err;
8484 
8485 	/* Attach crtc to drm_atomic_state*/
8486 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8487 
8488 	ret = PTR_ERR_OR_ZERO(crtc_state);
8489 	if (ret)
8490 		goto err;
8491 
8492 	/* force a restore */
8493 	crtc_state->mode_changed = true;
8494 
8495 	/* Attach plane to drm_atomic_state */
8496 	plane_state = drm_atomic_get_plane_state(state, plane);
8497 
8498 	ret = PTR_ERR_OR_ZERO(plane_state);
8499 	if (ret)
8500 		goto err;
8501 
8502 
8503 	/* Call commit internally with the state we just constructed */
8504 	ret = drm_atomic_commit(state);
8505 	if (!ret)
8506 		return 0;
8507 
8508 err:
8509 	DRM_ERROR("Restoring old state failed with %i\n", ret);
8510 	drm_atomic_state_put(state);
8511 
8512 	return ret;
8513 }
8514 
8515 /*
8516  * This function handles all cases when set mode does not come upon hotplug.
8517  * This includes when a display is unplugged then plugged back into the
8518  * same port and when running without usermode desktop manager supprot
8519  */
8520 void dm_restore_drm_connector_state(struct drm_device *dev,
8521 				    struct drm_connector *connector)
8522 {
8523 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8524 	struct amdgpu_crtc *disconnected_acrtc;
8525 	struct dm_crtc_state *acrtc_state;
8526 
8527 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8528 		return;
8529 
8530 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8531 	if (!disconnected_acrtc)
8532 		return;
8533 
8534 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8535 	if (!acrtc_state->stream)
8536 		return;
8537 
8538 	/*
8539 	 * If the previous sink is not released and different from the current,
8540 	 * we deduce we are in a state where we can not rely on usermode call
8541 	 * to turn on the display, so we do it here
8542 	 */
8543 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8544 		dm_force_atomic_commit(&aconnector->base);
8545 }
8546 
8547 /*
8548  * Grabs all modesetting locks to serialize against any blocking commits,
8549  * Waits for completion of all non blocking commits.
8550  */
8551 static int do_aquire_global_lock(struct drm_device *dev,
8552 				 struct drm_atomic_state *state)
8553 {
8554 	struct drm_crtc *crtc;
8555 	struct drm_crtc_commit *commit;
8556 	long ret;
8557 
8558 	/*
8559 	 * Adding all modeset locks to aquire_ctx will
8560 	 * ensure that when the framework release it the
8561 	 * extra locks we are locking here will get released to
8562 	 */
8563 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8564 	if (ret)
8565 		return ret;
8566 
8567 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8568 		spin_lock(&crtc->commit_lock);
8569 		commit = list_first_entry_or_null(&crtc->commit_list,
8570 				struct drm_crtc_commit, commit_entry);
8571 		if (commit)
8572 			drm_crtc_commit_get(commit);
8573 		spin_unlock(&crtc->commit_lock);
8574 
8575 		if (!commit)
8576 			continue;
8577 
8578 		/*
8579 		 * Make sure all pending HW programming completed and
8580 		 * page flips done
8581 		 */
8582 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8583 
8584 		if (ret > 0)
8585 			ret = wait_for_completion_interruptible_timeout(
8586 					&commit->flip_done, 10*HZ);
8587 
8588 		if (ret == 0)
8589 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8590 				  "timed out\n", crtc->base.id, crtc->name);
8591 
8592 		drm_crtc_commit_put(commit);
8593 	}
8594 
8595 	return ret < 0 ? ret : 0;
8596 }
8597 
8598 static void get_freesync_config_for_crtc(
8599 	struct dm_crtc_state *new_crtc_state,
8600 	struct dm_connector_state *new_con_state)
8601 {
8602 	struct mod_freesync_config config = {0};
8603 	struct amdgpu_dm_connector *aconnector =
8604 			to_amdgpu_dm_connector(new_con_state->base.connector);
8605 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8606 	int vrefresh = drm_mode_vrefresh(mode);
8607 
8608 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8609 					vrefresh >= aconnector->min_vfreq &&
8610 					vrefresh <= aconnector->max_vfreq;
8611 
8612 	if (new_crtc_state->vrr_supported) {
8613 		new_crtc_state->stream->ignore_msa_timing_param = true;
8614 		config.state = new_crtc_state->base.vrr_enabled ?
8615 				VRR_STATE_ACTIVE_VARIABLE :
8616 				VRR_STATE_INACTIVE;
8617 		config.min_refresh_in_uhz =
8618 				aconnector->min_vfreq * 1000000;
8619 		config.max_refresh_in_uhz =
8620 				aconnector->max_vfreq * 1000000;
8621 		config.vsif_supported = true;
8622 		config.btr = true;
8623 	}
8624 
8625 	new_crtc_state->freesync_config = config;
8626 }
8627 
8628 static void reset_freesync_config_for_crtc(
8629 	struct dm_crtc_state *new_crtc_state)
8630 {
8631 	new_crtc_state->vrr_supported = false;
8632 
8633 	memset(&new_crtc_state->vrr_infopacket, 0,
8634 	       sizeof(new_crtc_state->vrr_infopacket));
8635 }
8636 
8637 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8638 				struct drm_atomic_state *state,
8639 				struct drm_crtc *crtc,
8640 				struct drm_crtc_state *old_crtc_state,
8641 				struct drm_crtc_state *new_crtc_state,
8642 				bool enable,
8643 				bool *lock_and_validation_needed)
8644 {
8645 	struct dm_atomic_state *dm_state = NULL;
8646 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8647 	struct dc_stream_state *new_stream;
8648 	int ret = 0;
8649 
8650 	/*
8651 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8652 	 * update changed items
8653 	 */
8654 	struct amdgpu_crtc *acrtc = NULL;
8655 	struct amdgpu_dm_connector *aconnector = NULL;
8656 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8657 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8658 
8659 	new_stream = NULL;
8660 
8661 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8662 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8663 	acrtc = to_amdgpu_crtc(crtc);
8664 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8665 
8666 	/* TODO This hack should go away */
8667 	if (aconnector && enable) {
8668 		/* Make sure fake sink is created in plug-in scenario */
8669 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8670 							    &aconnector->base);
8671 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8672 							    &aconnector->base);
8673 
8674 		if (IS_ERR(drm_new_conn_state)) {
8675 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8676 			goto fail;
8677 		}
8678 
8679 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8680 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8681 
8682 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8683 			goto skip_modeset;
8684 
8685 		new_stream = create_validate_stream_for_sink(aconnector,
8686 							     &new_crtc_state->mode,
8687 							     dm_new_conn_state,
8688 							     dm_old_crtc_state->stream);
8689 
8690 		/*
8691 		 * we can have no stream on ACTION_SET if a display
8692 		 * was disconnected during S3, in this case it is not an
8693 		 * error, the OS will be updated after detection, and
8694 		 * will do the right thing on next atomic commit
8695 		 */
8696 
8697 		if (!new_stream) {
8698 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8699 					__func__, acrtc->base.base.id);
8700 			ret = -ENOMEM;
8701 			goto fail;
8702 		}
8703 
8704 		/*
8705 		 * TODO: Check VSDB bits to decide whether this should
8706 		 * be enabled or not.
8707 		 */
8708 		new_stream->triggered_crtc_reset.enabled =
8709 			dm->force_timing_sync;
8710 
8711 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8712 
8713 		ret = fill_hdr_info_packet(drm_new_conn_state,
8714 					   &new_stream->hdr_static_metadata);
8715 		if (ret)
8716 			goto fail;
8717 
8718 		/*
8719 		 * If we already removed the old stream from the context
8720 		 * (and set the new stream to NULL) then we can't reuse
8721 		 * the old stream even if the stream and scaling are unchanged.
8722 		 * We'll hit the BUG_ON and black screen.
8723 		 *
8724 		 * TODO: Refactor this function to allow this check to work
8725 		 * in all conditions.
8726 		 */
8727 		if (dm_new_crtc_state->stream &&
8728 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8729 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8730 			new_crtc_state->mode_changed = false;
8731 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8732 					 new_crtc_state->mode_changed);
8733 		}
8734 	}
8735 
8736 	/* mode_changed flag may get updated above, need to check again */
8737 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8738 		goto skip_modeset;
8739 
8740 	DRM_DEBUG_DRIVER(
8741 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8742 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8743 		"connectors_changed:%d\n",
8744 		acrtc->crtc_id,
8745 		new_crtc_state->enable,
8746 		new_crtc_state->active,
8747 		new_crtc_state->planes_changed,
8748 		new_crtc_state->mode_changed,
8749 		new_crtc_state->active_changed,
8750 		new_crtc_state->connectors_changed);
8751 
8752 	/* Remove stream for any changed/disabled CRTC */
8753 	if (!enable) {
8754 
8755 		if (!dm_old_crtc_state->stream)
8756 			goto skip_modeset;
8757 
8758 		ret = dm_atomic_get_state(state, &dm_state);
8759 		if (ret)
8760 			goto fail;
8761 
8762 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8763 				crtc->base.id);
8764 
8765 		/* i.e. reset mode */
8766 		if (dc_remove_stream_from_ctx(
8767 				dm->dc,
8768 				dm_state->context,
8769 				dm_old_crtc_state->stream) != DC_OK) {
8770 			ret = -EINVAL;
8771 			goto fail;
8772 		}
8773 
8774 		dc_stream_release(dm_old_crtc_state->stream);
8775 		dm_new_crtc_state->stream = NULL;
8776 
8777 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8778 
8779 		*lock_and_validation_needed = true;
8780 
8781 	} else {/* Add stream for any updated/enabled CRTC */
8782 		/*
8783 		 * Quick fix to prevent NULL pointer on new_stream when
8784 		 * added MST connectors not found in existing crtc_state in the chained mode
8785 		 * TODO: need to dig out the root cause of that
8786 		 */
8787 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8788 			goto skip_modeset;
8789 
8790 		if (modereset_required(new_crtc_state))
8791 			goto skip_modeset;
8792 
8793 		if (modeset_required(new_crtc_state, new_stream,
8794 				     dm_old_crtc_state->stream)) {
8795 
8796 			WARN_ON(dm_new_crtc_state->stream);
8797 
8798 			ret = dm_atomic_get_state(state, &dm_state);
8799 			if (ret)
8800 				goto fail;
8801 
8802 			dm_new_crtc_state->stream = new_stream;
8803 
8804 			dc_stream_retain(new_stream);
8805 
8806 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8807 						crtc->base.id);
8808 
8809 			if (dc_add_stream_to_ctx(
8810 					dm->dc,
8811 					dm_state->context,
8812 					dm_new_crtc_state->stream) != DC_OK) {
8813 				ret = -EINVAL;
8814 				goto fail;
8815 			}
8816 
8817 			*lock_and_validation_needed = true;
8818 		}
8819 	}
8820 
8821 skip_modeset:
8822 	/* Release extra reference */
8823 	if (new_stream)
8824 		 dc_stream_release(new_stream);
8825 
8826 	/*
8827 	 * We want to do dc stream updates that do not require a
8828 	 * full modeset below.
8829 	 */
8830 	if (!(enable && aconnector && new_crtc_state->active))
8831 		return 0;
8832 	/*
8833 	 * Given above conditions, the dc state cannot be NULL because:
8834 	 * 1. We're in the process of enabling CRTCs (just been added
8835 	 *    to the dc context, or already is on the context)
8836 	 * 2. Has a valid connector attached, and
8837 	 * 3. Is currently active and enabled.
8838 	 * => The dc stream state currently exists.
8839 	 */
8840 	BUG_ON(dm_new_crtc_state->stream == NULL);
8841 
8842 	/* Scaling or underscan settings */
8843 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8844 		update_stream_scaling_settings(
8845 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8846 
8847 	/* ABM settings */
8848 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8849 
8850 	/*
8851 	 * Color management settings. We also update color properties
8852 	 * when a modeset is needed, to ensure it gets reprogrammed.
8853 	 */
8854 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8855 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8856 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8857 		if (ret)
8858 			goto fail;
8859 	}
8860 
8861 	/* Update Freesync settings. */
8862 	get_freesync_config_for_crtc(dm_new_crtc_state,
8863 				     dm_new_conn_state);
8864 
8865 	return ret;
8866 
8867 fail:
8868 	if (new_stream)
8869 		dc_stream_release(new_stream);
8870 	return ret;
8871 }
8872 
8873 static bool should_reset_plane(struct drm_atomic_state *state,
8874 			       struct drm_plane *plane,
8875 			       struct drm_plane_state *old_plane_state,
8876 			       struct drm_plane_state *new_plane_state)
8877 {
8878 	struct drm_plane *other;
8879 	struct drm_plane_state *old_other_state, *new_other_state;
8880 	struct drm_crtc_state *new_crtc_state;
8881 	int i;
8882 
8883 	/*
8884 	 * TODO: Remove this hack once the checks below are sufficient
8885 	 * enough to determine when we need to reset all the planes on
8886 	 * the stream.
8887 	 */
8888 	if (state->allow_modeset)
8889 		return true;
8890 
8891 	/* Exit early if we know that we're adding or removing the plane. */
8892 	if (old_plane_state->crtc != new_plane_state->crtc)
8893 		return true;
8894 
8895 	/* old crtc == new_crtc == NULL, plane not in context. */
8896 	if (!new_plane_state->crtc)
8897 		return false;
8898 
8899 	new_crtc_state =
8900 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8901 
8902 	if (!new_crtc_state)
8903 		return true;
8904 
8905 	/* CRTC Degamma changes currently require us to recreate planes. */
8906 	if (new_crtc_state->color_mgmt_changed)
8907 		return true;
8908 
8909 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8910 		return true;
8911 
8912 	/*
8913 	 * If there are any new primary or overlay planes being added or
8914 	 * removed then the z-order can potentially change. To ensure
8915 	 * correct z-order and pipe acquisition the current DC architecture
8916 	 * requires us to remove and recreate all existing planes.
8917 	 *
8918 	 * TODO: Come up with a more elegant solution for this.
8919 	 */
8920 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8921 		struct amdgpu_framebuffer *old_afb, *new_afb;
8922 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8923 			continue;
8924 
8925 		if (old_other_state->crtc != new_plane_state->crtc &&
8926 		    new_other_state->crtc != new_plane_state->crtc)
8927 			continue;
8928 
8929 		if (old_other_state->crtc != new_other_state->crtc)
8930 			return true;
8931 
8932 		/* Src/dst size and scaling updates. */
8933 		if (old_other_state->src_w != new_other_state->src_w ||
8934 		    old_other_state->src_h != new_other_state->src_h ||
8935 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8936 		    old_other_state->crtc_h != new_other_state->crtc_h)
8937 			return true;
8938 
8939 		/* Rotation / mirroring updates. */
8940 		if (old_other_state->rotation != new_other_state->rotation)
8941 			return true;
8942 
8943 		/* Blending updates. */
8944 		if (old_other_state->pixel_blend_mode !=
8945 		    new_other_state->pixel_blend_mode)
8946 			return true;
8947 
8948 		/* Alpha updates. */
8949 		if (old_other_state->alpha != new_other_state->alpha)
8950 			return true;
8951 
8952 		/* Colorspace changes. */
8953 		if (old_other_state->color_range != new_other_state->color_range ||
8954 		    old_other_state->color_encoding != new_other_state->color_encoding)
8955 			return true;
8956 
8957 		/* Framebuffer checks fall at the end. */
8958 		if (!old_other_state->fb || !new_other_state->fb)
8959 			continue;
8960 
8961 		/* Pixel format changes can require bandwidth updates. */
8962 		if (old_other_state->fb->format != new_other_state->fb->format)
8963 			return true;
8964 
8965 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8966 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8967 
8968 		/* Tiling and DCC changes also require bandwidth updates. */
8969 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8970 		    old_afb->base.modifier != new_afb->base.modifier)
8971 			return true;
8972 	}
8973 
8974 	return false;
8975 }
8976 
8977 static int dm_update_plane_state(struct dc *dc,
8978 				 struct drm_atomic_state *state,
8979 				 struct drm_plane *plane,
8980 				 struct drm_plane_state *old_plane_state,
8981 				 struct drm_plane_state *new_plane_state,
8982 				 bool enable,
8983 				 bool *lock_and_validation_needed)
8984 {
8985 
8986 	struct dm_atomic_state *dm_state = NULL;
8987 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8988 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8989 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8990 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8991 	struct amdgpu_crtc *new_acrtc;
8992 	bool needs_reset;
8993 	int ret = 0;
8994 
8995 
8996 	new_plane_crtc = new_plane_state->crtc;
8997 	old_plane_crtc = old_plane_state->crtc;
8998 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8999 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9000 
9001 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9002 		if (!enable || !new_plane_crtc ||
9003 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9004 			return 0;
9005 
9006 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9007 
9008 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9009 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9010 			return -EINVAL;
9011 		}
9012 
9013 		if (new_plane_state->fb) {
9014 			if (new_plane_state->fb->width > new_acrtc->max_cursor_width ||
9015 			    new_plane_state->fb->height > new_acrtc->max_cursor_height) {
9016 				DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9017 						 new_plane_state->fb->width,
9018 						 new_plane_state->fb->height);
9019 				return -EINVAL;
9020 			}
9021 			if (new_plane_state->src_w != new_plane_state->fb->width << 16 ||
9022 			    new_plane_state->src_h != new_plane_state->fb->height << 16) {
9023 				DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9024 				return -EINVAL;
9025 			}
9026 
9027 			switch (new_plane_state->fb->width) {
9028 			case 64:
9029 			case 128:
9030 			case 256:
9031 				/* FB width is supported by cursor plane */
9032 				break;
9033 			default:
9034 				DRM_DEBUG_ATOMIC("Bad cursor FB width %d\n",
9035 						 new_plane_state->fb->width);
9036 				return -EINVAL;
9037 			}
9038 		}
9039 
9040 		return 0;
9041 	}
9042 
9043 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9044 					 new_plane_state);
9045 
9046 	/* Remove any changed/removed planes */
9047 	if (!enable) {
9048 		if (!needs_reset)
9049 			return 0;
9050 
9051 		if (!old_plane_crtc)
9052 			return 0;
9053 
9054 		old_crtc_state = drm_atomic_get_old_crtc_state(
9055 				state, old_plane_crtc);
9056 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9057 
9058 		if (!dm_old_crtc_state->stream)
9059 			return 0;
9060 
9061 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9062 				plane->base.id, old_plane_crtc->base.id);
9063 
9064 		ret = dm_atomic_get_state(state, &dm_state);
9065 		if (ret)
9066 			return ret;
9067 
9068 		if (!dc_remove_plane_from_context(
9069 				dc,
9070 				dm_old_crtc_state->stream,
9071 				dm_old_plane_state->dc_state,
9072 				dm_state->context)) {
9073 
9074 			return -EINVAL;
9075 		}
9076 
9077 
9078 		dc_plane_state_release(dm_old_plane_state->dc_state);
9079 		dm_new_plane_state->dc_state = NULL;
9080 
9081 		*lock_and_validation_needed = true;
9082 
9083 	} else { /* Add new planes */
9084 		struct dc_plane_state *dc_new_plane_state;
9085 
9086 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9087 			return 0;
9088 
9089 		if (!new_plane_crtc)
9090 			return 0;
9091 
9092 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9093 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9094 
9095 		if (!dm_new_crtc_state->stream)
9096 			return 0;
9097 
9098 		if (!needs_reset)
9099 			return 0;
9100 
9101 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9102 		if (ret)
9103 			return ret;
9104 
9105 		WARN_ON(dm_new_plane_state->dc_state);
9106 
9107 		dc_new_plane_state = dc_create_plane_state(dc);
9108 		if (!dc_new_plane_state)
9109 			return -ENOMEM;
9110 
9111 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9112 				plane->base.id, new_plane_crtc->base.id);
9113 
9114 		ret = fill_dc_plane_attributes(
9115 			drm_to_adev(new_plane_crtc->dev),
9116 			dc_new_plane_state,
9117 			new_plane_state,
9118 			new_crtc_state);
9119 		if (ret) {
9120 			dc_plane_state_release(dc_new_plane_state);
9121 			return ret;
9122 		}
9123 
9124 		ret = dm_atomic_get_state(state, &dm_state);
9125 		if (ret) {
9126 			dc_plane_state_release(dc_new_plane_state);
9127 			return ret;
9128 		}
9129 
9130 		/*
9131 		 * Any atomic check errors that occur after this will
9132 		 * not need a release. The plane state will be attached
9133 		 * to the stream, and therefore part of the atomic
9134 		 * state. It'll be released when the atomic state is
9135 		 * cleaned.
9136 		 */
9137 		if (!dc_add_plane_to_context(
9138 				dc,
9139 				dm_new_crtc_state->stream,
9140 				dc_new_plane_state,
9141 				dm_state->context)) {
9142 
9143 			dc_plane_state_release(dc_new_plane_state);
9144 			return -EINVAL;
9145 		}
9146 
9147 		dm_new_plane_state->dc_state = dc_new_plane_state;
9148 
9149 		/* Tell DC to do a full surface update every time there
9150 		 * is a plane change. Inefficient, but works for now.
9151 		 */
9152 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9153 
9154 		*lock_and_validation_needed = true;
9155 	}
9156 
9157 
9158 	return ret;
9159 }
9160 
9161 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9162 				struct drm_crtc *crtc,
9163 				struct drm_crtc_state *new_crtc_state)
9164 {
9165 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9166 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9167 
9168 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9169 	 * cursor per pipe but it's going to inherit the scaling and
9170 	 * positioning from the underlying pipe. Check the cursor plane's
9171 	 * blending properties match the primary plane's. */
9172 
9173 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9174 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9175 	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9176 		return 0;
9177 	}
9178 
9179 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9180 			 (new_cursor_state->src_w >> 16);
9181 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9182 			 (new_cursor_state->src_h >> 16);
9183 
9184 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9185 			 (new_primary_state->src_w >> 16);
9186 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9187 			 (new_primary_state->src_h >> 16);
9188 
9189 	if (cursor_scale_w != primary_scale_w ||
9190 	    cursor_scale_h != primary_scale_h) {
9191 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9192 		return -EINVAL;
9193 	}
9194 
9195 	return 0;
9196 }
9197 
9198 #if defined(CONFIG_DRM_AMD_DC_DCN)
9199 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9200 {
9201 	struct drm_connector *connector;
9202 	struct drm_connector_state *conn_state;
9203 	struct amdgpu_dm_connector *aconnector = NULL;
9204 	int i;
9205 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9206 		if (conn_state->crtc != crtc)
9207 			continue;
9208 
9209 		aconnector = to_amdgpu_dm_connector(connector);
9210 		if (!aconnector->port || !aconnector->mst_port)
9211 			aconnector = NULL;
9212 		else
9213 			break;
9214 	}
9215 
9216 	if (!aconnector)
9217 		return 0;
9218 
9219 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9220 }
9221 #endif
9222 
9223 /**
9224  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9225  * @dev: The DRM device
9226  * @state: The atomic state to commit
9227  *
9228  * Validate that the given atomic state is programmable by DC into hardware.
9229  * This involves constructing a &struct dc_state reflecting the new hardware
9230  * state we wish to commit, then querying DC to see if it is programmable. It's
9231  * important not to modify the existing DC state. Otherwise, atomic_check
9232  * may unexpectedly commit hardware changes.
9233  *
9234  * When validating the DC state, it's important that the right locks are
9235  * acquired. For full updates case which removes/adds/updates streams on one
9236  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9237  * that any such full update commit will wait for completion of any outstanding
9238  * flip using DRMs synchronization events.
9239  *
9240  * Note that DM adds the affected connectors for all CRTCs in state, when that
9241  * might not seem necessary. This is because DC stream creation requires the
9242  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9243  * be possible but non-trivial - a possible TODO item.
9244  *
9245  * Return: -Error code if validation failed.
9246  */
9247 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9248 				  struct drm_atomic_state *state)
9249 {
9250 	struct amdgpu_device *adev = drm_to_adev(dev);
9251 	struct dm_atomic_state *dm_state = NULL;
9252 	struct dc *dc = adev->dm.dc;
9253 	struct drm_connector *connector;
9254 	struct drm_connector_state *old_con_state, *new_con_state;
9255 	struct drm_crtc *crtc;
9256 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9257 	struct drm_plane *plane;
9258 	struct drm_plane_state *old_plane_state, *new_plane_state;
9259 	enum dc_status status;
9260 	int ret, i;
9261 	bool lock_and_validation_needed = false;
9262 	struct dm_crtc_state *dm_old_crtc_state;
9263 
9264 	trace_amdgpu_dm_atomic_check_begin(state);
9265 
9266 	ret = drm_atomic_helper_check_modeset(dev, state);
9267 	if (ret)
9268 		goto fail;
9269 
9270 	/* Check connector changes */
9271 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9272 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9273 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9274 
9275 		/* Skip connectors that are disabled or part of modeset already. */
9276 		if (!old_con_state->crtc && !new_con_state->crtc)
9277 			continue;
9278 
9279 		if (!new_con_state->crtc)
9280 			continue;
9281 
9282 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9283 		if (IS_ERR(new_crtc_state)) {
9284 			ret = PTR_ERR(new_crtc_state);
9285 			goto fail;
9286 		}
9287 
9288 		if (dm_old_con_state->abm_level !=
9289 		    dm_new_con_state->abm_level)
9290 			new_crtc_state->connectors_changed = true;
9291 	}
9292 
9293 #if defined(CONFIG_DRM_AMD_DC_DCN)
9294 	if (adev->asic_type >= CHIP_NAVI10) {
9295 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9296 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9297 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9298 				if (ret)
9299 					goto fail;
9300 			}
9301 		}
9302 	}
9303 #endif
9304 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9305 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9306 
9307 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9308 		    !new_crtc_state->color_mgmt_changed &&
9309 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9310 			dm_old_crtc_state->dsc_force_changed == false)
9311 			continue;
9312 
9313 		if (!new_crtc_state->enable)
9314 			continue;
9315 
9316 		ret = drm_atomic_add_affected_connectors(state, crtc);
9317 		if (ret)
9318 			return ret;
9319 
9320 		ret = drm_atomic_add_affected_planes(state, crtc);
9321 		if (ret)
9322 			goto fail;
9323 	}
9324 
9325 	/*
9326 	 * Add all primary and overlay planes on the CRTC to the state
9327 	 * whenever a plane is enabled to maintain correct z-ordering
9328 	 * and to enable fast surface updates.
9329 	 */
9330 	drm_for_each_crtc(crtc, dev) {
9331 		bool modified = false;
9332 
9333 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9334 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9335 				continue;
9336 
9337 			if (new_plane_state->crtc == crtc ||
9338 			    old_plane_state->crtc == crtc) {
9339 				modified = true;
9340 				break;
9341 			}
9342 		}
9343 
9344 		if (!modified)
9345 			continue;
9346 
9347 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9348 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9349 				continue;
9350 
9351 			new_plane_state =
9352 				drm_atomic_get_plane_state(state, plane);
9353 
9354 			if (IS_ERR(new_plane_state)) {
9355 				ret = PTR_ERR(new_plane_state);
9356 				goto fail;
9357 			}
9358 		}
9359 	}
9360 
9361 	/* Remove exiting planes if they are modified */
9362 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9363 		ret = dm_update_plane_state(dc, state, plane,
9364 					    old_plane_state,
9365 					    new_plane_state,
9366 					    false,
9367 					    &lock_and_validation_needed);
9368 		if (ret)
9369 			goto fail;
9370 	}
9371 
9372 	/* Disable all crtcs which require disable */
9373 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9374 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9375 					   old_crtc_state,
9376 					   new_crtc_state,
9377 					   false,
9378 					   &lock_and_validation_needed);
9379 		if (ret)
9380 			goto fail;
9381 	}
9382 
9383 	/* Enable all crtcs which require enable */
9384 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9385 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9386 					   old_crtc_state,
9387 					   new_crtc_state,
9388 					   true,
9389 					   &lock_and_validation_needed);
9390 		if (ret)
9391 			goto fail;
9392 	}
9393 
9394 	/* Add new/modified planes */
9395 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9396 		ret = dm_update_plane_state(dc, state, plane,
9397 					    old_plane_state,
9398 					    new_plane_state,
9399 					    true,
9400 					    &lock_and_validation_needed);
9401 		if (ret)
9402 			goto fail;
9403 	}
9404 
9405 	/* Run this here since we want to validate the streams we created */
9406 	ret = drm_atomic_helper_check_planes(dev, state);
9407 	if (ret)
9408 		goto fail;
9409 
9410 	/* Check cursor planes scaling */
9411 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9412 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9413 		if (ret)
9414 			goto fail;
9415 	}
9416 
9417 	if (state->legacy_cursor_update) {
9418 		/*
9419 		 * This is a fast cursor update coming from the plane update
9420 		 * helper, check if it can be done asynchronously for better
9421 		 * performance.
9422 		 */
9423 		state->async_update =
9424 			!drm_atomic_helper_async_check(dev, state);
9425 
9426 		/*
9427 		 * Skip the remaining global validation if this is an async
9428 		 * update. Cursor updates can be done without affecting
9429 		 * state or bandwidth calcs and this avoids the performance
9430 		 * penalty of locking the private state object and
9431 		 * allocating a new dc_state.
9432 		 */
9433 		if (state->async_update)
9434 			return 0;
9435 	}
9436 
9437 	/* Check scaling and underscan changes*/
9438 	/* TODO Removed scaling changes validation due to inability to commit
9439 	 * new stream into context w\o causing full reset. Need to
9440 	 * decide how to handle.
9441 	 */
9442 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9443 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9444 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9445 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9446 
9447 		/* Skip any modesets/resets */
9448 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9449 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9450 			continue;
9451 
9452 		/* Skip any thing not scale or underscan changes */
9453 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9454 			continue;
9455 
9456 		lock_and_validation_needed = true;
9457 	}
9458 
9459 	/**
9460 	 * Streams and planes are reset when there are changes that affect
9461 	 * bandwidth. Anything that affects bandwidth needs to go through
9462 	 * DC global validation to ensure that the configuration can be applied
9463 	 * to hardware.
9464 	 *
9465 	 * We have to currently stall out here in atomic_check for outstanding
9466 	 * commits to finish in this case because our IRQ handlers reference
9467 	 * DRM state directly - we can end up disabling interrupts too early
9468 	 * if we don't.
9469 	 *
9470 	 * TODO: Remove this stall and drop DM state private objects.
9471 	 */
9472 	if (lock_and_validation_needed) {
9473 		ret = dm_atomic_get_state(state, &dm_state);
9474 		if (ret)
9475 			goto fail;
9476 
9477 		ret = do_aquire_global_lock(dev, state);
9478 		if (ret)
9479 			goto fail;
9480 
9481 #if defined(CONFIG_DRM_AMD_DC_DCN)
9482 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9483 			goto fail;
9484 
9485 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9486 		if (ret)
9487 			goto fail;
9488 #endif
9489 
9490 		/*
9491 		 * Perform validation of MST topology in the state:
9492 		 * We need to perform MST atomic check before calling
9493 		 * dc_validate_global_state(), or there is a chance
9494 		 * to get stuck in an infinite loop and hang eventually.
9495 		 */
9496 		ret = drm_dp_mst_atomic_check(state);
9497 		if (ret)
9498 			goto fail;
9499 		status = dc_validate_global_state(dc, dm_state->context, false);
9500 		if (status != DC_OK) {
9501 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9502 				       dc_status_to_str(status), status);
9503 			ret = -EINVAL;
9504 			goto fail;
9505 		}
9506 	} else {
9507 		/*
9508 		 * The commit is a fast update. Fast updates shouldn't change
9509 		 * the DC context, affect global validation, and can have their
9510 		 * commit work done in parallel with other commits not touching
9511 		 * the same resource. If we have a new DC context as part of
9512 		 * the DM atomic state from validation we need to free it and
9513 		 * retain the existing one instead.
9514 		 *
9515 		 * Furthermore, since the DM atomic state only contains the DC
9516 		 * context and can safely be annulled, we can free the state
9517 		 * and clear the associated private object now to free
9518 		 * some memory and avoid a possible use-after-free later.
9519 		 */
9520 
9521 		for (i = 0; i < state->num_private_objs; i++) {
9522 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9523 
9524 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9525 				int j = state->num_private_objs-1;
9526 
9527 				dm_atomic_destroy_state(obj,
9528 						state->private_objs[i].state);
9529 
9530 				/* If i is not at the end of the array then the
9531 				 * last element needs to be moved to where i was
9532 				 * before the array can safely be truncated.
9533 				 */
9534 				if (i != j)
9535 					state->private_objs[i] =
9536 						state->private_objs[j];
9537 
9538 				state->private_objs[j].ptr = NULL;
9539 				state->private_objs[j].state = NULL;
9540 				state->private_objs[j].old_state = NULL;
9541 				state->private_objs[j].new_state = NULL;
9542 
9543 				state->num_private_objs = j;
9544 				break;
9545 			}
9546 		}
9547 	}
9548 
9549 	/* Store the overall update type for use later in atomic check. */
9550 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9551 		struct dm_crtc_state *dm_new_crtc_state =
9552 			to_dm_crtc_state(new_crtc_state);
9553 
9554 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9555 							 UPDATE_TYPE_FULL :
9556 							 UPDATE_TYPE_FAST;
9557 	}
9558 
9559 	/* Must be success */
9560 	WARN_ON(ret);
9561 
9562 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9563 
9564 	return ret;
9565 
9566 fail:
9567 	if (ret == -EDEADLK)
9568 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9569 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9570 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9571 	else
9572 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9573 
9574 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9575 
9576 	return ret;
9577 }
9578 
9579 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9580 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9581 {
9582 	uint8_t dpcd_data;
9583 	bool capable = false;
9584 
9585 	if (amdgpu_dm_connector->dc_link &&
9586 		dm_helpers_dp_read_dpcd(
9587 				NULL,
9588 				amdgpu_dm_connector->dc_link,
9589 				DP_DOWN_STREAM_PORT_COUNT,
9590 				&dpcd_data,
9591 				sizeof(dpcd_data))) {
9592 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9593 	}
9594 
9595 	return capable;
9596 }
9597 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9598 					struct edid *edid)
9599 {
9600 	int i;
9601 	bool edid_check_required;
9602 	struct detailed_timing *timing;
9603 	struct detailed_non_pixel *data;
9604 	struct detailed_data_monitor_range *range;
9605 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9606 			to_amdgpu_dm_connector(connector);
9607 	struct dm_connector_state *dm_con_state = NULL;
9608 
9609 	struct drm_device *dev = connector->dev;
9610 	struct amdgpu_device *adev = drm_to_adev(dev);
9611 	bool freesync_capable = false;
9612 
9613 	if (!connector->state) {
9614 		DRM_ERROR("%s - Connector has no state", __func__);
9615 		goto update;
9616 	}
9617 
9618 	if (!edid) {
9619 		dm_con_state = to_dm_connector_state(connector->state);
9620 
9621 		amdgpu_dm_connector->min_vfreq = 0;
9622 		amdgpu_dm_connector->max_vfreq = 0;
9623 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9624 
9625 		goto update;
9626 	}
9627 
9628 	dm_con_state = to_dm_connector_state(connector->state);
9629 
9630 	edid_check_required = false;
9631 	if (!amdgpu_dm_connector->dc_sink) {
9632 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9633 		goto update;
9634 	}
9635 	if (!adev->dm.freesync_module)
9636 		goto update;
9637 	/*
9638 	 * if edid non zero restrict freesync only for dp and edp
9639 	 */
9640 	if (edid) {
9641 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9642 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9643 			edid_check_required = is_dp_capable_without_timing_msa(
9644 						adev->dm.dc,
9645 						amdgpu_dm_connector);
9646 		}
9647 	}
9648 	if (edid_check_required == true && (edid->version > 1 ||
9649 	   (edid->version == 1 && edid->revision > 1))) {
9650 		for (i = 0; i < 4; i++) {
9651 
9652 			timing	= &edid->detailed_timings[i];
9653 			data	= &timing->data.other_data;
9654 			range	= &data->data.range;
9655 			/*
9656 			 * Check if monitor has continuous frequency mode
9657 			 */
9658 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9659 				continue;
9660 			/*
9661 			 * Check for flag range limits only. If flag == 1 then
9662 			 * no additional timing information provided.
9663 			 * Default GTF, GTF Secondary curve and CVT are not
9664 			 * supported
9665 			 */
9666 			if (range->flags != 1)
9667 				continue;
9668 
9669 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9670 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9671 			amdgpu_dm_connector->pixel_clock_mhz =
9672 				range->pixel_clock_mhz * 10;
9673 			break;
9674 		}
9675 
9676 		if (amdgpu_dm_connector->max_vfreq -
9677 		    amdgpu_dm_connector->min_vfreq > 10) {
9678 
9679 			freesync_capable = true;
9680 		}
9681 	}
9682 
9683 update:
9684 	if (dm_con_state)
9685 		dm_con_state->freesync_capable = freesync_capable;
9686 
9687 	if (connector->vrr_capable_property)
9688 		drm_connector_set_vrr_capable_property(connector,
9689 						       freesync_capable);
9690 }
9691 
9692 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9693 {
9694 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9695 
9696 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9697 		return;
9698 	if (link->type == dc_connection_none)
9699 		return;
9700 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9701 					dpcd_data, sizeof(dpcd_data))) {
9702 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9703 
9704 		if (dpcd_data[0] == 0) {
9705 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9706 			link->psr_settings.psr_feature_enabled = false;
9707 		} else {
9708 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9709 			link->psr_settings.psr_feature_enabled = true;
9710 		}
9711 
9712 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9713 	}
9714 }
9715 
9716 /*
9717  * amdgpu_dm_link_setup_psr() - configure psr link
9718  * @stream: stream state
9719  *
9720  * Return: true if success
9721  */
9722 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9723 {
9724 	struct dc_link *link = NULL;
9725 	struct psr_config psr_config = {0};
9726 	struct psr_context psr_context = {0};
9727 	bool ret = false;
9728 
9729 	if (stream == NULL)
9730 		return false;
9731 
9732 	link = stream->link;
9733 
9734 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9735 
9736 	if (psr_config.psr_version > 0) {
9737 		psr_config.psr_exit_link_training_required = 0x1;
9738 		psr_config.psr_frame_capture_indication_req = 0;
9739 		psr_config.psr_rfb_setup_time = 0x37;
9740 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9741 		psr_config.allow_smu_optimizations = 0x0;
9742 
9743 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9744 
9745 	}
9746 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9747 
9748 	return ret;
9749 }
9750 
9751 /*
9752  * amdgpu_dm_psr_enable() - enable psr f/w
9753  * @stream: stream state
9754  *
9755  * Return: true if success
9756  */
9757 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9758 {
9759 	struct dc_link *link = stream->link;
9760 	unsigned int vsync_rate_hz = 0;
9761 	struct dc_static_screen_params params = {0};
9762 	/* Calculate number of static frames before generating interrupt to
9763 	 * enter PSR.
9764 	 */
9765 	// Init fail safe of 2 frames static
9766 	unsigned int num_frames_static = 2;
9767 
9768 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9769 
9770 	vsync_rate_hz = div64_u64(div64_u64((
9771 			stream->timing.pix_clk_100hz * 100),
9772 			stream->timing.v_total),
9773 			stream->timing.h_total);
9774 
9775 	/* Round up
9776 	 * Calculate number of frames such that at least 30 ms of time has
9777 	 * passed.
9778 	 */
9779 	if (vsync_rate_hz != 0) {
9780 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9781 		num_frames_static = (30000 / frame_time_microsec) + 1;
9782 	}
9783 
9784 	params.triggers.cursor_update = true;
9785 	params.triggers.overlay_update = true;
9786 	params.triggers.surface_update = true;
9787 	params.num_frames = num_frames_static;
9788 
9789 	dc_stream_set_static_screen_params(link->ctx->dc,
9790 					   &stream, 1,
9791 					   &params);
9792 
9793 	return dc_link_set_psr_allow_active(link, true, false, false);
9794 }
9795 
9796 /*
9797  * amdgpu_dm_psr_disable() - disable psr f/w
9798  * @stream:  stream state
9799  *
9800  * Return: true if success
9801  */
9802 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9803 {
9804 
9805 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9806 
9807 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9808 }
9809 
9810 /*
9811  * amdgpu_dm_psr_disable() - disable psr f/w
9812  * if psr is enabled on any stream
9813  *
9814  * Return: true if success
9815  */
9816 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9817 {
9818 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9819 	return dc_set_psr_allow_active(dm->dc, false);
9820 }
9821 
9822 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9823 {
9824 	struct amdgpu_device *adev = drm_to_adev(dev);
9825 	struct dc *dc = adev->dm.dc;
9826 	int i;
9827 
9828 	mutex_lock(&adev->dm.dc_lock);
9829 	if (dc->current_state) {
9830 		for (i = 0; i < dc->current_state->stream_count; ++i)
9831 			dc->current_state->streams[i]
9832 				->triggered_crtc_reset.enabled =
9833 				adev->dm.force_timing_sync;
9834 
9835 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9836 		dc_trigger_sync(dc, dc->current_state);
9837 	}
9838 	mutex_unlock(&adev->dm.dc_lock);
9839 }
9840 
9841 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9842 		       uint32_t value, const char *func_name)
9843 {
9844 #ifdef DM_CHECK_ADDR_0
9845 	if (address == 0) {
9846 		DC_ERR("invalid register write. address = 0");
9847 		return;
9848 	}
9849 #endif
9850 	cgs_write_register(ctx->cgs_device, address, value);
9851 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9852 }
9853 
9854 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9855 			  const char *func_name)
9856 {
9857 	uint32_t value;
9858 #ifdef DM_CHECK_ADDR_0
9859 	if (address == 0) {
9860 		DC_ERR("invalid register read; address = 0\n");
9861 		return 0;
9862 	}
9863 #endif
9864 
9865 	if (ctx->dmub_srv &&
9866 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9867 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9868 		ASSERT(false);
9869 		return 0;
9870 	}
9871 
9872 	value = cgs_read_register(ctx->cgs_device, address);
9873 
9874 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9875 
9876 	return value;
9877 }
9878