1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38 
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50 
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58 
59 #include "ivsrcid/ivsrcid_vislands30.h"
60 
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136 	switch (link->dpcd_caps.dongle_type) {
137 	case DISPLAY_DONGLE_NONE:
138 		return DRM_MODE_SUBCONNECTOR_Native;
139 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 		return DRM_MODE_SUBCONNECTOR_VGA;
141 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 		return DRM_MODE_SUBCONNECTOR_DVID;
144 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 		return DRM_MODE_SUBCONNECTOR_HDMIA;
147 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148 	default:
149 		return DRM_MODE_SUBCONNECTOR_Unknown;
150 	}
151 }
152 
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155 	struct dc_link *link = aconnector->dc_link;
156 	struct drm_connector *connector = &aconnector->base;
157 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158 
159 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160 		return;
161 
162 	if (aconnector->dc_sink)
163 		subconnector = get_subconnector_type(link);
164 
165 	drm_object_property_set_value(&connector->base,
166 			connector->dev->mode_config.dp_subconnector_property,
167 			subconnector);
168 }
169 
170 /*
171  * initializes drm_device display related structures, based on the information
172  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
173  * drm_encoder, drm_mode_config
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180 
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 				struct drm_plane *plane,
183 				unsigned long possible_crtcs,
184 				const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 			       struct drm_plane *plane,
187 			       uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
190 				    uint32_t link_index,
191 				    struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 				  struct amdgpu_encoder *aencoder,
194 				  uint32_t link_index);
195 
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211 
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214 
215 /*
216  * dm_vblank_get_counter
217  *
218  * @brief
219  * Get counter for number of vertical blanks
220  *
221  * @param
222  * struct amdgpu_device *adev - [in] desired amdgpu device
223  * int disp_idx - [in] which CRTC to get the counter from
224  *
225  * @return
226  * Counter for vertical blanks
227  */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
229 {
230 	if (crtc >= adev->mode_info.num_crtc)
231 		return 0;
232 	else {
233 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
234 
235 		if (acrtc->dm_irq_params.stream == NULL) {
236 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237 				  crtc);
238 			return 0;
239 		}
240 
241 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242 	}
243 }
244 
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 				  u32 *vbl, u32 *position)
247 {
248 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
249 
250 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251 		return -EINVAL;
252 	else {
253 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254 
255 		if (acrtc->dm_irq_params.stream ==  NULL) {
256 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
257 				  crtc);
258 			return 0;
259 		}
260 
261 		/*
262 		 * TODO rework base driver to use values directly.
263 		 * for now parse it back into reg-format
264 		 */
265 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
266 					 &v_blank_start,
267 					 &v_blank_end,
268 					 &h_position,
269 					 &v_position);
270 
271 		*position = v_position | (h_position << 16);
272 		*vbl = v_blank_start | (v_blank_end << 16);
273 	}
274 
275 	return 0;
276 }
277 
278 static bool dm_is_idle(void *handle)
279 {
280 	/* XXX todo */
281 	return true;
282 }
283 
284 static int dm_wait_for_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return 0;
288 }
289 
290 static bool dm_check_soft_reset(void *handle)
291 {
292 	return false;
293 }
294 
295 static int dm_soft_reset(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
303 		     int otg_inst)
304 {
305 	struct drm_device *dev = adev_to_drm(adev);
306 	struct drm_crtc *crtc;
307 	struct amdgpu_crtc *amdgpu_crtc;
308 
309 	if (otg_inst == -1) {
310 		WARN_ON(1);
311 		return adev->mode_info.crtcs[0];
312 	}
313 
314 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 		amdgpu_crtc = to_amdgpu_crtc(crtc);
316 
317 		if (amdgpu_crtc->otg_inst == otg_inst)
318 			return amdgpu_crtc;
319 	}
320 
321 	return NULL;
322 }
323 
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
325 {
326 	return acrtc->dm_irq_params.freesync_config.state ==
327 		       VRR_STATE_ACTIVE_VARIABLE ||
328 	       acrtc->dm_irq_params.freesync_config.state ==
329 		       VRR_STATE_ACTIVE_FIXED;
330 }
331 
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
333 {
334 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 /**
339  * dm_pflip_high_irq() - Handle pageflip interrupt
340  * @interrupt_params: ignored
341  *
342  * Handles the pageflip interrupt by notifying all interested parties
343  * that the pageflip has been completed.
344  */
345 static void dm_pflip_high_irq(void *interrupt_params)
346 {
347 	struct amdgpu_crtc *amdgpu_crtc;
348 	struct common_irq_params *irq_params = interrupt_params;
349 	struct amdgpu_device *adev = irq_params->adev;
350 	unsigned long flags;
351 	struct drm_pending_vblank_event *e;
352 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
353 	bool vrr_active;
354 
355 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
356 
357 	/* IRQ could occur when in initial stage */
358 	/* TODO work and BO cleanup */
359 	if (amdgpu_crtc == NULL) {
360 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
361 		return;
362 	}
363 
364 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
365 
366 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
367 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
368 						 amdgpu_crtc->pflip_status,
369 						 AMDGPU_FLIP_SUBMITTED,
370 						 amdgpu_crtc->crtc_id,
371 						 amdgpu_crtc);
372 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
373 		return;
374 	}
375 
376 	/* page flip completed. */
377 	e = amdgpu_crtc->event;
378 	amdgpu_crtc->event = NULL;
379 
380 	if (!e)
381 		WARN_ON(1);
382 
383 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
384 
385 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
386 	if (!vrr_active ||
387 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 				      &v_blank_end, &hpos, &vpos) ||
389 	    (vpos < v_blank_start)) {
390 		/* Update to correct count and vblank timestamp if racing with
391 		 * vblank irq. This also updates to the correct vblank timestamp
392 		 * even in VRR mode, as scanout is past the front-porch atm.
393 		 */
394 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
395 
396 		/* Wake up userspace by sending the pageflip event with proper
397 		 * count and timestamp of vblank of flip completion.
398 		 */
399 		if (e) {
400 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
401 
402 			/* Event sent, so done with vblank for this flip */
403 			drm_crtc_vblank_put(&amdgpu_crtc->base);
404 		}
405 	} else if (e) {
406 		/* VRR active and inside front-porch: vblank count and
407 		 * timestamp for pageflip event will only be up to date after
408 		 * drm_crtc_handle_vblank() has been executed from late vblank
409 		 * irq handler after start of back-porch (vline 0). We queue the
410 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 		 * updated timestamp and count, once it runs after us.
412 		 *
413 		 * We need to open-code this instead of using the helper
414 		 * drm_crtc_arm_vblank_event(), as that helper would
415 		 * call drm_crtc_accurate_vblank_count(), which we must
416 		 * not call in VRR mode while we are in front-porch!
417 		 */
418 
419 		/* sequence will be replaced by real count during send-out. */
420 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 		e->pipe = amdgpu_crtc->crtc_id;
422 
423 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
424 		e = NULL;
425 	}
426 
427 	/* Keep track of vblank of this flip for flip throttling. We use the
428 	 * cooked hw counter, as that one incremented at start of this vblank
429 	 * of pageflip completion, so last_flip_vblank is the forbidden count
430 	 * for queueing new pageflips if vsync + VRR is enabled.
431 	 */
432 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
434 
435 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
437 
438 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 			 vrr_active, (int) !e);
441 }
442 
443 static void dm_vupdate_high_irq(void *interrupt_params)
444 {
445 	struct common_irq_params *irq_params = interrupt_params;
446 	struct amdgpu_device *adev = irq_params->adev;
447 	struct amdgpu_crtc *acrtc;
448 	unsigned long flags;
449 	int vrr_active;
450 
451 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
452 
453 	if (acrtc) {
454 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
455 
456 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
457 			      acrtc->crtc_id,
458 			      vrr_active);
459 
460 		/* Core vblank handling is done here after end of front-porch in
461 		 * vrr mode, as vblank timestamping will give valid results
462 		 * while now done after front-porch. This will also deliver
463 		 * page-flip completion events that have been queued to us
464 		 * if a pageflip happened inside front-porch.
465 		 */
466 		if (vrr_active) {
467 			drm_crtc_handle_vblank(&acrtc->base);
468 
469 			/* BTR processing for pre-DCE12 ASICs */
470 			if (acrtc->dm_irq_params.stream &&
471 			    adev->family < AMDGPU_FAMILY_AI) {
472 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 				mod_freesync_handle_v_update(
474 				    adev->dm.freesync_module,
475 				    acrtc->dm_irq_params.stream,
476 				    &acrtc->dm_irq_params.vrr_params);
477 
478 				dc_stream_adjust_vmin_vmax(
479 				    adev->dm.dc,
480 				    acrtc->dm_irq_params.stream,
481 				    &acrtc->dm_irq_params.vrr_params.adjust);
482 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
483 			}
484 		}
485 	}
486 }
487 
488 /**
489  * dm_crtc_high_irq() - Handles CRTC interrupt
490  * @interrupt_params: used for determining the CRTC instance
491  *
492  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
493  * event handler.
494  */
495 static void dm_crtc_high_irq(void *interrupt_params)
496 {
497 	struct common_irq_params *irq_params = interrupt_params;
498 	struct amdgpu_device *adev = irq_params->adev;
499 	struct amdgpu_crtc *acrtc;
500 	unsigned long flags;
501 	int vrr_active;
502 
503 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
504 	if (!acrtc)
505 		return;
506 
507 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
508 
509 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 		      vrr_active, acrtc->dm_irq_params.active_planes);
511 
512 	/**
513 	 * Core vblank handling at start of front-porch is only possible
514 	 * in non-vrr mode, as only there vblank timestamping will give
515 	 * valid results while done in front-porch. Otherwise defer it
516 	 * to dm_vupdate_high_irq after end of front-porch.
517 	 */
518 	if (!vrr_active)
519 		drm_crtc_handle_vblank(&acrtc->base);
520 
521 	/**
522 	 * Following stuff must happen at start of vblank, for crc
523 	 * computation and below-the-range btr support in vrr mode.
524 	 */
525 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
526 
527 	/* BTR updates need to happen before VUPDATE on Vega and above. */
528 	if (adev->family < AMDGPU_FAMILY_AI)
529 		return;
530 
531 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
532 
533 	if (acrtc->dm_irq_params.stream &&
534 	    acrtc->dm_irq_params.vrr_params.supported &&
535 	    acrtc->dm_irq_params.freesync_config.state ==
536 		    VRR_STATE_ACTIVE_VARIABLE) {
537 		mod_freesync_handle_v_update(adev->dm.freesync_module,
538 					     acrtc->dm_irq_params.stream,
539 					     &acrtc->dm_irq_params.vrr_params);
540 
541 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 					   &acrtc->dm_irq_params.vrr_params.adjust);
543 	}
544 
545 	/*
546 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
547 	 * In that case, pageflip completion interrupts won't fire and pageflip
548 	 * completion events won't get delivered. Prevent this by sending
549 	 * pending pageflip events from here if a flip is still pending.
550 	 *
551 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 	 * avoid race conditions between flip programming and completion,
553 	 * which could cause too early flip completion events.
554 	 */
555 	if (adev->family >= AMDGPU_FAMILY_RV &&
556 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 	    acrtc->dm_irq_params.active_planes == 0) {
558 		if (acrtc->event) {
559 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
560 			acrtc->event = NULL;
561 			drm_crtc_vblank_put(&acrtc->base);
562 		}
563 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
564 	}
565 
566 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
567 }
568 
569 static int dm_set_clockgating_state(void *handle,
570 		  enum amd_clockgating_state state)
571 {
572 	return 0;
573 }
574 
575 static int dm_set_powergating_state(void *handle,
576 		  enum amd_powergating_state state)
577 {
578 	return 0;
579 }
580 
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
583 
584 /* Allocate memory for FBC compressed data  */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
586 {
587 	struct drm_device *dev = connector->dev;
588 	struct amdgpu_device *adev = drm_to_adev(dev);
589 	struct dm_compressor_info *compressor = &adev->dm.compressor;
590 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 	struct drm_display_mode *mode;
592 	unsigned long max_size = 0;
593 
594 	if (adev->dm.dc->fbc_compressor == NULL)
595 		return;
596 
597 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
598 		return;
599 
600 	if (compressor->bo_ptr)
601 		return;
602 
603 
604 	list_for_each_entry(mode, &connector->modes, head) {
605 		if (max_size < mode->htotal * mode->vtotal)
606 			max_size = mode->htotal * mode->vtotal;
607 	}
608 
609 	if (max_size) {
610 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612 			    &compressor->gpu_addr, &compressor->cpu_addr);
613 
614 		if (r)
615 			DRM_ERROR("DM: Failed to initialize FBC\n");
616 		else {
617 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
619 		}
620 
621 	}
622 
623 }
624 
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 					  int pipe, bool *enabled,
627 					  unsigned char *buf, int max_bytes)
628 {
629 	struct drm_device *dev = dev_get_drvdata(kdev);
630 	struct amdgpu_device *adev = drm_to_adev(dev);
631 	struct drm_connector *connector;
632 	struct drm_connector_list_iter conn_iter;
633 	struct amdgpu_dm_connector *aconnector;
634 	int ret = 0;
635 
636 	*enabled = false;
637 
638 	mutex_lock(&adev->dm.audio_lock);
639 
640 	drm_connector_list_iter_begin(dev, &conn_iter);
641 	drm_for_each_connector_iter(connector, &conn_iter) {
642 		aconnector = to_amdgpu_dm_connector(connector);
643 		if (aconnector->audio_inst != port)
644 			continue;
645 
646 		*enabled = true;
647 		ret = drm_eld_size(connector->eld);
648 		memcpy(buf, connector->eld, min(max_bytes, ret));
649 
650 		break;
651 	}
652 	drm_connector_list_iter_end(&conn_iter);
653 
654 	mutex_unlock(&adev->dm.audio_lock);
655 
656 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
657 
658 	return ret;
659 }
660 
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 	.get_eld = amdgpu_dm_audio_component_get_eld,
663 };
664 
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 				       struct device *hda_kdev, void *data)
667 {
668 	struct drm_device *dev = dev_get_drvdata(kdev);
669 	struct amdgpu_device *adev = drm_to_adev(dev);
670 	struct drm_audio_component *acomp = data;
671 
672 	acomp->ops = &amdgpu_dm_audio_component_ops;
673 	acomp->dev = kdev;
674 	adev->dm.audio_component = acomp;
675 
676 	return 0;
677 }
678 
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 					  struct device *hda_kdev, void *data)
681 {
682 	struct drm_device *dev = dev_get_drvdata(kdev);
683 	struct amdgpu_device *adev = drm_to_adev(dev);
684 	struct drm_audio_component *acomp = data;
685 
686 	acomp->ops = NULL;
687 	acomp->dev = NULL;
688 	adev->dm.audio_component = NULL;
689 }
690 
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 	.bind	= amdgpu_dm_audio_component_bind,
693 	.unbind	= amdgpu_dm_audio_component_unbind,
694 };
695 
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
697 {
698 	int i, ret;
699 
700 	if (!amdgpu_audio)
701 		return 0;
702 
703 	adev->mode_info.audio.enabled = true;
704 
705 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
706 
707 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 		adev->mode_info.audio.pin[i].channels = -1;
709 		adev->mode_info.audio.pin[i].rate = -1;
710 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 		adev->mode_info.audio.pin[i].status_bits = 0;
712 		adev->mode_info.audio.pin[i].category_code = 0;
713 		adev->mode_info.audio.pin[i].connected = false;
714 		adev->mode_info.audio.pin[i].id =
715 			adev->dm.dc->res_pool->audios[i]->inst;
716 		adev->mode_info.audio.pin[i].offset = 0;
717 	}
718 
719 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720 	if (ret < 0)
721 		return ret;
722 
723 	adev->dm.audio_registered = true;
724 
725 	return 0;
726 }
727 
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
729 {
730 	if (!amdgpu_audio)
731 		return;
732 
733 	if (!adev->mode_info.audio.enabled)
734 		return;
735 
736 	if (adev->dm.audio_registered) {
737 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 		adev->dm.audio_registered = false;
739 	}
740 
741 	/* TODO: Disable audio? */
742 
743 	adev->mode_info.audio.enabled = false;
744 }
745 
746 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
747 {
748 	struct drm_audio_component *acomp = adev->dm.audio_component;
749 
750 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
752 
753 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
754 						 pin, -1);
755 	}
756 }
757 
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
759 {
760 	const struct dmcub_firmware_header_v1_0 *hdr;
761 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 	struct abm *abm = adev->dm.dc->res_pool->abm;
766 	struct dmub_srv_hw_params hw_params;
767 	enum dmub_status status;
768 	const unsigned char *fw_inst_const, *fw_bss_data;
769 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
770 	bool has_hw_support;
771 
772 	if (!dmub_srv)
773 		/* DMUB isn't supported on the ASIC. */
774 		return 0;
775 
776 	if (!fb_info) {
777 		DRM_ERROR("No framebuffer info for DMUB service.\n");
778 		return -EINVAL;
779 	}
780 
781 	if (!dmub_fw) {
782 		/* Firmware required for DMUB support. */
783 		DRM_ERROR("No firmware provided for DMUB.\n");
784 		return -EINVAL;
785 	}
786 
787 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 	if (status != DMUB_STATUS_OK) {
789 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
790 		return -EINVAL;
791 	}
792 
793 	if (!has_hw_support) {
794 		DRM_INFO("DMUB unsupported on ASIC\n");
795 		return 0;
796 	}
797 
798 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
799 
800 	fw_inst_const = dmub_fw->data +
801 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
802 			PSP_HEADER_BYTES;
803 
804 	fw_bss_data = dmub_fw->data +
805 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 		      le32_to_cpu(hdr->inst_const_bytes);
807 
808 	/* Copy firmware and bios info into FB memory. */
809 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
811 
812 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
813 
814 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 	 * amdgpu_ucode_init_single_fw will load dmub firmware
816 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 	 * will be done by dm_dmub_hw_init
818 	 */
819 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
821 				fw_inst_const_size);
822 	}
823 
824 	if (fw_bss_data_size)
825 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 		       fw_bss_data, fw_bss_data_size);
827 
828 	/* Copy firmware bios info into FB memory. */
829 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
830 	       adev->bios_size);
831 
832 	/* Reset regions that need to be reset. */
833 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
834 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
835 
836 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
838 
839 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
841 
842 	/* Initialize hardware. */
843 	memset(&hw_params, 0, sizeof(hw_params));
844 	hw_params.fb_base = adev->gmc.fb_start;
845 	hw_params.fb_offset = adev->gmc.aper_base;
846 
847 	/* backdoor load firmware and trigger dmub running */
848 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 		hw_params.load_inst_const = true;
850 
851 	if (dmcu)
852 		hw_params.psp_version = dmcu->psp_version;
853 
854 	for (i = 0; i < fb_info->num_fb; ++i)
855 		hw_params.fb[i] = &fb_info->fb[i];
856 
857 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 	if (status != DMUB_STATUS_OK) {
859 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
860 		return -EINVAL;
861 	}
862 
863 	/* Wait for firmware load to finish. */
864 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 	if (status != DMUB_STATUS_OK)
866 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
867 
868 	/* Init DMCU and ABM if available. */
869 	if (dmcu && abm) {
870 		dmcu->funcs->dmcu_init(dmcu);
871 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
872 	}
873 
874 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 	if (!adev->dm.dc->ctx->dmub_srv) {
876 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877 		return -ENOMEM;
878 	}
879 
880 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 		 adev->dm.dmcub_fw_version);
882 
883 	return 0;
884 }
885 
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
888 {
889 	uint64_t pt_base;
890 	uint32_t logical_addr_low;
891 	uint32_t logical_addr_high;
892 	uint32_t agp_base, agp_bot, agp_top;
893 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
894 
895 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
896 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
897 
898 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
899 		/*
900 		 * Raven2 has a HW issue that it is unable to use the vram which
901 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
902 		 * workaround that increase system aperture high address (add 1)
903 		 * to get rid of the VM fault and hardware hang.
904 		 */
905 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
906 	else
907 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
908 
909 	agp_base = 0;
910 	agp_bot = adev->gmc.agp_start >> 24;
911 	agp_top = adev->gmc.agp_end >> 24;
912 
913 
914 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 	page_table_base.low_part = lower_32_bits(pt_base);
920 
921 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
923 
924 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
925 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
927 
928 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
931 
932 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
935 
936 	pa_config->is_hvm_enabled = 0;
937 
938 }
939 #endif
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 static void event_mall_stutter(struct work_struct *work)
942 {
943 
944 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
945 	struct amdgpu_display_manager *dm = vblank_work->dm;
946 
947 	mutex_lock(&dm->dc_lock);
948 
949 	if (vblank_work->enable)
950 		dm->active_vblank_irq_count++;
951 	else
952 		dm->active_vblank_irq_count--;
953 
954 
955 	dc_allow_idle_optimizations(
956 		dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
957 
958 	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
959 
960 
961 	mutex_unlock(&dm->dc_lock);
962 }
963 
964 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
965 {
966 
967 	int max_caps = dc->caps.max_links;
968 	struct vblank_workqueue *vblank_work;
969 	int i = 0;
970 
971 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
972 	if (ZERO_OR_NULL_PTR(vblank_work)) {
973 		kfree(vblank_work);
974 		return NULL;
975 	}
976 
977 	for (i = 0; i < max_caps; i++)
978 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
979 
980 	return vblank_work;
981 }
982 #endif
983 static int amdgpu_dm_init(struct amdgpu_device *adev)
984 {
985 	struct dc_init_data init_data;
986 #ifdef CONFIG_DRM_AMD_DC_HDCP
987 	struct dc_callback_init init_params;
988 #endif
989 	int r;
990 
991 	adev->dm.ddev = adev_to_drm(adev);
992 	adev->dm.adev = adev;
993 
994 	/* Zero all the fields */
995 	memset(&init_data, 0, sizeof(init_data));
996 #ifdef CONFIG_DRM_AMD_DC_HDCP
997 	memset(&init_params, 0, sizeof(init_params));
998 #endif
999 
1000 	mutex_init(&adev->dm.dc_lock);
1001 	mutex_init(&adev->dm.audio_lock);
1002 #if defined(CONFIG_DRM_AMD_DC_DCN)
1003 	spin_lock_init(&adev->dm.vblank_lock);
1004 #endif
1005 
1006 	if(amdgpu_dm_irq_init(adev)) {
1007 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1008 		goto error;
1009 	}
1010 
1011 	init_data.asic_id.chip_family = adev->family;
1012 
1013 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1014 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1015 
1016 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1017 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1018 	init_data.asic_id.atombios_base_address =
1019 		adev->mode_info.atom_context->bios;
1020 
1021 	init_data.driver = adev;
1022 
1023 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1024 
1025 	if (!adev->dm.cgs_device) {
1026 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1027 		goto error;
1028 	}
1029 
1030 	init_data.cgs_device = adev->dm.cgs_device;
1031 
1032 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1033 
1034 	switch (adev->asic_type) {
1035 	case CHIP_CARRIZO:
1036 	case CHIP_STONEY:
1037 	case CHIP_RAVEN:
1038 	case CHIP_RENOIR:
1039 		init_data.flags.gpu_vm_support = true;
1040 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1041 			init_data.flags.disable_dmcu = true;
1042 		break;
1043 #if defined(CONFIG_DRM_AMD_DC_DCN)
1044 	case CHIP_VANGOGH:
1045 		init_data.flags.gpu_vm_support = true;
1046 		break;
1047 #endif
1048 	default:
1049 		break;
1050 	}
1051 
1052 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1053 		init_data.flags.fbc_support = true;
1054 
1055 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1056 		init_data.flags.multi_mon_pp_mclk_switch = true;
1057 
1058 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1059 		init_data.flags.disable_fractional_pwm = true;
1060 
1061 	init_data.flags.power_down_display_on_boot = true;
1062 
1063 	/* Display Core create. */
1064 	adev->dm.dc = dc_create(&init_data);
1065 
1066 	if (adev->dm.dc) {
1067 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1068 	} else {
1069 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1070 		goto error;
1071 	}
1072 
1073 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1074 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1075 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1076 	}
1077 
1078 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1079 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1080 
1081 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1082 		adev->dm.dc->debug.disable_stutter = true;
1083 
1084 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1085 		adev->dm.dc->debug.disable_dsc = true;
1086 
1087 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1088 		adev->dm.dc->debug.disable_clock_gate = true;
1089 
1090 	r = dm_dmub_hw_init(adev);
1091 	if (r) {
1092 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1093 		goto error;
1094 	}
1095 
1096 	dc_hardware_init(adev->dm.dc);
1097 
1098 #if defined(CONFIG_DRM_AMD_DC_DCN)
1099 	if (adev->apu_flags) {
1100 		struct dc_phy_addr_space_config pa_config;
1101 
1102 		mmhub_read_system_context(adev, &pa_config);
1103 
1104 		// Call the DC init_memory func
1105 		dc_setup_system_context(adev->dm.dc, &pa_config);
1106 	}
1107 #endif
1108 
1109 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1110 	if (!adev->dm.freesync_module) {
1111 		DRM_ERROR(
1112 		"amdgpu: failed to initialize freesync_module.\n");
1113 	} else
1114 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1115 				adev->dm.freesync_module);
1116 
1117 	amdgpu_dm_init_color_mod();
1118 
1119 #if defined(CONFIG_DRM_AMD_DC_DCN)
1120 	if (adev->dm.dc->caps.max_links > 0) {
1121 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1122 
1123 		if (!adev->dm.vblank_workqueue)
1124 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1125 		else
1126 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1127 	}
1128 #endif
1129 
1130 #ifdef CONFIG_DRM_AMD_DC_HDCP
1131 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1132 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1133 
1134 		if (!adev->dm.hdcp_workqueue)
1135 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1136 		else
1137 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1138 
1139 		dc_init_callbacks(adev->dm.dc, &init_params);
1140 	}
1141 #endif
1142 	if (amdgpu_dm_initialize_drm_device(adev)) {
1143 		DRM_ERROR(
1144 		"amdgpu: failed to initialize sw for display support.\n");
1145 		goto error;
1146 	}
1147 
1148 	/* create fake encoders for MST */
1149 	dm_dp_create_fake_mst_encoders(adev);
1150 
1151 	/* TODO: Add_display_info? */
1152 
1153 	/* TODO use dynamic cursor width */
1154 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1155 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1156 
1157 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1158 		DRM_ERROR(
1159 		"amdgpu: failed to initialize sw for display support.\n");
1160 		goto error;
1161 	}
1162 
1163 
1164 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1165 
1166 	return 0;
1167 error:
1168 	amdgpu_dm_fini(adev);
1169 
1170 	return -EINVAL;
1171 }
1172 
1173 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1174 {
1175 	int i;
1176 
1177 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1178 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1179 	}
1180 
1181 	amdgpu_dm_audio_fini(adev);
1182 
1183 	amdgpu_dm_destroy_drm_device(&adev->dm);
1184 
1185 #ifdef CONFIG_DRM_AMD_DC_HDCP
1186 	if (adev->dm.hdcp_workqueue) {
1187 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1188 		adev->dm.hdcp_workqueue = NULL;
1189 	}
1190 
1191 	if (adev->dm.dc)
1192 		dc_deinit_callbacks(adev->dm.dc);
1193 #endif
1194 	if (adev->dm.dc->ctx->dmub_srv) {
1195 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1196 		adev->dm.dc->ctx->dmub_srv = NULL;
1197 	}
1198 
1199 	if (adev->dm.dmub_bo)
1200 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1201 				      &adev->dm.dmub_bo_gpu_addr,
1202 				      &adev->dm.dmub_bo_cpu_addr);
1203 
1204 	/* DC Destroy TODO: Replace destroy DAL */
1205 	if (adev->dm.dc)
1206 		dc_destroy(&adev->dm.dc);
1207 	/*
1208 	 * TODO: pageflip, vlank interrupt
1209 	 *
1210 	 * amdgpu_dm_irq_fini(adev);
1211 	 */
1212 
1213 	if (adev->dm.cgs_device) {
1214 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1215 		adev->dm.cgs_device = NULL;
1216 	}
1217 	if (adev->dm.freesync_module) {
1218 		mod_freesync_destroy(adev->dm.freesync_module);
1219 		adev->dm.freesync_module = NULL;
1220 	}
1221 
1222 	mutex_destroy(&adev->dm.audio_lock);
1223 	mutex_destroy(&adev->dm.dc_lock);
1224 
1225 	return;
1226 }
1227 
1228 static int load_dmcu_fw(struct amdgpu_device *adev)
1229 {
1230 	const char *fw_name_dmcu = NULL;
1231 	int r;
1232 	const struct dmcu_firmware_header_v1_0 *hdr;
1233 
1234 	switch(adev->asic_type) {
1235 #if defined(CONFIG_DRM_AMD_DC_SI)
1236 	case CHIP_TAHITI:
1237 	case CHIP_PITCAIRN:
1238 	case CHIP_VERDE:
1239 	case CHIP_OLAND:
1240 #endif
1241 	case CHIP_BONAIRE:
1242 	case CHIP_HAWAII:
1243 	case CHIP_KAVERI:
1244 	case CHIP_KABINI:
1245 	case CHIP_MULLINS:
1246 	case CHIP_TONGA:
1247 	case CHIP_FIJI:
1248 	case CHIP_CARRIZO:
1249 	case CHIP_STONEY:
1250 	case CHIP_POLARIS11:
1251 	case CHIP_POLARIS10:
1252 	case CHIP_POLARIS12:
1253 	case CHIP_VEGAM:
1254 	case CHIP_VEGA10:
1255 	case CHIP_VEGA12:
1256 	case CHIP_VEGA20:
1257 	case CHIP_NAVI10:
1258 	case CHIP_NAVI14:
1259 	case CHIP_RENOIR:
1260 	case CHIP_SIENNA_CICHLID:
1261 	case CHIP_NAVY_FLOUNDER:
1262 	case CHIP_DIMGREY_CAVEFISH:
1263 	case CHIP_VANGOGH:
1264 		return 0;
1265 	case CHIP_NAVI12:
1266 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1267 		break;
1268 	case CHIP_RAVEN:
1269 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1270 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1271 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1272 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1273 		else
1274 			return 0;
1275 		break;
1276 	default:
1277 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1278 		return -EINVAL;
1279 	}
1280 
1281 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1282 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1283 		return 0;
1284 	}
1285 
1286 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1287 	if (r == -ENOENT) {
1288 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1289 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1290 		adev->dm.fw_dmcu = NULL;
1291 		return 0;
1292 	}
1293 	if (r) {
1294 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1295 			fw_name_dmcu);
1296 		return r;
1297 	}
1298 
1299 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1300 	if (r) {
1301 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1302 			fw_name_dmcu);
1303 		release_firmware(adev->dm.fw_dmcu);
1304 		adev->dm.fw_dmcu = NULL;
1305 		return r;
1306 	}
1307 
1308 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1309 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1310 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1311 	adev->firmware.fw_size +=
1312 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1313 
1314 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1315 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1316 	adev->firmware.fw_size +=
1317 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1318 
1319 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1320 
1321 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1322 
1323 	return 0;
1324 }
1325 
1326 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1327 {
1328 	struct amdgpu_device *adev = ctx;
1329 
1330 	return dm_read_reg(adev->dm.dc->ctx, address);
1331 }
1332 
1333 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1334 				     uint32_t value)
1335 {
1336 	struct amdgpu_device *adev = ctx;
1337 
1338 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1339 }
1340 
1341 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1342 {
1343 	struct dmub_srv_create_params create_params;
1344 	struct dmub_srv_region_params region_params;
1345 	struct dmub_srv_region_info region_info;
1346 	struct dmub_srv_fb_params fb_params;
1347 	struct dmub_srv_fb_info *fb_info;
1348 	struct dmub_srv *dmub_srv;
1349 	const struct dmcub_firmware_header_v1_0 *hdr;
1350 	const char *fw_name_dmub;
1351 	enum dmub_asic dmub_asic;
1352 	enum dmub_status status;
1353 	int r;
1354 
1355 	switch (adev->asic_type) {
1356 	case CHIP_RENOIR:
1357 		dmub_asic = DMUB_ASIC_DCN21;
1358 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1359 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1360 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1361 		break;
1362 	case CHIP_SIENNA_CICHLID:
1363 		dmub_asic = DMUB_ASIC_DCN30;
1364 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1365 		break;
1366 	case CHIP_NAVY_FLOUNDER:
1367 		dmub_asic = DMUB_ASIC_DCN30;
1368 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1369 		break;
1370 	case CHIP_VANGOGH:
1371 		dmub_asic = DMUB_ASIC_DCN301;
1372 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1373 		break;
1374 	case CHIP_DIMGREY_CAVEFISH:
1375 		dmub_asic = DMUB_ASIC_DCN302;
1376 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1377 		break;
1378 
1379 	default:
1380 		/* ASIC doesn't support DMUB. */
1381 		return 0;
1382 	}
1383 
1384 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1385 	if (r) {
1386 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1387 		return 0;
1388 	}
1389 
1390 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1391 	if (r) {
1392 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1393 		return 0;
1394 	}
1395 
1396 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1397 
1398 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1399 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1400 			AMDGPU_UCODE_ID_DMCUB;
1401 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1402 			adev->dm.dmub_fw;
1403 		adev->firmware.fw_size +=
1404 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1405 
1406 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1407 			 adev->dm.dmcub_fw_version);
1408 	}
1409 
1410 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1411 
1412 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1413 	dmub_srv = adev->dm.dmub_srv;
1414 
1415 	if (!dmub_srv) {
1416 		DRM_ERROR("Failed to allocate DMUB service!\n");
1417 		return -ENOMEM;
1418 	}
1419 
1420 	memset(&create_params, 0, sizeof(create_params));
1421 	create_params.user_ctx = adev;
1422 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1423 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1424 	create_params.asic = dmub_asic;
1425 
1426 	/* Create the DMUB service. */
1427 	status = dmub_srv_create(dmub_srv, &create_params);
1428 	if (status != DMUB_STATUS_OK) {
1429 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1430 		return -EINVAL;
1431 	}
1432 
1433 	/* Calculate the size of all the regions for the DMUB service. */
1434 	memset(&region_params, 0, sizeof(region_params));
1435 
1436 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1437 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1438 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1439 	region_params.vbios_size = adev->bios_size;
1440 	region_params.fw_bss_data = region_params.bss_data_size ?
1441 		adev->dm.dmub_fw->data +
1442 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1443 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1444 	region_params.fw_inst_const =
1445 		adev->dm.dmub_fw->data +
1446 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1447 		PSP_HEADER_BYTES;
1448 
1449 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1450 					   &region_info);
1451 
1452 	if (status != DMUB_STATUS_OK) {
1453 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1454 		return -EINVAL;
1455 	}
1456 
1457 	/*
1458 	 * Allocate a framebuffer based on the total size of all the regions.
1459 	 * TODO: Move this into GART.
1460 	 */
1461 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1462 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1463 				    &adev->dm.dmub_bo_gpu_addr,
1464 				    &adev->dm.dmub_bo_cpu_addr);
1465 	if (r)
1466 		return r;
1467 
1468 	/* Rebase the regions on the framebuffer address. */
1469 	memset(&fb_params, 0, sizeof(fb_params));
1470 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1471 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1472 	fb_params.region_info = &region_info;
1473 
1474 	adev->dm.dmub_fb_info =
1475 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1476 	fb_info = adev->dm.dmub_fb_info;
1477 
1478 	if (!fb_info) {
1479 		DRM_ERROR(
1480 			"Failed to allocate framebuffer info for DMUB service!\n");
1481 		return -ENOMEM;
1482 	}
1483 
1484 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1485 	if (status != DMUB_STATUS_OK) {
1486 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1487 		return -EINVAL;
1488 	}
1489 
1490 	return 0;
1491 }
1492 
1493 static int dm_sw_init(void *handle)
1494 {
1495 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1496 	int r;
1497 
1498 	r = dm_dmub_sw_init(adev);
1499 	if (r)
1500 		return r;
1501 
1502 	return load_dmcu_fw(adev);
1503 }
1504 
1505 static int dm_sw_fini(void *handle)
1506 {
1507 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1508 
1509 	kfree(adev->dm.dmub_fb_info);
1510 	adev->dm.dmub_fb_info = NULL;
1511 
1512 	if (adev->dm.dmub_srv) {
1513 		dmub_srv_destroy(adev->dm.dmub_srv);
1514 		adev->dm.dmub_srv = NULL;
1515 	}
1516 
1517 	release_firmware(adev->dm.dmub_fw);
1518 	adev->dm.dmub_fw = NULL;
1519 
1520 	release_firmware(adev->dm.fw_dmcu);
1521 	adev->dm.fw_dmcu = NULL;
1522 
1523 	return 0;
1524 }
1525 
1526 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1527 {
1528 	struct amdgpu_dm_connector *aconnector;
1529 	struct drm_connector *connector;
1530 	struct drm_connector_list_iter iter;
1531 	int ret = 0;
1532 
1533 	drm_connector_list_iter_begin(dev, &iter);
1534 	drm_for_each_connector_iter(connector, &iter) {
1535 		aconnector = to_amdgpu_dm_connector(connector);
1536 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1537 		    aconnector->mst_mgr.aux) {
1538 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1539 					 aconnector,
1540 					 aconnector->base.base.id);
1541 
1542 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1543 			if (ret < 0) {
1544 				DRM_ERROR("DM_MST: Failed to start MST\n");
1545 				aconnector->dc_link->type =
1546 					dc_connection_single;
1547 				break;
1548 			}
1549 		}
1550 	}
1551 	drm_connector_list_iter_end(&iter);
1552 
1553 	return ret;
1554 }
1555 
1556 static int dm_late_init(void *handle)
1557 {
1558 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1559 
1560 	struct dmcu_iram_parameters params;
1561 	unsigned int linear_lut[16];
1562 	int i;
1563 	struct dmcu *dmcu = NULL;
1564 	bool ret = true;
1565 
1566 	dmcu = adev->dm.dc->res_pool->dmcu;
1567 
1568 	for (i = 0; i < 16; i++)
1569 		linear_lut[i] = 0xFFFF * i / 15;
1570 
1571 	params.set = 0;
1572 	params.backlight_ramping_start = 0xCCCC;
1573 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1574 	params.backlight_lut_array_size = 16;
1575 	params.backlight_lut_array = linear_lut;
1576 
1577 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1578 	 * 0xFFFF x 0.01 = 0x28F
1579 	 */
1580 	params.min_abm_backlight = 0x28F;
1581 
1582 	/* In the case where abm is implemented on dmcub,
1583 	 * dmcu object will be null.
1584 	 * ABM 2.4 and up are implemented on dmcub.
1585 	 */
1586 	if (dmcu)
1587 		ret = dmcu_load_iram(dmcu, params);
1588 	else if (adev->dm.dc->ctx->dmub_srv)
1589 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1590 
1591 	if (!ret)
1592 		return -EINVAL;
1593 
1594 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1595 }
1596 
1597 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1598 {
1599 	struct amdgpu_dm_connector *aconnector;
1600 	struct drm_connector *connector;
1601 	struct drm_connector_list_iter iter;
1602 	struct drm_dp_mst_topology_mgr *mgr;
1603 	int ret;
1604 	bool need_hotplug = false;
1605 
1606 	drm_connector_list_iter_begin(dev, &iter);
1607 	drm_for_each_connector_iter(connector, &iter) {
1608 		aconnector = to_amdgpu_dm_connector(connector);
1609 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1610 		    aconnector->mst_port)
1611 			continue;
1612 
1613 		mgr = &aconnector->mst_mgr;
1614 
1615 		if (suspend) {
1616 			drm_dp_mst_topology_mgr_suspend(mgr);
1617 		} else {
1618 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1619 			if (ret < 0) {
1620 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1621 				need_hotplug = true;
1622 			}
1623 		}
1624 	}
1625 	drm_connector_list_iter_end(&iter);
1626 
1627 	if (need_hotplug)
1628 		drm_kms_helper_hotplug_event(dev);
1629 }
1630 
1631 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1632 {
1633 	struct smu_context *smu = &adev->smu;
1634 	int ret = 0;
1635 
1636 	if (!is_support_sw_smu(adev))
1637 		return 0;
1638 
1639 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1640 	 * on window driver dc implementation.
1641 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1642 	 * should be passed to smu during boot up and resume from s3.
1643 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1644 	 * dcn20_resource_construct
1645 	 * then call pplib functions below to pass the settings to smu:
1646 	 * smu_set_watermarks_for_clock_ranges
1647 	 * smu_set_watermarks_table
1648 	 * navi10_set_watermarks_table
1649 	 * smu_write_watermarks_table
1650 	 *
1651 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1652 	 * dc has implemented different flow for window driver:
1653 	 * dc_hardware_init / dc_set_power_state
1654 	 * dcn10_init_hw
1655 	 * notify_wm_ranges
1656 	 * set_wm_ranges
1657 	 * -- Linux
1658 	 * smu_set_watermarks_for_clock_ranges
1659 	 * renoir_set_watermarks_table
1660 	 * smu_write_watermarks_table
1661 	 *
1662 	 * For Linux,
1663 	 * dc_hardware_init -> amdgpu_dm_init
1664 	 * dc_set_power_state --> dm_resume
1665 	 *
1666 	 * therefore, this function apply to navi10/12/14 but not Renoir
1667 	 * *
1668 	 */
1669 	switch(adev->asic_type) {
1670 	case CHIP_NAVI10:
1671 	case CHIP_NAVI14:
1672 	case CHIP_NAVI12:
1673 		break;
1674 	default:
1675 		return 0;
1676 	}
1677 
1678 	ret = smu_write_watermarks_table(smu);
1679 	if (ret) {
1680 		DRM_ERROR("Failed to update WMTABLE!\n");
1681 		return ret;
1682 	}
1683 
1684 	return 0;
1685 }
1686 
1687 /**
1688  * dm_hw_init() - Initialize DC device
1689  * @handle: The base driver device containing the amdgpu_dm device.
1690  *
1691  * Initialize the &struct amdgpu_display_manager device. This involves calling
1692  * the initializers of each DM component, then populating the struct with them.
1693  *
1694  * Although the function implies hardware initialization, both hardware and
1695  * software are initialized here. Splitting them out to their relevant init
1696  * hooks is a future TODO item.
1697  *
1698  * Some notable things that are initialized here:
1699  *
1700  * - Display Core, both software and hardware
1701  * - DC modules that we need (freesync and color management)
1702  * - DRM software states
1703  * - Interrupt sources and handlers
1704  * - Vblank support
1705  * - Debug FS entries, if enabled
1706  */
1707 static int dm_hw_init(void *handle)
1708 {
1709 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710 	/* Create DAL display manager */
1711 	amdgpu_dm_init(adev);
1712 	amdgpu_dm_hpd_init(adev);
1713 
1714 	return 0;
1715 }
1716 
1717 /**
1718  * dm_hw_fini() - Teardown DC device
1719  * @handle: The base driver device containing the amdgpu_dm device.
1720  *
1721  * Teardown components within &struct amdgpu_display_manager that require
1722  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1723  * were loaded. Also flush IRQ workqueues and disable them.
1724  */
1725 static int dm_hw_fini(void *handle)
1726 {
1727 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1728 
1729 	amdgpu_dm_hpd_fini(adev);
1730 
1731 	amdgpu_dm_irq_fini(adev);
1732 	amdgpu_dm_fini(adev);
1733 	return 0;
1734 }
1735 
1736 
1737 static int dm_enable_vblank(struct drm_crtc *crtc);
1738 static void dm_disable_vblank(struct drm_crtc *crtc);
1739 
1740 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1741 				 struct dc_state *state, bool enable)
1742 {
1743 	enum dc_irq_source irq_source;
1744 	struct amdgpu_crtc *acrtc;
1745 	int rc = -EBUSY;
1746 	int i = 0;
1747 
1748 	for (i = 0; i < state->stream_count; i++) {
1749 		acrtc = get_crtc_by_otg_inst(
1750 				adev, state->stream_status[i].primary_otg_inst);
1751 
1752 		if (acrtc && state->stream_status[i].plane_count != 0) {
1753 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1754 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1755 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1756 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1757 			if (rc)
1758 				DRM_WARN("Failed to %s pflip interrupts\n",
1759 					 enable ? "enable" : "disable");
1760 
1761 			if (enable) {
1762 				rc = dm_enable_vblank(&acrtc->base);
1763 				if (rc)
1764 					DRM_WARN("Failed to enable vblank interrupts\n");
1765 			} else {
1766 				dm_disable_vblank(&acrtc->base);
1767 			}
1768 
1769 		}
1770 	}
1771 
1772 }
1773 
1774 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1775 {
1776 	struct dc_state *context = NULL;
1777 	enum dc_status res = DC_ERROR_UNEXPECTED;
1778 	int i;
1779 	struct dc_stream_state *del_streams[MAX_PIPES];
1780 	int del_streams_count = 0;
1781 
1782 	memset(del_streams, 0, sizeof(del_streams));
1783 
1784 	context = dc_create_state(dc);
1785 	if (context == NULL)
1786 		goto context_alloc_fail;
1787 
1788 	dc_resource_state_copy_construct_current(dc, context);
1789 
1790 	/* First remove from context all streams */
1791 	for (i = 0; i < context->stream_count; i++) {
1792 		struct dc_stream_state *stream = context->streams[i];
1793 
1794 		del_streams[del_streams_count++] = stream;
1795 	}
1796 
1797 	/* Remove all planes for removed streams and then remove the streams */
1798 	for (i = 0; i < del_streams_count; i++) {
1799 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1800 			res = DC_FAIL_DETACH_SURFACES;
1801 			goto fail;
1802 		}
1803 
1804 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1805 		if (res != DC_OK)
1806 			goto fail;
1807 	}
1808 
1809 
1810 	res = dc_validate_global_state(dc, context, false);
1811 
1812 	if (res != DC_OK) {
1813 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1814 		goto fail;
1815 	}
1816 
1817 	res = dc_commit_state(dc, context);
1818 
1819 fail:
1820 	dc_release_state(context);
1821 
1822 context_alloc_fail:
1823 	return res;
1824 }
1825 
1826 static int dm_suspend(void *handle)
1827 {
1828 	struct amdgpu_device *adev = handle;
1829 	struct amdgpu_display_manager *dm = &adev->dm;
1830 	int ret = 0;
1831 
1832 	if (amdgpu_in_reset(adev)) {
1833 		mutex_lock(&dm->dc_lock);
1834 
1835 #if defined(CONFIG_DRM_AMD_DC_DCN)
1836 		dc_allow_idle_optimizations(adev->dm.dc, false);
1837 #endif
1838 
1839 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1840 
1841 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1842 
1843 		amdgpu_dm_commit_zero_streams(dm->dc);
1844 
1845 		amdgpu_dm_irq_suspend(adev);
1846 
1847 		return ret;
1848 	}
1849 
1850 	WARN_ON(adev->dm.cached_state);
1851 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1852 
1853 	s3_handle_mst(adev_to_drm(adev), true);
1854 
1855 	amdgpu_dm_irq_suspend(adev);
1856 
1857 
1858 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1859 
1860 	return 0;
1861 }
1862 
1863 static struct amdgpu_dm_connector *
1864 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1865 					     struct drm_crtc *crtc)
1866 {
1867 	uint32_t i;
1868 	struct drm_connector_state *new_con_state;
1869 	struct drm_connector *connector;
1870 	struct drm_crtc *crtc_from_state;
1871 
1872 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1873 		crtc_from_state = new_con_state->crtc;
1874 
1875 		if (crtc_from_state == crtc)
1876 			return to_amdgpu_dm_connector(connector);
1877 	}
1878 
1879 	return NULL;
1880 }
1881 
1882 static void emulated_link_detect(struct dc_link *link)
1883 {
1884 	struct dc_sink_init_data sink_init_data = { 0 };
1885 	struct display_sink_capability sink_caps = { 0 };
1886 	enum dc_edid_status edid_status;
1887 	struct dc_context *dc_ctx = link->ctx;
1888 	struct dc_sink *sink = NULL;
1889 	struct dc_sink *prev_sink = NULL;
1890 
1891 	link->type = dc_connection_none;
1892 	prev_sink = link->local_sink;
1893 
1894 	if (prev_sink)
1895 		dc_sink_release(prev_sink);
1896 
1897 	switch (link->connector_signal) {
1898 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1899 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1900 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1901 		break;
1902 	}
1903 
1904 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1905 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1906 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1907 		break;
1908 	}
1909 
1910 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1911 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1912 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1913 		break;
1914 	}
1915 
1916 	case SIGNAL_TYPE_LVDS: {
1917 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1918 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1919 		break;
1920 	}
1921 
1922 	case SIGNAL_TYPE_EDP: {
1923 		sink_caps.transaction_type =
1924 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1925 		sink_caps.signal = SIGNAL_TYPE_EDP;
1926 		break;
1927 	}
1928 
1929 	case SIGNAL_TYPE_DISPLAY_PORT: {
1930 		sink_caps.transaction_type =
1931 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1932 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1933 		break;
1934 	}
1935 
1936 	default:
1937 		DC_ERROR("Invalid connector type! signal:%d\n",
1938 			link->connector_signal);
1939 		return;
1940 	}
1941 
1942 	sink_init_data.link = link;
1943 	sink_init_data.sink_signal = sink_caps.signal;
1944 
1945 	sink = dc_sink_create(&sink_init_data);
1946 	if (!sink) {
1947 		DC_ERROR("Failed to create sink!\n");
1948 		return;
1949 	}
1950 
1951 	/* dc_sink_create returns a new reference */
1952 	link->local_sink = sink;
1953 
1954 	edid_status = dm_helpers_read_local_edid(
1955 			link->ctx,
1956 			link,
1957 			sink);
1958 
1959 	if (edid_status != EDID_OK)
1960 		DC_ERROR("Failed to read EDID");
1961 
1962 }
1963 
1964 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1965 				     struct amdgpu_display_manager *dm)
1966 {
1967 	struct {
1968 		struct dc_surface_update surface_updates[MAX_SURFACES];
1969 		struct dc_plane_info plane_infos[MAX_SURFACES];
1970 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1971 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1972 		struct dc_stream_update stream_update;
1973 	} * bundle;
1974 	int k, m;
1975 
1976 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1977 
1978 	if (!bundle) {
1979 		dm_error("Failed to allocate update bundle\n");
1980 		goto cleanup;
1981 	}
1982 
1983 	for (k = 0; k < dc_state->stream_count; k++) {
1984 		bundle->stream_update.stream = dc_state->streams[k];
1985 
1986 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1987 			bundle->surface_updates[m].surface =
1988 				dc_state->stream_status->plane_states[m];
1989 			bundle->surface_updates[m].surface->force_full_update =
1990 				true;
1991 		}
1992 		dc_commit_updates_for_stream(
1993 			dm->dc, bundle->surface_updates,
1994 			dc_state->stream_status->plane_count,
1995 			dc_state->streams[k], &bundle->stream_update, dc_state);
1996 	}
1997 
1998 cleanup:
1999 	kfree(bundle);
2000 
2001 	return;
2002 }
2003 
2004 static void dm_set_dpms_off(struct dc_link *link)
2005 {
2006 	struct dc_stream_state *stream_state;
2007 	struct amdgpu_dm_connector *aconnector = link->priv;
2008 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2009 	struct dc_stream_update stream_update;
2010 	bool dpms_off = true;
2011 
2012 	memset(&stream_update, 0, sizeof(stream_update));
2013 	stream_update.dpms_off = &dpms_off;
2014 
2015 	mutex_lock(&adev->dm.dc_lock);
2016 	stream_state = dc_stream_find_from_link(link);
2017 
2018 	if (stream_state == NULL) {
2019 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2020 		mutex_unlock(&adev->dm.dc_lock);
2021 		return;
2022 	}
2023 
2024 	stream_update.stream = stream_state;
2025 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2026 				     stream_state, &stream_update,
2027 				     stream_state->ctx->dc->current_state);
2028 	mutex_unlock(&adev->dm.dc_lock);
2029 }
2030 
2031 static int dm_resume(void *handle)
2032 {
2033 	struct amdgpu_device *adev = handle;
2034 	struct drm_device *ddev = adev_to_drm(adev);
2035 	struct amdgpu_display_manager *dm = &adev->dm;
2036 	struct amdgpu_dm_connector *aconnector;
2037 	struct drm_connector *connector;
2038 	struct drm_connector_list_iter iter;
2039 	struct drm_crtc *crtc;
2040 	struct drm_crtc_state *new_crtc_state;
2041 	struct dm_crtc_state *dm_new_crtc_state;
2042 	struct drm_plane *plane;
2043 	struct drm_plane_state *new_plane_state;
2044 	struct dm_plane_state *dm_new_plane_state;
2045 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2046 	enum dc_connection_type new_connection_type = dc_connection_none;
2047 	struct dc_state *dc_state;
2048 	int i, r, j;
2049 
2050 	if (amdgpu_in_reset(adev)) {
2051 		dc_state = dm->cached_dc_state;
2052 
2053 		r = dm_dmub_hw_init(adev);
2054 		if (r)
2055 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2056 
2057 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2058 		dc_resume(dm->dc);
2059 
2060 		amdgpu_dm_irq_resume_early(adev);
2061 
2062 		for (i = 0; i < dc_state->stream_count; i++) {
2063 			dc_state->streams[i]->mode_changed = true;
2064 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2065 				dc_state->stream_status->plane_states[j]->update_flags.raw
2066 					= 0xffffffff;
2067 			}
2068 		}
2069 
2070 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2071 
2072 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2073 
2074 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2075 
2076 		dc_release_state(dm->cached_dc_state);
2077 		dm->cached_dc_state = NULL;
2078 
2079 		amdgpu_dm_irq_resume_late(adev);
2080 
2081 		mutex_unlock(&dm->dc_lock);
2082 
2083 		return 0;
2084 	}
2085 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2086 	dc_release_state(dm_state->context);
2087 	dm_state->context = dc_create_state(dm->dc);
2088 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2089 	dc_resource_state_construct(dm->dc, dm_state->context);
2090 
2091 	/* Before powering on DC we need to re-initialize DMUB. */
2092 	r = dm_dmub_hw_init(adev);
2093 	if (r)
2094 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2095 
2096 	/* power on hardware */
2097 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2098 
2099 	/* program HPD filter */
2100 	dc_resume(dm->dc);
2101 
2102 	/*
2103 	 * early enable HPD Rx IRQ, should be done before set mode as short
2104 	 * pulse interrupts are used for MST
2105 	 */
2106 	amdgpu_dm_irq_resume_early(adev);
2107 
2108 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2109 	s3_handle_mst(ddev, false);
2110 
2111 	/* Do detection*/
2112 	drm_connector_list_iter_begin(ddev, &iter);
2113 	drm_for_each_connector_iter(connector, &iter) {
2114 		aconnector = to_amdgpu_dm_connector(connector);
2115 
2116 		/*
2117 		 * this is the case when traversing through already created
2118 		 * MST connectors, should be skipped
2119 		 */
2120 		if (aconnector->mst_port)
2121 			continue;
2122 
2123 		mutex_lock(&aconnector->hpd_lock);
2124 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2125 			DRM_ERROR("KMS: Failed to detect connector\n");
2126 
2127 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2128 			emulated_link_detect(aconnector->dc_link);
2129 		else
2130 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2131 
2132 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2133 			aconnector->fake_enable = false;
2134 
2135 		if (aconnector->dc_sink)
2136 			dc_sink_release(aconnector->dc_sink);
2137 		aconnector->dc_sink = NULL;
2138 		amdgpu_dm_update_connector_after_detect(aconnector);
2139 		mutex_unlock(&aconnector->hpd_lock);
2140 	}
2141 	drm_connector_list_iter_end(&iter);
2142 
2143 	/* Force mode set in atomic commit */
2144 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2145 		new_crtc_state->active_changed = true;
2146 
2147 	/*
2148 	 * atomic_check is expected to create the dc states. We need to release
2149 	 * them here, since they were duplicated as part of the suspend
2150 	 * procedure.
2151 	 */
2152 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2153 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2154 		if (dm_new_crtc_state->stream) {
2155 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2156 			dc_stream_release(dm_new_crtc_state->stream);
2157 			dm_new_crtc_state->stream = NULL;
2158 		}
2159 	}
2160 
2161 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2162 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2163 		if (dm_new_plane_state->dc_state) {
2164 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2165 			dc_plane_state_release(dm_new_plane_state->dc_state);
2166 			dm_new_plane_state->dc_state = NULL;
2167 		}
2168 	}
2169 
2170 	drm_atomic_helper_resume(ddev, dm->cached_state);
2171 
2172 	dm->cached_state = NULL;
2173 
2174 	amdgpu_dm_irq_resume_late(adev);
2175 
2176 	amdgpu_dm_smu_write_watermarks_table(adev);
2177 
2178 	return 0;
2179 }
2180 
2181 /**
2182  * DOC: DM Lifecycle
2183  *
2184  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2185  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2186  * the base driver's device list to be initialized and torn down accordingly.
2187  *
2188  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2189  */
2190 
2191 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2192 	.name = "dm",
2193 	.early_init = dm_early_init,
2194 	.late_init = dm_late_init,
2195 	.sw_init = dm_sw_init,
2196 	.sw_fini = dm_sw_fini,
2197 	.hw_init = dm_hw_init,
2198 	.hw_fini = dm_hw_fini,
2199 	.suspend = dm_suspend,
2200 	.resume = dm_resume,
2201 	.is_idle = dm_is_idle,
2202 	.wait_for_idle = dm_wait_for_idle,
2203 	.check_soft_reset = dm_check_soft_reset,
2204 	.soft_reset = dm_soft_reset,
2205 	.set_clockgating_state = dm_set_clockgating_state,
2206 	.set_powergating_state = dm_set_powergating_state,
2207 };
2208 
2209 const struct amdgpu_ip_block_version dm_ip_block =
2210 {
2211 	.type = AMD_IP_BLOCK_TYPE_DCE,
2212 	.major = 1,
2213 	.minor = 0,
2214 	.rev = 0,
2215 	.funcs = &amdgpu_dm_funcs,
2216 };
2217 
2218 
2219 /**
2220  * DOC: atomic
2221  *
2222  * *WIP*
2223  */
2224 
2225 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2226 	.fb_create = amdgpu_display_user_framebuffer_create,
2227 	.get_format_info = amd_get_format_info,
2228 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2229 	.atomic_check = amdgpu_dm_atomic_check,
2230 	.atomic_commit = drm_atomic_helper_commit,
2231 };
2232 
2233 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2234 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2235 };
2236 
2237 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2238 {
2239 	u32 max_cll, min_cll, max, min, q, r;
2240 	struct amdgpu_dm_backlight_caps *caps;
2241 	struct amdgpu_display_manager *dm;
2242 	struct drm_connector *conn_base;
2243 	struct amdgpu_device *adev;
2244 	struct dc_link *link = NULL;
2245 	static const u8 pre_computed_values[] = {
2246 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2247 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2248 
2249 	if (!aconnector || !aconnector->dc_link)
2250 		return;
2251 
2252 	link = aconnector->dc_link;
2253 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2254 		return;
2255 
2256 	conn_base = &aconnector->base;
2257 	adev = drm_to_adev(conn_base->dev);
2258 	dm = &adev->dm;
2259 	caps = &dm->backlight_caps;
2260 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2261 	caps->aux_support = false;
2262 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2263 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2264 
2265 	if (caps->ext_caps->bits.oled == 1 ||
2266 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2267 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2268 		caps->aux_support = true;
2269 
2270 	if (amdgpu_backlight == 0)
2271 		caps->aux_support = false;
2272 	else if (amdgpu_backlight == 1)
2273 		caps->aux_support = true;
2274 
2275 	/* From the specification (CTA-861-G), for calculating the maximum
2276 	 * luminance we need to use:
2277 	 *	Luminance = 50*2**(CV/32)
2278 	 * Where CV is a one-byte value.
2279 	 * For calculating this expression we may need float point precision;
2280 	 * to avoid this complexity level, we take advantage that CV is divided
2281 	 * by a constant. From the Euclids division algorithm, we know that CV
2282 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2283 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2284 	 * need to pre-compute the value of r/32. For pre-computing the values
2285 	 * We just used the following Ruby line:
2286 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2287 	 * The results of the above expressions can be verified at
2288 	 * pre_computed_values.
2289 	 */
2290 	q = max_cll >> 5;
2291 	r = max_cll % 32;
2292 	max = (1 << q) * pre_computed_values[r];
2293 
2294 	// min luminance: maxLum * (CV/255)^2 / 100
2295 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2296 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2297 
2298 	caps->aux_max_input_signal = max;
2299 	caps->aux_min_input_signal = min;
2300 }
2301 
2302 void amdgpu_dm_update_connector_after_detect(
2303 		struct amdgpu_dm_connector *aconnector)
2304 {
2305 	struct drm_connector *connector = &aconnector->base;
2306 	struct drm_device *dev = connector->dev;
2307 	struct dc_sink *sink;
2308 
2309 	/* MST handled by drm_mst framework */
2310 	if (aconnector->mst_mgr.mst_state == true)
2311 		return;
2312 
2313 	sink = aconnector->dc_link->local_sink;
2314 	if (sink)
2315 		dc_sink_retain(sink);
2316 
2317 	/*
2318 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2319 	 * the connector sink is set to either fake or physical sink depends on link status.
2320 	 * Skip if already done during boot.
2321 	 */
2322 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2323 			&& aconnector->dc_em_sink) {
2324 
2325 		/*
2326 		 * For S3 resume with headless use eml_sink to fake stream
2327 		 * because on resume connector->sink is set to NULL
2328 		 */
2329 		mutex_lock(&dev->mode_config.mutex);
2330 
2331 		if (sink) {
2332 			if (aconnector->dc_sink) {
2333 				amdgpu_dm_update_freesync_caps(connector, NULL);
2334 				/*
2335 				 * retain and release below are used to
2336 				 * bump up refcount for sink because the link doesn't point
2337 				 * to it anymore after disconnect, so on next crtc to connector
2338 				 * reshuffle by UMD we will get into unwanted dc_sink release
2339 				 */
2340 				dc_sink_release(aconnector->dc_sink);
2341 			}
2342 			aconnector->dc_sink = sink;
2343 			dc_sink_retain(aconnector->dc_sink);
2344 			amdgpu_dm_update_freesync_caps(connector,
2345 					aconnector->edid);
2346 		} else {
2347 			amdgpu_dm_update_freesync_caps(connector, NULL);
2348 			if (!aconnector->dc_sink) {
2349 				aconnector->dc_sink = aconnector->dc_em_sink;
2350 				dc_sink_retain(aconnector->dc_sink);
2351 			}
2352 		}
2353 
2354 		mutex_unlock(&dev->mode_config.mutex);
2355 
2356 		if (sink)
2357 			dc_sink_release(sink);
2358 		return;
2359 	}
2360 
2361 	/*
2362 	 * TODO: temporary guard to look for proper fix
2363 	 * if this sink is MST sink, we should not do anything
2364 	 */
2365 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2366 		dc_sink_release(sink);
2367 		return;
2368 	}
2369 
2370 	if (aconnector->dc_sink == sink) {
2371 		/*
2372 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2373 		 * Do nothing!!
2374 		 */
2375 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2376 				aconnector->connector_id);
2377 		if (sink)
2378 			dc_sink_release(sink);
2379 		return;
2380 	}
2381 
2382 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2383 		aconnector->connector_id, aconnector->dc_sink, sink);
2384 
2385 	mutex_lock(&dev->mode_config.mutex);
2386 
2387 	/*
2388 	 * 1. Update status of the drm connector
2389 	 * 2. Send an event and let userspace tell us what to do
2390 	 */
2391 	if (sink) {
2392 		/*
2393 		 * TODO: check if we still need the S3 mode update workaround.
2394 		 * If yes, put it here.
2395 		 */
2396 		if (aconnector->dc_sink) {
2397 			amdgpu_dm_update_freesync_caps(connector, NULL);
2398 			dc_sink_release(aconnector->dc_sink);
2399 		}
2400 
2401 		aconnector->dc_sink = sink;
2402 		dc_sink_retain(aconnector->dc_sink);
2403 		if (sink->dc_edid.length == 0) {
2404 			aconnector->edid = NULL;
2405 			if (aconnector->dc_link->aux_mode) {
2406 				drm_dp_cec_unset_edid(
2407 					&aconnector->dm_dp_aux.aux);
2408 			}
2409 		} else {
2410 			aconnector->edid =
2411 				(struct edid *)sink->dc_edid.raw_edid;
2412 
2413 			drm_connector_update_edid_property(connector,
2414 							   aconnector->edid);
2415 			if (aconnector->dc_link->aux_mode)
2416 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2417 						    aconnector->edid);
2418 		}
2419 
2420 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2421 		update_connector_ext_caps(aconnector);
2422 	} else {
2423 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2424 		amdgpu_dm_update_freesync_caps(connector, NULL);
2425 		drm_connector_update_edid_property(connector, NULL);
2426 		aconnector->num_modes = 0;
2427 		dc_sink_release(aconnector->dc_sink);
2428 		aconnector->dc_sink = NULL;
2429 		aconnector->edid = NULL;
2430 #ifdef CONFIG_DRM_AMD_DC_HDCP
2431 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2432 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2433 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2434 #endif
2435 	}
2436 
2437 	mutex_unlock(&dev->mode_config.mutex);
2438 
2439 	update_subconnector_property(aconnector);
2440 
2441 	if (sink)
2442 		dc_sink_release(sink);
2443 }
2444 
2445 static void handle_hpd_irq(void *param)
2446 {
2447 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2448 	struct drm_connector *connector = &aconnector->base;
2449 	struct drm_device *dev = connector->dev;
2450 	enum dc_connection_type new_connection_type = dc_connection_none;
2451 #ifdef CONFIG_DRM_AMD_DC_HDCP
2452 	struct amdgpu_device *adev = drm_to_adev(dev);
2453 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2454 #endif
2455 
2456 	/*
2457 	 * In case of failure or MST no need to update connector status or notify the OS
2458 	 * since (for MST case) MST does this in its own context.
2459 	 */
2460 	mutex_lock(&aconnector->hpd_lock);
2461 
2462 #ifdef CONFIG_DRM_AMD_DC_HDCP
2463 	if (adev->dm.hdcp_workqueue) {
2464 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2465 		dm_con_state->update_hdcp = true;
2466 	}
2467 #endif
2468 	if (aconnector->fake_enable)
2469 		aconnector->fake_enable = false;
2470 
2471 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2472 		DRM_ERROR("KMS: Failed to detect connector\n");
2473 
2474 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2475 		emulated_link_detect(aconnector->dc_link);
2476 
2477 
2478 		drm_modeset_lock_all(dev);
2479 		dm_restore_drm_connector_state(dev, connector);
2480 		drm_modeset_unlock_all(dev);
2481 
2482 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2483 			drm_kms_helper_hotplug_event(dev);
2484 
2485 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2486 		if (new_connection_type == dc_connection_none &&
2487 		    aconnector->dc_link->type == dc_connection_none)
2488 			dm_set_dpms_off(aconnector->dc_link);
2489 
2490 		amdgpu_dm_update_connector_after_detect(aconnector);
2491 
2492 		drm_modeset_lock_all(dev);
2493 		dm_restore_drm_connector_state(dev, connector);
2494 		drm_modeset_unlock_all(dev);
2495 
2496 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2497 			drm_kms_helper_hotplug_event(dev);
2498 	}
2499 	mutex_unlock(&aconnector->hpd_lock);
2500 
2501 }
2502 
2503 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2504 {
2505 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2506 	uint8_t dret;
2507 	bool new_irq_handled = false;
2508 	int dpcd_addr;
2509 	int dpcd_bytes_to_read;
2510 
2511 	const int max_process_count = 30;
2512 	int process_count = 0;
2513 
2514 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2515 
2516 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2517 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2518 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2519 		dpcd_addr = DP_SINK_COUNT;
2520 	} else {
2521 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2522 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2523 		dpcd_addr = DP_SINK_COUNT_ESI;
2524 	}
2525 
2526 	dret = drm_dp_dpcd_read(
2527 		&aconnector->dm_dp_aux.aux,
2528 		dpcd_addr,
2529 		esi,
2530 		dpcd_bytes_to_read);
2531 
2532 	while (dret == dpcd_bytes_to_read &&
2533 		process_count < max_process_count) {
2534 		uint8_t retry;
2535 		dret = 0;
2536 
2537 		process_count++;
2538 
2539 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2540 		/* handle HPD short pulse irq */
2541 		if (aconnector->mst_mgr.mst_state)
2542 			drm_dp_mst_hpd_irq(
2543 				&aconnector->mst_mgr,
2544 				esi,
2545 				&new_irq_handled);
2546 
2547 		if (new_irq_handled) {
2548 			/* ACK at DPCD to notify down stream */
2549 			const int ack_dpcd_bytes_to_write =
2550 				dpcd_bytes_to_read - 1;
2551 
2552 			for (retry = 0; retry < 3; retry++) {
2553 				uint8_t wret;
2554 
2555 				wret = drm_dp_dpcd_write(
2556 					&aconnector->dm_dp_aux.aux,
2557 					dpcd_addr + 1,
2558 					&esi[1],
2559 					ack_dpcd_bytes_to_write);
2560 				if (wret == ack_dpcd_bytes_to_write)
2561 					break;
2562 			}
2563 
2564 			/* check if there is new irq to be handled */
2565 			dret = drm_dp_dpcd_read(
2566 				&aconnector->dm_dp_aux.aux,
2567 				dpcd_addr,
2568 				esi,
2569 				dpcd_bytes_to_read);
2570 
2571 			new_irq_handled = false;
2572 		} else {
2573 			break;
2574 		}
2575 	}
2576 
2577 	if (process_count == max_process_count)
2578 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2579 }
2580 
2581 static void handle_hpd_rx_irq(void *param)
2582 {
2583 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2584 	struct drm_connector *connector = &aconnector->base;
2585 	struct drm_device *dev = connector->dev;
2586 	struct dc_link *dc_link = aconnector->dc_link;
2587 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2588 	bool result = false;
2589 	enum dc_connection_type new_connection_type = dc_connection_none;
2590 	struct amdgpu_device *adev = drm_to_adev(dev);
2591 	union hpd_irq_data hpd_irq_data;
2592 
2593 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2594 
2595 	/*
2596 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2597 	 * conflict, after implement i2c helper, this mutex should be
2598 	 * retired.
2599 	 */
2600 	if (dc_link->type != dc_connection_mst_branch)
2601 		mutex_lock(&aconnector->hpd_lock);
2602 
2603 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2604 
2605 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2606 		(dc_link->type == dc_connection_mst_branch)) {
2607 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2608 			result = true;
2609 			dm_handle_hpd_rx_irq(aconnector);
2610 			goto out;
2611 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2612 			result = false;
2613 			dm_handle_hpd_rx_irq(aconnector);
2614 			goto out;
2615 		}
2616 	}
2617 
2618 	mutex_lock(&adev->dm.dc_lock);
2619 #ifdef CONFIG_DRM_AMD_DC_HDCP
2620 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2621 #else
2622 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2623 #endif
2624 	mutex_unlock(&adev->dm.dc_lock);
2625 
2626 out:
2627 	if (result && !is_mst_root_connector) {
2628 		/* Downstream Port status changed. */
2629 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2630 			DRM_ERROR("KMS: Failed to detect connector\n");
2631 
2632 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2633 			emulated_link_detect(dc_link);
2634 
2635 			if (aconnector->fake_enable)
2636 				aconnector->fake_enable = false;
2637 
2638 			amdgpu_dm_update_connector_after_detect(aconnector);
2639 
2640 
2641 			drm_modeset_lock_all(dev);
2642 			dm_restore_drm_connector_state(dev, connector);
2643 			drm_modeset_unlock_all(dev);
2644 
2645 			drm_kms_helper_hotplug_event(dev);
2646 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2647 
2648 			if (aconnector->fake_enable)
2649 				aconnector->fake_enable = false;
2650 
2651 			amdgpu_dm_update_connector_after_detect(aconnector);
2652 
2653 
2654 			drm_modeset_lock_all(dev);
2655 			dm_restore_drm_connector_state(dev, connector);
2656 			drm_modeset_unlock_all(dev);
2657 
2658 			drm_kms_helper_hotplug_event(dev);
2659 		}
2660 	}
2661 #ifdef CONFIG_DRM_AMD_DC_HDCP
2662 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2663 		if (adev->dm.hdcp_workqueue)
2664 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2665 	}
2666 #endif
2667 
2668 	if (dc_link->type != dc_connection_mst_branch) {
2669 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2670 		mutex_unlock(&aconnector->hpd_lock);
2671 	}
2672 }
2673 
2674 static void register_hpd_handlers(struct amdgpu_device *adev)
2675 {
2676 	struct drm_device *dev = adev_to_drm(adev);
2677 	struct drm_connector *connector;
2678 	struct amdgpu_dm_connector *aconnector;
2679 	const struct dc_link *dc_link;
2680 	struct dc_interrupt_params int_params = {0};
2681 
2682 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2683 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2684 
2685 	list_for_each_entry(connector,
2686 			&dev->mode_config.connector_list, head)	{
2687 
2688 		aconnector = to_amdgpu_dm_connector(connector);
2689 		dc_link = aconnector->dc_link;
2690 
2691 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2692 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2693 			int_params.irq_source = dc_link->irq_source_hpd;
2694 
2695 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2696 					handle_hpd_irq,
2697 					(void *) aconnector);
2698 		}
2699 
2700 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2701 
2702 			/* Also register for DP short pulse (hpd_rx). */
2703 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2704 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2705 
2706 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2707 					handle_hpd_rx_irq,
2708 					(void *) aconnector);
2709 		}
2710 	}
2711 }
2712 
2713 #if defined(CONFIG_DRM_AMD_DC_SI)
2714 /* Register IRQ sources and initialize IRQ callbacks */
2715 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2716 {
2717 	struct dc *dc = adev->dm.dc;
2718 	struct common_irq_params *c_irq_params;
2719 	struct dc_interrupt_params int_params = {0};
2720 	int r;
2721 	int i;
2722 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2723 
2724 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2725 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2726 
2727 	/*
2728 	 * Actions of amdgpu_irq_add_id():
2729 	 * 1. Register a set() function with base driver.
2730 	 *    Base driver will call set() function to enable/disable an
2731 	 *    interrupt in DC hardware.
2732 	 * 2. Register amdgpu_dm_irq_handler().
2733 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2734 	 *    coming from DC hardware.
2735 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2736 	 *    for acknowledging and handling. */
2737 
2738 	/* Use VBLANK interrupt */
2739 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2740 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2741 		if (r) {
2742 			DRM_ERROR("Failed to add crtc irq id!\n");
2743 			return r;
2744 		}
2745 
2746 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2747 		int_params.irq_source =
2748 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2749 
2750 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2751 
2752 		c_irq_params->adev = adev;
2753 		c_irq_params->irq_src = int_params.irq_source;
2754 
2755 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2756 				dm_crtc_high_irq, c_irq_params);
2757 	}
2758 
2759 	/* Use GRPH_PFLIP interrupt */
2760 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2761 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2762 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2763 		if (r) {
2764 			DRM_ERROR("Failed to add page flip irq id!\n");
2765 			return r;
2766 		}
2767 
2768 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2769 		int_params.irq_source =
2770 			dc_interrupt_to_irq_source(dc, i, 0);
2771 
2772 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2773 
2774 		c_irq_params->adev = adev;
2775 		c_irq_params->irq_src = int_params.irq_source;
2776 
2777 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2778 				dm_pflip_high_irq, c_irq_params);
2779 
2780 	}
2781 
2782 	/* HPD */
2783 	r = amdgpu_irq_add_id(adev, client_id,
2784 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2785 	if (r) {
2786 		DRM_ERROR("Failed to add hpd irq id!\n");
2787 		return r;
2788 	}
2789 
2790 	register_hpd_handlers(adev);
2791 
2792 	return 0;
2793 }
2794 #endif
2795 
2796 /* Register IRQ sources and initialize IRQ callbacks */
2797 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2798 {
2799 	struct dc *dc = adev->dm.dc;
2800 	struct common_irq_params *c_irq_params;
2801 	struct dc_interrupt_params int_params = {0};
2802 	int r;
2803 	int i;
2804 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2805 
2806 	if (adev->asic_type >= CHIP_VEGA10)
2807 		client_id = SOC15_IH_CLIENTID_DCE;
2808 
2809 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2810 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2811 
2812 	/*
2813 	 * Actions of amdgpu_irq_add_id():
2814 	 * 1. Register a set() function with base driver.
2815 	 *    Base driver will call set() function to enable/disable an
2816 	 *    interrupt in DC hardware.
2817 	 * 2. Register amdgpu_dm_irq_handler().
2818 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2819 	 *    coming from DC hardware.
2820 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2821 	 *    for acknowledging and handling. */
2822 
2823 	/* Use VBLANK interrupt */
2824 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2825 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2826 		if (r) {
2827 			DRM_ERROR("Failed to add crtc irq id!\n");
2828 			return r;
2829 		}
2830 
2831 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2832 		int_params.irq_source =
2833 			dc_interrupt_to_irq_source(dc, i, 0);
2834 
2835 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2836 
2837 		c_irq_params->adev = adev;
2838 		c_irq_params->irq_src = int_params.irq_source;
2839 
2840 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2841 				dm_crtc_high_irq, c_irq_params);
2842 	}
2843 
2844 	/* Use VUPDATE interrupt */
2845 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2846 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2847 		if (r) {
2848 			DRM_ERROR("Failed to add vupdate irq id!\n");
2849 			return r;
2850 		}
2851 
2852 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2853 		int_params.irq_source =
2854 			dc_interrupt_to_irq_source(dc, i, 0);
2855 
2856 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2857 
2858 		c_irq_params->adev = adev;
2859 		c_irq_params->irq_src = int_params.irq_source;
2860 
2861 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862 				dm_vupdate_high_irq, c_irq_params);
2863 	}
2864 
2865 	/* Use GRPH_PFLIP interrupt */
2866 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2867 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2868 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2869 		if (r) {
2870 			DRM_ERROR("Failed to add page flip irq id!\n");
2871 			return r;
2872 		}
2873 
2874 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875 		int_params.irq_source =
2876 			dc_interrupt_to_irq_source(dc, i, 0);
2877 
2878 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2879 
2880 		c_irq_params->adev = adev;
2881 		c_irq_params->irq_src = int_params.irq_source;
2882 
2883 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884 				dm_pflip_high_irq, c_irq_params);
2885 
2886 	}
2887 
2888 	/* HPD */
2889 	r = amdgpu_irq_add_id(adev, client_id,
2890 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2891 	if (r) {
2892 		DRM_ERROR("Failed to add hpd irq id!\n");
2893 		return r;
2894 	}
2895 
2896 	register_hpd_handlers(adev);
2897 
2898 	return 0;
2899 }
2900 
2901 #if defined(CONFIG_DRM_AMD_DC_DCN)
2902 /* Register IRQ sources and initialize IRQ callbacks */
2903 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2904 {
2905 	struct dc *dc = adev->dm.dc;
2906 	struct common_irq_params *c_irq_params;
2907 	struct dc_interrupt_params int_params = {0};
2908 	int r;
2909 	int i;
2910 
2911 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2912 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2913 
2914 	/*
2915 	 * Actions of amdgpu_irq_add_id():
2916 	 * 1. Register a set() function with base driver.
2917 	 *    Base driver will call set() function to enable/disable an
2918 	 *    interrupt in DC hardware.
2919 	 * 2. Register amdgpu_dm_irq_handler().
2920 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2921 	 *    coming from DC hardware.
2922 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2923 	 *    for acknowledging and handling.
2924 	 */
2925 
2926 	/* Use VSTARTUP interrupt */
2927 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2928 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2929 			i++) {
2930 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2931 
2932 		if (r) {
2933 			DRM_ERROR("Failed to add crtc irq id!\n");
2934 			return r;
2935 		}
2936 
2937 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2938 		int_params.irq_source =
2939 			dc_interrupt_to_irq_source(dc, i, 0);
2940 
2941 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2942 
2943 		c_irq_params->adev = adev;
2944 		c_irq_params->irq_src = int_params.irq_source;
2945 
2946 		amdgpu_dm_irq_register_interrupt(
2947 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2948 	}
2949 
2950 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2951 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2952 	 * to trigger at end of each vblank, regardless of state of the lock,
2953 	 * matching DCE behaviour.
2954 	 */
2955 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2956 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2957 	     i++) {
2958 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2959 
2960 		if (r) {
2961 			DRM_ERROR("Failed to add vupdate irq id!\n");
2962 			return r;
2963 		}
2964 
2965 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966 		int_params.irq_source =
2967 			dc_interrupt_to_irq_source(dc, i, 0);
2968 
2969 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2970 
2971 		c_irq_params->adev = adev;
2972 		c_irq_params->irq_src = int_params.irq_source;
2973 
2974 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2975 				dm_vupdate_high_irq, c_irq_params);
2976 	}
2977 
2978 	/* Use GRPH_PFLIP interrupt */
2979 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2980 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2981 			i++) {
2982 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2983 		if (r) {
2984 			DRM_ERROR("Failed to add page flip irq id!\n");
2985 			return r;
2986 		}
2987 
2988 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2989 		int_params.irq_source =
2990 			dc_interrupt_to_irq_source(dc, i, 0);
2991 
2992 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2993 
2994 		c_irq_params->adev = adev;
2995 		c_irq_params->irq_src = int_params.irq_source;
2996 
2997 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2998 				dm_pflip_high_irq, c_irq_params);
2999 
3000 	}
3001 
3002 	/* HPD */
3003 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3004 			&adev->hpd_irq);
3005 	if (r) {
3006 		DRM_ERROR("Failed to add hpd irq id!\n");
3007 		return r;
3008 	}
3009 
3010 	register_hpd_handlers(adev);
3011 
3012 	return 0;
3013 }
3014 #endif
3015 
3016 /*
3017  * Acquires the lock for the atomic state object and returns
3018  * the new atomic state.
3019  *
3020  * This should only be called during atomic check.
3021  */
3022 static int dm_atomic_get_state(struct drm_atomic_state *state,
3023 			       struct dm_atomic_state **dm_state)
3024 {
3025 	struct drm_device *dev = state->dev;
3026 	struct amdgpu_device *adev = drm_to_adev(dev);
3027 	struct amdgpu_display_manager *dm = &adev->dm;
3028 	struct drm_private_state *priv_state;
3029 
3030 	if (*dm_state)
3031 		return 0;
3032 
3033 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3034 	if (IS_ERR(priv_state))
3035 		return PTR_ERR(priv_state);
3036 
3037 	*dm_state = to_dm_atomic_state(priv_state);
3038 
3039 	return 0;
3040 }
3041 
3042 static struct dm_atomic_state *
3043 dm_atomic_get_new_state(struct drm_atomic_state *state)
3044 {
3045 	struct drm_device *dev = state->dev;
3046 	struct amdgpu_device *adev = drm_to_adev(dev);
3047 	struct amdgpu_display_manager *dm = &adev->dm;
3048 	struct drm_private_obj *obj;
3049 	struct drm_private_state *new_obj_state;
3050 	int i;
3051 
3052 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3053 		if (obj->funcs == dm->atomic_obj.funcs)
3054 			return to_dm_atomic_state(new_obj_state);
3055 	}
3056 
3057 	return NULL;
3058 }
3059 
3060 static struct drm_private_state *
3061 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3062 {
3063 	struct dm_atomic_state *old_state, *new_state;
3064 
3065 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3066 	if (!new_state)
3067 		return NULL;
3068 
3069 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3070 
3071 	old_state = to_dm_atomic_state(obj->state);
3072 
3073 	if (old_state && old_state->context)
3074 		new_state->context = dc_copy_state(old_state->context);
3075 
3076 	if (!new_state->context) {
3077 		kfree(new_state);
3078 		return NULL;
3079 	}
3080 
3081 	return &new_state->base;
3082 }
3083 
3084 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3085 				    struct drm_private_state *state)
3086 {
3087 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3088 
3089 	if (dm_state && dm_state->context)
3090 		dc_release_state(dm_state->context);
3091 
3092 	kfree(dm_state);
3093 }
3094 
3095 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3096 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3097 	.atomic_destroy_state = dm_atomic_destroy_state,
3098 };
3099 
3100 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3101 {
3102 	struct dm_atomic_state *state;
3103 	int r;
3104 
3105 	adev->mode_info.mode_config_initialized = true;
3106 
3107 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3108 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3109 
3110 	adev_to_drm(adev)->mode_config.max_width = 16384;
3111 	adev_to_drm(adev)->mode_config.max_height = 16384;
3112 
3113 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3114 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3115 	/* indicates support for immediate flip */
3116 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3117 
3118 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3119 
3120 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3121 	if (!state)
3122 		return -ENOMEM;
3123 
3124 	state->context = dc_create_state(adev->dm.dc);
3125 	if (!state->context) {
3126 		kfree(state);
3127 		return -ENOMEM;
3128 	}
3129 
3130 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3131 
3132 	drm_atomic_private_obj_init(adev_to_drm(adev),
3133 				    &adev->dm.atomic_obj,
3134 				    &state->base,
3135 				    &dm_atomic_state_funcs);
3136 
3137 	r = amdgpu_display_modeset_create_props(adev);
3138 	if (r) {
3139 		dc_release_state(state->context);
3140 		kfree(state);
3141 		return r;
3142 	}
3143 
3144 	r = amdgpu_dm_audio_init(adev);
3145 	if (r) {
3146 		dc_release_state(state->context);
3147 		kfree(state);
3148 		return r;
3149 	}
3150 
3151 	return 0;
3152 }
3153 
3154 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3155 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3156 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3157 
3158 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3159 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3160 
3161 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3162 {
3163 #if defined(CONFIG_ACPI)
3164 	struct amdgpu_dm_backlight_caps caps;
3165 
3166 	memset(&caps, 0, sizeof(caps));
3167 
3168 	if (dm->backlight_caps.caps_valid)
3169 		return;
3170 
3171 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3172 	if (caps.caps_valid) {
3173 		dm->backlight_caps.caps_valid = true;
3174 		if (caps.aux_support)
3175 			return;
3176 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3177 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3178 	} else {
3179 		dm->backlight_caps.min_input_signal =
3180 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3181 		dm->backlight_caps.max_input_signal =
3182 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3183 	}
3184 #else
3185 	if (dm->backlight_caps.aux_support)
3186 		return;
3187 
3188 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3189 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3190 #endif
3191 }
3192 
3193 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3194 				unsigned *min, unsigned *max)
3195 {
3196 	if (!caps)
3197 		return 0;
3198 
3199 	if (caps->aux_support) {
3200 		// Firmware limits are in nits, DC API wants millinits.
3201 		*max = 1000 * caps->aux_max_input_signal;
3202 		*min = 1000 * caps->aux_min_input_signal;
3203 	} else {
3204 		// Firmware limits are 8-bit, PWM control is 16-bit.
3205 		*max = 0x101 * caps->max_input_signal;
3206 		*min = 0x101 * caps->min_input_signal;
3207 	}
3208 	return 1;
3209 }
3210 
3211 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3212 					uint32_t brightness)
3213 {
3214 	unsigned min, max;
3215 
3216 	if (!get_brightness_range(caps, &min, &max))
3217 		return brightness;
3218 
3219 	// Rescale 0..255 to min..max
3220 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3221 				       AMDGPU_MAX_BL_LEVEL);
3222 }
3223 
3224 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3225 				      uint32_t brightness)
3226 {
3227 	unsigned min, max;
3228 
3229 	if (!get_brightness_range(caps, &min, &max))
3230 		return brightness;
3231 
3232 	if (brightness < min)
3233 		return 0;
3234 	// Rescale min..max to 0..255
3235 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3236 				 max - min);
3237 }
3238 
3239 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3240 {
3241 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3242 	struct amdgpu_dm_backlight_caps caps;
3243 	struct dc_link *link = NULL;
3244 	u32 brightness;
3245 	bool rc;
3246 
3247 	amdgpu_dm_update_backlight_caps(dm);
3248 	caps = dm->backlight_caps;
3249 
3250 	link = (struct dc_link *)dm->backlight_link;
3251 
3252 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3253 	// Change brightness based on AUX property
3254 	if (caps.aux_support)
3255 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3256 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3257 	else
3258 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3259 
3260 	return rc ? 0 : 1;
3261 }
3262 
3263 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3264 {
3265 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3266 	struct amdgpu_dm_backlight_caps caps;
3267 
3268 	amdgpu_dm_update_backlight_caps(dm);
3269 	caps = dm->backlight_caps;
3270 
3271 	if (caps.aux_support) {
3272 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3273 		u32 avg, peak;
3274 		bool rc;
3275 
3276 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3277 		if (!rc)
3278 			return bd->props.brightness;
3279 		return convert_brightness_to_user(&caps, avg);
3280 	} else {
3281 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3282 
3283 		if (ret == DC_ERROR_UNEXPECTED)
3284 			return bd->props.brightness;
3285 		return convert_brightness_to_user(&caps, ret);
3286 	}
3287 }
3288 
3289 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3290 	.options = BL_CORE_SUSPENDRESUME,
3291 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3292 	.update_status	= amdgpu_dm_backlight_update_status,
3293 };
3294 
3295 static void
3296 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3297 {
3298 	char bl_name[16];
3299 	struct backlight_properties props = { 0 };
3300 
3301 	amdgpu_dm_update_backlight_caps(dm);
3302 
3303 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3304 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3305 	props.type = BACKLIGHT_RAW;
3306 
3307 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3308 		 adev_to_drm(dm->adev)->primary->index);
3309 
3310 	dm->backlight_dev = backlight_device_register(bl_name,
3311 						      adev_to_drm(dm->adev)->dev,
3312 						      dm,
3313 						      &amdgpu_dm_backlight_ops,
3314 						      &props);
3315 
3316 	if (IS_ERR(dm->backlight_dev))
3317 		DRM_ERROR("DM: Backlight registration failed!\n");
3318 	else
3319 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3320 }
3321 
3322 #endif
3323 
3324 static int initialize_plane(struct amdgpu_display_manager *dm,
3325 			    struct amdgpu_mode_info *mode_info, int plane_id,
3326 			    enum drm_plane_type plane_type,
3327 			    const struct dc_plane_cap *plane_cap)
3328 {
3329 	struct drm_plane *plane;
3330 	unsigned long possible_crtcs;
3331 	int ret = 0;
3332 
3333 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3334 	if (!plane) {
3335 		DRM_ERROR("KMS: Failed to allocate plane\n");
3336 		return -ENOMEM;
3337 	}
3338 	plane->type = plane_type;
3339 
3340 	/*
3341 	 * HACK: IGT tests expect that the primary plane for a CRTC
3342 	 * can only have one possible CRTC. Only expose support for
3343 	 * any CRTC if they're not going to be used as a primary plane
3344 	 * for a CRTC - like overlay or underlay planes.
3345 	 */
3346 	possible_crtcs = 1 << plane_id;
3347 	if (plane_id >= dm->dc->caps.max_streams)
3348 		possible_crtcs = 0xff;
3349 
3350 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3351 
3352 	if (ret) {
3353 		DRM_ERROR("KMS: Failed to initialize plane\n");
3354 		kfree(plane);
3355 		return ret;
3356 	}
3357 
3358 	if (mode_info)
3359 		mode_info->planes[plane_id] = plane;
3360 
3361 	return ret;
3362 }
3363 
3364 
3365 static void register_backlight_device(struct amdgpu_display_manager *dm,
3366 				      struct dc_link *link)
3367 {
3368 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3369 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3370 
3371 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3372 	    link->type != dc_connection_none) {
3373 		/*
3374 		 * Event if registration failed, we should continue with
3375 		 * DM initialization because not having a backlight control
3376 		 * is better then a black screen.
3377 		 */
3378 		amdgpu_dm_register_backlight_device(dm);
3379 
3380 		if (dm->backlight_dev)
3381 			dm->backlight_link = link;
3382 	}
3383 #endif
3384 }
3385 
3386 
3387 /*
3388  * In this architecture, the association
3389  * connector -> encoder -> crtc
3390  * id not really requried. The crtc and connector will hold the
3391  * display_index as an abstraction to use with DAL component
3392  *
3393  * Returns 0 on success
3394  */
3395 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3396 {
3397 	struct amdgpu_display_manager *dm = &adev->dm;
3398 	int32_t i;
3399 	struct amdgpu_dm_connector *aconnector = NULL;
3400 	struct amdgpu_encoder *aencoder = NULL;
3401 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3402 	uint32_t link_cnt;
3403 	int32_t primary_planes;
3404 	enum dc_connection_type new_connection_type = dc_connection_none;
3405 	const struct dc_plane_cap *plane;
3406 
3407 	dm->display_indexes_num = dm->dc->caps.max_streams;
3408 	/* Update the actual used number of crtc */
3409 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3410 
3411 	link_cnt = dm->dc->caps.max_links;
3412 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3413 		DRM_ERROR("DM: Failed to initialize mode config\n");
3414 		return -EINVAL;
3415 	}
3416 
3417 	/* There is one primary plane per CRTC */
3418 	primary_planes = dm->dc->caps.max_streams;
3419 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3420 
3421 	/*
3422 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3423 	 * Order is reversed to match iteration order in atomic check.
3424 	 */
3425 	for (i = (primary_planes - 1); i >= 0; i--) {
3426 		plane = &dm->dc->caps.planes[i];
3427 
3428 		if (initialize_plane(dm, mode_info, i,
3429 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3430 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3431 			goto fail;
3432 		}
3433 	}
3434 
3435 	/*
3436 	 * Initialize overlay planes, index starting after primary planes.
3437 	 * These planes have a higher DRM index than the primary planes since
3438 	 * they should be considered as having a higher z-order.
3439 	 * Order is reversed to match iteration order in atomic check.
3440 	 *
3441 	 * Only support DCN for now, and only expose one so we don't encourage
3442 	 * userspace to use up all the pipes.
3443 	 */
3444 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3445 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3446 
3447 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3448 			continue;
3449 
3450 		if (!plane->blends_with_above || !plane->blends_with_below)
3451 			continue;
3452 
3453 		if (!plane->pixel_format_support.argb8888)
3454 			continue;
3455 
3456 		if (initialize_plane(dm, NULL, primary_planes + i,
3457 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3458 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3459 			goto fail;
3460 		}
3461 
3462 		/* Only create one overlay plane. */
3463 		break;
3464 	}
3465 
3466 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3467 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3468 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3469 			goto fail;
3470 		}
3471 
3472 	/* loops over all connectors on the board */
3473 	for (i = 0; i < link_cnt; i++) {
3474 		struct dc_link *link = NULL;
3475 
3476 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3477 			DRM_ERROR(
3478 				"KMS: Cannot support more than %d display indexes\n",
3479 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3480 			continue;
3481 		}
3482 
3483 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3484 		if (!aconnector)
3485 			goto fail;
3486 
3487 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3488 		if (!aencoder)
3489 			goto fail;
3490 
3491 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3492 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3493 			goto fail;
3494 		}
3495 
3496 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3497 			DRM_ERROR("KMS: Failed to initialize connector\n");
3498 			goto fail;
3499 		}
3500 
3501 		link = dc_get_link_at_index(dm->dc, i);
3502 
3503 		if (!dc_link_detect_sink(link, &new_connection_type))
3504 			DRM_ERROR("KMS: Failed to detect connector\n");
3505 
3506 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3507 			emulated_link_detect(link);
3508 			amdgpu_dm_update_connector_after_detect(aconnector);
3509 
3510 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3511 			amdgpu_dm_update_connector_after_detect(aconnector);
3512 			register_backlight_device(dm, link);
3513 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3514 				amdgpu_dm_set_psr_caps(link);
3515 		}
3516 
3517 
3518 	}
3519 
3520 	/* Software is initialized. Now we can register interrupt handlers. */
3521 	switch (adev->asic_type) {
3522 #if defined(CONFIG_DRM_AMD_DC_SI)
3523 	case CHIP_TAHITI:
3524 	case CHIP_PITCAIRN:
3525 	case CHIP_VERDE:
3526 	case CHIP_OLAND:
3527 		if (dce60_register_irq_handlers(dm->adev)) {
3528 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3529 			goto fail;
3530 		}
3531 		break;
3532 #endif
3533 	case CHIP_BONAIRE:
3534 	case CHIP_HAWAII:
3535 	case CHIP_KAVERI:
3536 	case CHIP_KABINI:
3537 	case CHIP_MULLINS:
3538 	case CHIP_TONGA:
3539 	case CHIP_FIJI:
3540 	case CHIP_CARRIZO:
3541 	case CHIP_STONEY:
3542 	case CHIP_POLARIS11:
3543 	case CHIP_POLARIS10:
3544 	case CHIP_POLARIS12:
3545 	case CHIP_VEGAM:
3546 	case CHIP_VEGA10:
3547 	case CHIP_VEGA12:
3548 	case CHIP_VEGA20:
3549 		if (dce110_register_irq_handlers(dm->adev)) {
3550 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3551 			goto fail;
3552 		}
3553 		break;
3554 #if defined(CONFIG_DRM_AMD_DC_DCN)
3555 	case CHIP_RAVEN:
3556 	case CHIP_NAVI12:
3557 	case CHIP_NAVI10:
3558 	case CHIP_NAVI14:
3559 	case CHIP_RENOIR:
3560 	case CHIP_SIENNA_CICHLID:
3561 	case CHIP_NAVY_FLOUNDER:
3562 	case CHIP_DIMGREY_CAVEFISH:
3563 	case CHIP_VANGOGH:
3564 		if (dcn10_register_irq_handlers(dm->adev)) {
3565 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3566 			goto fail;
3567 		}
3568 		break;
3569 #endif
3570 	default:
3571 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3572 		goto fail;
3573 	}
3574 
3575 	return 0;
3576 fail:
3577 	kfree(aencoder);
3578 	kfree(aconnector);
3579 
3580 	return -EINVAL;
3581 }
3582 
3583 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3584 {
3585 	drm_mode_config_cleanup(dm->ddev);
3586 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3587 	return;
3588 }
3589 
3590 /******************************************************************************
3591  * amdgpu_display_funcs functions
3592  *****************************************************************************/
3593 
3594 /*
3595  * dm_bandwidth_update - program display watermarks
3596  *
3597  * @adev: amdgpu_device pointer
3598  *
3599  * Calculate and program the display watermarks and line buffer allocation.
3600  */
3601 static void dm_bandwidth_update(struct amdgpu_device *adev)
3602 {
3603 	/* TODO: implement later */
3604 }
3605 
3606 static const struct amdgpu_display_funcs dm_display_funcs = {
3607 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3608 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3609 	.backlight_set_level = NULL, /* never called for DC */
3610 	.backlight_get_level = NULL, /* never called for DC */
3611 	.hpd_sense = NULL,/* called unconditionally */
3612 	.hpd_set_polarity = NULL, /* called unconditionally */
3613 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3614 	.page_flip_get_scanoutpos =
3615 		dm_crtc_get_scanoutpos,/* called unconditionally */
3616 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3617 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3618 };
3619 
3620 #if defined(CONFIG_DEBUG_KERNEL_DC)
3621 
3622 static ssize_t s3_debug_store(struct device *device,
3623 			      struct device_attribute *attr,
3624 			      const char *buf,
3625 			      size_t count)
3626 {
3627 	int ret;
3628 	int s3_state;
3629 	struct drm_device *drm_dev = dev_get_drvdata(device);
3630 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3631 
3632 	ret = kstrtoint(buf, 0, &s3_state);
3633 
3634 	if (ret == 0) {
3635 		if (s3_state) {
3636 			dm_resume(adev);
3637 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3638 		} else
3639 			dm_suspend(adev);
3640 	}
3641 
3642 	return ret == 0 ? count : 0;
3643 }
3644 
3645 DEVICE_ATTR_WO(s3_debug);
3646 
3647 #endif
3648 
3649 static int dm_early_init(void *handle)
3650 {
3651 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3652 
3653 	switch (adev->asic_type) {
3654 #if defined(CONFIG_DRM_AMD_DC_SI)
3655 	case CHIP_TAHITI:
3656 	case CHIP_PITCAIRN:
3657 	case CHIP_VERDE:
3658 		adev->mode_info.num_crtc = 6;
3659 		adev->mode_info.num_hpd = 6;
3660 		adev->mode_info.num_dig = 6;
3661 		break;
3662 	case CHIP_OLAND:
3663 		adev->mode_info.num_crtc = 2;
3664 		adev->mode_info.num_hpd = 2;
3665 		adev->mode_info.num_dig = 2;
3666 		break;
3667 #endif
3668 	case CHIP_BONAIRE:
3669 	case CHIP_HAWAII:
3670 		adev->mode_info.num_crtc = 6;
3671 		adev->mode_info.num_hpd = 6;
3672 		adev->mode_info.num_dig = 6;
3673 		break;
3674 	case CHIP_KAVERI:
3675 		adev->mode_info.num_crtc = 4;
3676 		adev->mode_info.num_hpd = 6;
3677 		adev->mode_info.num_dig = 7;
3678 		break;
3679 	case CHIP_KABINI:
3680 	case CHIP_MULLINS:
3681 		adev->mode_info.num_crtc = 2;
3682 		adev->mode_info.num_hpd = 6;
3683 		adev->mode_info.num_dig = 6;
3684 		break;
3685 	case CHIP_FIJI:
3686 	case CHIP_TONGA:
3687 		adev->mode_info.num_crtc = 6;
3688 		adev->mode_info.num_hpd = 6;
3689 		adev->mode_info.num_dig = 7;
3690 		break;
3691 	case CHIP_CARRIZO:
3692 		adev->mode_info.num_crtc = 3;
3693 		adev->mode_info.num_hpd = 6;
3694 		adev->mode_info.num_dig = 9;
3695 		break;
3696 	case CHIP_STONEY:
3697 		adev->mode_info.num_crtc = 2;
3698 		adev->mode_info.num_hpd = 6;
3699 		adev->mode_info.num_dig = 9;
3700 		break;
3701 	case CHIP_POLARIS11:
3702 	case CHIP_POLARIS12:
3703 		adev->mode_info.num_crtc = 5;
3704 		adev->mode_info.num_hpd = 5;
3705 		adev->mode_info.num_dig = 5;
3706 		break;
3707 	case CHIP_POLARIS10:
3708 	case CHIP_VEGAM:
3709 		adev->mode_info.num_crtc = 6;
3710 		adev->mode_info.num_hpd = 6;
3711 		adev->mode_info.num_dig = 6;
3712 		break;
3713 	case CHIP_VEGA10:
3714 	case CHIP_VEGA12:
3715 	case CHIP_VEGA20:
3716 		adev->mode_info.num_crtc = 6;
3717 		adev->mode_info.num_hpd = 6;
3718 		adev->mode_info.num_dig = 6;
3719 		break;
3720 #if defined(CONFIG_DRM_AMD_DC_DCN)
3721 	case CHIP_RAVEN:
3722 	case CHIP_RENOIR:
3723 	case CHIP_VANGOGH:
3724 		adev->mode_info.num_crtc = 4;
3725 		adev->mode_info.num_hpd = 4;
3726 		adev->mode_info.num_dig = 4;
3727 		break;
3728 	case CHIP_NAVI10:
3729 	case CHIP_NAVI12:
3730 	case CHIP_SIENNA_CICHLID:
3731 	case CHIP_NAVY_FLOUNDER:
3732 		adev->mode_info.num_crtc = 6;
3733 		adev->mode_info.num_hpd = 6;
3734 		adev->mode_info.num_dig = 6;
3735 		break;
3736 	case CHIP_NAVI14:
3737 	case CHIP_DIMGREY_CAVEFISH:
3738 		adev->mode_info.num_crtc = 5;
3739 		adev->mode_info.num_hpd = 5;
3740 		adev->mode_info.num_dig = 5;
3741 		break;
3742 #endif
3743 	default:
3744 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3745 		return -EINVAL;
3746 	}
3747 
3748 	amdgpu_dm_set_irq_funcs(adev);
3749 
3750 	if (adev->mode_info.funcs == NULL)
3751 		adev->mode_info.funcs = &dm_display_funcs;
3752 
3753 	/*
3754 	 * Note: Do NOT change adev->audio_endpt_rreg and
3755 	 * adev->audio_endpt_wreg because they are initialised in
3756 	 * amdgpu_device_init()
3757 	 */
3758 #if defined(CONFIG_DEBUG_KERNEL_DC)
3759 	device_create_file(
3760 		adev_to_drm(adev)->dev,
3761 		&dev_attr_s3_debug);
3762 #endif
3763 
3764 	return 0;
3765 }
3766 
3767 static bool modeset_required(struct drm_crtc_state *crtc_state,
3768 			     struct dc_stream_state *new_stream,
3769 			     struct dc_stream_state *old_stream)
3770 {
3771 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3772 }
3773 
3774 static bool modereset_required(struct drm_crtc_state *crtc_state)
3775 {
3776 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3777 }
3778 
3779 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3780 {
3781 	drm_encoder_cleanup(encoder);
3782 	kfree(encoder);
3783 }
3784 
3785 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3786 	.destroy = amdgpu_dm_encoder_destroy,
3787 };
3788 
3789 
3790 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3791 					 struct drm_framebuffer *fb,
3792 					 int *min_downscale, int *max_upscale)
3793 {
3794 	struct amdgpu_device *adev = drm_to_adev(dev);
3795 	struct dc *dc = adev->dm.dc;
3796 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3797 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3798 
3799 	switch (fb->format->format) {
3800 	case DRM_FORMAT_P010:
3801 	case DRM_FORMAT_NV12:
3802 	case DRM_FORMAT_NV21:
3803 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3804 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3805 		break;
3806 
3807 	case DRM_FORMAT_XRGB16161616F:
3808 	case DRM_FORMAT_ARGB16161616F:
3809 	case DRM_FORMAT_XBGR16161616F:
3810 	case DRM_FORMAT_ABGR16161616F:
3811 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3812 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3813 		break;
3814 
3815 	default:
3816 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3817 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3818 		break;
3819 	}
3820 
3821 	/*
3822 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
3823 	 * scaling factor of 1.0 == 1000 units.
3824 	 */
3825 	if (*max_upscale == 1)
3826 		*max_upscale = 1000;
3827 
3828 	if (*min_downscale == 1)
3829 		*min_downscale = 1000;
3830 }
3831 
3832 
3833 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3834 				struct dc_scaling_info *scaling_info)
3835 {
3836 	int scale_w, scale_h, min_downscale, max_upscale;
3837 
3838 	memset(scaling_info, 0, sizeof(*scaling_info));
3839 
3840 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3841 	scaling_info->src_rect.x = state->src_x >> 16;
3842 	scaling_info->src_rect.y = state->src_y >> 16;
3843 
3844 	scaling_info->src_rect.width = state->src_w >> 16;
3845 	if (scaling_info->src_rect.width == 0)
3846 		return -EINVAL;
3847 
3848 	scaling_info->src_rect.height = state->src_h >> 16;
3849 	if (scaling_info->src_rect.height == 0)
3850 		return -EINVAL;
3851 
3852 	scaling_info->dst_rect.x = state->crtc_x;
3853 	scaling_info->dst_rect.y = state->crtc_y;
3854 
3855 	if (state->crtc_w == 0)
3856 		return -EINVAL;
3857 
3858 	scaling_info->dst_rect.width = state->crtc_w;
3859 
3860 	if (state->crtc_h == 0)
3861 		return -EINVAL;
3862 
3863 	scaling_info->dst_rect.height = state->crtc_h;
3864 
3865 	/* DRM doesn't specify clipping on destination output. */
3866 	scaling_info->clip_rect = scaling_info->dst_rect;
3867 
3868 	/* Validate scaling per-format with DC plane caps */
3869 	if (state->plane && state->plane->dev && state->fb) {
3870 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3871 					     &min_downscale, &max_upscale);
3872 	} else {
3873 		min_downscale = 250;
3874 		max_upscale = 16000;
3875 	}
3876 
3877 	scale_w = scaling_info->dst_rect.width * 1000 /
3878 		  scaling_info->src_rect.width;
3879 
3880 	if (scale_w < min_downscale || scale_w > max_upscale)
3881 		return -EINVAL;
3882 
3883 	scale_h = scaling_info->dst_rect.height * 1000 /
3884 		  scaling_info->src_rect.height;
3885 
3886 	if (scale_h < min_downscale || scale_h > max_upscale)
3887 		return -EINVAL;
3888 
3889 	/*
3890 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3891 	 * assume reasonable defaults based on the format.
3892 	 */
3893 
3894 	return 0;
3895 }
3896 
3897 static void
3898 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3899 				 uint64_t tiling_flags)
3900 {
3901 	/* Fill GFX8 params */
3902 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3903 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3904 
3905 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3906 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3907 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3908 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3909 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3910 
3911 		/* XXX fix me for VI */
3912 		tiling_info->gfx8.num_banks = num_banks;
3913 		tiling_info->gfx8.array_mode =
3914 				DC_ARRAY_2D_TILED_THIN1;
3915 		tiling_info->gfx8.tile_split = tile_split;
3916 		tiling_info->gfx8.bank_width = bankw;
3917 		tiling_info->gfx8.bank_height = bankh;
3918 		tiling_info->gfx8.tile_aspect = mtaspect;
3919 		tiling_info->gfx8.tile_mode =
3920 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3921 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3922 			== DC_ARRAY_1D_TILED_THIN1) {
3923 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3924 	}
3925 
3926 	tiling_info->gfx8.pipe_config =
3927 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3928 }
3929 
3930 static void
3931 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3932 				  union dc_tiling_info *tiling_info)
3933 {
3934 	tiling_info->gfx9.num_pipes =
3935 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3936 	tiling_info->gfx9.num_banks =
3937 		adev->gfx.config.gb_addr_config_fields.num_banks;
3938 	tiling_info->gfx9.pipe_interleave =
3939 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3940 	tiling_info->gfx9.num_shader_engines =
3941 		adev->gfx.config.gb_addr_config_fields.num_se;
3942 	tiling_info->gfx9.max_compressed_frags =
3943 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3944 	tiling_info->gfx9.num_rb_per_se =
3945 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3946 	tiling_info->gfx9.shaderEnable = 1;
3947 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3948 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3949 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3950 	    adev->asic_type == CHIP_VANGOGH)
3951 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3952 }
3953 
3954 static int
3955 validate_dcc(struct amdgpu_device *adev,
3956 	     const enum surface_pixel_format format,
3957 	     const enum dc_rotation_angle rotation,
3958 	     const union dc_tiling_info *tiling_info,
3959 	     const struct dc_plane_dcc_param *dcc,
3960 	     const struct dc_plane_address *address,
3961 	     const struct plane_size *plane_size)
3962 {
3963 	struct dc *dc = adev->dm.dc;
3964 	struct dc_dcc_surface_param input;
3965 	struct dc_surface_dcc_cap output;
3966 
3967 	memset(&input, 0, sizeof(input));
3968 	memset(&output, 0, sizeof(output));
3969 
3970 	if (!dcc->enable)
3971 		return 0;
3972 
3973 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3974 	    !dc->cap_funcs.get_dcc_compression_cap)
3975 		return -EINVAL;
3976 
3977 	input.format = format;
3978 	input.surface_size.width = plane_size->surface_size.width;
3979 	input.surface_size.height = plane_size->surface_size.height;
3980 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3981 
3982 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3983 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3984 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3985 		input.scan = SCAN_DIRECTION_VERTICAL;
3986 
3987 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3988 		return -EINVAL;
3989 
3990 	if (!output.capable)
3991 		return -EINVAL;
3992 
3993 	if (dcc->independent_64b_blks == 0 &&
3994 	    output.grph.rgb.independent_64b_blks != 0)
3995 		return -EINVAL;
3996 
3997 	return 0;
3998 }
3999 
4000 static bool
4001 modifier_has_dcc(uint64_t modifier)
4002 {
4003 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4004 }
4005 
4006 static unsigned
4007 modifier_gfx9_swizzle_mode(uint64_t modifier)
4008 {
4009 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4010 		return 0;
4011 
4012 	return AMD_FMT_MOD_GET(TILE, modifier);
4013 }
4014 
4015 static const struct drm_format_info *
4016 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4017 {
4018 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4019 }
4020 
4021 static void
4022 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4023 				    union dc_tiling_info *tiling_info,
4024 				    uint64_t modifier)
4025 {
4026 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4027 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4028 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4029 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4030 
4031 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4032 
4033 	if (!IS_AMD_FMT_MOD(modifier))
4034 		return;
4035 
4036 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4037 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4038 
4039 	if (adev->family >= AMDGPU_FAMILY_NV) {
4040 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4041 	} else {
4042 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4043 
4044 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4045 	}
4046 }
4047 
4048 enum dm_micro_swizzle {
4049 	MICRO_SWIZZLE_Z = 0,
4050 	MICRO_SWIZZLE_S = 1,
4051 	MICRO_SWIZZLE_D = 2,
4052 	MICRO_SWIZZLE_R = 3
4053 };
4054 
4055 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4056 					  uint32_t format,
4057 					  uint64_t modifier)
4058 {
4059 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4060 	const struct drm_format_info *info = drm_format_info(format);
4061 
4062 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4063 
4064 	if (!info)
4065 		return false;
4066 
4067 	/*
4068 	 * We always have to allow this modifier, because core DRM still
4069 	 * checks LINEAR support if userspace does not provide modifers.
4070 	 */
4071 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4072 		return true;
4073 
4074 	/*
4075 	 * The arbitrary tiling support for multiplane formats has not been hooked
4076 	 * up.
4077 	 */
4078 	if (info->num_planes > 1)
4079 		return false;
4080 
4081 	/*
4082 	 * For D swizzle the canonical modifier depends on the bpp, so check
4083 	 * it here.
4084 	 */
4085 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4086 	    adev->family >= AMDGPU_FAMILY_NV) {
4087 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4088 			return false;
4089 	}
4090 
4091 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4092 	    info->cpp[0] < 8)
4093 		return false;
4094 
4095 	if (modifier_has_dcc(modifier)) {
4096 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4097 		if (info->cpp[0] != 4)
4098 			return false;
4099 	}
4100 
4101 	return true;
4102 }
4103 
4104 static void
4105 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4106 {
4107 	if (!*mods)
4108 		return;
4109 
4110 	if (*cap - *size < 1) {
4111 		uint64_t new_cap = *cap * 2;
4112 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4113 
4114 		if (!new_mods) {
4115 			kfree(*mods);
4116 			*mods = NULL;
4117 			return;
4118 		}
4119 
4120 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4121 		kfree(*mods);
4122 		*mods = new_mods;
4123 		*cap = new_cap;
4124 	}
4125 
4126 	(*mods)[*size] = mod;
4127 	*size += 1;
4128 }
4129 
4130 static void
4131 add_gfx9_modifiers(const struct amdgpu_device *adev,
4132 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4133 {
4134 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4135 	int pipe_xor_bits = min(8, pipes +
4136 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4137 	int bank_xor_bits = min(8 - pipe_xor_bits,
4138 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4139 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4140 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4141 
4142 
4143 	if (adev->family == AMDGPU_FAMILY_RV) {
4144 		/* Raven2 and later */
4145 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4146 
4147 		/*
4148 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4149 		 * doesn't support _D on DCN
4150 		 */
4151 
4152 		if (has_constant_encode) {
4153 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4154 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4155 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4156 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4157 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4158 				    AMD_FMT_MOD_SET(DCC, 1) |
4159 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4160 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4161 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4162 		}
4163 
4164 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4165 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4166 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4167 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4168 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4169 			    AMD_FMT_MOD_SET(DCC, 1) |
4170 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4171 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4172 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4173 
4174 		if (has_constant_encode) {
4175 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4176 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4177 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4178 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4179 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4180 				    AMD_FMT_MOD_SET(DCC, 1) |
4181 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4182 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4183 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4184 
4185 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4186 				    AMD_FMT_MOD_SET(RB, rb) |
4187 				    AMD_FMT_MOD_SET(PIPE, pipes));
4188 		}
4189 
4190 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4191 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4192 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4193 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4194 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4195 			    AMD_FMT_MOD_SET(DCC, 1) |
4196 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4197 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4198 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4199 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4200 			    AMD_FMT_MOD_SET(RB, rb) |
4201 			    AMD_FMT_MOD_SET(PIPE, pipes));
4202 	}
4203 
4204 	/*
4205 	 * Only supported for 64bpp on Raven, will be filtered on format in
4206 	 * dm_plane_format_mod_supported.
4207 	 */
4208 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4210 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4211 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4212 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4213 
4214 	if (adev->family == AMDGPU_FAMILY_RV) {
4215 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4216 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4217 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4218 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4219 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4220 	}
4221 
4222 	/*
4223 	 * Only supported for 64bpp on Raven, will be filtered on format in
4224 	 * dm_plane_format_mod_supported.
4225 	 */
4226 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4228 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4229 
4230 	if (adev->family == AMDGPU_FAMILY_RV) {
4231 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4232 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4233 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4234 	}
4235 }
4236 
4237 static void
4238 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4239 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4240 {
4241 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4242 
4243 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4244 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4245 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4246 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4247 		    AMD_FMT_MOD_SET(DCC, 1) |
4248 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4249 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4250 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4251 
4252 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4253 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4254 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4255 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4256 		    AMD_FMT_MOD_SET(DCC, 1) |
4257 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4258 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4259 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4260 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4261 
4262 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4263 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4264 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4265 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4266 
4267 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4268 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4269 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4270 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4271 
4272 
4273 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4274 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4275 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4276 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4277 
4278 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4279 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4280 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4281 }
4282 
4283 static void
4284 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4285 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4286 {
4287 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4288 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4289 
4290 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4291 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4292 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4293 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4294 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4295 		    AMD_FMT_MOD_SET(DCC, 1) |
4296 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4297 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4298 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4299 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4300 
4301 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4302 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4303 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4304 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4305 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4306 		    AMD_FMT_MOD_SET(DCC, 1) |
4307 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4308 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4309 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4310 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4311 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4312 
4313 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4314 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4315 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4316 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4317 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4318 
4319 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4320 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4321 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4322 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4323 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4324 
4325 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4326 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4327 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4328 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4329 
4330 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4331 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4332 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4333 }
4334 
4335 static int
4336 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4337 {
4338 	uint64_t size = 0, capacity = 128;
4339 	*mods = NULL;
4340 
4341 	/* We have not hooked up any pre-GFX9 modifiers. */
4342 	if (adev->family < AMDGPU_FAMILY_AI)
4343 		return 0;
4344 
4345 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4346 
4347 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4348 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4349 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4350 		return *mods ? 0 : -ENOMEM;
4351 	}
4352 
4353 	switch (adev->family) {
4354 	case AMDGPU_FAMILY_AI:
4355 	case AMDGPU_FAMILY_RV:
4356 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4357 		break;
4358 	case AMDGPU_FAMILY_NV:
4359 	case AMDGPU_FAMILY_VGH:
4360 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4361 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4362 		else
4363 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4364 		break;
4365 	}
4366 
4367 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4368 
4369 	/* INVALID marks the end of the list. */
4370 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4371 
4372 	if (!*mods)
4373 		return -ENOMEM;
4374 
4375 	return 0;
4376 }
4377 
4378 static int
4379 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4380 					  const struct amdgpu_framebuffer *afb,
4381 					  const enum surface_pixel_format format,
4382 					  const enum dc_rotation_angle rotation,
4383 					  const struct plane_size *plane_size,
4384 					  union dc_tiling_info *tiling_info,
4385 					  struct dc_plane_dcc_param *dcc,
4386 					  struct dc_plane_address *address,
4387 					  const bool force_disable_dcc)
4388 {
4389 	const uint64_t modifier = afb->base.modifier;
4390 	int ret;
4391 
4392 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4393 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4394 
4395 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4396 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4397 
4398 		dcc->enable = 1;
4399 		dcc->meta_pitch = afb->base.pitches[1];
4400 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4401 
4402 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4403 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4404 	}
4405 
4406 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4407 	if (ret)
4408 		return ret;
4409 
4410 	return 0;
4411 }
4412 
4413 static int
4414 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4415 			     const struct amdgpu_framebuffer *afb,
4416 			     const enum surface_pixel_format format,
4417 			     const enum dc_rotation_angle rotation,
4418 			     const uint64_t tiling_flags,
4419 			     union dc_tiling_info *tiling_info,
4420 			     struct plane_size *plane_size,
4421 			     struct dc_plane_dcc_param *dcc,
4422 			     struct dc_plane_address *address,
4423 			     bool tmz_surface,
4424 			     bool force_disable_dcc)
4425 {
4426 	const struct drm_framebuffer *fb = &afb->base;
4427 	int ret;
4428 
4429 	memset(tiling_info, 0, sizeof(*tiling_info));
4430 	memset(plane_size, 0, sizeof(*plane_size));
4431 	memset(dcc, 0, sizeof(*dcc));
4432 	memset(address, 0, sizeof(*address));
4433 
4434 	address->tmz_surface = tmz_surface;
4435 
4436 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4437 		uint64_t addr = afb->address + fb->offsets[0];
4438 
4439 		plane_size->surface_size.x = 0;
4440 		plane_size->surface_size.y = 0;
4441 		plane_size->surface_size.width = fb->width;
4442 		plane_size->surface_size.height = fb->height;
4443 		plane_size->surface_pitch =
4444 			fb->pitches[0] / fb->format->cpp[0];
4445 
4446 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4447 		address->grph.addr.low_part = lower_32_bits(addr);
4448 		address->grph.addr.high_part = upper_32_bits(addr);
4449 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4450 		uint64_t luma_addr = afb->address + fb->offsets[0];
4451 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4452 
4453 		plane_size->surface_size.x = 0;
4454 		plane_size->surface_size.y = 0;
4455 		plane_size->surface_size.width = fb->width;
4456 		plane_size->surface_size.height = fb->height;
4457 		plane_size->surface_pitch =
4458 			fb->pitches[0] / fb->format->cpp[0];
4459 
4460 		plane_size->chroma_size.x = 0;
4461 		plane_size->chroma_size.y = 0;
4462 		/* TODO: set these based on surface format */
4463 		plane_size->chroma_size.width = fb->width / 2;
4464 		plane_size->chroma_size.height = fb->height / 2;
4465 
4466 		plane_size->chroma_pitch =
4467 			fb->pitches[1] / fb->format->cpp[1];
4468 
4469 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4470 		address->video_progressive.luma_addr.low_part =
4471 			lower_32_bits(luma_addr);
4472 		address->video_progressive.luma_addr.high_part =
4473 			upper_32_bits(luma_addr);
4474 		address->video_progressive.chroma_addr.low_part =
4475 			lower_32_bits(chroma_addr);
4476 		address->video_progressive.chroma_addr.high_part =
4477 			upper_32_bits(chroma_addr);
4478 	}
4479 
4480 	if (adev->family >= AMDGPU_FAMILY_AI) {
4481 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4482 								rotation, plane_size,
4483 								tiling_info, dcc,
4484 								address,
4485 								force_disable_dcc);
4486 		if (ret)
4487 			return ret;
4488 	} else {
4489 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4490 	}
4491 
4492 	return 0;
4493 }
4494 
4495 static void
4496 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4497 			       bool *per_pixel_alpha, bool *global_alpha,
4498 			       int *global_alpha_value)
4499 {
4500 	*per_pixel_alpha = false;
4501 	*global_alpha = false;
4502 	*global_alpha_value = 0xff;
4503 
4504 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4505 		return;
4506 
4507 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4508 		static const uint32_t alpha_formats[] = {
4509 			DRM_FORMAT_ARGB8888,
4510 			DRM_FORMAT_RGBA8888,
4511 			DRM_FORMAT_ABGR8888,
4512 		};
4513 		uint32_t format = plane_state->fb->format->format;
4514 		unsigned int i;
4515 
4516 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4517 			if (format == alpha_formats[i]) {
4518 				*per_pixel_alpha = true;
4519 				break;
4520 			}
4521 		}
4522 	}
4523 
4524 	if (plane_state->alpha < 0xffff) {
4525 		*global_alpha = true;
4526 		*global_alpha_value = plane_state->alpha >> 8;
4527 	}
4528 }
4529 
4530 static int
4531 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4532 			    const enum surface_pixel_format format,
4533 			    enum dc_color_space *color_space)
4534 {
4535 	bool full_range;
4536 
4537 	*color_space = COLOR_SPACE_SRGB;
4538 
4539 	/* DRM color properties only affect non-RGB formats. */
4540 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4541 		return 0;
4542 
4543 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4544 
4545 	switch (plane_state->color_encoding) {
4546 	case DRM_COLOR_YCBCR_BT601:
4547 		if (full_range)
4548 			*color_space = COLOR_SPACE_YCBCR601;
4549 		else
4550 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4551 		break;
4552 
4553 	case DRM_COLOR_YCBCR_BT709:
4554 		if (full_range)
4555 			*color_space = COLOR_SPACE_YCBCR709;
4556 		else
4557 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4558 		break;
4559 
4560 	case DRM_COLOR_YCBCR_BT2020:
4561 		if (full_range)
4562 			*color_space = COLOR_SPACE_2020_YCBCR;
4563 		else
4564 			return -EINVAL;
4565 		break;
4566 
4567 	default:
4568 		return -EINVAL;
4569 	}
4570 
4571 	return 0;
4572 }
4573 
4574 static int
4575 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4576 			    const struct drm_plane_state *plane_state,
4577 			    const uint64_t tiling_flags,
4578 			    struct dc_plane_info *plane_info,
4579 			    struct dc_plane_address *address,
4580 			    bool tmz_surface,
4581 			    bool force_disable_dcc)
4582 {
4583 	const struct drm_framebuffer *fb = plane_state->fb;
4584 	const struct amdgpu_framebuffer *afb =
4585 		to_amdgpu_framebuffer(plane_state->fb);
4586 	struct drm_format_name_buf format_name;
4587 	int ret;
4588 
4589 	memset(plane_info, 0, sizeof(*plane_info));
4590 
4591 	switch (fb->format->format) {
4592 	case DRM_FORMAT_C8:
4593 		plane_info->format =
4594 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4595 		break;
4596 	case DRM_FORMAT_RGB565:
4597 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4598 		break;
4599 	case DRM_FORMAT_XRGB8888:
4600 	case DRM_FORMAT_ARGB8888:
4601 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4602 		break;
4603 	case DRM_FORMAT_XRGB2101010:
4604 	case DRM_FORMAT_ARGB2101010:
4605 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4606 		break;
4607 	case DRM_FORMAT_XBGR2101010:
4608 	case DRM_FORMAT_ABGR2101010:
4609 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4610 		break;
4611 	case DRM_FORMAT_XBGR8888:
4612 	case DRM_FORMAT_ABGR8888:
4613 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4614 		break;
4615 	case DRM_FORMAT_NV21:
4616 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4617 		break;
4618 	case DRM_FORMAT_NV12:
4619 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4620 		break;
4621 	case DRM_FORMAT_P010:
4622 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4623 		break;
4624 	case DRM_FORMAT_XRGB16161616F:
4625 	case DRM_FORMAT_ARGB16161616F:
4626 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4627 		break;
4628 	case DRM_FORMAT_XBGR16161616F:
4629 	case DRM_FORMAT_ABGR16161616F:
4630 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4631 		break;
4632 	default:
4633 		DRM_ERROR(
4634 			"Unsupported screen format %s\n",
4635 			drm_get_format_name(fb->format->format, &format_name));
4636 		return -EINVAL;
4637 	}
4638 
4639 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4640 	case DRM_MODE_ROTATE_0:
4641 		plane_info->rotation = ROTATION_ANGLE_0;
4642 		break;
4643 	case DRM_MODE_ROTATE_90:
4644 		plane_info->rotation = ROTATION_ANGLE_90;
4645 		break;
4646 	case DRM_MODE_ROTATE_180:
4647 		plane_info->rotation = ROTATION_ANGLE_180;
4648 		break;
4649 	case DRM_MODE_ROTATE_270:
4650 		plane_info->rotation = ROTATION_ANGLE_270;
4651 		break;
4652 	default:
4653 		plane_info->rotation = ROTATION_ANGLE_0;
4654 		break;
4655 	}
4656 
4657 	plane_info->visible = true;
4658 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4659 
4660 	plane_info->layer_index = 0;
4661 
4662 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4663 					  &plane_info->color_space);
4664 	if (ret)
4665 		return ret;
4666 
4667 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4668 					   plane_info->rotation, tiling_flags,
4669 					   &plane_info->tiling_info,
4670 					   &plane_info->plane_size,
4671 					   &plane_info->dcc, address, tmz_surface,
4672 					   force_disable_dcc);
4673 	if (ret)
4674 		return ret;
4675 
4676 	fill_blending_from_plane_state(
4677 		plane_state, &plane_info->per_pixel_alpha,
4678 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4679 
4680 	return 0;
4681 }
4682 
4683 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4684 				    struct dc_plane_state *dc_plane_state,
4685 				    struct drm_plane_state *plane_state,
4686 				    struct drm_crtc_state *crtc_state)
4687 {
4688 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4689 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4690 	struct dc_scaling_info scaling_info;
4691 	struct dc_plane_info plane_info;
4692 	int ret;
4693 	bool force_disable_dcc = false;
4694 
4695 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4696 	if (ret)
4697 		return ret;
4698 
4699 	dc_plane_state->src_rect = scaling_info.src_rect;
4700 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4701 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4702 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4703 
4704 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4705 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4706 					  afb->tiling_flags,
4707 					  &plane_info,
4708 					  &dc_plane_state->address,
4709 					  afb->tmz_surface,
4710 					  force_disable_dcc);
4711 	if (ret)
4712 		return ret;
4713 
4714 	dc_plane_state->format = plane_info.format;
4715 	dc_plane_state->color_space = plane_info.color_space;
4716 	dc_plane_state->format = plane_info.format;
4717 	dc_plane_state->plane_size = plane_info.plane_size;
4718 	dc_plane_state->rotation = plane_info.rotation;
4719 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4720 	dc_plane_state->stereo_format = plane_info.stereo_format;
4721 	dc_plane_state->tiling_info = plane_info.tiling_info;
4722 	dc_plane_state->visible = plane_info.visible;
4723 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4724 	dc_plane_state->global_alpha = plane_info.global_alpha;
4725 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4726 	dc_plane_state->dcc = plane_info.dcc;
4727 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4728 	dc_plane_state->flip_int_enabled = true;
4729 
4730 	/*
4731 	 * Always set input transfer function, since plane state is refreshed
4732 	 * every time.
4733 	 */
4734 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4735 	if (ret)
4736 		return ret;
4737 
4738 	return 0;
4739 }
4740 
4741 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4742 					   const struct dm_connector_state *dm_state,
4743 					   struct dc_stream_state *stream)
4744 {
4745 	enum amdgpu_rmx_type rmx_type;
4746 
4747 	struct rect src = { 0 }; /* viewport in composition space*/
4748 	struct rect dst = { 0 }; /* stream addressable area */
4749 
4750 	/* no mode. nothing to be done */
4751 	if (!mode)
4752 		return;
4753 
4754 	/* Full screen scaling by default */
4755 	src.width = mode->hdisplay;
4756 	src.height = mode->vdisplay;
4757 	dst.width = stream->timing.h_addressable;
4758 	dst.height = stream->timing.v_addressable;
4759 
4760 	if (dm_state) {
4761 		rmx_type = dm_state->scaling;
4762 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4763 			if (src.width * dst.height <
4764 					src.height * dst.width) {
4765 				/* height needs less upscaling/more downscaling */
4766 				dst.width = src.width *
4767 						dst.height / src.height;
4768 			} else {
4769 				/* width needs less upscaling/more downscaling */
4770 				dst.height = src.height *
4771 						dst.width / src.width;
4772 			}
4773 		} else if (rmx_type == RMX_CENTER) {
4774 			dst = src;
4775 		}
4776 
4777 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4778 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4779 
4780 		if (dm_state->underscan_enable) {
4781 			dst.x += dm_state->underscan_hborder / 2;
4782 			dst.y += dm_state->underscan_vborder / 2;
4783 			dst.width -= dm_state->underscan_hborder;
4784 			dst.height -= dm_state->underscan_vborder;
4785 		}
4786 	}
4787 
4788 	stream->src = src;
4789 	stream->dst = dst;
4790 
4791 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4792 			dst.x, dst.y, dst.width, dst.height);
4793 
4794 }
4795 
4796 static enum dc_color_depth
4797 convert_color_depth_from_display_info(const struct drm_connector *connector,
4798 				      bool is_y420, int requested_bpc)
4799 {
4800 	uint8_t bpc;
4801 
4802 	if (is_y420) {
4803 		bpc = 8;
4804 
4805 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4806 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4807 			bpc = 16;
4808 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4809 			bpc = 12;
4810 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4811 			bpc = 10;
4812 	} else {
4813 		bpc = (uint8_t)connector->display_info.bpc;
4814 		/* Assume 8 bpc by default if no bpc is specified. */
4815 		bpc = bpc ? bpc : 8;
4816 	}
4817 
4818 	if (requested_bpc > 0) {
4819 		/*
4820 		 * Cap display bpc based on the user requested value.
4821 		 *
4822 		 * The value for state->max_bpc may not correctly updated
4823 		 * depending on when the connector gets added to the state
4824 		 * or if this was called outside of atomic check, so it
4825 		 * can't be used directly.
4826 		 */
4827 		bpc = min_t(u8, bpc, requested_bpc);
4828 
4829 		/* Round down to the nearest even number. */
4830 		bpc = bpc - (bpc & 1);
4831 	}
4832 
4833 	switch (bpc) {
4834 	case 0:
4835 		/*
4836 		 * Temporary Work around, DRM doesn't parse color depth for
4837 		 * EDID revision before 1.4
4838 		 * TODO: Fix edid parsing
4839 		 */
4840 		return COLOR_DEPTH_888;
4841 	case 6:
4842 		return COLOR_DEPTH_666;
4843 	case 8:
4844 		return COLOR_DEPTH_888;
4845 	case 10:
4846 		return COLOR_DEPTH_101010;
4847 	case 12:
4848 		return COLOR_DEPTH_121212;
4849 	case 14:
4850 		return COLOR_DEPTH_141414;
4851 	case 16:
4852 		return COLOR_DEPTH_161616;
4853 	default:
4854 		return COLOR_DEPTH_UNDEFINED;
4855 	}
4856 }
4857 
4858 static enum dc_aspect_ratio
4859 get_aspect_ratio(const struct drm_display_mode *mode_in)
4860 {
4861 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4862 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4863 }
4864 
4865 static enum dc_color_space
4866 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4867 {
4868 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4869 
4870 	switch (dc_crtc_timing->pixel_encoding)	{
4871 	case PIXEL_ENCODING_YCBCR422:
4872 	case PIXEL_ENCODING_YCBCR444:
4873 	case PIXEL_ENCODING_YCBCR420:
4874 	{
4875 		/*
4876 		 * 27030khz is the separation point between HDTV and SDTV
4877 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4878 		 * respectively
4879 		 */
4880 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4881 			if (dc_crtc_timing->flags.Y_ONLY)
4882 				color_space =
4883 					COLOR_SPACE_YCBCR709_LIMITED;
4884 			else
4885 				color_space = COLOR_SPACE_YCBCR709;
4886 		} else {
4887 			if (dc_crtc_timing->flags.Y_ONLY)
4888 				color_space =
4889 					COLOR_SPACE_YCBCR601_LIMITED;
4890 			else
4891 				color_space = COLOR_SPACE_YCBCR601;
4892 		}
4893 
4894 	}
4895 	break;
4896 	case PIXEL_ENCODING_RGB:
4897 		color_space = COLOR_SPACE_SRGB;
4898 		break;
4899 
4900 	default:
4901 		WARN_ON(1);
4902 		break;
4903 	}
4904 
4905 	return color_space;
4906 }
4907 
4908 static bool adjust_colour_depth_from_display_info(
4909 	struct dc_crtc_timing *timing_out,
4910 	const struct drm_display_info *info)
4911 {
4912 	enum dc_color_depth depth = timing_out->display_color_depth;
4913 	int normalized_clk;
4914 	do {
4915 		normalized_clk = timing_out->pix_clk_100hz / 10;
4916 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4917 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4918 			normalized_clk /= 2;
4919 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4920 		switch (depth) {
4921 		case COLOR_DEPTH_888:
4922 			break;
4923 		case COLOR_DEPTH_101010:
4924 			normalized_clk = (normalized_clk * 30) / 24;
4925 			break;
4926 		case COLOR_DEPTH_121212:
4927 			normalized_clk = (normalized_clk * 36) / 24;
4928 			break;
4929 		case COLOR_DEPTH_161616:
4930 			normalized_clk = (normalized_clk * 48) / 24;
4931 			break;
4932 		default:
4933 			/* The above depths are the only ones valid for HDMI. */
4934 			return false;
4935 		}
4936 		if (normalized_clk <= info->max_tmds_clock) {
4937 			timing_out->display_color_depth = depth;
4938 			return true;
4939 		}
4940 	} while (--depth > COLOR_DEPTH_666);
4941 	return false;
4942 }
4943 
4944 static void fill_stream_properties_from_drm_display_mode(
4945 	struct dc_stream_state *stream,
4946 	const struct drm_display_mode *mode_in,
4947 	const struct drm_connector *connector,
4948 	const struct drm_connector_state *connector_state,
4949 	const struct dc_stream_state *old_stream,
4950 	int requested_bpc)
4951 {
4952 	struct dc_crtc_timing *timing_out = &stream->timing;
4953 	const struct drm_display_info *info = &connector->display_info;
4954 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4955 	struct hdmi_vendor_infoframe hv_frame;
4956 	struct hdmi_avi_infoframe avi_frame;
4957 
4958 	memset(&hv_frame, 0, sizeof(hv_frame));
4959 	memset(&avi_frame, 0, sizeof(avi_frame));
4960 
4961 	timing_out->h_border_left = 0;
4962 	timing_out->h_border_right = 0;
4963 	timing_out->v_border_top = 0;
4964 	timing_out->v_border_bottom = 0;
4965 	/* TODO: un-hardcode */
4966 	if (drm_mode_is_420_only(info, mode_in)
4967 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4968 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4969 	else if (drm_mode_is_420_also(info, mode_in)
4970 			&& aconnector->force_yuv420_output)
4971 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4972 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4973 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4974 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4975 	else
4976 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4977 
4978 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4979 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4980 		connector,
4981 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4982 		requested_bpc);
4983 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4984 	timing_out->hdmi_vic = 0;
4985 
4986 	if(old_stream) {
4987 		timing_out->vic = old_stream->timing.vic;
4988 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4989 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4990 	} else {
4991 		timing_out->vic = drm_match_cea_mode(mode_in);
4992 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4993 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4994 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4995 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4996 	}
4997 
4998 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4999 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5000 		timing_out->vic = avi_frame.video_code;
5001 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5002 		timing_out->hdmi_vic = hv_frame.vic;
5003 	}
5004 
5005 	timing_out->h_addressable = mode_in->crtc_hdisplay;
5006 	timing_out->h_total = mode_in->crtc_htotal;
5007 	timing_out->h_sync_width =
5008 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5009 	timing_out->h_front_porch =
5010 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5011 	timing_out->v_total = mode_in->crtc_vtotal;
5012 	timing_out->v_addressable = mode_in->crtc_vdisplay;
5013 	timing_out->v_front_porch =
5014 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5015 	timing_out->v_sync_width =
5016 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5017 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5018 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5019 
5020 	stream->output_color_space = get_output_color_space(timing_out);
5021 
5022 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5023 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5024 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5025 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5026 		    drm_mode_is_420_also(info, mode_in) &&
5027 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5028 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5029 			adjust_colour_depth_from_display_info(timing_out, info);
5030 		}
5031 	}
5032 }
5033 
5034 static void fill_audio_info(struct audio_info *audio_info,
5035 			    const struct drm_connector *drm_connector,
5036 			    const struct dc_sink *dc_sink)
5037 {
5038 	int i = 0;
5039 	int cea_revision = 0;
5040 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5041 
5042 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5043 	audio_info->product_id = edid_caps->product_id;
5044 
5045 	cea_revision = drm_connector->display_info.cea_rev;
5046 
5047 	strscpy(audio_info->display_name,
5048 		edid_caps->display_name,
5049 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5050 
5051 	if (cea_revision >= 3) {
5052 		audio_info->mode_count = edid_caps->audio_mode_count;
5053 
5054 		for (i = 0; i < audio_info->mode_count; ++i) {
5055 			audio_info->modes[i].format_code =
5056 					(enum audio_format_code)
5057 					(edid_caps->audio_modes[i].format_code);
5058 			audio_info->modes[i].channel_count =
5059 					edid_caps->audio_modes[i].channel_count;
5060 			audio_info->modes[i].sample_rates.all =
5061 					edid_caps->audio_modes[i].sample_rate;
5062 			audio_info->modes[i].sample_size =
5063 					edid_caps->audio_modes[i].sample_size;
5064 		}
5065 	}
5066 
5067 	audio_info->flags.all = edid_caps->speaker_flags;
5068 
5069 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5070 	if (drm_connector->latency_present[0]) {
5071 		audio_info->video_latency = drm_connector->video_latency[0];
5072 		audio_info->audio_latency = drm_connector->audio_latency[0];
5073 	}
5074 
5075 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5076 
5077 }
5078 
5079 static void
5080 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5081 				      struct drm_display_mode *dst_mode)
5082 {
5083 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5084 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5085 	dst_mode->crtc_clock = src_mode->crtc_clock;
5086 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5087 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5088 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5089 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5090 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5091 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5092 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5093 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5094 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5095 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5096 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5097 }
5098 
5099 static void
5100 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5101 					const struct drm_display_mode *native_mode,
5102 					bool scale_enabled)
5103 {
5104 	if (scale_enabled) {
5105 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5106 	} else if (native_mode->clock == drm_mode->clock &&
5107 			native_mode->htotal == drm_mode->htotal &&
5108 			native_mode->vtotal == drm_mode->vtotal) {
5109 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5110 	} else {
5111 		/* no scaling nor amdgpu inserted, no need to patch */
5112 	}
5113 }
5114 
5115 static struct dc_sink *
5116 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5117 {
5118 	struct dc_sink_init_data sink_init_data = { 0 };
5119 	struct dc_sink *sink = NULL;
5120 	sink_init_data.link = aconnector->dc_link;
5121 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5122 
5123 	sink = dc_sink_create(&sink_init_data);
5124 	if (!sink) {
5125 		DRM_ERROR("Failed to create sink!\n");
5126 		return NULL;
5127 	}
5128 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5129 
5130 	return sink;
5131 }
5132 
5133 static void set_multisync_trigger_params(
5134 		struct dc_stream_state *stream)
5135 {
5136 	if (stream->triggered_crtc_reset.enabled) {
5137 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5138 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5139 	}
5140 }
5141 
5142 static void set_master_stream(struct dc_stream_state *stream_set[],
5143 			      int stream_count)
5144 {
5145 	int j, highest_rfr = 0, master_stream = 0;
5146 
5147 	for (j = 0;  j < stream_count; j++) {
5148 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5149 			int refresh_rate = 0;
5150 
5151 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5152 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5153 			if (refresh_rate > highest_rfr) {
5154 				highest_rfr = refresh_rate;
5155 				master_stream = j;
5156 			}
5157 		}
5158 	}
5159 	for (j = 0;  j < stream_count; j++) {
5160 		if (stream_set[j])
5161 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5162 	}
5163 }
5164 
5165 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5166 {
5167 	int i = 0;
5168 
5169 	if (context->stream_count < 2)
5170 		return;
5171 	for (i = 0; i < context->stream_count ; i++) {
5172 		if (!context->streams[i])
5173 			continue;
5174 		/*
5175 		 * TODO: add a function to read AMD VSDB bits and set
5176 		 * crtc_sync_master.multi_sync_enabled flag
5177 		 * For now it's set to false
5178 		 */
5179 		set_multisync_trigger_params(context->streams[i]);
5180 	}
5181 	set_master_stream(context->streams, context->stream_count);
5182 }
5183 
5184 static struct dc_stream_state *
5185 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5186 		       const struct drm_display_mode *drm_mode,
5187 		       const struct dm_connector_state *dm_state,
5188 		       const struct dc_stream_state *old_stream,
5189 		       int requested_bpc)
5190 {
5191 	struct drm_display_mode *preferred_mode = NULL;
5192 	struct drm_connector *drm_connector;
5193 	const struct drm_connector_state *con_state =
5194 		dm_state ? &dm_state->base : NULL;
5195 	struct dc_stream_state *stream = NULL;
5196 	struct drm_display_mode mode = *drm_mode;
5197 	bool native_mode_found = false;
5198 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5199 	int mode_refresh;
5200 	int preferred_refresh = 0;
5201 #if defined(CONFIG_DRM_AMD_DC_DCN)
5202 	struct dsc_dec_dpcd_caps dsc_caps;
5203 	uint32_t link_bandwidth_kbps;
5204 #endif
5205 	struct dc_sink *sink = NULL;
5206 	if (aconnector == NULL) {
5207 		DRM_ERROR("aconnector is NULL!\n");
5208 		return stream;
5209 	}
5210 
5211 	drm_connector = &aconnector->base;
5212 
5213 	if (!aconnector->dc_sink) {
5214 		sink = create_fake_sink(aconnector);
5215 		if (!sink)
5216 			return stream;
5217 	} else {
5218 		sink = aconnector->dc_sink;
5219 		dc_sink_retain(sink);
5220 	}
5221 
5222 	stream = dc_create_stream_for_sink(sink);
5223 
5224 	if (stream == NULL) {
5225 		DRM_ERROR("Failed to create stream for sink!\n");
5226 		goto finish;
5227 	}
5228 
5229 	stream->dm_stream_context = aconnector;
5230 
5231 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5232 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5233 
5234 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5235 		/* Search for preferred mode */
5236 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5237 			native_mode_found = true;
5238 			break;
5239 		}
5240 	}
5241 	if (!native_mode_found)
5242 		preferred_mode = list_first_entry_or_null(
5243 				&aconnector->base.modes,
5244 				struct drm_display_mode,
5245 				head);
5246 
5247 	mode_refresh = drm_mode_vrefresh(&mode);
5248 
5249 	if (preferred_mode == NULL) {
5250 		/*
5251 		 * This may not be an error, the use case is when we have no
5252 		 * usermode calls to reset and set mode upon hotplug. In this
5253 		 * case, we call set mode ourselves to restore the previous mode
5254 		 * and the modelist may not be filled in in time.
5255 		 */
5256 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5257 	} else {
5258 		decide_crtc_timing_for_drm_display_mode(
5259 				&mode, preferred_mode,
5260 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5261 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5262 	}
5263 
5264 	if (!dm_state)
5265 		drm_mode_set_crtcinfo(&mode, 0);
5266 
5267 	/*
5268 	* If scaling is enabled and refresh rate didn't change
5269 	* we copy the vic and polarities of the old timings
5270 	*/
5271 	if (!scale || mode_refresh != preferred_refresh)
5272 		fill_stream_properties_from_drm_display_mode(stream,
5273 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5274 	else
5275 		fill_stream_properties_from_drm_display_mode(stream,
5276 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5277 
5278 	stream->timing.flags.DSC = 0;
5279 
5280 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5281 #if defined(CONFIG_DRM_AMD_DC_DCN)
5282 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5283 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5284 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5285 				      &dsc_caps);
5286 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5287 							     dc_link_get_link_cap(aconnector->dc_link));
5288 
5289 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5290 			/* Set DSC policy according to dsc_clock_en */
5291 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5292 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5293 
5294 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5295 						  &dsc_caps,
5296 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5297 						  0,
5298 						  link_bandwidth_kbps,
5299 						  &stream->timing,
5300 						  &stream->timing.dsc_cfg))
5301 				stream->timing.flags.DSC = 1;
5302 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5303 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5304 				stream->timing.flags.DSC = 1;
5305 
5306 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5307 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5308 
5309 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5310 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5311 
5312 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5313 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5314 		}
5315 #endif
5316 	}
5317 
5318 	update_stream_scaling_settings(&mode, dm_state, stream);
5319 
5320 	fill_audio_info(
5321 		&stream->audio_info,
5322 		drm_connector,
5323 		sink);
5324 
5325 	update_stream_signal(stream, sink);
5326 
5327 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5328 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5329 
5330 	if (stream->link->psr_settings.psr_feature_enabled) {
5331 		//
5332 		// should decide stream support vsc sdp colorimetry capability
5333 		// before building vsc info packet
5334 		//
5335 		stream->use_vsc_sdp_for_colorimetry = false;
5336 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5337 			stream->use_vsc_sdp_for_colorimetry =
5338 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5339 		} else {
5340 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5341 				stream->use_vsc_sdp_for_colorimetry = true;
5342 		}
5343 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5344 	}
5345 finish:
5346 	dc_sink_release(sink);
5347 
5348 	return stream;
5349 }
5350 
5351 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5352 {
5353 	drm_crtc_cleanup(crtc);
5354 	kfree(crtc);
5355 }
5356 
5357 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5358 				  struct drm_crtc_state *state)
5359 {
5360 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5361 
5362 	/* TODO Destroy dc_stream objects are stream object is flattened */
5363 	if (cur->stream)
5364 		dc_stream_release(cur->stream);
5365 
5366 
5367 	__drm_atomic_helper_crtc_destroy_state(state);
5368 
5369 
5370 	kfree(state);
5371 }
5372 
5373 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5374 {
5375 	struct dm_crtc_state *state;
5376 
5377 	if (crtc->state)
5378 		dm_crtc_destroy_state(crtc, crtc->state);
5379 
5380 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5381 	if (WARN_ON(!state))
5382 		return;
5383 
5384 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5385 }
5386 
5387 static struct drm_crtc_state *
5388 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5389 {
5390 	struct dm_crtc_state *state, *cur;
5391 
5392 	cur = to_dm_crtc_state(crtc->state);
5393 
5394 	if (WARN_ON(!crtc->state))
5395 		return NULL;
5396 
5397 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5398 	if (!state)
5399 		return NULL;
5400 
5401 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5402 
5403 	if (cur->stream) {
5404 		state->stream = cur->stream;
5405 		dc_stream_retain(state->stream);
5406 	}
5407 
5408 	state->active_planes = cur->active_planes;
5409 	state->vrr_infopacket = cur->vrr_infopacket;
5410 	state->abm_level = cur->abm_level;
5411 	state->vrr_supported = cur->vrr_supported;
5412 	state->freesync_config = cur->freesync_config;
5413 	state->crc_src = cur->crc_src;
5414 	state->cm_has_degamma = cur->cm_has_degamma;
5415 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5416 
5417 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5418 
5419 	return &state->base;
5420 }
5421 
5422 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5423 {
5424 	enum dc_irq_source irq_source;
5425 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5426 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5427 	int rc;
5428 
5429 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5430 
5431 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5432 
5433 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5434 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5435 	return rc;
5436 }
5437 
5438 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5439 {
5440 	enum dc_irq_source irq_source;
5441 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5442 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5443 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5444 #if defined(CONFIG_DRM_AMD_DC_DCN)
5445 	struct amdgpu_display_manager *dm = &adev->dm;
5446 	unsigned long flags;
5447 #endif
5448 	int rc = 0;
5449 
5450 	if (enable) {
5451 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5452 		if (amdgpu_dm_vrr_active(acrtc_state))
5453 			rc = dm_set_vupdate_irq(crtc, true);
5454 	} else {
5455 		/* vblank irq off -> vupdate irq off */
5456 		rc = dm_set_vupdate_irq(crtc, false);
5457 	}
5458 
5459 	if (rc)
5460 		return rc;
5461 
5462 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5463 
5464 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5465 		return -EBUSY;
5466 
5467 	if (amdgpu_in_reset(adev))
5468 		return 0;
5469 
5470 #if defined(CONFIG_DRM_AMD_DC_DCN)
5471 	spin_lock_irqsave(&dm->vblank_lock, flags);
5472 	dm->vblank_workqueue->dm = dm;
5473 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5474 	dm->vblank_workqueue->enable = enable;
5475 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5476 	schedule_work(&dm->vblank_workqueue->mall_work);
5477 #endif
5478 
5479 	return 0;
5480 }
5481 
5482 static int dm_enable_vblank(struct drm_crtc *crtc)
5483 {
5484 	return dm_set_vblank(crtc, true);
5485 }
5486 
5487 static void dm_disable_vblank(struct drm_crtc *crtc)
5488 {
5489 	dm_set_vblank(crtc, false);
5490 }
5491 
5492 /* Implemented only the options currently availible for the driver */
5493 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5494 	.reset = dm_crtc_reset_state,
5495 	.destroy = amdgpu_dm_crtc_destroy,
5496 	.set_config = drm_atomic_helper_set_config,
5497 	.page_flip = drm_atomic_helper_page_flip,
5498 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5499 	.atomic_destroy_state = dm_crtc_destroy_state,
5500 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5501 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5502 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5503 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5504 	.enable_vblank = dm_enable_vblank,
5505 	.disable_vblank = dm_disable_vblank,
5506 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5507 };
5508 
5509 static enum drm_connector_status
5510 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5511 {
5512 	bool connected;
5513 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5514 
5515 	/*
5516 	 * Notes:
5517 	 * 1. This interface is NOT called in context of HPD irq.
5518 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5519 	 * makes it a bad place for *any* MST-related activity.
5520 	 */
5521 
5522 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5523 	    !aconnector->fake_enable)
5524 		connected = (aconnector->dc_sink != NULL);
5525 	else
5526 		connected = (aconnector->base.force == DRM_FORCE_ON);
5527 
5528 	update_subconnector_property(aconnector);
5529 
5530 	return (connected ? connector_status_connected :
5531 			connector_status_disconnected);
5532 }
5533 
5534 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5535 					    struct drm_connector_state *connector_state,
5536 					    struct drm_property *property,
5537 					    uint64_t val)
5538 {
5539 	struct drm_device *dev = connector->dev;
5540 	struct amdgpu_device *adev = drm_to_adev(dev);
5541 	struct dm_connector_state *dm_old_state =
5542 		to_dm_connector_state(connector->state);
5543 	struct dm_connector_state *dm_new_state =
5544 		to_dm_connector_state(connector_state);
5545 
5546 	int ret = -EINVAL;
5547 
5548 	if (property == dev->mode_config.scaling_mode_property) {
5549 		enum amdgpu_rmx_type rmx_type;
5550 
5551 		switch (val) {
5552 		case DRM_MODE_SCALE_CENTER:
5553 			rmx_type = RMX_CENTER;
5554 			break;
5555 		case DRM_MODE_SCALE_ASPECT:
5556 			rmx_type = RMX_ASPECT;
5557 			break;
5558 		case DRM_MODE_SCALE_FULLSCREEN:
5559 			rmx_type = RMX_FULL;
5560 			break;
5561 		case DRM_MODE_SCALE_NONE:
5562 		default:
5563 			rmx_type = RMX_OFF;
5564 			break;
5565 		}
5566 
5567 		if (dm_old_state->scaling == rmx_type)
5568 			return 0;
5569 
5570 		dm_new_state->scaling = rmx_type;
5571 		ret = 0;
5572 	} else if (property == adev->mode_info.underscan_hborder_property) {
5573 		dm_new_state->underscan_hborder = val;
5574 		ret = 0;
5575 	} else if (property == adev->mode_info.underscan_vborder_property) {
5576 		dm_new_state->underscan_vborder = val;
5577 		ret = 0;
5578 	} else if (property == adev->mode_info.underscan_property) {
5579 		dm_new_state->underscan_enable = val;
5580 		ret = 0;
5581 	} else if (property == adev->mode_info.abm_level_property) {
5582 		dm_new_state->abm_level = val;
5583 		ret = 0;
5584 	}
5585 
5586 	return ret;
5587 }
5588 
5589 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5590 					    const struct drm_connector_state *state,
5591 					    struct drm_property *property,
5592 					    uint64_t *val)
5593 {
5594 	struct drm_device *dev = connector->dev;
5595 	struct amdgpu_device *adev = drm_to_adev(dev);
5596 	struct dm_connector_state *dm_state =
5597 		to_dm_connector_state(state);
5598 	int ret = -EINVAL;
5599 
5600 	if (property == dev->mode_config.scaling_mode_property) {
5601 		switch (dm_state->scaling) {
5602 		case RMX_CENTER:
5603 			*val = DRM_MODE_SCALE_CENTER;
5604 			break;
5605 		case RMX_ASPECT:
5606 			*val = DRM_MODE_SCALE_ASPECT;
5607 			break;
5608 		case RMX_FULL:
5609 			*val = DRM_MODE_SCALE_FULLSCREEN;
5610 			break;
5611 		case RMX_OFF:
5612 		default:
5613 			*val = DRM_MODE_SCALE_NONE;
5614 			break;
5615 		}
5616 		ret = 0;
5617 	} else if (property == adev->mode_info.underscan_hborder_property) {
5618 		*val = dm_state->underscan_hborder;
5619 		ret = 0;
5620 	} else if (property == adev->mode_info.underscan_vborder_property) {
5621 		*val = dm_state->underscan_vborder;
5622 		ret = 0;
5623 	} else if (property == adev->mode_info.underscan_property) {
5624 		*val = dm_state->underscan_enable;
5625 		ret = 0;
5626 	} else if (property == adev->mode_info.abm_level_property) {
5627 		*val = dm_state->abm_level;
5628 		ret = 0;
5629 	}
5630 
5631 	return ret;
5632 }
5633 
5634 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5635 {
5636 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5637 
5638 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5639 }
5640 
5641 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5642 {
5643 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5644 	const struct dc_link *link = aconnector->dc_link;
5645 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5646 	struct amdgpu_display_manager *dm = &adev->dm;
5647 
5648 	/*
5649 	 * Call only if mst_mgr was iniitalized before since it's not done
5650 	 * for all connector types.
5651 	 */
5652 	if (aconnector->mst_mgr.dev)
5653 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5654 
5655 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5656 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5657 
5658 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5659 	    link->type != dc_connection_none &&
5660 	    dm->backlight_dev) {
5661 		backlight_device_unregister(dm->backlight_dev);
5662 		dm->backlight_dev = NULL;
5663 	}
5664 #endif
5665 
5666 	if (aconnector->dc_em_sink)
5667 		dc_sink_release(aconnector->dc_em_sink);
5668 	aconnector->dc_em_sink = NULL;
5669 	if (aconnector->dc_sink)
5670 		dc_sink_release(aconnector->dc_sink);
5671 	aconnector->dc_sink = NULL;
5672 
5673 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5674 	drm_connector_unregister(connector);
5675 	drm_connector_cleanup(connector);
5676 	if (aconnector->i2c) {
5677 		i2c_del_adapter(&aconnector->i2c->base);
5678 		kfree(aconnector->i2c);
5679 	}
5680 	kfree(aconnector->dm_dp_aux.aux.name);
5681 
5682 	kfree(connector);
5683 }
5684 
5685 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5686 {
5687 	struct dm_connector_state *state =
5688 		to_dm_connector_state(connector->state);
5689 
5690 	if (connector->state)
5691 		__drm_atomic_helper_connector_destroy_state(connector->state);
5692 
5693 	kfree(state);
5694 
5695 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5696 
5697 	if (state) {
5698 		state->scaling = RMX_OFF;
5699 		state->underscan_enable = false;
5700 		state->underscan_hborder = 0;
5701 		state->underscan_vborder = 0;
5702 		state->base.max_requested_bpc = 8;
5703 		state->vcpi_slots = 0;
5704 		state->pbn = 0;
5705 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5706 			state->abm_level = amdgpu_dm_abm_level;
5707 
5708 		__drm_atomic_helper_connector_reset(connector, &state->base);
5709 	}
5710 }
5711 
5712 struct drm_connector_state *
5713 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5714 {
5715 	struct dm_connector_state *state =
5716 		to_dm_connector_state(connector->state);
5717 
5718 	struct dm_connector_state *new_state =
5719 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5720 
5721 	if (!new_state)
5722 		return NULL;
5723 
5724 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5725 
5726 	new_state->freesync_capable = state->freesync_capable;
5727 	new_state->abm_level = state->abm_level;
5728 	new_state->scaling = state->scaling;
5729 	new_state->underscan_enable = state->underscan_enable;
5730 	new_state->underscan_hborder = state->underscan_hborder;
5731 	new_state->underscan_vborder = state->underscan_vborder;
5732 	new_state->vcpi_slots = state->vcpi_slots;
5733 	new_state->pbn = state->pbn;
5734 	return &new_state->base;
5735 }
5736 
5737 static int
5738 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5739 {
5740 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5741 		to_amdgpu_dm_connector(connector);
5742 	int r;
5743 
5744 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5745 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5746 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5747 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5748 		if (r)
5749 			return r;
5750 	}
5751 
5752 #if defined(CONFIG_DEBUG_FS)
5753 	connector_debugfs_init(amdgpu_dm_connector);
5754 #endif
5755 
5756 	return 0;
5757 }
5758 
5759 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5760 	.reset = amdgpu_dm_connector_funcs_reset,
5761 	.detect = amdgpu_dm_connector_detect,
5762 	.fill_modes = drm_helper_probe_single_connector_modes,
5763 	.destroy = amdgpu_dm_connector_destroy,
5764 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5765 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5766 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5767 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5768 	.late_register = amdgpu_dm_connector_late_register,
5769 	.early_unregister = amdgpu_dm_connector_unregister
5770 };
5771 
5772 static int get_modes(struct drm_connector *connector)
5773 {
5774 	return amdgpu_dm_connector_get_modes(connector);
5775 }
5776 
5777 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5778 {
5779 	struct dc_sink_init_data init_params = {
5780 			.link = aconnector->dc_link,
5781 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5782 	};
5783 	struct edid *edid;
5784 
5785 	if (!aconnector->base.edid_blob_ptr) {
5786 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5787 				aconnector->base.name);
5788 
5789 		aconnector->base.force = DRM_FORCE_OFF;
5790 		aconnector->base.override_edid = false;
5791 		return;
5792 	}
5793 
5794 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5795 
5796 	aconnector->edid = edid;
5797 
5798 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5799 		aconnector->dc_link,
5800 		(uint8_t *)edid,
5801 		(edid->extensions + 1) * EDID_LENGTH,
5802 		&init_params);
5803 
5804 	if (aconnector->base.force == DRM_FORCE_ON) {
5805 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5806 		aconnector->dc_link->local_sink :
5807 		aconnector->dc_em_sink;
5808 		dc_sink_retain(aconnector->dc_sink);
5809 	}
5810 }
5811 
5812 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5813 {
5814 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5815 
5816 	/*
5817 	 * In case of headless boot with force on for DP managed connector
5818 	 * Those settings have to be != 0 to get initial modeset
5819 	 */
5820 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5821 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5822 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5823 	}
5824 
5825 
5826 	aconnector->base.override_edid = true;
5827 	create_eml_sink(aconnector);
5828 }
5829 
5830 static struct dc_stream_state *
5831 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5832 				const struct drm_display_mode *drm_mode,
5833 				const struct dm_connector_state *dm_state,
5834 				const struct dc_stream_state *old_stream)
5835 {
5836 	struct drm_connector *connector = &aconnector->base;
5837 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5838 	struct dc_stream_state *stream;
5839 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5840 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5841 	enum dc_status dc_result = DC_OK;
5842 
5843 	do {
5844 		stream = create_stream_for_sink(aconnector, drm_mode,
5845 						dm_state, old_stream,
5846 						requested_bpc);
5847 		if (stream == NULL) {
5848 			DRM_ERROR("Failed to create stream for sink!\n");
5849 			break;
5850 		}
5851 
5852 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5853 
5854 		if (dc_result != DC_OK) {
5855 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5856 				      drm_mode->hdisplay,
5857 				      drm_mode->vdisplay,
5858 				      drm_mode->clock,
5859 				      dc_result,
5860 				      dc_status_to_str(dc_result));
5861 
5862 			dc_stream_release(stream);
5863 			stream = NULL;
5864 			requested_bpc -= 2; /* lower bpc to retry validation */
5865 		}
5866 
5867 	} while (stream == NULL && requested_bpc >= 6);
5868 
5869 	return stream;
5870 }
5871 
5872 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5873 				   struct drm_display_mode *mode)
5874 {
5875 	int result = MODE_ERROR;
5876 	struct dc_sink *dc_sink;
5877 	/* TODO: Unhardcode stream count */
5878 	struct dc_stream_state *stream;
5879 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5880 
5881 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5882 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5883 		return result;
5884 
5885 	/*
5886 	 * Only run this the first time mode_valid is called to initilialize
5887 	 * EDID mgmt
5888 	 */
5889 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5890 		!aconnector->dc_em_sink)
5891 		handle_edid_mgmt(aconnector);
5892 
5893 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5894 
5895 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5896 				aconnector->base.force != DRM_FORCE_ON) {
5897 		DRM_ERROR("dc_sink is NULL!\n");
5898 		goto fail;
5899 	}
5900 
5901 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5902 	if (stream) {
5903 		dc_stream_release(stream);
5904 		result = MODE_OK;
5905 	}
5906 
5907 fail:
5908 	/* TODO: error handling*/
5909 	return result;
5910 }
5911 
5912 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5913 				struct dc_info_packet *out)
5914 {
5915 	struct hdmi_drm_infoframe frame;
5916 	unsigned char buf[30]; /* 26 + 4 */
5917 	ssize_t len;
5918 	int ret, i;
5919 
5920 	memset(out, 0, sizeof(*out));
5921 
5922 	if (!state->hdr_output_metadata)
5923 		return 0;
5924 
5925 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5926 	if (ret)
5927 		return ret;
5928 
5929 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5930 	if (len < 0)
5931 		return (int)len;
5932 
5933 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5934 	if (len != 30)
5935 		return -EINVAL;
5936 
5937 	/* Prepare the infopacket for DC. */
5938 	switch (state->connector->connector_type) {
5939 	case DRM_MODE_CONNECTOR_HDMIA:
5940 		out->hb0 = 0x87; /* type */
5941 		out->hb1 = 0x01; /* version */
5942 		out->hb2 = 0x1A; /* length */
5943 		out->sb[0] = buf[3]; /* checksum */
5944 		i = 1;
5945 		break;
5946 
5947 	case DRM_MODE_CONNECTOR_DisplayPort:
5948 	case DRM_MODE_CONNECTOR_eDP:
5949 		out->hb0 = 0x00; /* sdp id, zero */
5950 		out->hb1 = 0x87; /* type */
5951 		out->hb2 = 0x1D; /* payload len - 1 */
5952 		out->hb3 = (0x13 << 2); /* sdp version */
5953 		out->sb[0] = 0x01; /* version */
5954 		out->sb[1] = 0x1A; /* length */
5955 		i = 2;
5956 		break;
5957 
5958 	default:
5959 		return -EINVAL;
5960 	}
5961 
5962 	memcpy(&out->sb[i], &buf[4], 26);
5963 	out->valid = true;
5964 
5965 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5966 		       sizeof(out->sb), false);
5967 
5968 	return 0;
5969 }
5970 
5971 static bool
5972 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5973 			  const struct drm_connector_state *new_state)
5974 {
5975 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5976 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5977 
5978 	if (old_blob != new_blob) {
5979 		if (old_blob && new_blob &&
5980 		    old_blob->length == new_blob->length)
5981 			return memcmp(old_blob->data, new_blob->data,
5982 				      old_blob->length);
5983 
5984 		return true;
5985 	}
5986 
5987 	return false;
5988 }
5989 
5990 static int
5991 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5992 				 struct drm_atomic_state *state)
5993 {
5994 	struct drm_connector_state *new_con_state =
5995 		drm_atomic_get_new_connector_state(state, conn);
5996 	struct drm_connector_state *old_con_state =
5997 		drm_atomic_get_old_connector_state(state, conn);
5998 	struct drm_crtc *crtc = new_con_state->crtc;
5999 	struct drm_crtc_state *new_crtc_state;
6000 	int ret;
6001 
6002 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6003 
6004 	if (!crtc)
6005 		return 0;
6006 
6007 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6008 		struct dc_info_packet hdr_infopacket;
6009 
6010 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6011 		if (ret)
6012 			return ret;
6013 
6014 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6015 		if (IS_ERR(new_crtc_state))
6016 			return PTR_ERR(new_crtc_state);
6017 
6018 		/*
6019 		 * DC considers the stream backends changed if the
6020 		 * static metadata changes. Forcing the modeset also
6021 		 * gives a simple way for userspace to switch from
6022 		 * 8bpc to 10bpc when setting the metadata to enter
6023 		 * or exit HDR.
6024 		 *
6025 		 * Changing the static metadata after it's been
6026 		 * set is permissible, however. So only force a
6027 		 * modeset if we're entering or exiting HDR.
6028 		 */
6029 		new_crtc_state->mode_changed =
6030 			!old_con_state->hdr_output_metadata ||
6031 			!new_con_state->hdr_output_metadata;
6032 	}
6033 
6034 	return 0;
6035 }
6036 
6037 static const struct drm_connector_helper_funcs
6038 amdgpu_dm_connector_helper_funcs = {
6039 	/*
6040 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6041 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6042 	 * are missing after user start lightdm. So we need to renew modes list.
6043 	 * in get_modes call back, not just return the modes count
6044 	 */
6045 	.get_modes = get_modes,
6046 	.mode_valid = amdgpu_dm_connector_mode_valid,
6047 	.atomic_check = amdgpu_dm_connector_atomic_check,
6048 };
6049 
6050 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6051 {
6052 }
6053 
6054 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6055 {
6056 	struct drm_atomic_state *state = new_crtc_state->state;
6057 	struct drm_plane *plane;
6058 	int num_active = 0;
6059 
6060 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6061 		struct drm_plane_state *new_plane_state;
6062 
6063 		/* Cursor planes are "fake". */
6064 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6065 			continue;
6066 
6067 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6068 
6069 		if (!new_plane_state) {
6070 			/*
6071 			 * The plane is enable on the CRTC and hasn't changed
6072 			 * state. This means that it previously passed
6073 			 * validation and is therefore enabled.
6074 			 */
6075 			num_active += 1;
6076 			continue;
6077 		}
6078 
6079 		/* We need a framebuffer to be considered enabled. */
6080 		num_active += (new_plane_state->fb != NULL);
6081 	}
6082 
6083 	return num_active;
6084 }
6085 
6086 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6087 					 struct drm_crtc_state *new_crtc_state)
6088 {
6089 	struct dm_crtc_state *dm_new_crtc_state =
6090 		to_dm_crtc_state(new_crtc_state);
6091 
6092 	dm_new_crtc_state->active_planes = 0;
6093 
6094 	if (!dm_new_crtc_state->stream)
6095 		return;
6096 
6097 	dm_new_crtc_state->active_planes =
6098 		count_crtc_active_planes(new_crtc_state);
6099 }
6100 
6101 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6102 				       struct drm_atomic_state *state)
6103 {
6104 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6105 									  crtc);
6106 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6107 	struct dc *dc = adev->dm.dc;
6108 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6109 	int ret = -EINVAL;
6110 
6111 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6112 
6113 	dm_update_crtc_active_planes(crtc, crtc_state);
6114 
6115 	if (unlikely(!dm_crtc_state->stream &&
6116 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6117 		WARN_ON(1);
6118 		return ret;
6119 	}
6120 
6121 	/*
6122 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6123 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6124 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6125 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6126 	 */
6127 	if (crtc_state->enable &&
6128 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6129 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6130 		return -EINVAL;
6131 	}
6132 
6133 	/* In some use cases, like reset, no stream is attached */
6134 	if (!dm_crtc_state->stream)
6135 		return 0;
6136 
6137 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6138 		return 0;
6139 
6140 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6141 	return ret;
6142 }
6143 
6144 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6145 				      const struct drm_display_mode *mode,
6146 				      struct drm_display_mode *adjusted_mode)
6147 {
6148 	return true;
6149 }
6150 
6151 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6152 	.disable = dm_crtc_helper_disable,
6153 	.atomic_check = dm_crtc_helper_atomic_check,
6154 	.mode_fixup = dm_crtc_helper_mode_fixup,
6155 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6156 };
6157 
6158 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6159 {
6160 
6161 }
6162 
6163 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6164 {
6165 	switch (display_color_depth) {
6166 		case COLOR_DEPTH_666:
6167 			return 6;
6168 		case COLOR_DEPTH_888:
6169 			return 8;
6170 		case COLOR_DEPTH_101010:
6171 			return 10;
6172 		case COLOR_DEPTH_121212:
6173 			return 12;
6174 		case COLOR_DEPTH_141414:
6175 			return 14;
6176 		case COLOR_DEPTH_161616:
6177 			return 16;
6178 		default:
6179 			break;
6180 		}
6181 	return 0;
6182 }
6183 
6184 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6185 					  struct drm_crtc_state *crtc_state,
6186 					  struct drm_connector_state *conn_state)
6187 {
6188 	struct drm_atomic_state *state = crtc_state->state;
6189 	struct drm_connector *connector = conn_state->connector;
6190 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6191 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6192 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6193 	struct drm_dp_mst_topology_mgr *mst_mgr;
6194 	struct drm_dp_mst_port *mst_port;
6195 	enum dc_color_depth color_depth;
6196 	int clock, bpp = 0;
6197 	bool is_y420 = false;
6198 
6199 	if (!aconnector->port || !aconnector->dc_sink)
6200 		return 0;
6201 
6202 	mst_port = aconnector->port;
6203 	mst_mgr = &aconnector->mst_port->mst_mgr;
6204 
6205 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6206 		return 0;
6207 
6208 	if (!state->duplicated) {
6209 		int max_bpc = conn_state->max_requested_bpc;
6210 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6211 				aconnector->force_yuv420_output;
6212 		color_depth = convert_color_depth_from_display_info(connector,
6213 								    is_y420,
6214 								    max_bpc);
6215 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6216 		clock = adjusted_mode->clock;
6217 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6218 	}
6219 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6220 									   mst_mgr,
6221 									   mst_port,
6222 									   dm_new_connector_state->pbn,
6223 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6224 	if (dm_new_connector_state->vcpi_slots < 0) {
6225 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6226 		return dm_new_connector_state->vcpi_slots;
6227 	}
6228 	return 0;
6229 }
6230 
6231 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6232 	.disable = dm_encoder_helper_disable,
6233 	.atomic_check = dm_encoder_helper_atomic_check
6234 };
6235 
6236 #if defined(CONFIG_DRM_AMD_DC_DCN)
6237 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6238 					    struct dc_state *dc_state)
6239 {
6240 	struct dc_stream_state *stream = NULL;
6241 	struct drm_connector *connector;
6242 	struct drm_connector_state *new_con_state, *old_con_state;
6243 	struct amdgpu_dm_connector *aconnector;
6244 	struct dm_connector_state *dm_conn_state;
6245 	int i, j, clock, bpp;
6246 	int vcpi, pbn_div, pbn = 0;
6247 
6248 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6249 
6250 		aconnector = to_amdgpu_dm_connector(connector);
6251 
6252 		if (!aconnector->port)
6253 			continue;
6254 
6255 		if (!new_con_state || !new_con_state->crtc)
6256 			continue;
6257 
6258 		dm_conn_state = to_dm_connector_state(new_con_state);
6259 
6260 		for (j = 0; j < dc_state->stream_count; j++) {
6261 			stream = dc_state->streams[j];
6262 			if (!stream)
6263 				continue;
6264 
6265 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6266 				break;
6267 
6268 			stream = NULL;
6269 		}
6270 
6271 		if (!stream)
6272 			continue;
6273 
6274 		if (stream->timing.flags.DSC != 1) {
6275 			drm_dp_mst_atomic_enable_dsc(state,
6276 						     aconnector->port,
6277 						     dm_conn_state->pbn,
6278 						     0,
6279 						     false);
6280 			continue;
6281 		}
6282 
6283 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6284 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6285 		clock = stream->timing.pix_clk_100hz / 10;
6286 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6287 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6288 						    aconnector->port,
6289 						    pbn, pbn_div,
6290 						    true);
6291 		if (vcpi < 0)
6292 			return vcpi;
6293 
6294 		dm_conn_state->pbn = pbn;
6295 		dm_conn_state->vcpi_slots = vcpi;
6296 	}
6297 	return 0;
6298 }
6299 #endif
6300 
6301 static void dm_drm_plane_reset(struct drm_plane *plane)
6302 {
6303 	struct dm_plane_state *amdgpu_state = NULL;
6304 
6305 	if (plane->state)
6306 		plane->funcs->atomic_destroy_state(plane, plane->state);
6307 
6308 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6309 	WARN_ON(amdgpu_state == NULL);
6310 
6311 	if (amdgpu_state)
6312 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6313 }
6314 
6315 static struct drm_plane_state *
6316 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6317 {
6318 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6319 
6320 	old_dm_plane_state = to_dm_plane_state(plane->state);
6321 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6322 	if (!dm_plane_state)
6323 		return NULL;
6324 
6325 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6326 
6327 	if (old_dm_plane_state->dc_state) {
6328 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6329 		dc_plane_state_retain(dm_plane_state->dc_state);
6330 	}
6331 
6332 	return &dm_plane_state->base;
6333 }
6334 
6335 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6336 				struct drm_plane_state *state)
6337 {
6338 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6339 
6340 	if (dm_plane_state->dc_state)
6341 		dc_plane_state_release(dm_plane_state->dc_state);
6342 
6343 	drm_atomic_helper_plane_destroy_state(plane, state);
6344 }
6345 
6346 static const struct drm_plane_funcs dm_plane_funcs = {
6347 	.update_plane	= drm_atomic_helper_update_plane,
6348 	.disable_plane	= drm_atomic_helper_disable_plane,
6349 	.destroy	= drm_primary_helper_destroy,
6350 	.reset = dm_drm_plane_reset,
6351 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6352 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6353 	.format_mod_supported = dm_plane_format_mod_supported,
6354 };
6355 
6356 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6357 				      struct drm_plane_state *new_state)
6358 {
6359 	struct amdgpu_framebuffer *afb;
6360 	struct drm_gem_object *obj;
6361 	struct amdgpu_device *adev;
6362 	struct amdgpu_bo *rbo;
6363 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6364 	struct list_head list;
6365 	struct ttm_validate_buffer tv;
6366 	struct ww_acquire_ctx ticket;
6367 	uint32_t domain;
6368 	int r;
6369 
6370 	if (!new_state->fb) {
6371 		DRM_DEBUG_DRIVER("No FB bound\n");
6372 		return 0;
6373 	}
6374 
6375 	afb = to_amdgpu_framebuffer(new_state->fb);
6376 	obj = new_state->fb->obj[0];
6377 	rbo = gem_to_amdgpu_bo(obj);
6378 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6379 	INIT_LIST_HEAD(&list);
6380 
6381 	tv.bo = &rbo->tbo;
6382 	tv.num_shared = 1;
6383 	list_add(&tv.head, &list);
6384 
6385 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6386 	if (r) {
6387 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6388 		return r;
6389 	}
6390 
6391 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6392 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6393 	else
6394 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6395 
6396 	r = amdgpu_bo_pin(rbo, domain);
6397 	if (unlikely(r != 0)) {
6398 		if (r != -ERESTARTSYS)
6399 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6400 		ttm_eu_backoff_reservation(&ticket, &list);
6401 		return r;
6402 	}
6403 
6404 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6405 	if (unlikely(r != 0)) {
6406 		amdgpu_bo_unpin(rbo);
6407 		ttm_eu_backoff_reservation(&ticket, &list);
6408 		DRM_ERROR("%p bind failed\n", rbo);
6409 		return r;
6410 	}
6411 
6412 	ttm_eu_backoff_reservation(&ticket, &list);
6413 
6414 	afb->address = amdgpu_bo_gpu_offset(rbo);
6415 
6416 	amdgpu_bo_ref(rbo);
6417 
6418 	/**
6419 	 * We don't do surface updates on planes that have been newly created,
6420 	 * but we also don't have the afb->address during atomic check.
6421 	 *
6422 	 * Fill in buffer attributes depending on the address here, but only on
6423 	 * newly created planes since they're not being used by DC yet and this
6424 	 * won't modify global state.
6425 	 */
6426 	dm_plane_state_old = to_dm_plane_state(plane->state);
6427 	dm_plane_state_new = to_dm_plane_state(new_state);
6428 
6429 	if (dm_plane_state_new->dc_state &&
6430 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6431 		struct dc_plane_state *plane_state =
6432 			dm_plane_state_new->dc_state;
6433 		bool force_disable_dcc = !plane_state->dcc.enable;
6434 
6435 		fill_plane_buffer_attributes(
6436 			adev, afb, plane_state->format, plane_state->rotation,
6437 			afb->tiling_flags,
6438 			&plane_state->tiling_info, &plane_state->plane_size,
6439 			&plane_state->dcc, &plane_state->address,
6440 			afb->tmz_surface, force_disable_dcc);
6441 	}
6442 
6443 	return 0;
6444 }
6445 
6446 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6447 				       struct drm_plane_state *old_state)
6448 {
6449 	struct amdgpu_bo *rbo;
6450 	int r;
6451 
6452 	if (!old_state->fb)
6453 		return;
6454 
6455 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6456 	r = amdgpu_bo_reserve(rbo, false);
6457 	if (unlikely(r)) {
6458 		DRM_ERROR("failed to reserve rbo before unpin\n");
6459 		return;
6460 	}
6461 
6462 	amdgpu_bo_unpin(rbo);
6463 	amdgpu_bo_unreserve(rbo);
6464 	amdgpu_bo_unref(&rbo);
6465 }
6466 
6467 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6468 				       struct drm_crtc_state *new_crtc_state)
6469 {
6470 	struct drm_framebuffer *fb = state->fb;
6471 	int min_downscale, max_upscale;
6472 	int min_scale = 0;
6473 	int max_scale = INT_MAX;
6474 
6475 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6476 	if (fb && state->crtc) {
6477 		/* Validate viewport to cover the case when only the position changes */
6478 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6479 			int viewport_width = state->crtc_w;
6480 			int viewport_height = state->crtc_h;
6481 
6482 			if (state->crtc_x < 0)
6483 				viewport_width += state->crtc_x;
6484 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6485 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6486 
6487 			if (state->crtc_y < 0)
6488 				viewport_height += state->crtc_y;
6489 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6490 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6491 
6492 			/* If completely outside of screen, viewport_width and/or viewport_height will be negative,
6493 			 * which is still OK to satisfy the condition below, thereby also covering these cases
6494 			 * (when plane is completely outside of screen).
6495 			 * x2 for width is because of pipe-split.
6496 			 */
6497 			if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6498 				return -EINVAL;
6499 		}
6500 
6501 		/* Get min/max allowed scaling factors from plane caps. */
6502 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6503 					     &min_downscale, &max_upscale);
6504 		/*
6505 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6506 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6507 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6508 		 */
6509 		min_scale = (1000 << 16) / max_upscale;
6510 		max_scale = (1000 << 16) / min_downscale;
6511 	}
6512 
6513 	return drm_atomic_helper_check_plane_state(
6514 		state, new_crtc_state, min_scale, max_scale, true, true);
6515 }
6516 
6517 static int dm_plane_atomic_check(struct drm_plane *plane,
6518 				 struct drm_plane_state *state)
6519 {
6520 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6521 	struct dc *dc = adev->dm.dc;
6522 	struct dm_plane_state *dm_plane_state;
6523 	struct dc_scaling_info scaling_info;
6524 	struct drm_crtc_state *new_crtc_state;
6525 	int ret;
6526 
6527 	trace_amdgpu_dm_plane_atomic_check(state);
6528 
6529 	dm_plane_state = to_dm_plane_state(state);
6530 
6531 	if (!dm_plane_state->dc_state)
6532 		return 0;
6533 
6534 	new_crtc_state =
6535 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6536 	if (!new_crtc_state)
6537 		return -EINVAL;
6538 
6539 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6540 	if (ret)
6541 		return ret;
6542 
6543 	ret = fill_dc_scaling_info(state, &scaling_info);
6544 	if (ret)
6545 		return ret;
6546 
6547 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6548 		return 0;
6549 
6550 	return -EINVAL;
6551 }
6552 
6553 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6554 				       struct drm_plane_state *new_plane_state)
6555 {
6556 	/* Only support async updates on cursor planes. */
6557 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6558 		return -EINVAL;
6559 
6560 	return 0;
6561 }
6562 
6563 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6564 					 struct drm_plane_state *new_state)
6565 {
6566 	struct drm_plane_state *old_state =
6567 		drm_atomic_get_old_plane_state(new_state->state, plane);
6568 
6569 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6570 
6571 	swap(plane->state->fb, new_state->fb);
6572 
6573 	plane->state->src_x = new_state->src_x;
6574 	plane->state->src_y = new_state->src_y;
6575 	plane->state->src_w = new_state->src_w;
6576 	plane->state->src_h = new_state->src_h;
6577 	plane->state->crtc_x = new_state->crtc_x;
6578 	plane->state->crtc_y = new_state->crtc_y;
6579 	plane->state->crtc_w = new_state->crtc_w;
6580 	plane->state->crtc_h = new_state->crtc_h;
6581 
6582 	handle_cursor_update(plane, old_state);
6583 }
6584 
6585 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6586 	.prepare_fb = dm_plane_helper_prepare_fb,
6587 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6588 	.atomic_check = dm_plane_atomic_check,
6589 	.atomic_async_check = dm_plane_atomic_async_check,
6590 	.atomic_async_update = dm_plane_atomic_async_update
6591 };
6592 
6593 /*
6594  * TODO: these are currently initialized to rgb formats only.
6595  * For future use cases we should either initialize them dynamically based on
6596  * plane capabilities, or initialize this array to all formats, so internal drm
6597  * check will succeed, and let DC implement proper check
6598  */
6599 static const uint32_t rgb_formats[] = {
6600 	DRM_FORMAT_XRGB8888,
6601 	DRM_FORMAT_ARGB8888,
6602 	DRM_FORMAT_RGBA8888,
6603 	DRM_FORMAT_XRGB2101010,
6604 	DRM_FORMAT_XBGR2101010,
6605 	DRM_FORMAT_ARGB2101010,
6606 	DRM_FORMAT_ABGR2101010,
6607 	DRM_FORMAT_XBGR8888,
6608 	DRM_FORMAT_ABGR8888,
6609 	DRM_FORMAT_RGB565,
6610 };
6611 
6612 static const uint32_t overlay_formats[] = {
6613 	DRM_FORMAT_XRGB8888,
6614 	DRM_FORMAT_ARGB8888,
6615 	DRM_FORMAT_RGBA8888,
6616 	DRM_FORMAT_XBGR8888,
6617 	DRM_FORMAT_ABGR8888,
6618 	DRM_FORMAT_RGB565
6619 };
6620 
6621 static const u32 cursor_formats[] = {
6622 	DRM_FORMAT_ARGB8888
6623 };
6624 
6625 static int get_plane_formats(const struct drm_plane *plane,
6626 			     const struct dc_plane_cap *plane_cap,
6627 			     uint32_t *formats, int max_formats)
6628 {
6629 	int i, num_formats = 0;
6630 
6631 	/*
6632 	 * TODO: Query support for each group of formats directly from
6633 	 * DC plane caps. This will require adding more formats to the
6634 	 * caps list.
6635 	 */
6636 
6637 	switch (plane->type) {
6638 	case DRM_PLANE_TYPE_PRIMARY:
6639 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6640 			if (num_formats >= max_formats)
6641 				break;
6642 
6643 			formats[num_formats++] = rgb_formats[i];
6644 		}
6645 
6646 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6647 			formats[num_formats++] = DRM_FORMAT_NV12;
6648 		if (plane_cap && plane_cap->pixel_format_support.p010)
6649 			formats[num_formats++] = DRM_FORMAT_P010;
6650 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6651 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6652 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6653 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6654 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6655 		}
6656 		break;
6657 
6658 	case DRM_PLANE_TYPE_OVERLAY:
6659 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6660 			if (num_formats >= max_formats)
6661 				break;
6662 
6663 			formats[num_formats++] = overlay_formats[i];
6664 		}
6665 		break;
6666 
6667 	case DRM_PLANE_TYPE_CURSOR:
6668 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6669 			if (num_formats >= max_formats)
6670 				break;
6671 
6672 			formats[num_formats++] = cursor_formats[i];
6673 		}
6674 		break;
6675 	}
6676 
6677 	return num_formats;
6678 }
6679 
6680 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6681 				struct drm_plane *plane,
6682 				unsigned long possible_crtcs,
6683 				const struct dc_plane_cap *plane_cap)
6684 {
6685 	uint32_t formats[32];
6686 	int num_formats;
6687 	int res = -EPERM;
6688 	unsigned int supported_rotations;
6689 	uint64_t *modifiers = NULL;
6690 
6691 	num_formats = get_plane_formats(plane, plane_cap, formats,
6692 					ARRAY_SIZE(formats));
6693 
6694 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6695 	if (res)
6696 		return res;
6697 
6698 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6699 				       &dm_plane_funcs, formats, num_formats,
6700 				       modifiers, plane->type, NULL);
6701 	kfree(modifiers);
6702 	if (res)
6703 		return res;
6704 
6705 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6706 	    plane_cap && plane_cap->per_pixel_alpha) {
6707 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6708 					  BIT(DRM_MODE_BLEND_PREMULTI);
6709 
6710 		drm_plane_create_alpha_property(plane);
6711 		drm_plane_create_blend_mode_property(plane, blend_caps);
6712 	}
6713 
6714 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6715 	    plane_cap &&
6716 	    (plane_cap->pixel_format_support.nv12 ||
6717 	     plane_cap->pixel_format_support.p010)) {
6718 		/* This only affects YUV formats. */
6719 		drm_plane_create_color_properties(
6720 			plane,
6721 			BIT(DRM_COLOR_YCBCR_BT601) |
6722 			BIT(DRM_COLOR_YCBCR_BT709) |
6723 			BIT(DRM_COLOR_YCBCR_BT2020),
6724 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6725 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6726 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6727 	}
6728 
6729 	supported_rotations =
6730 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6731 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6732 
6733 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6734 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6735 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6736 						   supported_rotations);
6737 
6738 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6739 
6740 	/* Create (reset) the plane state */
6741 	if (plane->funcs->reset)
6742 		plane->funcs->reset(plane);
6743 
6744 	return 0;
6745 }
6746 
6747 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6748 			       struct drm_plane *plane,
6749 			       uint32_t crtc_index)
6750 {
6751 	struct amdgpu_crtc *acrtc = NULL;
6752 	struct drm_plane *cursor_plane;
6753 
6754 	int res = -ENOMEM;
6755 
6756 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6757 	if (!cursor_plane)
6758 		goto fail;
6759 
6760 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6761 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6762 
6763 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6764 	if (!acrtc)
6765 		goto fail;
6766 
6767 	res = drm_crtc_init_with_planes(
6768 			dm->ddev,
6769 			&acrtc->base,
6770 			plane,
6771 			cursor_plane,
6772 			&amdgpu_dm_crtc_funcs, NULL);
6773 
6774 	if (res)
6775 		goto fail;
6776 
6777 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6778 
6779 	/* Create (reset) the plane state */
6780 	if (acrtc->base.funcs->reset)
6781 		acrtc->base.funcs->reset(&acrtc->base);
6782 
6783 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6784 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6785 
6786 	acrtc->crtc_id = crtc_index;
6787 	acrtc->base.enabled = false;
6788 	acrtc->otg_inst = -1;
6789 
6790 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6791 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6792 				   true, MAX_COLOR_LUT_ENTRIES);
6793 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6794 
6795 	return 0;
6796 
6797 fail:
6798 	kfree(acrtc);
6799 	kfree(cursor_plane);
6800 	return res;
6801 }
6802 
6803 
6804 static int to_drm_connector_type(enum signal_type st)
6805 {
6806 	switch (st) {
6807 	case SIGNAL_TYPE_HDMI_TYPE_A:
6808 		return DRM_MODE_CONNECTOR_HDMIA;
6809 	case SIGNAL_TYPE_EDP:
6810 		return DRM_MODE_CONNECTOR_eDP;
6811 	case SIGNAL_TYPE_LVDS:
6812 		return DRM_MODE_CONNECTOR_LVDS;
6813 	case SIGNAL_TYPE_RGB:
6814 		return DRM_MODE_CONNECTOR_VGA;
6815 	case SIGNAL_TYPE_DISPLAY_PORT:
6816 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6817 		return DRM_MODE_CONNECTOR_DisplayPort;
6818 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6819 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6820 		return DRM_MODE_CONNECTOR_DVID;
6821 	case SIGNAL_TYPE_VIRTUAL:
6822 		return DRM_MODE_CONNECTOR_VIRTUAL;
6823 
6824 	default:
6825 		return DRM_MODE_CONNECTOR_Unknown;
6826 	}
6827 }
6828 
6829 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6830 {
6831 	struct drm_encoder *encoder;
6832 
6833 	/* There is only one encoder per connector */
6834 	drm_connector_for_each_possible_encoder(connector, encoder)
6835 		return encoder;
6836 
6837 	return NULL;
6838 }
6839 
6840 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6841 {
6842 	struct drm_encoder *encoder;
6843 	struct amdgpu_encoder *amdgpu_encoder;
6844 
6845 	encoder = amdgpu_dm_connector_to_encoder(connector);
6846 
6847 	if (encoder == NULL)
6848 		return;
6849 
6850 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6851 
6852 	amdgpu_encoder->native_mode.clock = 0;
6853 
6854 	if (!list_empty(&connector->probed_modes)) {
6855 		struct drm_display_mode *preferred_mode = NULL;
6856 
6857 		list_for_each_entry(preferred_mode,
6858 				    &connector->probed_modes,
6859 				    head) {
6860 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6861 				amdgpu_encoder->native_mode = *preferred_mode;
6862 
6863 			break;
6864 		}
6865 
6866 	}
6867 }
6868 
6869 static struct drm_display_mode *
6870 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6871 			     char *name,
6872 			     int hdisplay, int vdisplay)
6873 {
6874 	struct drm_device *dev = encoder->dev;
6875 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6876 	struct drm_display_mode *mode = NULL;
6877 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6878 
6879 	mode = drm_mode_duplicate(dev, native_mode);
6880 
6881 	if (mode == NULL)
6882 		return NULL;
6883 
6884 	mode->hdisplay = hdisplay;
6885 	mode->vdisplay = vdisplay;
6886 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6887 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6888 
6889 	return mode;
6890 
6891 }
6892 
6893 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6894 						 struct drm_connector *connector)
6895 {
6896 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6897 	struct drm_display_mode *mode = NULL;
6898 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6899 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6900 				to_amdgpu_dm_connector(connector);
6901 	int i;
6902 	int n;
6903 	struct mode_size {
6904 		char name[DRM_DISPLAY_MODE_LEN];
6905 		int w;
6906 		int h;
6907 	} common_modes[] = {
6908 		{  "640x480",  640,  480},
6909 		{  "800x600",  800,  600},
6910 		{ "1024x768", 1024,  768},
6911 		{ "1280x720", 1280,  720},
6912 		{ "1280x800", 1280,  800},
6913 		{"1280x1024", 1280, 1024},
6914 		{ "1440x900", 1440,  900},
6915 		{"1680x1050", 1680, 1050},
6916 		{"1600x1200", 1600, 1200},
6917 		{"1920x1080", 1920, 1080},
6918 		{"1920x1200", 1920, 1200}
6919 	};
6920 
6921 	n = ARRAY_SIZE(common_modes);
6922 
6923 	for (i = 0; i < n; i++) {
6924 		struct drm_display_mode *curmode = NULL;
6925 		bool mode_existed = false;
6926 
6927 		if (common_modes[i].w > native_mode->hdisplay ||
6928 		    common_modes[i].h > native_mode->vdisplay ||
6929 		   (common_modes[i].w == native_mode->hdisplay &&
6930 		    common_modes[i].h == native_mode->vdisplay))
6931 			continue;
6932 
6933 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6934 			if (common_modes[i].w == curmode->hdisplay &&
6935 			    common_modes[i].h == curmode->vdisplay) {
6936 				mode_existed = true;
6937 				break;
6938 			}
6939 		}
6940 
6941 		if (mode_existed)
6942 			continue;
6943 
6944 		mode = amdgpu_dm_create_common_mode(encoder,
6945 				common_modes[i].name, common_modes[i].w,
6946 				common_modes[i].h);
6947 		drm_mode_probed_add(connector, mode);
6948 		amdgpu_dm_connector->num_modes++;
6949 	}
6950 }
6951 
6952 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6953 					      struct edid *edid)
6954 {
6955 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6956 			to_amdgpu_dm_connector(connector);
6957 
6958 	if (edid) {
6959 		/* empty probed_modes */
6960 		INIT_LIST_HEAD(&connector->probed_modes);
6961 		amdgpu_dm_connector->num_modes =
6962 				drm_add_edid_modes(connector, edid);
6963 
6964 		/* sorting the probed modes before calling function
6965 		 * amdgpu_dm_get_native_mode() since EDID can have
6966 		 * more than one preferred mode. The modes that are
6967 		 * later in the probed mode list could be of higher
6968 		 * and preferred resolution. For example, 3840x2160
6969 		 * resolution in base EDID preferred timing and 4096x2160
6970 		 * preferred resolution in DID extension block later.
6971 		 */
6972 		drm_mode_sort(&connector->probed_modes);
6973 		amdgpu_dm_get_native_mode(connector);
6974 	} else {
6975 		amdgpu_dm_connector->num_modes = 0;
6976 	}
6977 }
6978 
6979 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6980 {
6981 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6982 			to_amdgpu_dm_connector(connector);
6983 	struct drm_encoder *encoder;
6984 	struct edid *edid = amdgpu_dm_connector->edid;
6985 
6986 	encoder = amdgpu_dm_connector_to_encoder(connector);
6987 
6988 	if (!drm_edid_is_valid(edid)) {
6989 		amdgpu_dm_connector->num_modes =
6990 				drm_add_modes_noedid(connector, 640, 480);
6991 	} else {
6992 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6993 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6994 	}
6995 	amdgpu_dm_fbc_init(connector);
6996 
6997 	return amdgpu_dm_connector->num_modes;
6998 }
6999 
7000 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7001 				     struct amdgpu_dm_connector *aconnector,
7002 				     int connector_type,
7003 				     struct dc_link *link,
7004 				     int link_index)
7005 {
7006 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7007 
7008 	/*
7009 	 * Some of the properties below require access to state, like bpc.
7010 	 * Allocate some default initial connector state with our reset helper.
7011 	 */
7012 	if (aconnector->base.funcs->reset)
7013 		aconnector->base.funcs->reset(&aconnector->base);
7014 
7015 	aconnector->connector_id = link_index;
7016 	aconnector->dc_link = link;
7017 	aconnector->base.interlace_allowed = false;
7018 	aconnector->base.doublescan_allowed = false;
7019 	aconnector->base.stereo_allowed = false;
7020 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7021 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7022 	aconnector->audio_inst = -1;
7023 	mutex_init(&aconnector->hpd_lock);
7024 
7025 	/*
7026 	 * configure support HPD hot plug connector_>polled default value is 0
7027 	 * which means HPD hot plug not supported
7028 	 */
7029 	switch (connector_type) {
7030 	case DRM_MODE_CONNECTOR_HDMIA:
7031 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7032 		aconnector->base.ycbcr_420_allowed =
7033 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7034 		break;
7035 	case DRM_MODE_CONNECTOR_DisplayPort:
7036 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7037 		aconnector->base.ycbcr_420_allowed =
7038 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7039 		break;
7040 	case DRM_MODE_CONNECTOR_DVID:
7041 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7042 		break;
7043 	default:
7044 		break;
7045 	}
7046 
7047 	drm_object_attach_property(&aconnector->base.base,
7048 				dm->ddev->mode_config.scaling_mode_property,
7049 				DRM_MODE_SCALE_NONE);
7050 
7051 	drm_object_attach_property(&aconnector->base.base,
7052 				adev->mode_info.underscan_property,
7053 				UNDERSCAN_OFF);
7054 	drm_object_attach_property(&aconnector->base.base,
7055 				adev->mode_info.underscan_hborder_property,
7056 				0);
7057 	drm_object_attach_property(&aconnector->base.base,
7058 				adev->mode_info.underscan_vborder_property,
7059 				0);
7060 
7061 	if (!aconnector->mst_port)
7062 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7063 
7064 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7065 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7066 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7067 
7068 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7069 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7070 		drm_object_attach_property(&aconnector->base.base,
7071 				adev->mode_info.abm_level_property, 0);
7072 	}
7073 
7074 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7075 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7076 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7077 		drm_object_attach_property(
7078 			&aconnector->base.base,
7079 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7080 
7081 		if (!aconnector->mst_port)
7082 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7083 
7084 #ifdef CONFIG_DRM_AMD_DC_HDCP
7085 		if (adev->dm.hdcp_workqueue)
7086 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7087 #endif
7088 	}
7089 }
7090 
7091 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7092 			      struct i2c_msg *msgs, int num)
7093 {
7094 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7095 	struct ddc_service *ddc_service = i2c->ddc_service;
7096 	struct i2c_command cmd;
7097 	int i;
7098 	int result = -EIO;
7099 
7100 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7101 
7102 	if (!cmd.payloads)
7103 		return result;
7104 
7105 	cmd.number_of_payloads = num;
7106 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7107 	cmd.speed = 100;
7108 
7109 	for (i = 0; i < num; i++) {
7110 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7111 		cmd.payloads[i].address = msgs[i].addr;
7112 		cmd.payloads[i].length = msgs[i].len;
7113 		cmd.payloads[i].data = msgs[i].buf;
7114 	}
7115 
7116 	if (dc_submit_i2c(
7117 			ddc_service->ctx->dc,
7118 			ddc_service->ddc_pin->hw_info.ddc_channel,
7119 			&cmd))
7120 		result = num;
7121 
7122 	kfree(cmd.payloads);
7123 	return result;
7124 }
7125 
7126 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7127 {
7128 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7129 }
7130 
7131 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7132 	.master_xfer = amdgpu_dm_i2c_xfer,
7133 	.functionality = amdgpu_dm_i2c_func,
7134 };
7135 
7136 static struct amdgpu_i2c_adapter *
7137 create_i2c(struct ddc_service *ddc_service,
7138 	   int link_index,
7139 	   int *res)
7140 {
7141 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7142 	struct amdgpu_i2c_adapter *i2c;
7143 
7144 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7145 	if (!i2c)
7146 		return NULL;
7147 	i2c->base.owner = THIS_MODULE;
7148 	i2c->base.class = I2C_CLASS_DDC;
7149 	i2c->base.dev.parent = &adev->pdev->dev;
7150 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7151 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7152 	i2c_set_adapdata(&i2c->base, i2c);
7153 	i2c->ddc_service = ddc_service;
7154 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7155 
7156 	return i2c;
7157 }
7158 
7159 
7160 /*
7161  * Note: this function assumes that dc_link_detect() was called for the
7162  * dc_link which will be represented by this aconnector.
7163  */
7164 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7165 				    struct amdgpu_dm_connector *aconnector,
7166 				    uint32_t link_index,
7167 				    struct amdgpu_encoder *aencoder)
7168 {
7169 	int res = 0;
7170 	int connector_type;
7171 	struct dc *dc = dm->dc;
7172 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7173 	struct amdgpu_i2c_adapter *i2c;
7174 
7175 	link->priv = aconnector;
7176 
7177 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7178 
7179 	i2c = create_i2c(link->ddc, link->link_index, &res);
7180 	if (!i2c) {
7181 		DRM_ERROR("Failed to create i2c adapter data\n");
7182 		return -ENOMEM;
7183 	}
7184 
7185 	aconnector->i2c = i2c;
7186 	res = i2c_add_adapter(&i2c->base);
7187 
7188 	if (res) {
7189 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7190 		goto out_free;
7191 	}
7192 
7193 	connector_type = to_drm_connector_type(link->connector_signal);
7194 
7195 	res = drm_connector_init_with_ddc(
7196 			dm->ddev,
7197 			&aconnector->base,
7198 			&amdgpu_dm_connector_funcs,
7199 			connector_type,
7200 			&i2c->base);
7201 
7202 	if (res) {
7203 		DRM_ERROR("connector_init failed\n");
7204 		aconnector->connector_id = -1;
7205 		goto out_free;
7206 	}
7207 
7208 	drm_connector_helper_add(
7209 			&aconnector->base,
7210 			&amdgpu_dm_connector_helper_funcs);
7211 
7212 	amdgpu_dm_connector_init_helper(
7213 		dm,
7214 		aconnector,
7215 		connector_type,
7216 		link,
7217 		link_index);
7218 
7219 	drm_connector_attach_encoder(
7220 		&aconnector->base, &aencoder->base);
7221 
7222 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7223 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7224 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7225 
7226 out_free:
7227 	if (res) {
7228 		kfree(i2c);
7229 		aconnector->i2c = NULL;
7230 	}
7231 	return res;
7232 }
7233 
7234 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7235 {
7236 	switch (adev->mode_info.num_crtc) {
7237 	case 1:
7238 		return 0x1;
7239 	case 2:
7240 		return 0x3;
7241 	case 3:
7242 		return 0x7;
7243 	case 4:
7244 		return 0xf;
7245 	case 5:
7246 		return 0x1f;
7247 	case 6:
7248 	default:
7249 		return 0x3f;
7250 	}
7251 }
7252 
7253 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7254 				  struct amdgpu_encoder *aencoder,
7255 				  uint32_t link_index)
7256 {
7257 	struct amdgpu_device *adev = drm_to_adev(dev);
7258 
7259 	int res = drm_encoder_init(dev,
7260 				   &aencoder->base,
7261 				   &amdgpu_dm_encoder_funcs,
7262 				   DRM_MODE_ENCODER_TMDS,
7263 				   NULL);
7264 
7265 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7266 
7267 	if (!res)
7268 		aencoder->encoder_id = link_index;
7269 	else
7270 		aencoder->encoder_id = -1;
7271 
7272 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7273 
7274 	return res;
7275 }
7276 
7277 static void manage_dm_interrupts(struct amdgpu_device *adev,
7278 				 struct amdgpu_crtc *acrtc,
7279 				 bool enable)
7280 {
7281 	/*
7282 	 * We have no guarantee that the frontend index maps to the same
7283 	 * backend index - some even map to more than one.
7284 	 *
7285 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7286 	 */
7287 	int irq_type =
7288 		amdgpu_display_crtc_idx_to_irq_type(
7289 			adev,
7290 			acrtc->crtc_id);
7291 
7292 	if (enable) {
7293 		drm_crtc_vblank_on(&acrtc->base);
7294 		amdgpu_irq_get(
7295 			adev,
7296 			&adev->pageflip_irq,
7297 			irq_type);
7298 	} else {
7299 
7300 		amdgpu_irq_put(
7301 			adev,
7302 			&adev->pageflip_irq,
7303 			irq_type);
7304 		drm_crtc_vblank_off(&acrtc->base);
7305 	}
7306 }
7307 
7308 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7309 				      struct amdgpu_crtc *acrtc)
7310 {
7311 	int irq_type =
7312 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7313 
7314 	/**
7315 	 * This reads the current state for the IRQ and force reapplies
7316 	 * the setting to hardware.
7317 	 */
7318 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7319 }
7320 
7321 static bool
7322 is_scaling_state_different(const struct dm_connector_state *dm_state,
7323 			   const struct dm_connector_state *old_dm_state)
7324 {
7325 	if (dm_state->scaling != old_dm_state->scaling)
7326 		return true;
7327 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7328 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7329 			return true;
7330 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7331 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7332 			return true;
7333 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7334 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7335 		return true;
7336 	return false;
7337 }
7338 
7339 #ifdef CONFIG_DRM_AMD_DC_HDCP
7340 static bool is_content_protection_different(struct drm_connector_state *state,
7341 					    const struct drm_connector_state *old_state,
7342 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7343 {
7344 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7345 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7346 
7347 	/* Handle: Type0/1 change */
7348 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7349 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7350 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7351 		return true;
7352 	}
7353 
7354 	/* CP is being re enabled, ignore this
7355 	 *
7356 	 * Handles:	ENABLED -> DESIRED
7357 	 */
7358 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7359 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7360 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7361 		return false;
7362 	}
7363 
7364 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7365 	 *
7366 	 * Handles:	UNDESIRED -> ENABLED
7367 	 */
7368 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7369 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7370 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7371 
7372 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7373 	 * hot-plug, headless s3, dpms
7374 	 *
7375 	 * Handles:	DESIRED -> DESIRED (Special case)
7376 	 */
7377 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7378 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7379 		dm_con_state->update_hdcp = false;
7380 		return true;
7381 	}
7382 
7383 	/*
7384 	 * Handles:	UNDESIRED -> UNDESIRED
7385 	 *		DESIRED -> DESIRED
7386 	 *		ENABLED -> ENABLED
7387 	 */
7388 	if (old_state->content_protection == state->content_protection)
7389 		return false;
7390 
7391 	/*
7392 	 * Handles:	UNDESIRED -> DESIRED
7393 	 *		DESIRED -> UNDESIRED
7394 	 *		ENABLED -> UNDESIRED
7395 	 */
7396 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7397 		return true;
7398 
7399 	/*
7400 	 * Handles:	DESIRED -> ENABLED
7401 	 */
7402 	return false;
7403 }
7404 
7405 #endif
7406 static void remove_stream(struct amdgpu_device *adev,
7407 			  struct amdgpu_crtc *acrtc,
7408 			  struct dc_stream_state *stream)
7409 {
7410 	/* this is the update mode case */
7411 
7412 	acrtc->otg_inst = -1;
7413 	acrtc->enabled = false;
7414 }
7415 
7416 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7417 			       struct dc_cursor_position *position)
7418 {
7419 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7420 	int x, y;
7421 	int xorigin = 0, yorigin = 0;
7422 
7423 	position->enable = false;
7424 	position->x = 0;
7425 	position->y = 0;
7426 
7427 	if (!crtc || !plane->state->fb)
7428 		return 0;
7429 
7430 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7431 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7432 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7433 			  __func__,
7434 			  plane->state->crtc_w,
7435 			  plane->state->crtc_h);
7436 		return -EINVAL;
7437 	}
7438 
7439 	x = plane->state->crtc_x;
7440 	y = plane->state->crtc_y;
7441 
7442 	if (x <= -amdgpu_crtc->max_cursor_width ||
7443 	    y <= -amdgpu_crtc->max_cursor_height)
7444 		return 0;
7445 
7446 	if (x < 0) {
7447 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7448 		x = 0;
7449 	}
7450 	if (y < 0) {
7451 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7452 		y = 0;
7453 	}
7454 	position->enable = true;
7455 	position->translate_by_source = true;
7456 	position->x = x;
7457 	position->y = y;
7458 	position->x_hotspot = xorigin;
7459 	position->y_hotspot = yorigin;
7460 
7461 	return 0;
7462 }
7463 
7464 static void handle_cursor_update(struct drm_plane *plane,
7465 				 struct drm_plane_state *old_plane_state)
7466 {
7467 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7468 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7469 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7470 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7471 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7472 	uint64_t address = afb ? afb->address : 0;
7473 	struct dc_cursor_position position;
7474 	struct dc_cursor_attributes attributes;
7475 	int ret;
7476 
7477 	if (!plane->state->fb && !old_plane_state->fb)
7478 		return;
7479 
7480 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7481 			 __func__,
7482 			 amdgpu_crtc->crtc_id,
7483 			 plane->state->crtc_w,
7484 			 plane->state->crtc_h);
7485 
7486 	ret = get_cursor_position(plane, crtc, &position);
7487 	if (ret)
7488 		return;
7489 
7490 	if (!position.enable) {
7491 		/* turn off cursor */
7492 		if (crtc_state && crtc_state->stream) {
7493 			mutex_lock(&adev->dm.dc_lock);
7494 			dc_stream_set_cursor_position(crtc_state->stream,
7495 						      &position);
7496 			mutex_unlock(&adev->dm.dc_lock);
7497 		}
7498 		return;
7499 	}
7500 
7501 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7502 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7503 
7504 	memset(&attributes, 0, sizeof(attributes));
7505 	attributes.address.high_part = upper_32_bits(address);
7506 	attributes.address.low_part  = lower_32_bits(address);
7507 	attributes.width             = plane->state->crtc_w;
7508 	attributes.height            = plane->state->crtc_h;
7509 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7510 	attributes.rotation_angle    = 0;
7511 	attributes.attribute_flags.value = 0;
7512 
7513 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7514 
7515 	if (crtc_state->stream) {
7516 		mutex_lock(&adev->dm.dc_lock);
7517 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7518 							 &attributes))
7519 			DRM_ERROR("DC failed to set cursor attributes\n");
7520 
7521 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7522 						   &position))
7523 			DRM_ERROR("DC failed to set cursor position\n");
7524 		mutex_unlock(&adev->dm.dc_lock);
7525 	}
7526 }
7527 
7528 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7529 {
7530 
7531 	assert_spin_locked(&acrtc->base.dev->event_lock);
7532 	WARN_ON(acrtc->event);
7533 
7534 	acrtc->event = acrtc->base.state->event;
7535 
7536 	/* Set the flip status */
7537 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7538 
7539 	/* Mark this event as consumed */
7540 	acrtc->base.state->event = NULL;
7541 
7542 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7543 						 acrtc->crtc_id);
7544 }
7545 
7546 static void update_freesync_state_on_stream(
7547 	struct amdgpu_display_manager *dm,
7548 	struct dm_crtc_state *new_crtc_state,
7549 	struct dc_stream_state *new_stream,
7550 	struct dc_plane_state *surface,
7551 	u32 flip_timestamp_in_us)
7552 {
7553 	struct mod_vrr_params vrr_params;
7554 	struct dc_info_packet vrr_infopacket = {0};
7555 	struct amdgpu_device *adev = dm->adev;
7556 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7557 	unsigned long flags;
7558 
7559 	if (!new_stream)
7560 		return;
7561 
7562 	/*
7563 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7564 	 * For now it's sufficient to just guard against these conditions.
7565 	 */
7566 
7567 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7568 		return;
7569 
7570 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7571         vrr_params = acrtc->dm_irq_params.vrr_params;
7572 
7573 	if (surface) {
7574 		mod_freesync_handle_preflip(
7575 			dm->freesync_module,
7576 			surface,
7577 			new_stream,
7578 			flip_timestamp_in_us,
7579 			&vrr_params);
7580 
7581 		if (adev->family < AMDGPU_FAMILY_AI &&
7582 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7583 			mod_freesync_handle_v_update(dm->freesync_module,
7584 						     new_stream, &vrr_params);
7585 
7586 			/* Need to call this before the frame ends. */
7587 			dc_stream_adjust_vmin_vmax(dm->dc,
7588 						   new_crtc_state->stream,
7589 						   &vrr_params.adjust);
7590 		}
7591 	}
7592 
7593 	mod_freesync_build_vrr_infopacket(
7594 		dm->freesync_module,
7595 		new_stream,
7596 		&vrr_params,
7597 		PACKET_TYPE_VRR,
7598 		TRANSFER_FUNC_UNKNOWN,
7599 		&vrr_infopacket);
7600 
7601 	new_crtc_state->freesync_timing_changed |=
7602 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7603 			&vrr_params.adjust,
7604 			sizeof(vrr_params.adjust)) != 0);
7605 
7606 	new_crtc_state->freesync_vrr_info_changed |=
7607 		(memcmp(&new_crtc_state->vrr_infopacket,
7608 			&vrr_infopacket,
7609 			sizeof(vrr_infopacket)) != 0);
7610 
7611 	acrtc->dm_irq_params.vrr_params = vrr_params;
7612 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7613 
7614 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7615 	new_stream->vrr_infopacket = vrr_infopacket;
7616 
7617 	if (new_crtc_state->freesync_vrr_info_changed)
7618 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7619 			      new_crtc_state->base.crtc->base.id,
7620 			      (int)new_crtc_state->base.vrr_enabled,
7621 			      (int)vrr_params.state);
7622 
7623 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7624 }
7625 
7626 static void update_stream_irq_parameters(
7627 	struct amdgpu_display_manager *dm,
7628 	struct dm_crtc_state *new_crtc_state)
7629 {
7630 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7631 	struct mod_vrr_params vrr_params;
7632 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7633 	struct amdgpu_device *adev = dm->adev;
7634 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7635 	unsigned long flags;
7636 
7637 	if (!new_stream)
7638 		return;
7639 
7640 	/*
7641 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7642 	 * For now it's sufficient to just guard against these conditions.
7643 	 */
7644 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7645 		return;
7646 
7647 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7648 	vrr_params = acrtc->dm_irq_params.vrr_params;
7649 
7650 	if (new_crtc_state->vrr_supported &&
7651 	    config.min_refresh_in_uhz &&
7652 	    config.max_refresh_in_uhz) {
7653 		config.state = new_crtc_state->base.vrr_enabled ?
7654 			VRR_STATE_ACTIVE_VARIABLE :
7655 			VRR_STATE_INACTIVE;
7656 	} else {
7657 		config.state = VRR_STATE_UNSUPPORTED;
7658 	}
7659 
7660 	mod_freesync_build_vrr_params(dm->freesync_module,
7661 				      new_stream,
7662 				      &config, &vrr_params);
7663 
7664 	new_crtc_state->freesync_timing_changed |=
7665 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7666 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7667 
7668 	new_crtc_state->freesync_config = config;
7669 	/* Copy state for access from DM IRQ handler */
7670 	acrtc->dm_irq_params.freesync_config = config;
7671 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7672 	acrtc->dm_irq_params.vrr_params = vrr_params;
7673 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7674 }
7675 
7676 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7677 					    struct dm_crtc_state *new_state)
7678 {
7679 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7680 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7681 
7682 	if (!old_vrr_active && new_vrr_active) {
7683 		/* Transition VRR inactive -> active:
7684 		 * While VRR is active, we must not disable vblank irq, as a
7685 		 * reenable after disable would compute bogus vblank/pflip
7686 		 * timestamps if it likely happened inside display front-porch.
7687 		 *
7688 		 * We also need vupdate irq for the actual core vblank handling
7689 		 * at end of vblank.
7690 		 */
7691 		dm_set_vupdate_irq(new_state->base.crtc, true);
7692 		drm_crtc_vblank_get(new_state->base.crtc);
7693 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7694 				 __func__, new_state->base.crtc->base.id);
7695 	} else if (old_vrr_active && !new_vrr_active) {
7696 		/* Transition VRR active -> inactive:
7697 		 * Allow vblank irq disable again for fixed refresh rate.
7698 		 */
7699 		dm_set_vupdate_irq(new_state->base.crtc, false);
7700 		drm_crtc_vblank_put(new_state->base.crtc);
7701 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7702 				 __func__, new_state->base.crtc->base.id);
7703 	}
7704 }
7705 
7706 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7707 {
7708 	struct drm_plane *plane;
7709 	struct drm_plane_state *old_plane_state, *new_plane_state;
7710 	int i;
7711 
7712 	/*
7713 	 * TODO: Make this per-stream so we don't issue redundant updates for
7714 	 * commits with multiple streams.
7715 	 */
7716 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7717 				       new_plane_state, i)
7718 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7719 			handle_cursor_update(plane, old_plane_state);
7720 }
7721 
7722 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7723 				    struct dc_state *dc_state,
7724 				    struct drm_device *dev,
7725 				    struct amdgpu_display_manager *dm,
7726 				    struct drm_crtc *pcrtc,
7727 				    bool wait_for_vblank)
7728 {
7729 	uint32_t i;
7730 	uint64_t timestamp_ns;
7731 	struct drm_plane *plane;
7732 	struct drm_plane_state *old_plane_state, *new_plane_state;
7733 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7734 	struct drm_crtc_state *new_pcrtc_state =
7735 			drm_atomic_get_new_crtc_state(state, pcrtc);
7736 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7737 	struct dm_crtc_state *dm_old_crtc_state =
7738 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7739 	int planes_count = 0, vpos, hpos;
7740 	long r;
7741 	unsigned long flags;
7742 	struct amdgpu_bo *abo;
7743 	uint32_t target_vblank, last_flip_vblank;
7744 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7745 	bool pflip_present = false;
7746 	struct {
7747 		struct dc_surface_update surface_updates[MAX_SURFACES];
7748 		struct dc_plane_info plane_infos[MAX_SURFACES];
7749 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7750 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7751 		struct dc_stream_update stream_update;
7752 	} *bundle;
7753 
7754 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7755 
7756 	if (!bundle) {
7757 		dm_error("Failed to allocate update bundle\n");
7758 		goto cleanup;
7759 	}
7760 
7761 	/*
7762 	 * Disable the cursor first if we're disabling all the planes.
7763 	 * It'll remain on the screen after the planes are re-enabled
7764 	 * if we don't.
7765 	 */
7766 	if (acrtc_state->active_planes == 0)
7767 		amdgpu_dm_commit_cursors(state);
7768 
7769 	/* update planes when needed */
7770 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7771 		struct drm_crtc *crtc = new_plane_state->crtc;
7772 		struct drm_crtc_state *new_crtc_state;
7773 		struct drm_framebuffer *fb = new_plane_state->fb;
7774 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7775 		bool plane_needs_flip;
7776 		struct dc_plane_state *dc_plane;
7777 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7778 
7779 		/* Cursor plane is handled after stream updates */
7780 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7781 			continue;
7782 
7783 		if (!fb || !crtc || pcrtc != crtc)
7784 			continue;
7785 
7786 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7787 		if (!new_crtc_state->active)
7788 			continue;
7789 
7790 		dc_plane = dm_new_plane_state->dc_state;
7791 
7792 		bundle->surface_updates[planes_count].surface = dc_plane;
7793 		if (new_pcrtc_state->color_mgmt_changed) {
7794 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7795 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7796 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7797 		}
7798 
7799 		fill_dc_scaling_info(new_plane_state,
7800 				     &bundle->scaling_infos[planes_count]);
7801 
7802 		bundle->surface_updates[planes_count].scaling_info =
7803 			&bundle->scaling_infos[planes_count];
7804 
7805 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7806 
7807 		pflip_present = pflip_present || plane_needs_flip;
7808 
7809 		if (!plane_needs_flip) {
7810 			planes_count += 1;
7811 			continue;
7812 		}
7813 
7814 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7815 
7816 		/*
7817 		 * Wait for all fences on this FB. Do limited wait to avoid
7818 		 * deadlock during GPU reset when this fence will not signal
7819 		 * but we hold reservation lock for the BO.
7820 		 */
7821 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7822 							false,
7823 							msecs_to_jiffies(5000));
7824 		if (unlikely(r <= 0))
7825 			DRM_ERROR("Waiting for fences timed out!");
7826 
7827 		fill_dc_plane_info_and_addr(
7828 			dm->adev, new_plane_state,
7829 			afb->tiling_flags,
7830 			&bundle->plane_infos[planes_count],
7831 			&bundle->flip_addrs[planes_count].address,
7832 			afb->tmz_surface, false);
7833 
7834 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7835 				 new_plane_state->plane->index,
7836 				 bundle->plane_infos[planes_count].dcc.enable);
7837 
7838 		bundle->surface_updates[planes_count].plane_info =
7839 			&bundle->plane_infos[planes_count];
7840 
7841 		/*
7842 		 * Only allow immediate flips for fast updates that don't
7843 		 * change FB pitch, DCC state, rotation or mirroing.
7844 		 */
7845 		bundle->flip_addrs[planes_count].flip_immediate =
7846 			crtc->state->async_flip &&
7847 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7848 
7849 		timestamp_ns = ktime_get_ns();
7850 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7851 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7852 		bundle->surface_updates[planes_count].surface = dc_plane;
7853 
7854 		if (!bundle->surface_updates[planes_count].surface) {
7855 			DRM_ERROR("No surface for CRTC: id=%d\n",
7856 					acrtc_attach->crtc_id);
7857 			continue;
7858 		}
7859 
7860 		if (plane == pcrtc->primary)
7861 			update_freesync_state_on_stream(
7862 				dm,
7863 				acrtc_state,
7864 				acrtc_state->stream,
7865 				dc_plane,
7866 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7867 
7868 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7869 				 __func__,
7870 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7871 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7872 
7873 		planes_count += 1;
7874 
7875 	}
7876 
7877 	if (pflip_present) {
7878 		if (!vrr_active) {
7879 			/* Use old throttling in non-vrr fixed refresh rate mode
7880 			 * to keep flip scheduling based on target vblank counts
7881 			 * working in a backwards compatible way, e.g., for
7882 			 * clients using the GLX_OML_sync_control extension or
7883 			 * DRI3/Present extension with defined target_msc.
7884 			 */
7885 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7886 		}
7887 		else {
7888 			/* For variable refresh rate mode only:
7889 			 * Get vblank of last completed flip to avoid > 1 vrr
7890 			 * flips per video frame by use of throttling, but allow
7891 			 * flip programming anywhere in the possibly large
7892 			 * variable vrr vblank interval for fine-grained flip
7893 			 * timing control and more opportunity to avoid stutter
7894 			 * on late submission of flips.
7895 			 */
7896 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7897 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7898 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7899 		}
7900 
7901 		target_vblank = last_flip_vblank + wait_for_vblank;
7902 
7903 		/*
7904 		 * Wait until we're out of the vertical blank period before the one
7905 		 * targeted by the flip
7906 		 */
7907 		while ((acrtc_attach->enabled &&
7908 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7909 							    0, &vpos, &hpos, NULL,
7910 							    NULL, &pcrtc->hwmode)
7911 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7912 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7913 			(int)(target_vblank -
7914 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7915 			usleep_range(1000, 1100);
7916 		}
7917 
7918 		/**
7919 		 * Prepare the flip event for the pageflip interrupt to handle.
7920 		 *
7921 		 * This only works in the case where we've already turned on the
7922 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7923 		 * from 0 -> n planes we have to skip a hardware generated event
7924 		 * and rely on sending it from software.
7925 		 */
7926 		if (acrtc_attach->base.state->event &&
7927 		    acrtc_state->active_planes > 0) {
7928 			drm_crtc_vblank_get(pcrtc);
7929 
7930 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7931 
7932 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7933 			prepare_flip_isr(acrtc_attach);
7934 
7935 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7936 		}
7937 
7938 		if (acrtc_state->stream) {
7939 			if (acrtc_state->freesync_vrr_info_changed)
7940 				bundle->stream_update.vrr_infopacket =
7941 					&acrtc_state->stream->vrr_infopacket;
7942 		}
7943 	}
7944 
7945 	/* Update the planes if changed or disable if we don't have any. */
7946 	if ((planes_count || acrtc_state->active_planes == 0) &&
7947 		acrtc_state->stream) {
7948 		bundle->stream_update.stream = acrtc_state->stream;
7949 		if (new_pcrtc_state->mode_changed) {
7950 			bundle->stream_update.src = acrtc_state->stream->src;
7951 			bundle->stream_update.dst = acrtc_state->stream->dst;
7952 		}
7953 
7954 		if (new_pcrtc_state->color_mgmt_changed) {
7955 			/*
7956 			 * TODO: This isn't fully correct since we've actually
7957 			 * already modified the stream in place.
7958 			 */
7959 			bundle->stream_update.gamut_remap =
7960 				&acrtc_state->stream->gamut_remap_matrix;
7961 			bundle->stream_update.output_csc_transform =
7962 				&acrtc_state->stream->csc_color_matrix;
7963 			bundle->stream_update.out_transfer_func =
7964 				acrtc_state->stream->out_transfer_func;
7965 		}
7966 
7967 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7968 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7969 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7970 
7971 		/*
7972 		 * If FreeSync state on the stream has changed then we need to
7973 		 * re-adjust the min/max bounds now that DC doesn't handle this
7974 		 * as part of commit.
7975 		 */
7976 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7977 		    amdgpu_dm_vrr_active(acrtc_state)) {
7978 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7979 			dc_stream_adjust_vmin_vmax(
7980 				dm->dc, acrtc_state->stream,
7981 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7982 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7983 		}
7984 		mutex_lock(&dm->dc_lock);
7985 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7986 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7987 			amdgpu_dm_psr_disable(acrtc_state->stream);
7988 
7989 		dc_commit_updates_for_stream(dm->dc,
7990 						     bundle->surface_updates,
7991 						     planes_count,
7992 						     acrtc_state->stream,
7993 						     &bundle->stream_update,
7994 						     dc_state);
7995 
7996 		/**
7997 		 * Enable or disable the interrupts on the backend.
7998 		 *
7999 		 * Most pipes are put into power gating when unused.
8000 		 *
8001 		 * When power gating is enabled on a pipe we lose the
8002 		 * interrupt enablement state when power gating is disabled.
8003 		 *
8004 		 * So we need to update the IRQ control state in hardware
8005 		 * whenever the pipe turns on (since it could be previously
8006 		 * power gated) or off (since some pipes can't be power gated
8007 		 * on some ASICs).
8008 		 */
8009 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8010 			dm_update_pflip_irq_state(drm_to_adev(dev),
8011 						  acrtc_attach);
8012 
8013 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8014 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8015 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8016 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8017 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8018 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8019 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8020 			amdgpu_dm_psr_enable(acrtc_state->stream);
8021 		}
8022 
8023 		mutex_unlock(&dm->dc_lock);
8024 	}
8025 
8026 	/*
8027 	 * Update cursor state *after* programming all the planes.
8028 	 * This avoids redundant programming in the case where we're going
8029 	 * to be disabling a single plane - those pipes are being disabled.
8030 	 */
8031 	if (acrtc_state->active_planes)
8032 		amdgpu_dm_commit_cursors(state);
8033 
8034 cleanup:
8035 	kfree(bundle);
8036 }
8037 
8038 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8039 				   struct drm_atomic_state *state)
8040 {
8041 	struct amdgpu_device *adev = drm_to_adev(dev);
8042 	struct amdgpu_dm_connector *aconnector;
8043 	struct drm_connector *connector;
8044 	struct drm_connector_state *old_con_state, *new_con_state;
8045 	struct drm_crtc_state *new_crtc_state;
8046 	struct dm_crtc_state *new_dm_crtc_state;
8047 	const struct dc_stream_status *status;
8048 	int i, inst;
8049 
8050 	/* Notify device removals. */
8051 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8052 		if (old_con_state->crtc != new_con_state->crtc) {
8053 			/* CRTC changes require notification. */
8054 			goto notify;
8055 		}
8056 
8057 		if (!new_con_state->crtc)
8058 			continue;
8059 
8060 		new_crtc_state = drm_atomic_get_new_crtc_state(
8061 			state, new_con_state->crtc);
8062 
8063 		if (!new_crtc_state)
8064 			continue;
8065 
8066 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8067 			continue;
8068 
8069 	notify:
8070 		aconnector = to_amdgpu_dm_connector(connector);
8071 
8072 		mutex_lock(&adev->dm.audio_lock);
8073 		inst = aconnector->audio_inst;
8074 		aconnector->audio_inst = -1;
8075 		mutex_unlock(&adev->dm.audio_lock);
8076 
8077 		amdgpu_dm_audio_eld_notify(adev, inst);
8078 	}
8079 
8080 	/* Notify audio device additions. */
8081 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8082 		if (!new_con_state->crtc)
8083 			continue;
8084 
8085 		new_crtc_state = drm_atomic_get_new_crtc_state(
8086 			state, new_con_state->crtc);
8087 
8088 		if (!new_crtc_state)
8089 			continue;
8090 
8091 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8092 			continue;
8093 
8094 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8095 		if (!new_dm_crtc_state->stream)
8096 			continue;
8097 
8098 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8099 		if (!status)
8100 			continue;
8101 
8102 		aconnector = to_amdgpu_dm_connector(connector);
8103 
8104 		mutex_lock(&adev->dm.audio_lock);
8105 		inst = status->audio_inst;
8106 		aconnector->audio_inst = inst;
8107 		mutex_unlock(&adev->dm.audio_lock);
8108 
8109 		amdgpu_dm_audio_eld_notify(adev, inst);
8110 	}
8111 }
8112 
8113 /*
8114  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8115  * @crtc_state: the DRM CRTC state
8116  * @stream_state: the DC stream state.
8117  *
8118  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8119  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8120  */
8121 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8122 						struct dc_stream_state *stream_state)
8123 {
8124 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8125 }
8126 
8127 /**
8128  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8129  * @state: The atomic state to commit
8130  *
8131  * This will tell DC to commit the constructed DC state from atomic_check,
8132  * programming the hardware. Any failures here implies a hardware failure, since
8133  * atomic check should have filtered anything non-kosher.
8134  */
8135 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8136 {
8137 	struct drm_device *dev = state->dev;
8138 	struct amdgpu_device *adev = drm_to_adev(dev);
8139 	struct amdgpu_display_manager *dm = &adev->dm;
8140 	struct dm_atomic_state *dm_state;
8141 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8142 	uint32_t i, j;
8143 	struct drm_crtc *crtc;
8144 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8145 	unsigned long flags;
8146 	bool wait_for_vblank = true;
8147 	struct drm_connector *connector;
8148 	struct drm_connector_state *old_con_state, *new_con_state;
8149 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8150 	int crtc_disable_count = 0;
8151 	bool mode_set_reset_required = false;
8152 
8153 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8154 
8155 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8156 
8157 	dm_state = dm_atomic_get_new_state(state);
8158 	if (dm_state && dm_state->context) {
8159 		dc_state = dm_state->context;
8160 	} else {
8161 		/* No state changes, retain current state. */
8162 		dc_state_temp = dc_create_state(dm->dc);
8163 		ASSERT(dc_state_temp);
8164 		dc_state = dc_state_temp;
8165 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8166 	}
8167 
8168 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8169 				       new_crtc_state, i) {
8170 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8171 
8172 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8173 
8174 		if (old_crtc_state->active &&
8175 		    (!new_crtc_state->active ||
8176 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8177 			manage_dm_interrupts(adev, acrtc, false);
8178 			dc_stream_release(dm_old_crtc_state->stream);
8179 		}
8180 	}
8181 
8182 	drm_atomic_helper_calc_timestamping_constants(state);
8183 
8184 	/* update changed items */
8185 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8186 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8187 
8188 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8189 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8190 
8191 		DRM_DEBUG_DRIVER(
8192 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8193 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8194 			"connectors_changed:%d\n",
8195 			acrtc->crtc_id,
8196 			new_crtc_state->enable,
8197 			new_crtc_state->active,
8198 			new_crtc_state->planes_changed,
8199 			new_crtc_state->mode_changed,
8200 			new_crtc_state->active_changed,
8201 			new_crtc_state->connectors_changed);
8202 
8203 		/* Disable cursor if disabling crtc */
8204 		if (old_crtc_state->active && !new_crtc_state->active) {
8205 			struct dc_cursor_position position;
8206 
8207 			memset(&position, 0, sizeof(position));
8208 			mutex_lock(&dm->dc_lock);
8209 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8210 			mutex_unlock(&dm->dc_lock);
8211 		}
8212 
8213 		/* Copy all transient state flags into dc state */
8214 		if (dm_new_crtc_state->stream) {
8215 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8216 							    dm_new_crtc_state->stream);
8217 		}
8218 
8219 		/* handles headless hotplug case, updating new_state and
8220 		 * aconnector as needed
8221 		 */
8222 
8223 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8224 
8225 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8226 
8227 			if (!dm_new_crtc_state->stream) {
8228 				/*
8229 				 * this could happen because of issues with
8230 				 * userspace notifications delivery.
8231 				 * In this case userspace tries to set mode on
8232 				 * display which is disconnected in fact.
8233 				 * dc_sink is NULL in this case on aconnector.
8234 				 * We expect reset mode will come soon.
8235 				 *
8236 				 * This can also happen when unplug is done
8237 				 * during resume sequence ended
8238 				 *
8239 				 * In this case, we want to pretend we still
8240 				 * have a sink to keep the pipe running so that
8241 				 * hw state is consistent with the sw state
8242 				 */
8243 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8244 						__func__, acrtc->base.base.id);
8245 				continue;
8246 			}
8247 
8248 			if (dm_old_crtc_state->stream)
8249 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8250 
8251 			pm_runtime_get_noresume(dev->dev);
8252 
8253 			acrtc->enabled = true;
8254 			acrtc->hw_mode = new_crtc_state->mode;
8255 			crtc->hwmode = new_crtc_state->mode;
8256 			mode_set_reset_required = true;
8257 		} else if (modereset_required(new_crtc_state)) {
8258 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8259 			/* i.e. reset mode */
8260 			if (dm_old_crtc_state->stream)
8261 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8262 			mode_set_reset_required = true;
8263 		}
8264 	} /* for_each_crtc_in_state() */
8265 
8266 	if (dc_state) {
8267 		/* if there mode set or reset, disable eDP PSR */
8268 		if (mode_set_reset_required)
8269 			amdgpu_dm_psr_disable_all(dm);
8270 
8271 		dm_enable_per_frame_crtc_master_sync(dc_state);
8272 		mutex_lock(&dm->dc_lock);
8273 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8274 		mutex_unlock(&dm->dc_lock);
8275 	}
8276 
8277 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8278 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8279 
8280 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8281 
8282 		if (dm_new_crtc_state->stream != NULL) {
8283 			const struct dc_stream_status *status =
8284 					dc_stream_get_status(dm_new_crtc_state->stream);
8285 
8286 			if (!status)
8287 				status = dc_stream_get_status_from_state(dc_state,
8288 									 dm_new_crtc_state->stream);
8289 			if (!status)
8290 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8291 			else
8292 				acrtc->otg_inst = status->primary_otg_inst;
8293 		}
8294 	}
8295 #ifdef CONFIG_DRM_AMD_DC_HDCP
8296 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8297 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8298 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8299 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8300 
8301 		new_crtc_state = NULL;
8302 
8303 		if (acrtc)
8304 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8305 
8306 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8307 
8308 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8309 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8310 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8311 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8312 			dm_new_con_state->update_hdcp = true;
8313 			continue;
8314 		}
8315 
8316 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8317 			hdcp_update_display(
8318 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8319 				new_con_state->hdcp_content_type,
8320 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8321 													 : false);
8322 	}
8323 #endif
8324 
8325 	/* Handle connector state changes */
8326 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8327 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8328 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8329 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8330 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8331 		struct dc_stream_update stream_update;
8332 		struct dc_info_packet hdr_packet;
8333 		struct dc_stream_status *status = NULL;
8334 		bool abm_changed, hdr_changed, scaling_changed;
8335 
8336 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8337 		memset(&stream_update, 0, sizeof(stream_update));
8338 
8339 		if (acrtc) {
8340 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8341 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8342 		}
8343 
8344 		/* Skip any modesets/resets */
8345 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8346 			continue;
8347 
8348 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8349 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8350 
8351 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8352 							     dm_old_con_state);
8353 
8354 		abm_changed = dm_new_crtc_state->abm_level !=
8355 			      dm_old_crtc_state->abm_level;
8356 
8357 		hdr_changed =
8358 			is_hdr_metadata_different(old_con_state, new_con_state);
8359 
8360 		if (!scaling_changed && !abm_changed && !hdr_changed)
8361 			continue;
8362 
8363 		stream_update.stream = dm_new_crtc_state->stream;
8364 		if (scaling_changed) {
8365 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8366 					dm_new_con_state, dm_new_crtc_state->stream);
8367 
8368 			stream_update.src = dm_new_crtc_state->stream->src;
8369 			stream_update.dst = dm_new_crtc_state->stream->dst;
8370 		}
8371 
8372 		if (abm_changed) {
8373 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8374 
8375 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8376 		}
8377 
8378 		if (hdr_changed) {
8379 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8380 			stream_update.hdr_static_metadata = &hdr_packet;
8381 		}
8382 
8383 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8384 		WARN_ON(!status);
8385 		WARN_ON(!status->plane_count);
8386 
8387 		/*
8388 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8389 		 * Here we create an empty update on each plane.
8390 		 * To fix this, DC should permit updating only stream properties.
8391 		 */
8392 		for (j = 0; j < status->plane_count; j++)
8393 			dummy_updates[j].surface = status->plane_states[0];
8394 
8395 
8396 		mutex_lock(&dm->dc_lock);
8397 		dc_commit_updates_for_stream(dm->dc,
8398 						     dummy_updates,
8399 						     status->plane_count,
8400 						     dm_new_crtc_state->stream,
8401 						     &stream_update,
8402 						     dc_state);
8403 		mutex_unlock(&dm->dc_lock);
8404 	}
8405 
8406 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8407 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8408 				      new_crtc_state, i) {
8409 		if (old_crtc_state->active && !new_crtc_state->active)
8410 			crtc_disable_count++;
8411 
8412 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8413 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8414 
8415 		/* For freesync config update on crtc state and params for irq */
8416 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8417 
8418 		/* Handle vrr on->off / off->on transitions */
8419 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8420 						dm_new_crtc_state);
8421 	}
8422 
8423 	/**
8424 	 * Enable interrupts for CRTCs that are newly enabled or went through
8425 	 * a modeset. It was intentionally deferred until after the front end
8426 	 * state was modified to wait until the OTG was on and so the IRQ
8427 	 * handlers didn't access stale or invalid state.
8428 	 */
8429 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8430 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8431 
8432 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8433 
8434 		if (new_crtc_state->active &&
8435 		    (!old_crtc_state->active ||
8436 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8437 			dc_stream_retain(dm_new_crtc_state->stream);
8438 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8439 			manage_dm_interrupts(adev, acrtc, true);
8440 
8441 #ifdef CONFIG_DEBUG_FS
8442 			/**
8443 			 * Frontend may have changed so reapply the CRC capture
8444 			 * settings for the stream.
8445 			 */
8446 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8447 
8448 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8449 				amdgpu_dm_crtc_configure_crc_source(
8450 					crtc, dm_new_crtc_state,
8451 					dm_new_crtc_state->crc_src);
8452 			}
8453 #endif
8454 		}
8455 	}
8456 
8457 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8458 		if (new_crtc_state->async_flip)
8459 			wait_for_vblank = false;
8460 
8461 	/* update planes when needed per crtc*/
8462 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8463 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8464 
8465 		if (dm_new_crtc_state->stream)
8466 			amdgpu_dm_commit_planes(state, dc_state, dev,
8467 						dm, crtc, wait_for_vblank);
8468 	}
8469 
8470 	/* Update audio instances for each connector. */
8471 	amdgpu_dm_commit_audio(dev, state);
8472 
8473 	/*
8474 	 * send vblank event on all events not handled in flip and
8475 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8476 	 */
8477 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8478 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8479 
8480 		if (new_crtc_state->event)
8481 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8482 
8483 		new_crtc_state->event = NULL;
8484 	}
8485 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8486 
8487 	/* Signal HW programming completion */
8488 	drm_atomic_helper_commit_hw_done(state);
8489 
8490 	if (wait_for_vblank)
8491 		drm_atomic_helper_wait_for_flip_done(dev, state);
8492 
8493 	drm_atomic_helper_cleanup_planes(dev, state);
8494 
8495 	/* return the stolen vga memory back to VRAM */
8496 	if (!adev->mman.keep_stolen_vga_memory)
8497 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8498 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8499 
8500 	/*
8501 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8502 	 * so we can put the GPU into runtime suspend if we're not driving any
8503 	 * displays anymore
8504 	 */
8505 	for (i = 0; i < crtc_disable_count; i++)
8506 		pm_runtime_put_autosuspend(dev->dev);
8507 	pm_runtime_mark_last_busy(dev->dev);
8508 
8509 	if (dc_state_temp)
8510 		dc_release_state(dc_state_temp);
8511 }
8512 
8513 
8514 static int dm_force_atomic_commit(struct drm_connector *connector)
8515 {
8516 	int ret = 0;
8517 	struct drm_device *ddev = connector->dev;
8518 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8519 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8520 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8521 	struct drm_connector_state *conn_state;
8522 	struct drm_crtc_state *crtc_state;
8523 	struct drm_plane_state *plane_state;
8524 
8525 	if (!state)
8526 		return -ENOMEM;
8527 
8528 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8529 
8530 	/* Construct an atomic state to restore previous display setting */
8531 
8532 	/*
8533 	 * Attach connectors to drm_atomic_state
8534 	 */
8535 	conn_state = drm_atomic_get_connector_state(state, connector);
8536 
8537 	ret = PTR_ERR_OR_ZERO(conn_state);
8538 	if (ret)
8539 		goto out;
8540 
8541 	/* Attach crtc to drm_atomic_state*/
8542 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8543 
8544 	ret = PTR_ERR_OR_ZERO(crtc_state);
8545 	if (ret)
8546 		goto out;
8547 
8548 	/* force a restore */
8549 	crtc_state->mode_changed = true;
8550 
8551 	/* Attach plane to drm_atomic_state */
8552 	plane_state = drm_atomic_get_plane_state(state, plane);
8553 
8554 	ret = PTR_ERR_OR_ZERO(plane_state);
8555 	if (ret)
8556 		goto out;
8557 
8558 	/* Call commit internally with the state we just constructed */
8559 	ret = drm_atomic_commit(state);
8560 
8561 out:
8562 	drm_atomic_state_put(state);
8563 	if (ret)
8564 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8565 
8566 	return ret;
8567 }
8568 
8569 /*
8570  * This function handles all cases when set mode does not come upon hotplug.
8571  * This includes when a display is unplugged then plugged back into the
8572  * same port and when running without usermode desktop manager supprot
8573  */
8574 void dm_restore_drm_connector_state(struct drm_device *dev,
8575 				    struct drm_connector *connector)
8576 {
8577 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8578 	struct amdgpu_crtc *disconnected_acrtc;
8579 	struct dm_crtc_state *acrtc_state;
8580 
8581 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8582 		return;
8583 
8584 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8585 	if (!disconnected_acrtc)
8586 		return;
8587 
8588 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8589 	if (!acrtc_state->stream)
8590 		return;
8591 
8592 	/*
8593 	 * If the previous sink is not released and different from the current,
8594 	 * we deduce we are in a state where we can not rely on usermode call
8595 	 * to turn on the display, so we do it here
8596 	 */
8597 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8598 		dm_force_atomic_commit(&aconnector->base);
8599 }
8600 
8601 /*
8602  * Grabs all modesetting locks to serialize against any blocking commits,
8603  * Waits for completion of all non blocking commits.
8604  */
8605 static int do_aquire_global_lock(struct drm_device *dev,
8606 				 struct drm_atomic_state *state)
8607 {
8608 	struct drm_crtc *crtc;
8609 	struct drm_crtc_commit *commit;
8610 	long ret;
8611 
8612 	/*
8613 	 * Adding all modeset locks to aquire_ctx will
8614 	 * ensure that when the framework release it the
8615 	 * extra locks we are locking here will get released to
8616 	 */
8617 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8618 	if (ret)
8619 		return ret;
8620 
8621 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8622 		spin_lock(&crtc->commit_lock);
8623 		commit = list_first_entry_or_null(&crtc->commit_list,
8624 				struct drm_crtc_commit, commit_entry);
8625 		if (commit)
8626 			drm_crtc_commit_get(commit);
8627 		spin_unlock(&crtc->commit_lock);
8628 
8629 		if (!commit)
8630 			continue;
8631 
8632 		/*
8633 		 * Make sure all pending HW programming completed and
8634 		 * page flips done
8635 		 */
8636 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8637 
8638 		if (ret > 0)
8639 			ret = wait_for_completion_interruptible_timeout(
8640 					&commit->flip_done, 10*HZ);
8641 
8642 		if (ret == 0)
8643 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8644 				  "timed out\n", crtc->base.id, crtc->name);
8645 
8646 		drm_crtc_commit_put(commit);
8647 	}
8648 
8649 	return ret < 0 ? ret : 0;
8650 }
8651 
8652 static void get_freesync_config_for_crtc(
8653 	struct dm_crtc_state *new_crtc_state,
8654 	struct dm_connector_state *new_con_state)
8655 {
8656 	struct mod_freesync_config config = {0};
8657 	struct amdgpu_dm_connector *aconnector =
8658 			to_amdgpu_dm_connector(new_con_state->base.connector);
8659 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8660 	int vrefresh = drm_mode_vrefresh(mode);
8661 
8662 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8663 					vrefresh >= aconnector->min_vfreq &&
8664 					vrefresh <= aconnector->max_vfreq;
8665 
8666 	if (new_crtc_state->vrr_supported) {
8667 		new_crtc_state->stream->ignore_msa_timing_param = true;
8668 		config.state = new_crtc_state->base.vrr_enabled ?
8669 				VRR_STATE_ACTIVE_VARIABLE :
8670 				VRR_STATE_INACTIVE;
8671 		config.min_refresh_in_uhz =
8672 				aconnector->min_vfreq * 1000000;
8673 		config.max_refresh_in_uhz =
8674 				aconnector->max_vfreq * 1000000;
8675 		config.vsif_supported = true;
8676 		config.btr = true;
8677 	}
8678 
8679 	new_crtc_state->freesync_config = config;
8680 }
8681 
8682 static void reset_freesync_config_for_crtc(
8683 	struct dm_crtc_state *new_crtc_state)
8684 {
8685 	new_crtc_state->vrr_supported = false;
8686 
8687 	memset(&new_crtc_state->vrr_infopacket, 0,
8688 	       sizeof(new_crtc_state->vrr_infopacket));
8689 }
8690 
8691 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8692 				struct drm_atomic_state *state,
8693 				struct drm_crtc *crtc,
8694 				struct drm_crtc_state *old_crtc_state,
8695 				struct drm_crtc_state *new_crtc_state,
8696 				bool enable,
8697 				bool *lock_and_validation_needed)
8698 {
8699 	struct dm_atomic_state *dm_state = NULL;
8700 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8701 	struct dc_stream_state *new_stream;
8702 	int ret = 0;
8703 
8704 	/*
8705 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8706 	 * update changed items
8707 	 */
8708 	struct amdgpu_crtc *acrtc = NULL;
8709 	struct amdgpu_dm_connector *aconnector = NULL;
8710 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8711 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8712 
8713 	new_stream = NULL;
8714 
8715 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8716 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8717 	acrtc = to_amdgpu_crtc(crtc);
8718 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8719 
8720 	/* TODO This hack should go away */
8721 	if (aconnector && enable) {
8722 		/* Make sure fake sink is created in plug-in scenario */
8723 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8724 							    &aconnector->base);
8725 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8726 							    &aconnector->base);
8727 
8728 		if (IS_ERR(drm_new_conn_state)) {
8729 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8730 			goto fail;
8731 		}
8732 
8733 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8734 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8735 
8736 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8737 			goto skip_modeset;
8738 
8739 		new_stream = create_validate_stream_for_sink(aconnector,
8740 							     &new_crtc_state->mode,
8741 							     dm_new_conn_state,
8742 							     dm_old_crtc_state->stream);
8743 
8744 		/*
8745 		 * we can have no stream on ACTION_SET if a display
8746 		 * was disconnected during S3, in this case it is not an
8747 		 * error, the OS will be updated after detection, and
8748 		 * will do the right thing on next atomic commit
8749 		 */
8750 
8751 		if (!new_stream) {
8752 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8753 					__func__, acrtc->base.base.id);
8754 			ret = -ENOMEM;
8755 			goto fail;
8756 		}
8757 
8758 		/*
8759 		 * TODO: Check VSDB bits to decide whether this should
8760 		 * be enabled or not.
8761 		 */
8762 		new_stream->triggered_crtc_reset.enabled =
8763 			dm->force_timing_sync;
8764 
8765 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8766 
8767 		ret = fill_hdr_info_packet(drm_new_conn_state,
8768 					   &new_stream->hdr_static_metadata);
8769 		if (ret)
8770 			goto fail;
8771 
8772 		/*
8773 		 * If we already removed the old stream from the context
8774 		 * (and set the new stream to NULL) then we can't reuse
8775 		 * the old stream even if the stream and scaling are unchanged.
8776 		 * We'll hit the BUG_ON and black screen.
8777 		 *
8778 		 * TODO: Refactor this function to allow this check to work
8779 		 * in all conditions.
8780 		 */
8781 		if (dm_new_crtc_state->stream &&
8782 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8783 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8784 			new_crtc_state->mode_changed = false;
8785 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8786 					 new_crtc_state->mode_changed);
8787 		}
8788 	}
8789 
8790 	/* mode_changed flag may get updated above, need to check again */
8791 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8792 		goto skip_modeset;
8793 
8794 	DRM_DEBUG_DRIVER(
8795 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8796 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8797 		"connectors_changed:%d\n",
8798 		acrtc->crtc_id,
8799 		new_crtc_state->enable,
8800 		new_crtc_state->active,
8801 		new_crtc_state->planes_changed,
8802 		new_crtc_state->mode_changed,
8803 		new_crtc_state->active_changed,
8804 		new_crtc_state->connectors_changed);
8805 
8806 	/* Remove stream for any changed/disabled CRTC */
8807 	if (!enable) {
8808 
8809 		if (!dm_old_crtc_state->stream)
8810 			goto skip_modeset;
8811 
8812 		ret = dm_atomic_get_state(state, &dm_state);
8813 		if (ret)
8814 			goto fail;
8815 
8816 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8817 				crtc->base.id);
8818 
8819 		/* i.e. reset mode */
8820 		if (dc_remove_stream_from_ctx(
8821 				dm->dc,
8822 				dm_state->context,
8823 				dm_old_crtc_state->stream) != DC_OK) {
8824 			ret = -EINVAL;
8825 			goto fail;
8826 		}
8827 
8828 		dc_stream_release(dm_old_crtc_state->stream);
8829 		dm_new_crtc_state->stream = NULL;
8830 
8831 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8832 
8833 		*lock_and_validation_needed = true;
8834 
8835 	} else {/* Add stream for any updated/enabled CRTC */
8836 		/*
8837 		 * Quick fix to prevent NULL pointer on new_stream when
8838 		 * added MST connectors not found in existing crtc_state in the chained mode
8839 		 * TODO: need to dig out the root cause of that
8840 		 */
8841 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8842 			goto skip_modeset;
8843 
8844 		if (modereset_required(new_crtc_state))
8845 			goto skip_modeset;
8846 
8847 		if (modeset_required(new_crtc_state, new_stream,
8848 				     dm_old_crtc_state->stream)) {
8849 
8850 			WARN_ON(dm_new_crtc_state->stream);
8851 
8852 			ret = dm_atomic_get_state(state, &dm_state);
8853 			if (ret)
8854 				goto fail;
8855 
8856 			dm_new_crtc_state->stream = new_stream;
8857 
8858 			dc_stream_retain(new_stream);
8859 
8860 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8861 						crtc->base.id);
8862 
8863 			if (dc_add_stream_to_ctx(
8864 					dm->dc,
8865 					dm_state->context,
8866 					dm_new_crtc_state->stream) != DC_OK) {
8867 				ret = -EINVAL;
8868 				goto fail;
8869 			}
8870 
8871 			*lock_and_validation_needed = true;
8872 		}
8873 	}
8874 
8875 skip_modeset:
8876 	/* Release extra reference */
8877 	if (new_stream)
8878 		 dc_stream_release(new_stream);
8879 
8880 	/*
8881 	 * We want to do dc stream updates that do not require a
8882 	 * full modeset below.
8883 	 */
8884 	if (!(enable && aconnector && new_crtc_state->active))
8885 		return 0;
8886 	/*
8887 	 * Given above conditions, the dc state cannot be NULL because:
8888 	 * 1. We're in the process of enabling CRTCs (just been added
8889 	 *    to the dc context, or already is on the context)
8890 	 * 2. Has a valid connector attached, and
8891 	 * 3. Is currently active and enabled.
8892 	 * => The dc stream state currently exists.
8893 	 */
8894 	BUG_ON(dm_new_crtc_state->stream == NULL);
8895 
8896 	/* Scaling or underscan settings */
8897 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8898 		update_stream_scaling_settings(
8899 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8900 
8901 	/* ABM settings */
8902 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8903 
8904 	/*
8905 	 * Color management settings. We also update color properties
8906 	 * when a modeset is needed, to ensure it gets reprogrammed.
8907 	 */
8908 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8909 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8910 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8911 		if (ret)
8912 			goto fail;
8913 	}
8914 
8915 	/* Update Freesync settings. */
8916 	get_freesync_config_for_crtc(dm_new_crtc_state,
8917 				     dm_new_conn_state);
8918 
8919 	return ret;
8920 
8921 fail:
8922 	if (new_stream)
8923 		dc_stream_release(new_stream);
8924 	return ret;
8925 }
8926 
8927 static bool should_reset_plane(struct drm_atomic_state *state,
8928 			       struct drm_plane *plane,
8929 			       struct drm_plane_state *old_plane_state,
8930 			       struct drm_plane_state *new_plane_state)
8931 {
8932 	struct drm_plane *other;
8933 	struct drm_plane_state *old_other_state, *new_other_state;
8934 	struct drm_crtc_state *new_crtc_state;
8935 	int i;
8936 
8937 	/*
8938 	 * TODO: Remove this hack once the checks below are sufficient
8939 	 * enough to determine when we need to reset all the planes on
8940 	 * the stream.
8941 	 */
8942 	if (state->allow_modeset)
8943 		return true;
8944 
8945 	/* Exit early if we know that we're adding or removing the plane. */
8946 	if (old_plane_state->crtc != new_plane_state->crtc)
8947 		return true;
8948 
8949 	/* old crtc == new_crtc == NULL, plane not in context. */
8950 	if (!new_plane_state->crtc)
8951 		return false;
8952 
8953 	new_crtc_state =
8954 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8955 
8956 	if (!new_crtc_state)
8957 		return true;
8958 
8959 	/* CRTC Degamma changes currently require us to recreate planes. */
8960 	if (new_crtc_state->color_mgmt_changed)
8961 		return true;
8962 
8963 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8964 		return true;
8965 
8966 	/*
8967 	 * If there are any new primary or overlay planes being added or
8968 	 * removed then the z-order can potentially change. To ensure
8969 	 * correct z-order and pipe acquisition the current DC architecture
8970 	 * requires us to remove and recreate all existing planes.
8971 	 *
8972 	 * TODO: Come up with a more elegant solution for this.
8973 	 */
8974 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8975 		struct amdgpu_framebuffer *old_afb, *new_afb;
8976 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8977 			continue;
8978 
8979 		if (old_other_state->crtc != new_plane_state->crtc &&
8980 		    new_other_state->crtc != new_plane_state->crtc)
8981 			continue;
8982 
8983 		if (old_other_state->crtc != new_other_state->crtc)
8984 			return true;
8985 
8986 		/* Src/dst size and scaling updates. */
8987 		if (old_other_state->src_w != new_other_state->src_w ||
8988 		    old_other_state->src_h != new_other_state->src_h ||
8989 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8990 		    old_other_state->crtc_h != new_other_state->crtc_h)
8991 			return true;
8992 
8993 		/* Rotation / mirroring updates. */
8994 		if (old_other_state->rotation != new_other_state->rotation)
8995 			return true;
8996 
8997 		/* Blending updates. */
8998 		if (old_other_state->pixel_blend_mode !=
8999 		    new_other_state->pixel_blend_mode)
9000 			return true;
9001 
9002 		/* Alpha updates. */
9003 		if (old_other_state->alpha != new_other_state->alpha)
9004 			return true;
9005 
9006 		/* Colorspace changes. */
9007 		if (old_other_state->color_range != new_other_state->color_range ||
9008 		    old_other_state->color_encoding != new_other_state->color_encoding)
9009 			return true;
9010 
9011 		/* Framebuffer checks fall at the end. */
9012 		if (!old_other_state->fb || !new_other_state->fb)
9013 			continue;
9014 
9015 		/* Pixel format changes can require bandwidth updates. */
9016 		if (old_other_state->fb->format != new_other_state->fb->format)
9017 			return true;
9018 
9019 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9020 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9021 
9022 		/* Tiling and DCC changes also require bandwidth updates. */
9023 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9024 		    old_afb->base.modifier != new_afb->base.modifier)
9025 			return true;
9026 	}
9027 
9028 	return false;
9029 }
9030 
9031 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9032 			      struct drm_plane_state *new_plane_state,
9033 			      struct drm_framebuffer *fb)
9034 {
9035 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9036 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9037 	unsigned int pitch;
9038 	bool linear;
9039 
9040 	if (fb->width > new_acrtc->max_cursor_width ||
9041 	    fb->height > new_acrtc->max_cursor_height) {
9042 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9043 				 new_plane_state->fb->width,
9044 				 new_plane_state->fb->height);
9045 		return -EINVAL;
9046 	}
9047 	if (new_plane_state->src_w != fb->width << 16 ||
9048 	    new_plane_state->src_h != fb->height << 16) {
9049 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9050 		return -EINVAL;
9051 	}
9052 
9053 	/* Pitch in pixels */
9054 	pitch = fb->pitches[0] / fb->format->cpp[0];
9055 
9056 	if (fb->width != pitch) {
9057 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9058 				 fb->width, pitch);
9059 		return -EINVAL;
9060 	}
9061 
9062 	switch (pitch) {
9063 	case 64:
9064 	case 128:
9065 	case 256:
9066 		/* FB pitch is supported by cursor plane */
9067 		break;
9068 	default:
9069 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9070 		return -EINVAL;
9071 	}
9072 
9073 	/* Core DRM takes care of checking FB modifiers, so we only need to
9074 	 * check tiling flags when the FB doesn't have a modifier. */
9075 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9076 		if (adev->family < AMDGPU_FAMILY_AI) {
9077 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9078 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9079 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9080 		} else {
9081 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9082 		}
9083 		if (!linear) {
9084 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9085 			return -EINVAL;
9086 		}
9087 	}
9088 
9089 	return 0;
9090 }
9091 
9092 static int dm_update_plane_state(struct dc *dc,
9093 				 struct drm_atomic_state *state,
9094 				 struct drm_plane *plane,
9095 				 struct drm_plane_state *old_plane_state,
9096 				 struct drm_plane_state *new_plane_state,
9097 				 bool enable,
9098 				 bool *lock_and_validation_needed)
9099 {
9100 
9101 	struct dm_atomic_state *dm_state = NULL;
9102 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9103 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9104 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9105 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9106 	struct amdgpu_crtc *new_acrtc;
9107 	bool needs_reset;
9108 	int ret = 0;
9109 
9110 
9111 	new_plane_crtc = new_plane_state->crtc;
9112 	old_plane_crtc = old_plane_state->crtc;
9113 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9114 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9115 
9116 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9117 		if (!enable || !new_plane_crtc ||
9118 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9119 			return 0;
9120 
9121 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9122 
9123 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9124 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9125 			return -EINVAL;
9126 		}
9127 
9128 		if (new_plane_state->fb) {
9129 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9130 						 new_plane_state->fb);
9131 			if (ret)
9132 				return ret;
9133 		}
9134 
9135 		return 0;
9136 	}
9137 
9138 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9139 					 new_plane_state);
9140 
9141 	/* Remove any changed/removed planes */
9142 	if (!enable) {
9143 		if (!needs_reset)
9144 			return 0;
9145 
9146 		if (!old_plane_crtc)
9147 			return 0;
9148 
9149 		old_crtc_state = drm_atomic_get_old_crtc_state(
9150 				state, old_plane_crtc);
9151 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9152 
9153 		if (!dm_old_crtc_state->stream)
9154 			return 0;
9155 
9156 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9157 				plane->base.id, old_plane_crtc->base.id);
9158 
9159 		ret = dm_atomic_get_state(state, &dm_state);
9160 		if (ret)
9161 			return ret;
9162 
9163 		if (!dc_remove_plane_from_context(
9164 				dc,
9165 				dm_old_crtc_state->stream,
9166 				dm_old_plane_state->dc_state,
9167 				dm_state->context)) {
9168 
9169 			return -EINVAL;
9170 		}
9171 
9172 
9173 		dc_plane_state_release(dm_old_plane_state->dc_state);
9174 		dm_new_plane_state->dc_state = NULL;
9175 
9176 		*lock_and_validation_needed = true;
9177 
9178 	} else { /* Add new planes */
9179 		struct dc_plane_state *dc_new_plane_state;
9180 
9181 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9182 			return 0;
9183 
9184 		if (!new_plane_crtc)
9185 			return 0;
9186 
9187 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9188 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9189 
9190 		if (!dm_new_crtc_state->stream)
9191 			return 0;
9192 
9193 		if (!needs_reset)
9194 			return 0;
9195 
9196 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9197 		if (ret)
9198 			return ret;
9199 
9200 		WARN_ON(dm_new_plane_state->dc_state);
9201 
9202 		dc_new_plane_state = dc_create_plane_state(dc);
9203 		if (!dc_new_plane_state)
9204 			return -ENOMEM;
9205 
9206 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9207 				plane->base.id, new_plane_crtc->base.id);
9208 
9209 		ret = fill_dc_plane_attributes(
9210 			drm_to_adev(new_plane_crtc->dev),
9211 			dc_new_plane_state,
9212 			new_plane_state,
9213 			new_crtc_state);
9214 		if (ret) {
9215 			dc_plane_state_release(dc_new_plane_state);
9216 			return ret;
9217 		}
9218 
9219 		ret = dm_atomic_get_state(state, &dm_state);
9220 		if (ret) {
9221 			dc_plane_state_release(dc_new_plane_state);
9222 			return ret;
9223 		}
9224 
9225 		/*
9226 		 * Any atomic check errors that occur after this will
9227 		 * not need a release. The plane state will be attached
9228 		 * to the stream, and therefore part of the atomic
9229 		 * state. It'll be released when the atomic state is
9230 		 * cleaned.
9231 		 */
9232 		if (!dc_add_plane_to_context(
9233 				dc,
9234 				dm_new_crtc_state->stream,
9235 				dc_new_plane_state,
9236 				dm_state->context)) {
9237 
9238 			dc_plane_state_release(dc_new_plane_state);
9239 			return -EINVAL;
9240 		}
9241 
9242 		dm_new_plane_state->dc_state = dc_new_plane_state;
9243 
9244 		/* Tell DC to do a full surface update every time there
9245 		 * is a plane change. Inefficient, but works for now.
9246 		 */
9247 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9248 
9249 		*lock_and_validation_needed = true;
9250 	}
9251 
9252 
9253 	return ret;
9254 }
9255 
9256 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9257 				struct drm_crtc *crtc,
9258 				struct drm_crtc_state *new_crtc_state)
9259 {
9260 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9261 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9262 
9263 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9264 	 * cursor per pipe but it's going to inherit the scaling and
9265 	 * positioning from the underlying pipe. Check the cursor plane's
9266 	 * blending properties match the primary plane's. */
9267 
9268 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9269 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9270 	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9271 		return 0;
9272 	}
9273 
9274 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9275 			 (new_cursor_state->src_w >> 16);
9276 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9277 			 (new_cursor_state->src_h >> 16);
9278 
9279 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9280 			 (new_primary_state->src_w >> 16);
9281 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9282 			 (new_primary_state->src_h >> 16);
9283 
9284 	if (cursor_scale_w != primary_scale_w ||
9285 	    cursor_scale_h != primary_scale_h) {
9286 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9287 		return -EINVAL;
9288 	}
9289 
9290 	return 0;
9291 }
9292 
9293 #if defined(CONFIG_DRM_AMD_DC_DCN)
9294 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9295 {
9296 	struct drm_connector *connector;
9297 	struct drm_connector_state *conn_state;
9298 	struct amdgpu_dm_connector *aconnector = NULL;
9299 	int i;
9300 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9301 		if (conn_state->crtc != crtc)
9302 			continue;
9303 
9304 		aconnector = to_amdgpu_dm_connector(connector);
9305 		if (!aconnector->port || !aconnector->mst_port)
9306 			aconnector = NULL;
9307 		else
9308 			break;
9309 	}
9310 
9311 	if (!aconnector)
9312 		return 0;
9313 
9314 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9315 }
9316 #endif
9317 
9318 /**
9319  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9320  * @dev: The DRM device
9321  * @state: The atomic state to commit
9322  *
9323  * Validate that the given atomic state is programmable by DC into hardware.
9324  * This involves constructing a &struct dc_state reflecting the new hardware
9325  * state we wish to commit, then querying DC to see if it is programmable. It's
9326  * important not to modify the existing DC state. Otherwise, atomic_check
9327  * may unexpectedly commit hardware changes.
9328  *
9329  * When validating the DC state, it's important that the right locks are
9330  * acquired. For full updates case which removes/adds/updates streams on one
9331  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9332  * that any such full update commit will wait for completion of any outstanding
9333  * flip using DRMs synchronization events.
9334  *
9335  * Note that DM adds the affected connectors for all CRTCs in state, when that
9336  * might not seem necessary. This is because DC stream creation requires the
9337  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9338  * be possible but non-trivial - a possible TODO item.
9339  *
9340  * Return: -Error code if validation failed.
9341  */
9342 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9343 				  struct drm_atomic_state *state)
9344 {
9345 	struct amdgpu_device *adev = drm_to_adev(dev);
9346 	struct dm_atomic_state *dm_state = NULL;
9347 	struct dc *dc = adev->dm.dc;
9348 	struct drm_connector *connector;
9349 	struct drm_connector_state *old_con_state, *new_con_state;
9350 	struct drm_crtc *crtc;
9351 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9352 	struct drm_plane *plane;
9353 	struct drm_plane_state *old_plane_state, *new_plane_state;
9354 	enum dc_status status;
9355 	int ret, i;
9356 	bool lock_and_validation_needed = false;
9357 	struct dm_crtc_state *dm_old_crtc_state;
9358 
9359 	trace_amdgpu_dm_atomic_check_begin(state);
9360 
9361 	ret = drm_atomic_helper_check_modeset(dev, state);
9362 	if (ret)
9363 		goto fail;
9364 
9365 	/* Check connector changes */
9366 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9367 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9368 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9369 
9370 		/* Skip connectors that are disabled or part of modeset already. */
9371 		if (!old_con_state->crtc && !new_con_state->crtc)
9372 			continue;
9373 
9374 		if (!new_con_state->crtc)
9375 			continue;
9376 
9377 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9378 		if (IS_ERR(new_crtc_state)) {
9379 			ret = PTR_ERR(new_crtc_state);
9380 			goto fail;
9381 		}
9382 
9383 		if (dm_old_con_state->abm_level !=
9384 		    dm_new_con_state->abm_level)
9385 			new_crtc_state->connectors_changed = true;
9386 	}
9387 
9388 #if defined(CONFIG_DRM_AMD_DC_DCN)
9389 	if (adev->asic_type >= CHIP_NAVI10) {
9390 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9391 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9392 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9393 				if (ret)
9394 					goto fail;
9395 			}
9396 		}
9397 	}
9398 #endif
9399 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9400 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9401 
9402 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9403 		    !new_crtc_state->color_mgmt_changed &&
9404 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9405 			dm_old_crtc_state->dsc_force_changed == false)
9406 			continue;
9407 
9408 		if (!new_crtc_state->enable)
9409 			continue;
9410 
9411 		ret = drm_atomic_add_affected_connectors(state, crtc);
9412 		if (ret)
9413 			return ret;
9414 
9415 		ret = drm_atomic_add_affected_planes(state, crtc);
9416 		if (ret)
9417 			goto fail;
9418 
9419 		if (dm_old_crtc_state->dsc_force_changed)
9420 			new_crtc_state->mode_changed = true;
9421 	}
9422 
9423 	/*
9424 	 * Add all primary and overlay planes on the CRTC to the state
9425 	 * whenever a plane is enabled to maintain correct z-ordering
9426 	 * and to enable fast surface updates.
9427 	 */
9428 	drm_for_each_crtc(crtc, dev) {
9429 		bool modified = false;
9430 
9431 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9432 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9433 				continue;
9434 
9435 			if (new_plane_state->crtc == crtc ||
9436 			    old_plane_state->crtc == crtc) {
9437 				modified = true;
9438 				break;
9439 			}
9440 		}
9441 
9442 		if (!modified)
9443 			continue;
9444 
9445 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9446 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9447 				continue;
9448 
9449 			new_plane_state =
9450 				drm_atomic_get_plane_state(state, plane);
9451 
9452 			if (IS_ERR(new_plane_state)) {
9453 				ret = PTR_ERR(new_plane_state);
9454 				goto fail;
9455 			}
9456 		}
9457 	}
9458 
9459 	/* Remove exiting planes if they are modified */
9460 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9461 		ret = dm_update_plane_state(dc, state, plane,
9462 					    old_plane_state,
9463 					    new_plane_state,
9464 					    false,
9465 					    &lock_and_validation_needed);
9466 		if (ret)
9467 			goto fail;
9468 	}
9469 
9470 	/* Disable all crtcs which require disable */
9471 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9472 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9473 					   old_crtc_state,
9474 					   new_crtc_state,
9475 					   false,
9476 					   &lock_and_validation_needed);
9477 		if (ret)
9478 			goto fail;
9479 	}
9480 
9481 	/* Enable all crtcs which require enable */
9482 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9483 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9484 					   old_crtc_state,
9485 					   new_crtc_state,
9486 					   true,
9487 					   &lock_and_validation_needed);
9488 		if (ret)
9489 			goto fail;
9490 	}
9491 
9492 	/* Add new/modified planes */
9493 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9494 		ret = dm_update_plane_state(dc, state, plane,
9495 					    old_plane_state,
9496 					    new_plane_state,
9497 					    true,
9498 					    &lock_and_validation_needed);
9499 		if (ret)
9500 			goto fail;
9501 	}
9502 
9503 	/* Run this here since we want to validate the streams we created */
9504 	ret = drm_atomic_helper_check_planes(dev, state);
9505 	if (ret)
9506 		goto fail;
9507 
9508 	/* Check cursor planes scaling */
9509 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9510 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9511 		if (ret)
9512 			goto fail;
9513 	}
9514 
9515 	if (state->legacy_cursor_update) {
9516 		/*
9517 		 * This is a fast cursor update coming from the plane update
9518 		 * helper, check if it can be done asynchronously for better
9519 		 * performance.
9520 		 */
9521 		state->async_update =
9522 			!drm_atomic_helper_async_check(dev, state);
9523 
9524 		/*
9525 		 * Skip the remaining global validation if this is an async
9526 		 * update. Cursor updates can be done without affecting
9527 		 * state or bandwidth calcs and this avoids the performance
9528 		 * penalty of locking the private state object and
9529 		 * allocating a new dc_state.
9530 		 */
9531 		if (state->async_update)
9532 			return 0;
9533 	}
9534 
9535 	/* Check scaling and underscan changes*/
9536 	/* TODO Removed scaling changes validation due to inability to commit
9537 	 * new stream into context w\o causing full reset. Need to
9538 	 * decide how to handle.
9539 	 */
9540 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9541 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9542 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9543 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9544 
9545 		/* Skip any modesets/resets */
9546 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9547 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9548 			continue;
9549 
9550 		/* Skip any thing not scale or underscan changes */
9551 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9552 			continue;
9553 
9554 		lock_and_validation_needed = true;
9555 	}
9556 
9557 	/**
9558 	 * Streams and planes are reset when there are changes that affect
9559 	 * bandwidth. Anything that affects bandwidth needs to go through
9560 	 * DC global validation to ensure that the configuration can be applied
9561 	 * to hardware.
9562 	 *
9563 	 * We have to currently stall out here in atomic_check for outstanding
9564 	 * commits to finish in this case because our IRQ handlers reference
9565 	 * DRM state directly - we can end up disabling interrupts too early
9566 	 * if we don't.
9567 	 *
9568 	 * TODO: Remove this stall and drop DM state private objects.
9569 	 */
9570 	if (lock_and_validation_needed) {
9571 		ret = dm_atomic_get_state(state, &dm_state);
9572 		if (ret)
9573 			goto fail;
9574 
9575 		ret = do_aquire_global_lock(dev, state);
9576 		if (ret)
9577 			goto fail;
9578 
9579 #if defined(CONFIG_DRM_AMD_DC_DCN)
9580 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9581 			goto fail;
9582 
9583 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9584 		if (ret)
9585 			goto fail;
9586 #endif
9587 
9588 		/*
9589 		 * Perform validation of MST topology in the state:
9590 		 * We need to perform MST atomic check before calling
9591 		 * dc_validate_global_state(), or there is a chance
9592 		 * to get stuck in an infinite loop and hang eventually.
9593 		 */
9594 		ret = drm_dp_mst_atomic_check(state);
9595 		if (ret)
9596 			goto fail;
9597 		status = dc_validate_global_state(dc, dm_state->context, false);
9598 		if (status != DC_OK) {
9599 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9600 				       dc_status_to_str(status), status);
9601 			ret = -EINVAL;
9602 			goto fail;
9603 		}
9604 	} else {
9605 		/*
9606 		 * The commit is a fast update. Fast updates shouldn't change
9607 		 * the DC context, affect global validation, and can have their
9608 		 * commit work done in parallel with other commits not touching
9609 		 * the same resource. If we have a new DC context as part of
9610 		 * the DM atomic state from validation we need to free it and
9611 		 * retain the existing one instead.
9612 		 *
9613 		 * Furthermore, since the DM atomic state only contains the DC
9614 		 * context and can safely be annulled, we can free the state
9615 		 * and clear the associated private object now to free
9616 		 * some memory and avoid a possible use-after-free later.
9617 		 */
9618 
9619 		for (i = 0; i < state->num_private_objs; i++) {
9620 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9621 
9622 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9623 				int j = state->num_private_objs-1;
9624 
9625 				dm_atomic_destroy_state(obj,
9626 						state->private_objs[i].state);
9627 
9628 				/* If i is not at the end of the array then the
9629 				 * last element needs to be moved to where i was
9630 				 * before the array can safely be truncated.
9631 				 */
9632 				if (i != j)
9633 					state->private_objs[i] =
9634 						state->private_objs[j];
9635 
9636 				state->private_objs[j].ptr = NULL;
9637 				state->private_objs[j].state = NULL;
9638 				state->private_objs[j].old_state = NULL;
9639 				state->private_objs[j].new_state = NULL;
9640 
9641 				state->num_private_objs = j;
9642 				break;
9643 			}
9644 		}
9645 	}
9646 
9647 	/* Store the overall update type for use later in atomic check. */
9648 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9649 		struct dm_crtc_state *dm_new_crtc_state =
9650 			to_dm_crtc_state(new_crtc_state);
9651 
9652 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9653 							 UPDATE_TYPE_FULL :
9654 							 UPDATE_TYPE_FAST;
9655 	}
9656 
9657 	/* Must be success */
9658 	WARN_ON(ret);
9659 
9660 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9661 
9662 	return ret;
9663 
9664 fail:
9665 	if (ret == -EDEADLK)
9666 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9667 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9668 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9669 	else
9670 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9671 
9672 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9673 
9674 	return ret;
9675 }
9676 
9677 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9678 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9679 {
9680 	uint8_t dpcd_data;
9681 	bool capable = false;
9682 
9683 	if (amdgpu_dm_connector->dc_link &&
9684 		dm_helpers_dp_read_dpcd(
9685 				NULL,
9686 				amdgpu_dm_connector->dc_link,
9687 				DP_DOWN_STREAM_PORT_COUNT,
9688 				&dpcd_data,
9689 				sizeof(dpcd_data))) {
9690 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9691 	}
9692 
9693 	return capable;
9694 }
9695 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9696 					struct edid *edid)
9697 {
9698 	int i;
9699 	bool edid_check_required;
9700 	struct detailed_timing *timing;
9701 	struct detailed_non_pixel *data;
9702 	struct detailed_data_monitor_range *range;
9703 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9704 			to_amdgpu_dm_connector(connector);
9705 	struct dm_connector_state *dm_con_state = NULL;
9706 
9707 	struct drm_device *dev = connector->dev;
9708 	struct amdgpu_device *adev = drm_to_adev(dev);
9709 	bool freesync_capable = false;
9710 
9711 	if (!connector->state) {
9712 		DRM_ERROR("%s - Connector has no state", __func__);
9713 		goto update;
9714 	}
9715 
9716 	if (!edid) {
9717 		dm_con_state = to_dm_connector_state(connector->state);
9718 
9719 		amdgpu_dm_connector->min_vfreq = 0;
9720 		amdgpu_dm_connector->max_vfreq = 0;
9721 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9722 
9723 		goto update;
9724 	}
9725 
9726 	dm_con_state = to_dm_connector_state(connector->state);
9727 
9728 	edid_check_required = false;
9729 	if (!amdgpu_dm_connector->dc_sink) {
9730 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9731 		goto update;
9732 	}
9733 	if (!adev->dm.freesync_module)
9734 		goto update;
9735 	/*
9736 	 * if edid non zero restrict freesync only for dp and edp
9737 	 */
9738 	if (edid) {
9739 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9740 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9741 			edid_check_required = is_dp_capable_without_timing_msa(
9742 						adev->dm.dc,
9743 						amdgpu_dm_connector);
9744 		}
9745 	}
9746 	if (edid_check_required == true && (edid->version > 1 ||
9747 	   (edid->version == 1 && edid->revision > 1))) {
9748 		for (i = 0; i < 4; i++) {
9749 
9750 			timing	= &edid->detailed_timings[i];
9751 			data	= &timing->data.other_data;
9752 			range	= &data->data.range;
9753 			/*
9754 			 * Check if monitor has continuous frequency mode
9755 			 */
9756 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9757 				continue;
9758 			/*
9759 			 * Check for flag range limits only. If flag == 1 then
9760 			 * no additional timing information provided.
9761 			 * Default GTF, GTF Secondary curve and CVT are not
9762 			 * supported
9763 			 */
9764 			if (range->flags != 1)
9765 				continue;
9766 
9767 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9768 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9769 			amdgpu_dm_connector->pixel_clock_mhz =
9770 				range->pixel_clock_mhz * 10;
9771 
9772 			connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9773 			connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9774 
9775 			break;
9776 		}
9777 
9778 		if (amdgpu_dm_connector->max_vfreq -
9779 		    amdgpu_dm_connector->min_vfreq > 10) {
9780 
9781 			freesync_capable = true;
9782 		}
9783 	}
9784 
9785 update:
9786 	if (dm_con_state)
9787 		dm_con_state->freesync_capable = freesync_capable;
9788 
9789 	if (connector->vrr_capable_property)
9790 		drm_connector_set_vrr_capable_property(connector,
9791 						       freesync_capable);
9792 }
9793 
9794 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9795 {
9796 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9797 
9798 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9799 		return;
9800 	if (link->type == dc_connection_none)
9801 		return;
9802 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9803 					dpcd_data, sizeof(dpcd_data))) {
9804 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9805 
9806 		if (dpcd_data[0] == 0) {
9807 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9808 			link->psr_settings.psr_feature_enabled = false;
9809 		} else {
9810 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9811 			link->psr_settings.psr_feature_enabled = true;
9812 		}
9813 
9814 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9815 	}
9816 }
9817 
9818 /*
9819  * amdgpu_dm_link_setup_psr() - configure psr link
9820  * @stream: stream state
9821  *
9822  * Return: true if success
9823  */
9824 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9825 {
9826 	struct dc_link *link = NULL;
9827 	struct psr_config psr_config = {0};
9828 	struct psr_context psr_context = {0};
9829 	bool ret = false;
9830 
9831 	if (stream == NULL)
9832 		return false;
9833 
9834 	link = stream->link;
9835 
9836 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9837 
9838 	if (psr_config.psr_version > 0) {
9839 		psr_config.psr_exit_link_training_required = 0x1;
9840 		psr_config.psr_frame_capture_indication_req = 0;
9841 		psr_config.psr_rfb_setup_time = 0x37;
9842 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9843 		psr_config.allow_smu_optimizations = 0x0;
9844 
9845 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9846 
9847 	}
9848 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9849 
9850 	return ret;
9851 }
9852 
9853 /*
9854  * amdgpu_dm_psr_enable() - enable psr f/w
9855  * @stream: stream state
9856  *
9857  * Return: true if success
9858  */
9859 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9860 {
9861 	struct dc_link *link = stream->link;
9862 	unsigned int vsync_rate_hz = 0;
9863 	struct dc_static_screen_params params = {0};
9864 	/* Calculate number of static frames before generating interrupt to
9865 	 * enter PSR.
9866 	 */
9867 	// Init fail safe of 2 frames static
9868 	unsigned int num_frames_static = 2;
9869 
9870 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9871 
9872 	vsync_rate_hz = div64_u64(div64_u64((
9873 			stream->timing.pix_clk_100hz * 100),
9874 			stream->timing.v_total),
9875 			stream->timing.h_total);
9876 
9877 	/* Round up
9878 	 * Calculate number of frames such that at least 30 ms of time has
9879 	 * passed.
9880 	 */
9881 	if (vsync_rate_hz != 0) {
9882 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9883 		num_frames_static = (30000 / frame_time_microsec) + 1;
9884 	}
9885 
9886 	params.triggers.cursor_update = true;
9887 	params.triggers.overlay_update = true;
9888 	params.triggers.surface_update = true;
9889 	params.num_frames = num_frames_static;
9890 
9891 	dc_stream_set_static_screen_params(link->ctx->dc,
9892 					   &stream, 1,
9893 					   &params);
9894 
9895 	return dc_link_set_psr_allow_active(link, true, false, false);
9896 }
9897 
9898 /*
9899  * amdgpu_dm_psr_disable() - disable psr f/w
9900  * @stream:  stream state
9901  *
9902  * Return: true if success
9903  */
9904 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9905 {
9906 
9907 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9908 
9909 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9910 }
9911 
9912 /*
9913  * amdgpu_dm_psr_disable() - disable psr f/w
9914  * if psr is enabled on any stream
9915  *
9916  * Return: true if success
9917  */
9918 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9919 {
9920 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9921 	return dc_set_psr_allow_active(dm->dc, false);
9922 }
9923 
9924 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9925 {
9926 	struct amdgpu_device *adev = drm_to_adev(dev);
9927 	struct dc *dc = adev->dm.dc;
9928 	int i;
9929 
9930 	mutex_lock(&adev->dm.dc_lock);
9931 	if (dc->current_state) {
9932 		for (i = 0; i < dc->current_state->stream_count; ++i)
9933 			dc->current_state->streams[i]
9934 				->triggered_crtc_reset.enabled =
9935 				adev->dm.force_timing_sync;
9936 
9937 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9938 		dc_trigger_sync(dc, dc->current_state);
9939 	}
9940 	mutex_unlock(&adev->dm.dc_lock);
9941 }
9942 
9943 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9944 		       uint32_t value, const char *func_name)
9945 {
9946 #ifdef DM_CHECK_ADDR_0
9947 	if (address == 0) {
9948 		DC_ERR("invalid register write. address = 0");
9949 		return;
9950 	}
9951 #endif
9952 	cgs_write_register(ctx->cgs_device, address, value);
9953 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9954 }
9955 
9956 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9957 			  const char *func_name)
9958 {
9959 	uint32_t value;
9960 #ifdef DM_CHECK_ADDR_0
9961 	if (address == 0) {
9962 		DC_ERR("invalid register read; address = 0\n");
9963 		return 0;
9964 	}
9965 #endif
9966 
9967 	if (ctx->dmub_srv &&
9968 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9969 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9970 		ASSERT(false);
9971 		return 0;
9972 	}
9973 
9974 	value = cgs_read_register(ctx->cgs_device, address);
9975 
9976 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9977 
9978 	return value;
9979 }
9980