1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38 
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50 
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58 
59 #include "ivsrcid/ivsrcid_vislands30.h"
60 
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136 	switch (link->dpcd_caps.dongle_type) {
137 	case DISPLAY_DONGLE_NONE:
138 		return DRM_MODE_SUBCONNECTOR_Native;
139 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 		return DRM_MODE_SUBCONNECTOR_VGA;
141 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 		return DRM_MODE_SUBCONNECTOR_DVID;
144 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 		return DRM_MODE_SUBCONNECTOR_HDMIA;
147 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148 	default:
149 		return DRM_MODE_SUBCONNECTOR_Unknown;
150 	}
151 }
152 
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155 	struct dc_link *link = aconnector->dc_link;
156 	struct drm_connector *connector = &aconnector->base;
157 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158 
159 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160 		return;
161 
162 	if (aconnector->dc_sink)
163 		subconnector = get_subconnector_type(link);
164 
165 	drm_object_property_set_value(&connector->base,
166 			connector->dev->mode_config.dp_subconnector_property,
167 			subconnector);
168 }
169 
170 /*
171  * initializes drm_device display related structures, based on the information
172  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
173  * drm_encoder, drm_mode_config
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180 
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 				struct drm_plane *plane,
183 				unsigned long possible_crtcs,
184 				const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 			       struct drm_plane *plane,
187 			       uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
190 				    uint32_t link_index,
191 				    struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 				  struct amdgpu_encoder *aencoder,
194 				  uint32_t link_index);
195 
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211 
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214 
215 /*
216  * dm_vblank_get_counter
217  *
218  * @brief
219  * Get counter for number of vertical blanks
220  *
221  * @param
222  * struct amdgpu_device *adev - [in] desired amdgpu device
223  * int disp_idx - [in] which CRTC to get the counter from
224  *
225  * @return
226  * Counter for vertical blanks
227  */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
229 {
230 	if (crtc >= adev->mode_info.num_crtc)
231 		return 0;
232 	else {
233 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
234 
235 		if (acrtc->dm_irq_params.stream == NULL) {
236 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237 				  crtc);
238 			return 0;
239 		}
240 
241 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242 	}
243 }
244 
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 				  u32 *vbl, u32 *position)
247 {
248 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
249 
250 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251 		return -EINVAL;
252 	else {
253 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254 
255 		if (acrtc->dm_irq_params.stream ==  NULL) {
256 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
257 				  crtc);
258 			return 0;
259 		}
260 
261 		/*
262 		 * TODO rework base driver to use values directly.
263 		 * for now parse it back into reg-format
264 		 */
265 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
266 					 &v_blank_start,
267 					 &v_blank_end,
268 					 &h_position,
269 					 &v_position);
270 
271 		*position = v_position | (h_position << 16);
272 		*vbl = v_blank_start | (v_blank_end << 16);
273 	}
274 
275 	return 0;
276 }
277 
278 static bool dm_is_idle(void *handle)
279 {
280 	/* XXX todo */
281 	return true;
282 }
283 
284 static int dm_wait_for_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return 0;
288 }
289 
290 static bool dm_check_soft_reset(void *handle)
291 {
292 	return false;
293 }
294 
295 static int dm_soft_reset(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
303 		     int otg_inst)
304 {
305 	struct drm_device *dev = adev_to_drm(adev);
306 	struct drm_crtc *crtc;
307 	struct amdgpu_crtc *amdgpu_crtc;
308 
309 	if (otg_inst == -1) {
310 		WARN_ON(1);
311 		return adev->mode_info.crtcs[0];
312 	}
313 
314 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 		amdgpu_crtc = to_amdgpu_crtc(crtc);
316 
317 		if (amdgpu_crtc->otg_inst == otg_inst)
318 			return amdgpu_crtc;
319 	}
320 
321 	return NULL;
322 }
323 
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
325 {
326 	return acrtc->dm_irq_params.freesync_config.state ==
327 		       VRR_STATE_ACTIVE_VARIABLE ||
328 	       acrtc->dm_irq_params.freesync_config.state ==
329 		       VRR_STATE_ACTIVE_FIXED;
330 }
331 
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
333 {
334 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 /**
339  * dm_pflip_high_irq() - Handle pageflip interrupt
340  * @interrupt_params: ignored
341  *
342  * Handles the pageflip interrupt by notifying all interested parties
343  * that the pageflip has been completed.
344  */
345 static void dm_pflip_high_irq(void *interrupt_params)
346 {
347 	struct amdgpu_crtc *amdgpu_crtc;
348 	struct common_irq_params *irq_params = interrupt_params;
349 	struct amdgpu_device *adev = irq_params->adev;
350 	unsigned long flags;
351 	struct drm_pending_vblank_event *e;
352 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
353 	bool vrr_active;
354 
355 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
356 
357 	/* IRQ could occur when in initial stage */
358 	/* TODO work and BO cleanup */
359 	if (amdgpu_crtc == NULL) {
360 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
361 		return;
362 	}
363 
364 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
365 
366 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
367 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
368 						 amdgpu_crtc->pflip_status,
369 						 AMDGPU_FLIP_SUBMITTED,
370 						 amdgpu_crtc->crtc_id,
371 						 amdgpu_crtc);
372 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
373 		return;
374 	}
375 
376 	/* page flip completed. */
377 	e = amdgpu_crtc->event;
378 	amdgpu_crtc->event = NULL;
379 
380 	if (!e)
381 		WARN_ON(1);
382 
383 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
384 
385 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
386 	if (!vrr_active ||
387 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 				      &v_blank_end, &hpos, &vpos) ||
389 	    (vpos < v_blank_start)) {
390 		/* Update to correct count and vblank timestamp if racing with
391 		 * vblank irq. This also updates to the correct vblank timestamp
392 		 * even in VRR mode, as scanout is past the front-porch atm.
393 		 */
394 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
395 
396 		/* Wake up userspace by sending the pageflip event with proper
397 		 * count and timestamp of vblank of flip completion.
398 		 */
399 		if (e) {
400 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
401 
402 			/* Event sent, so done with vblank for this flip */
403 			drm_crtc_vblank_put(&amdgpu_crtc->base);
404 		}
405 	} else if (e) {
406 		/* VRR active and inside front-porch: vblank count and
407 		 * timestamp for pageflip event will only be up to date after
408 		 * drm_crtc_handle_vblank() has been executed from late vblank
409 		 * irq handler after start of back-porch (vline 0). We queue the
410 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 		 * updated timestamp and count, once it runs after us.
412 		 *
413 		 * We need to open-code this instead of using the helper
414 		 * drm_crtc_arm_vblank_event(), as that helper would
415 		 * call drm_crtc_accurate_vblank_count(), which we must
416 		 * not call in VRR mode while we are in front-porch!
417 		 */
418 
419 		/* sequence will be replaced by real count during send-out. */
420 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 		e->pipe = amdgpu_crtc->crtc_id;
422 
423 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
424 		e = NULL;
425 	}
426 
427 	/* Keep track of vblank of this flip for flip throttling. We use the
428 	 * cooked hw counter, as that one incremented at start of this vblank
429 	 * of pageflip completion, so last_flip_vblank is the forbidden count
430 	 * for queueing new pageflips if vsync + VRR is enabled.
431 	 */
432 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
434 
435 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
437 
438 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 			 vrr_active, (int) !e);
441 }
442 
443 static void dm_vupdate_high_irq(void *interrupt_params)
444 {
445 	struct common_irq_params *irq_params = interrupt_params;
446 	struct amdgpu_device *adev = irq_params->adev;
447 	struct amdgpu_crtc *acrtc;
448 	unsigned long flags;
449 	int vrr_active;
450 
451 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
452 
453 	if (acrtc) {
454 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
455 
456 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
457 			      acrtc->crtc_id,
458 			      vrr_active);
459 
460 		/* Core vblank handling is done here after end of front-porch in
461 		 * vrr mode, as vblank timestamping will give valid results
462 		 * while now done after front-porch. This will also deliver
463 		 * page-flip completion events that have been queued to us
464 		 * if a pageflip happened inside front-porch.
465 		 */
466 		if (vrr_active) {
467 			drm_crtc_handle_vblank(&acrtc->base);
468 
469 			/* BTR processing for pre-DCE12 ASICs */
470 			if (acrtc->dm_irq_params.stream &&
471 			    adev->family < AMDGPU_FAMILY_AI) {
472 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 				mod_freesync_handle_v_update(
474 				    adev->dm.freesync_module,
475 				    acrtc->dm_irq_params.stream,
476 				    &acrtc->dm_irq_params.vrr_params);
477 
478 				dc_stream_adjust_vmin_vmax(
479 				    adev->dm.dc,
480 				    acrtc->dm_irq_params.stream,
481 				    &acrtc->dm_irq_params.vrr_params.adjust);
482 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
483 			}
484 		}
485 	}
486 }
487 
488 /**
489  * dm_crtc_high_irq() - Handles CRTC interrupt
490  * @interrupt_params: used for determining the CRTC instance
491  *
492  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
493  * event handler.
494  */
495 static void dm_crtc_high_irq(void *interrupt_params)
496 {
497 	struct common_irq_params *irq_params = interrupt_params;
498 	struct amdgpu_device *adev = irq_params->adev;
499 	struct amdgpu_crtc *acrtc;
500 	unsigned long flags;
501 	int vrr_active;
502 
503 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
504 	if (!acrtc)
505 		return;
506 
507 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
508 
509 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 		      vrr_active, acrtc->dm_irq_params.active_planes);
511 
512 	/**
513 	 * Core vblank handling at start of front-porch is only possible
514 	 * in non-vrr mode, as only there vblank timestamping will give
515 	 * valid results while done in front-porch. Otherwise defer it
516 	 * to dm_vupdate_high_irq after end of front-porch.
517 	 */
518 	if (!vrr_active)
519 		drm_crtc_handle_vblank(&acrtc->base);
520 
521 	/**
522 	 * Following stuff must happen at start of vblank, for crc
523 	 * computation and below-the-range btr support in vrr mode.
524 	 */
525 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
526 
527 	/* BTR updates need to happen before VUPDATE on Vega and above. */
528 	if (adev->family < AMDGPU_FAMILY_AI)
529 		return;
530 
531 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
532 
533 	if (acrtc->dm_irq_params.stream &&
534 	    acrtc->dm_irq_params.vrr_params.supported &&
535 	    acrtc->dm_irq_params.freesync_config.state ==
536 		    VRR_STATE_ACTIVE_VARIABLE) {
537 		mod_freesync_handle_v_update(adev->dm.freesync_module,
538 					     acrtc->dm_irq_params.stream,
539 					     &acrtc->dm_irq_params.vrr_params);
540 
541 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 					   &acrtc->dm_irq_params.vrr_params.adjust);
543 	}
544 
545 	/*
546 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
547 	 * In that case, pageflip completion interrupts won't fire and pageflip
548 	 * completion events won't get delivered. Prevent this by sending
549 	 * pending pageflip events from here if a flip is still pending.
550 	 *
551 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 	 * avoid race conditions between flip programming and completion,
553 	 * which could cause too early flip completion events.
554 	 */
555 	if (adev->family >= AMDGPU_FAMILY_RV &&
556 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 	    acrtc->dm_irq_params.active_planes == 0) {
558 		if (acrtc->event) {
559 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
560 			acrtc->event = NULL;
561 			drm_crtc_vblank_put(&acrtc->base);
562 		}
563 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
564 	}
565 
566 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
567 }
568 
569 static int dm_set_clockgating_state(void *handle,
570 		  enum amd_clockgating_state state)
571 {
572 	return 0;
573 }
574 
575 static int dm_set_powergating_state(void *handle,
576 		  enum amd_powergating_state state)
577 {
578 	return 0;
579 }
580 
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
583 
584 /* Allocate memory for FBC compressed data  */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
586 {
587 	struct drm_device *dev = connector->dev;
588 	struct amdgpu_device *adev = drm_to_adev(dev);
589 	struct dm_compressor_info *compressor = &adev->dm.compressor;
590 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 	struct drm_display_mode *mode;
592 	unsigned long max_size = 0;
593 
594 	if (adev->dm.dc->fbc_compressor == NULL)
595 		return;
596 
597 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
598 		return;
599 
600 	if (compressor->bo_ptr)
601 		return;
602 
603 
604 	list_for_each_entry(mode, &connector->modes, head) {
605 		if (max_size < mode->htotal * mode->vtotal)
606 			max_size = mode->htotal * mode->vtotal;
607 	}
608 
609 	if (max_size) {
610 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612 			    &compressor->gpu_addr, &compressor->cpu_addr);
613 
614 		if (r)
615 			DRM_ERROR("DM: Failed to initialize FBC\n");
616 		else {
617 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
619 		}
620 
621 	}
622 
623 }
624 
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 					  int pipe, bool *enabled,
627 					  unsigned char *buf, int max_bytes)
628 {
629 	struct drm_device *dev = dev_get_drvdata(kdev);
630 	struct amdgpu_device *adev = drm_to_adev(dev);
631 	struct drm_connector *connector;
632 	struct drm_connector_list_iter conn_iter;
633 	struct amdgpu_dm_connector *aconnector;
634 	int ret = 0;
635 
636 	*enabled = false;
637 
638 	mutex_lock(&adev->dm.audio_lock);
639 
640 	drm_connector_list_iter_begin(dev, &conn_iter);
641 	drm_for_each_connector_iter(connector, &conn_iter) {
642 		aconnector = to_amdgpu_dm_connector(connector);
643 		if (aconnector->audio_inst != port)
644 			continue;
645 
646 		*enabled = true;
647 		ret = drm_eld_size(connector->eld);
648 		memcpy(buf, connector->eld, min(max_bytes, ret));
649 
650 		break;
651 	}
652 	drm_connector_list_iter_end(&conn_iter);
653 
654 	mutex_unlock(&adev->dm.audio_lock);
655 
656 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
657 
658 	return ret;
659 }
660 
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 	.get_eld = amdgpu_dm_audio_component_get_eld,
663 };
664 
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 				       struct device *hda_kdev, void *data)
667 {
668 	struct drm_device *dev = dev_get_drvdata(kdev);
669 	struct amdgpu_device *adev = drm_to_adev(dev);
670 	struct drm_audio_component *acomp = data;
671 
672 	acomp->ops = &amdgpu_dm_audio_component_ops;
673 	acomp->dev = kdev;
674 	adev->dm.audio_component = acomp;
675 
676 	return 0;
677 }
678 
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 					  struct device *hda_kdev, void *data)
681 {
682 	struct drm_device *dev = dev_get_drvdata(kdev);
683 	struct amdgpu_device *adev = drm_to_adev(dev);
684 	struct drm_audio_component *acomp = data;
685 
686 	acomp->ops = NULL;
687 	acomp->dev = NULL;
688 	adev->dm.audio_component = NULL;
689 }
690 
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 	.bind	= amdgpu_dm_audio_component_bind,
693 	.unbind	= amdgpu_dm_audio_component_unbind,
694 };
695 
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
697 {
698 	int i, ret;
699 
700 	if (!amdgpu_audio)
701 		return 0;
702 
703 	adev->mode_info.audio.enabled = true;
704 
705 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
706 
707 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 		adev->mode_info.audio.pin[i].channels = -1;
709 		adev->mode_info.audio.pin[i].rate = -1;
710 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 		adev->mode_info.audio.pin[i].status_bits = 0;
712 		adev->mode_info.audio.pin[i].category_code = 0;
713 		adev->mode_info.audio.pin[i].connected = false;
714 		adev->mode_info.audio.pin[i].id =
715 			adev->dm.dc->res_pool->audios[i]->inst;
716 		adev->mode_info.audio.pin[i].offset = 0;
717 	}
718 
719 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720 	if (ret < 0)
721 		return ret;
722 
723 	adev->dm.audio_registered = true;
724 
725 	return 0;
726 }
727 
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
729 {
730 	if (!amdgpu_audio)
731 		return;
732 
733 	if (!adev->mode_info.audio.enabled)
734 		return;
735 
736 	if (adev->dm.audio_registered) {
737 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 		adev->dm.audio_registered = false;
739 	}
740 
741 	/* TODO: Disable audio? */
742 
743 	adev->mode_info.audio.enabled = false;
744 }
745 
746 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
747 {
748 	struct drm_audio_component *acomp = adev->dm.audio_component;
749 
750 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
752 
753 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
754 						 pin, -1);
755 	}
756 }
757 
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
759 {
760 	const struct dmcub_firmware_header_v1_0 *hdr;
761 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 	struct abm *abm = adev->dm.dc->res_pool->abm;
766 	struct dmub_srv_hw_params hw_params;
767 	enum dmub_status status;
768 	const unsigned char *fw_inst_const, *fw_bss_data;
769 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
770 	bool has_hw_support;
771 
772 	if (!dmub_srv)
773 		/* DMUB isn't supported on the ASIC. */
774 		return 0;
775 
776 	if (!fb_info) {
777 		DRM_ERROR("No framebuffer info for DMUB service.\n");
778 		return -EINVAL;
779 	}
780 
781 	if (!dmub_fw) {
782 		/* Firmware required for DMUB support. */
783 		DRM_ERROR("No firmware provided for DMUB.\n");
784 		return -EINVAL;
785 	}
786 
787 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 	if (status != DMUB_STATUS_OK) {
789 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
790 		return -EINVAL;
791 	}
792 
793 	if (!has_hw_support) {
794 		DRM_INFO("DMUB unsupported on ASIC\n");
795 		return 0;
796 	}
797 
798 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
799 
800 	fw_inst_const = dmub_fw->data +
801 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
802 			PSP_HEADER_BYTES;
803 
804 	fw_bss_data = dmub_fw->data +
805 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 		      le32_to_cpu(hdr->inst_const_bytes);
807 
808 	/* Copy firmware and bios info into FB memory. */
809 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
811 
812 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
813 
814 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 	 * amdgpu_ucode_init_single_fw will load dmub firmware
816 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 	 * will be done by dm_dmub_hw_init
818 	 */
819 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
821 				fw_inst_const_size);
822 	}
823 
824 	if (fw_bss_data_size)
825 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 		       fw_bss_data, fw_bss_data_size);
827 
828 	/* Copy firmware bios info into FB memory. */
829 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
830 	       adev->bios_size);
831 
832 	/* Reset regions that need to be reset. */
833 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
834 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
835 
836 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
838 
839 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
841 
842 	/* Initialize hardware. */
843 	memset(&hw_params, 0, sizeof(hw_params));
844 	hw_params.fb_base = adev->gmc.fb_start;
845 	hw_params.fb_offset = adev->gmc.aper_base;
846 
847 	/* backdoor load firmware and trigger dmub running */
848 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 		hw_params.load_inst_const = true;
850 
851 	if (dmcu)
852 		hw_params.psp_version = dmcu->psp_version;
853 
854 	for (i = 0; i < fb_info->num_fb; ++i)
855 		hw_params.fb[i] = &fb_info->fb[i];
856 
857 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 	if (status != DMUB_STATUS_OK) {
859 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
860 		return -EINVAL;
861 	}
862 
863 	/* Wait for firmware load to finish. */
864 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 	if (status != DMUB_STATUS_OK)
866 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
867 
868 	/* Init DMCU and ABM if available. */
869 	if (dmcu && abm) {
870 		dmcu->funcs->dmcu_init(dmcu);
871 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
872 	}
873 
874 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 	if (!adev->dm.dc->ctx->dmub_srv) {
876 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877 		return -ENOMEM;
878 	}
879 
880 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 		 adev->dm.dmcub_fw_version);
882 
883 	return 0;
884 }
885 
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
888 {
889 	uint64_t pt_base;
890 	uint32_t logical_addr_low;
891 	uint32_t logical_addr_high;
892 	uint32_t agp_base, agp_bot, agp_top;
893 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
894 
895 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
896 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
897 
898 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
899 		/*
900 		 * Raven2 has a HW issue that it is unable to use the vram which
901 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
902 		 * workaround that increase system aperture high address (add 1)
903 		 * to get rid of the VM fault and hardware hang.
904 		 */
905 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
906 	else
907 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
908 
909 	agp_base = 0;
910 	agp_bot = adev->gmc.agp_start >> 24;
911 	agp_top = adev->gmc.agp_end >> 24;
912 
913 
914 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 	page_table_base.low_part = lower_32_bits(pt_base);
920 
921 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
923 
924 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
925 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
927 
928 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
931 
932 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
935 
936 	pa_config->is_hvm_enabled = 0;
937 
938 }
939 #endif
940 
941 static int amdgpu_dm_init(struct amdgpu_device *adev)
942 {
943 	struct dc_init_data init_data;
944 #ifdef CONFIG_DRM_AMD_DC_HDCP
945 	struct dc_callback_init init_params;
946 #endif
947 	int r;
948 
949 	adev->dm.ddev = adev_to_drm(adev);
950 	adev->dm.adev = adev;
951 
952 	/* Zero all the fields */
953 	memset(&init_data, 0, sizeof(init_data));
954 #ifdef CONFIG_DRM_AMD_DC_HDCP
955 	memset(&init_params, 0, sizeof(init_params));
956 #endif
957 
958 	mutex_init(&adev->dm.dc_lock);
959 	mutex_init(&adev->dm.audio_lock);
960 
961 	if(amdgpu_dm_irq_init(adev)) {
962 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
963 		goto error;
964 	}
965 
966 	init_data.asic_id.chip_family = adev->family;
967 
968 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
969 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
970 
971 	init_data.asic_id.vram_width = adev->gmc.vram_width;
972 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
973 	init_data.asic_id.atombios_base_address =
974 		adev->mode_info.atom_context->bios;
975 
976 	init_data.driver = adev;
977 
978 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
979 
980 	if (!adev->dm.cgs_device) {
981 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
982 		goto error;
983 	}
984 
985 	init_data.cgs_device = adev->dm.cgs_device;
986 
987 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
988 
989 	switch (adev->asic_type) {
990 	case CHIP_CARRIZO:
991 	case CHIP_STONEY:
992 	case CHIP_RAVEN:
993 	case CHIP_RENOIR:
994 		init_data.flags.gpu_vm_support = true;
995 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
996 			init_data.flags.disable_dmcu = true;
997 		break;
998 #if defined(CONFIG_DRM_AMD_DC_DCN)
999 	case CHIP_VANGOGH:
1000 		init_data.flags.gpu_vm_support = true;
1001 		break;
1002 #endif
1003 	default:
1004 		break;
1005 	}
1006 
1007 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1008 		init_data.flags.fbc_support = true;
1009 
1010 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1011 		init_data.flags.multi_mon_pp_mclk_switch = true;
1012 
1013 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1014 		init_data.flags.disable_fractional_pwm = true;
1015 
1016 	init_data.flags.power_down_display_on_boot = true;
1017 
1018 	/* Display Core create. */
1019 	adev->dm.dc = dc_create(&init_data);
1020 
1021 	if (adev->dm.dc) {
1022 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1023 	} else {
1024 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1025 		goto error;
1026 	}
1027 
1028 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1029 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1030 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1031 	}
1032 
1033 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1034 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1035 
1036 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1037 		adev->dm.dc->debug.disable_stutter = true;
1038 
1039 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1040 		adev->dm.dc->debug.disable_dsc = true;
1041 
1042 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1043 		adev->dm.dc->debug.disable_clock_gate = true;
1044 
1045 	r = dm_dmub_hw_init(adev);
1046 	if (r) {
1047 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1048 		goto error;
1049 	}
1050 
1051 	dc_hardware_init(adev->dm.dc);
1052 
1053 #if defined(CONFIG_DRM_AMD_DC_DCN)
1054 	if (adev->apu_flags) {
1055 		struct dc_phy_addr_space_config pa_config;
1056 
1057 		mmhub_read_system_context(adev, &pa_config);
1058 
1059 		// Call the DC init_memory func
1060 		dc_setup_system_context(adev->dm.dc, &pa_config);
1061 	}
1062 #endif
1063 
1064 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1065 	if (!adev->dm.freesync_module) {
1066 		DRM_ERROR(
1067 		"amdgpu: failed to initialize freesync_module.\n");
1068 	} else
1069 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1070 				adev->dm.freesync_module);
1071 
1072 	amdgpu_dm_init_color_mod();
1073 
1074 #ifdef CONFIG_DRM_AMD_DC_HDCP
1075 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1076 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1077 
1078 		if (!adev->dm.hdcp_workqueue)
1079 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1080 		else
1081 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1082 
1083 		dc_init_callbacks(adev->dm.dc, &init_params);
1084 	}
1085 #endif
1086 	if (amdgpu_dm_initialize_drm_device(adev)) {
1087 		DRM_ERROR(
1088 		"amdgpu: failed to initialize sw for display support.\n");
1089 		goto error;
1090 	}
1091 
1092 	/* create fake encoders for MST */
1093 	dm_dp_create_fake_mst_encoders(adev);
1094 
1095 	/* TODO: Add_display_info? */
1096 
1097 	/* TODO use dynamic cursor width */
1098 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1099 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1100 
1101 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1102 		DRM_ERROR(
1103 		"amdgpu: failed to initialize sw for display support.\n");
1104 		goto error;
1105 	}
1106 
1107 
1108 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1109 
1110 	return 0;
1111 error:
1112 	amdgpu_dm_fini(adev);
1113 
1114 	return -EINVAL;
1115 }
1116 
1117 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1118 {
1119 	int i;
1120 
1121 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1122 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1123 	}
1124 
1125 	amdgpu_dm_audio_fini(adev);
1126 
1127 	amdgpu_dm_destroy_drm_device(&adev->dm);
1128 
1129 #ifdef CONFIG_DRM_AMD_DC_HDCP
1130 	if (adev->dm.hdcp_workqueue) {
1131 		hdcp_destroy(adev->dm.hdcp_workqueue);
1132 		adev->dm.hdcp_workqueue = NULL;
1133 	}
1134 
1135 	if (adev->dm.dc)
1136 		dc_deinit_callbacks(adev->dm.dc);
1137 #endif
1138 	if (adev->dm.dc->ctx->dmub_srv) {
1139 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1140 		adev->dm.dc->ctx->dmub_srv = NULL;
1141 	}
1142 
1143 	if (adev->dm.dmub_bo)
1144 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1145 				      &adev->dm.dmub_bo_gpu_addr,
1146 				      &adev->dm.dmub_bo_cpu_addr);
1147 
1148 	/* DC Destroy TODO: Replace destroy DAL */
1149 	if (adev->dm.dc)
1150 		dc_destroy(&adev->dm.dc);
1151 	/*
1152 	 * TODO: pageflip, vlank interrupt
1153 	 *
1154 	 * amdgpu_dm_irq_fini(adev);
1155 	 */
1156 
1157 	if (adev->dm.cgs_device) {
1158 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1159 		adev->dm.cgs_device = NULL;
1160 	}
1161 	if (adev->dm.freesync_module) {
1162 		mod_freesync_destroy(adev->dm.freesync_module);
1163 		adev->dm.freesync_module = NULL;
1164 	}
1165 
1166 	mutex_destroy(&adev->dm.audio_lock);
1167 	mutex_destroy(&adev->dm.dc_lock);
1168 
1169 	return;
1170 }
1171 
1172 static int load_dmcu_fw(struct amdgpu_device *adev)
1173 {
1174 	const char *fw_name_dmcu = NULL;
1175 	int r;
1176 	const struct dmcu_firmware_header_v1_0 *hdr;
1177 
1178 	switch(adev->asic_type) {
1179 #if defined(CONFIG_DRM_AMD_DC_SI)
1180 	case CHIP_TAHITI:
1181 	case CHIP_PITCAIRN:
1182 	case CHIP_VERDE:
1183 	case CHIP_OLAND:
1184 #endif
1185 	case CHIP_BONAIRE:
1186 	case CHIP_HAWAII:
1187 	case CHIP_KAVERI:
1188 	case CHIP_KABINI:
1189 	case CHIP_MULLINS:
1190 	case CHIP_TONGA:
1191 	case CHIP_FIJI:
1192 	case CHIP_CARRIZO:
1193 	case CHIP_STONEY:
1194 	case CHIP_POLARIS11:
1195 	case CHIP_POLARIS10:
1196 	case CHIP_POLARIS12:
1197 	case CHIP_VEGAM:
1198 	case CHIP_VEGA10:
1199 	case CHIP_VEGA12:
1200 	case CHIP_VEGA20:
1201 	case CHIP_NAVI10:
1202 	case CHIP_NAVI14:
1203 	case CHIP_RENOIR:
1204 	case CHIP_SIENNA_CICHLID:
1205 	case CHIP_NAVY_FLOUNDER:
1206 	case CHIP_DIMGREY_CAVEFISH:
1207 	case CHIP_VANGOGH:
1208 		return 0;
1209 	case CHIP_NAVI12:
1210 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1211 		break;
1212 	case CHIP_RAVEN:
1213 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1214 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1215 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1216 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1217 		else
1218 			return 0;
1219 		break;
1220 	default:
1221 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1222 		return -EINVAL;
1223 	}
1224 
1225 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1226 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1227 		return 0;
1228 	}
1229 
1230 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1231 	if (r == -ENOENT) {
1232 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1233 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1234 		adev->dm.fw_dmcu = NULL;
1235 		return 0;
1236 	}
1237 	if (r) {
1238 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1239 			fw_name_dmcu);
1240 		return r;
1241 	}
1242 
1243 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1244 	if (r) {
1245 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1246 			fw_name_dmcu);
1247 		release_firmware(adev->dm.fw_dmcu);
1248 		adev->dm.fw_dmcu = NULL;
1249 		return r;
1250 	}
1251 
1252 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1253 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1254 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1255 	adev->firmware.fw_size +=
1256 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1257 
1258 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1259 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1260 	adev->firmware.fw_size +=
1261 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1262 
1263 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1264 
1265 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1266 
1267 	return 0;
1268 }
1269 
1270 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1271 {
1272 	struct amdgpu_device *adev = ctx;
1273 
1274 	return dm_read_reg(adev->dm.dc->ctx, address);
1275 }
1276 
1277 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1278 				     uint32_t value)
1279 {
1280 	struct amdgpu_device *adev = ctx;
1281 
1282 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1283 }
1284 
1285 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1286 {
1287 	struct dmub_srv_create_params create_params;
1288 	struct dmub_srv_region_params region_params;
1289 	struct dmub_srv_region_info region_info;
1290 	struct dmub_srv_fb_params fb_params;
1291 	struct dmub_srv_fb_info *fb_info;
1292 	struct dmub_srv *dmub_srv;
1293 	const struct dmcub_firmware_header_v1_0 *hdr;
1294 	const char *fw_name_dmub;
1295 	enum dmub_asic dmub_asic;
1296 	enum dmub_status status;
1297 	int r;
1298 
1299 	switch (adev->asic_type) {
1300 	case CHIP_RENOIR:
1301 		dmub_asic = DMUB_ASIC_DCN21;
1302 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1303 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1304 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1305 		break;
1306 	case CHIP_SIENNA_CICHLID:
1307 		dmub_asic = DMUB_ASIC_DCN30;
1308 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1309 		break;
1310 	case CHIP_NAVY_FLOUNDER:
1311 		dmub_asic = DMUB_ASIC_DCN30;
1312 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1313 		break;
1314 	case CHIP_VANGOGH:
1315 		dmub_asic = DMUB_ASIC_DCN301;
1316 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1317 		break;
1318 	case CHIP_DIMGREY_CAVEFISH:
1319 		dmub_asic = DMUB_ASIC_DCN302;
1320 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1321 		break;
1322 
1323 	default:
1324 		/* ASIC doesn't support DMUB. */
1325 		return 0;
1326 	}
1327 
1328 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1329 	if (r) {
1330 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1331 		return 0;
1332 	}
1333 
1334 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1335 	if (r) {
1336 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1337 		return 0;
1338 	}
1339 
1340 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1341 
1342 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1343 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1344 			AMDGPU_UCODE_ID_DMCUB;
1345 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1346 			adev->dm.dmub_fw;
1347 		adev->firmware.fw_size +=
1348 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1349 
1350 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1351 			 adev->dm.dmcub_fw_version);
1352 	}
1353 
1354 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1355 
1356 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1357 	dmub_srv = adev->dm.dmub_srv;
1358 
1359 	if (!dmub_srv) {
1360 		DRM_ERROR("Failed to allocate DMUB service!\n");
1361 		return -ENOMEM;
1362 	}
1363 
1364 	memset(&create_params, 0, sizeof(create_params));
1365 	create_params.user_ctx = adev;
1366 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1367 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1368 	create_params.asic = dmub_asic;
1369 
1370 	/* Create the DMUB service. */
1371 	status = dmub_srv_create(dmub_srv, &create_params);
1372 	if (status != DMUB_STATUS_OK) {
1373 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1374 		return -EINVAL;
1375 	}
1376 
1377 	/* Calculate the size of all the regions for the DMUB service. */
1378 	memset(&region_params, 0, sizeof(region_params));
1379 
1380 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1381 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1382 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1383 	region_params.vbios_size = adev->bios_size;
1384 	region_params.fw_bss_data = region_params.bss_data_size ?
1385 		adev->dm.dmub_fw->data +
1386 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1387 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1388 	region_params.fw_inst_const =
1389 		adev->dm.dmub_fw->data +
1390 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1391 		PSP_HEADER_BYTES;
1392 
1393 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1394 					   &region_info);
1395 
1396 	if (status != DMUB_STATUS_OK) {
1397 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1398 		return -EINVAL;
1399 	}
1400 
1401 	/*
1402 	 * Allocate a framebuffer based on the total size of all the regions.
1403 	 * TODO: Move this into GART.
1404 	 */
1405 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1406 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1407 				    &adev->dm.dmub_bo_gpu_addr,
1408 				    &adev->dm.dmub_bo_cpu_addr);
1409 	if (r)
1410 		return r;
1411 
1412 	/* Rebase the regions on the framebuffer address. */
1413 	memset(&fb_params, 0, sizeof(fb_params));
1414 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1415 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1416 	fb_params.region_info = &region_info;
1417 
1418 	adev->dm.dmub_fb_info =
1419 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1420 	fb_info = adev->dm.dmub_fb_info;
1421 
1422 	if (!fb_info) {
1423 		DRM_ERROR(
1424 			"Failed to allocate framebuffer info for DMUB service!\n");
1425 		return -ENOMEM;
1426 	}
1427 
1428 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1429 	if (status != DMUB_STATUS_OK) {
1430 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1431 		return -EINVAL;
1432 	}
1433 
1434 	return 0;
1435 }
1436 
1437 static int dm_sw_init(void *handle)
1438 {
1439 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1440 	int r;
1441 
1442 	r = dm_dmub_sw_init(adev);
1443 	if (r)
1444 		return r;
1445 
1446 	return load_dmcu_fw(adev);
1447 }
1448 
1449 static int dm_sw_fini(void *handle)
1450 {
1451 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1452 
1453 	kfree(adev->dm.dmub_fb_info);
1454 	adev->dm.dmub_fb_info = NULL;
1455 
1456 	if (adev->dm.dmub_srv) {
1457 		dmub_srv_destroy(adev->dm.dmub_srv);
1458 		adev->dm.dmub_srv = NULL;
1459 	}
1460 
1461 	release_firmware(adev->dm.dmub_fw);
1462 	adev->dm.dmub_fw = NULL;
1463 
1464 	release_firmware(adev->dm.fw_dmcu);
1465 	adev->dm.fw_dmcu = NULL;
1466 
1467 	return 0;
1468 }
1469 
1470 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1471 {
1472 	struct amdgpu_dm_connector *aconnector;
1473 	struct drm_connector *connector;
1474 	struct drm_connector_list_iter iter;
1475 	int ret = 0;
1476 
1477 	drm_connector_list_iter_begin(dev, &iter);
1478 	drm_for_each_connector_iter(connector, &iter) {
1479 		aconnector = to_amdgpu_dm_connector(connector);
1480 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1481 		    aconnector->mst_mgr.aux) {
1482 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1483 					 aconnector,
1484 					 aconnector->base.base.id);
1485 
1486 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1487 			if (ret < 0) {
1488 				DRM_ERROR("DM_MST: Failed to start MST\n");
1489 				aconnector->dc_link->type =
1490 					dc_connection_single;
1491 				break;
1492 			}
1493 		}
1494 	}
1495 	drm_connector_list_iter_end(&iter);
1496 
1497 	return ret;
1498 }
1499 
1500 static int dm_late_init(void *handle)
1501 {
1502 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1503 
1504 	struct dmcu_iram_parameters params;
1505 	unsigned int linear_lut[16];
1506 	int i;
1507 	struct dmcu *dmcu = NULL;
1508 	bool ret = true;
1509 
1510 	dmcu = adev->dm.dc->res_pool->dmcu;
1511 
1512 	for (i = 0; i < 16; i++)
1513 		linear_lut[i] = 0xFFFF * i / 15;
1514 
1515 	params.set = 0;
1516 	params.backlight_ramping_start = 0xCCCC;
1517 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1518 	params.backlight_lut_array_size = 16;
1519 	params.backlight_lut_array = linear_lut;
1520 
1521 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1522 	 * 0xFFFF x 0.01 = 0x28F
1523 	 */
1524 	params.min_abm_backlight = 0x28F;
1525 
1526 	/* In the case where abm is implemented on dmcub,
1527 	 * dmcu object will be null.
1528 	 * ABM 2.4 and up are implemented on dmcub.
1529 	 */
1530 	if (dmcu)
1531 		ret = dmcu_load_iram(dmcu, params);
1532 	else if (adev->dm.dc->ctx->dmub_srv)
1533 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1534 
1535 	if (!ret)
1536 		return -EINVAL;
1537 
1538 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1539 }
1540 
1541 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1542 {
1543 	struct amdgpu_dm_connector *aconnector;
1544 	struct drm_connector *connector;
1545 	struct drm_connector_list_iter iter;
1546 	struct drm_dp_mst_topology_mgr *mgr;
1547 	int ret;
1548 	bool need_hotplug = false;
1549 
1550 	drm_connector_list_iter_begin(dev, &iter);
1551 	drm_for_each_connector_iter(connector, &iter) {
1552 		aconnector = to_amdgpu_dm_connector(connector);
1553 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1554 		    aconnector->mst_port)
1555 			continue;
1556 
1557 		mgr = &aconnector->mst_mgr;
1558 
1559 		if (suspend) {
1560 			drm_dp_mst_topology_mgr_suspend(mgr);
1561 		} else {
1562 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1563 			if (ret < 0) {
1564 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1565 				need_hotplug = true;
1566 			}
1567 		}
1568 	}
1569 	drm_connector_list_iter_end(&iter);
1570 
1571 	if (need_hotplug)
1572 		drm_kms_helper_hotplug_event(dev);
1573 }
1574 
1575 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1576 {
1577 	struct smu_context *smu = &adev->smu;
1578 	int ret = 0;
1579 
1580 	if (!is_support_sw_smu(adev))
1581 		return 0;
1582 
1583 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1584 	 * on window driver dc implementation.
1585 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1586 	 * should be passed to smu during boot up and resume from s3.
1587 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1588 	 * dcn20_resource_construct
1589 	 * then call pplib functions below to pass the settings to smu:
1590 	 * smu_set_watermarks_for_clock_ranges
1591 	 * smu_set_watermarks_table
1592 	 * navi10_set_watermarks_table
1593 	 * smu_write_watermarks_table
1594 	 *
1595 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1596 	 * dc has implemented different flow for window driver:
1597 	 * dc_hardware_init / dc_set_power_state
1598 	 * dcn10_init_hw
1599 	 * notify_wm_ranges
1600 	 * set_wm_ranges
1601 	 * -- Linux
1602 	 * smu_set_watermarks_for_clock_ranges
1603 	 * renoir_set_watermarks_table
1604 	 * smu_write_watermarks_table
1605 	 *
1606 	 * For Linux,
1607 	 * dc_hardware_init -> amdgpu_dm_init
1608 	 * dc_set_power_state --> dm_resume
1609 	 *
1610 	 * therefore, this function apply to navi10/12/14 but not Renoir
1611 	 * *
1612 	 */
1613 	switch(adev->asic_type) {
1614 	case CHIP_NAVI10:
1615 	case CHIP_NAVI14:
1616 	case CHIP_NAVI12:
1617 		break;
1618 	default:
1619 		return 0;
1620 	}
1621 
1622 	ret = smu_write_watermarks_table(smu);
1623 	if (ret) {
1624 		DRM_ERROR("Failed to update WMTABLE!\n");
1625 		return ret;
1626 	}
1627 
1628 	return 0;
1629 }
1630 
1631 /**
1632  * dm_hw_init() - Initialize DC device
1633  * @handle: The base driver device containing the amdgpu_dm device.
1634  *
1635  * Initialize the &struct amdgpu_display_manager device. This involves calling
1636  * the initializers of each DM component, then populating the struct with them.
1637  *
1638  * Although the function implies hardware initialization, both hardware and
1639  * software are initialized here. Splitting them out to their relevant init
1640  * hooks is a future TODO item.
1641  *
1642  * Some notable things that are initialized here:
1643  *
1644  * - Display Core, both software and hardware
1645  * - DC modules that we need (freesync and color management)
1646  * - DRM software states
1647  * - Interrupt sources and handlers
1648  * - Vblank support
1649  * - Debug FS entries, if enabled
1650  */
1651 static int dm_hw_init(void *handle)
1652 {
1653 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1654 	/* Create DAL display manager */
1655 	amdgpu_dm_init(adev);
1656 	amdgpu_dm_hpd_init(adev);
1657 
1658 	return 0;
1659 }
1660 
1661 /**
1662  * dm_hw_fini() - Teardown DC device
1663  * @handle: The base driver device containing the amdgpu_dm device.
1664  *
1665  * Teardown components within &struct amdgpu_display_manager that require
1666  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1667  * were loaded. Also flush IRQ workqueues and disable them.
1668  */
1669 static int dm_hw_fini(void *handle)
1670 {
1671 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1672 
1673 	amdgpu_dm_hpd_fini(adev);
1674 
1675 	amdgpu_dm_irq_fini(adev);
1676 	amdgpu_dm_fini(adev);
1677 	return 0;
1678 }
1679 
1680 
1681 static int dm_enable_vblank(struct drm_crtc *crtc);
1682 static void dm_disable_vblank(struct drm_crtc *crtc);
1683 
1684 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1685 				 struct dc_state *state, bool enable)
1686 {
1687 	enum dc_irq_source irq_source;
1688 	struct amdgpu_crtc *acrtc;
1689 	int rc = -EBUSY;
1690 	int i = 0;
1691 
1692 	for (i = 0; i < state->stream_count; i++) {
1693 		acrtc = get_crtc_by_otg_inst(
1694 				adev, state->stream_status[i].primary_otg_inst);
1695 
1696 		if (acrtc && state->stream_status[i].plane_count != 0) {
1697 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1698 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1699 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1700 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1701 			if (rc)
1702 				DRM_WARN("Failed to %s pflip interrupts\n",
1703 					 enable ? "enable" : "disable");
1704 
1705 			if (enable) {
1706 				rc = dm_enable_vblank(&acrtc->base);
1707 				if (rc)
1708 					DRM_WARN("Failed to enable vblank interrupts\n");
1709 			} else {
1710 				dm_disable_vblank(&acrtc->base);
1711 			}
1712 
1713 		}
1714 	}
1715 
1716 }
1717 
1718 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1719 {
1720 	struct dc_state *context = NULL;
1721 	enum dc_status res = DC_ERROR_UNEXPECTED;
1722 	int i;
1723 	struct dc_stream_state *del_streams[MAX_PIPES];
1724 	int del_streams_count = 0;
1725 
1726 	memset(del_streams, 0, sizeof(del_streams));
1727 
1728 	context = dc_create_state(dc);
1729 	if (context == NULL)
1730 		goto context_alloc_fail;
1731 
1732 	dc_resource_state_copy_construct_current(dc, context);
1733 
1734 	/* First remove from context all streams */
1735 	for (i = 0; i < context->stream_count; i++) {
1736 		struct dc_stream_state *stream = context->streams[i];
1737 
1738 		del_streams[del_streams_count++] = stream;
1739 	}
1740 
1741 	/* Remove all planes for removed streams and then remove the streams */
1742 	for (i = 0; i < del_streams_count; i++) {
1743 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1744 			res = DC_FAIL_DETACH_SURFACES;
1745 			goto fail;
1746 		}
1747 
1748 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1749 		if (res != DC_OK)
1750 			goto fail;
1751 	}
1752 
1753 
1754 	res = dc_validate_global_state(dc, context, false);
1755 
1756 	if (res != DC_OK) {
1757 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1758 		goto fail;
1759 	}
1760 
1761 	res = dc_commit_state(dc, context);
1762 
1763 fail:
1764 	dc_release_state(context);
1765 
1766 context_alloc_fail:
1767 	return res;
1768 }
1769 
1770 static int dm_suspend(void *handle)
1771 {
1772 	struct amdgpu_device *adev = handle;
1773 	struct amdgpu_display_manager *dm = &adev->dm;
1774 	int ret = 0;
1775 
1776 	if (amdgpu_in_reset(adev)) {
1777 		mutex_lock(&dm->dc_lock);
1778 
1779 #if defined(CONFIG_DRM_AMD_DC_DCN)
1780 		dc_allow_idle_optimizations(adev->dm.dc, false);
1781 #endif
1782 
1783 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1784 
1785 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1786 
1787 		amdgpu_dm_commit_zero_streams(dm->dc);
1788 
1789 		amdgpu_dm_irq_suspend(adev);
1790 
1791 		return ret;
1792 	}
1793 
1794 	WARN_ON(adev->dm.cached_state);
1795 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1796 
1797 	s3_handle_mst(adev_to_drm(adev), true);
1798 
1799 	amdgpu_dm_irq_suspend(adev);
1800 
1801 
1802 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1803 
1804 	return 0;
1805 }
1806 
1807 static struct amdgpu_dm_connector *
1808 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1809 					     struct drm_crtc *crtc)
1810 {
1811 	uint32_t i;
1812 	struct drm_connector_state *new_con_state;
1813 	struct drm_connector *connector;
1814 	struct drm_crtc *crtc_from_state;
1815 
1816 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1817 		crtc_from_state = new_con_state->crtc;
1818 
1819 		if (crtc_from_state == crtc)
1820 			return to_amdgpu_dm_connector(connector);
1821 	}
1822 
1823 	return NULL;
1824 }
1825 
1826 static void emulated_link_detect(struct dc_link *link)
1827 {
1828 	struct dc_sink_init_data sink_init_data = { 0 };
1829 	struct display_sink_capability sink_caps = { 0 };
1830 	enum dc_edid_status edid_status;
1831 	struct dc_context *dc_ctx = link->ctx;
1832 	struct dc_sink *sink = NULL;
1833 	struct dc_sink *prev_sink = NULL;
1834 
1835 	link->type = dc_connection_none;
1836 	prev_sink = link->local_sink;
1837 
1838 	if (prev_sink)
1839 		dc_sink_release(prev_sink);
1840 
1841 	switch (link->connector_signal) {
1842 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1843 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1844 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1845 		break;
1846 	}
1847 
1848 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1849 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1850 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1851 		break;
1852 	}
1853 
1854 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1855 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1856 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1857 		break;
1858 	}
1859 
1860 	case SIGNAL_TYPE_LVDS: {
1861 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1862 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1863 		break;
1864 	}
1865 
1866 	case SIGNAL_TYPE_EDP: {
1867 		sink_caps.transaction_type =
1868 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1869 		sink_caps.signal = SIGNAL_TYPE_EDP;
1870 		break;
1871 	}
1872 
1873 	case SIGNAL_TYPE_DISPLAY_PORT: {
1874 		sink_caps.transaction_type =
1875 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1876 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1877 		break;
1878 	}
1879 
1880 	default:
1881 		DC_ERROR("Invalid connector type! signal:%d\n",
1882 			link->connector_signal);
1883 		return;
1884 	}
1885 
1886 	sink_init_data.link = link;
1887 	sink_init_data.sink_signal = sink_caps.signal;
1888 
1889 	sink = dc_sink_create(&sink_init_data);
1890 	if (!sink) {
1891 		DC_ERROR("Failed to create sink!\n");
1892 		return;
1893 	}
1894 
1895 	/* dc_sink_create returns a new reference */
1896 	link->local_sink = sink;
1897 
1898 	edid_status = dm_helpers_read_local_edid(
1899 			link->ctx,
1900 			link,
1901 			sink);
1902 
1903 	if (edid_status != EDID_OK)
1904 		DC_ERROR("Failed to read EDID");
1905 
1906 }
1907 
1908 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1909 				     struct amdgpu_display_manager *dm)
1910 {
1911 	struct {
1912 		struct dc_surface_update surface_updates[MAX_SURFACES];
1913 		struct dc_plane_info plane_infos[MAX_SURFACES];
1914 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1915 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1916 		struct dc_stream_update stream_update;
1917 	} * bundle;
1918 	int k, m;
1919 
1920 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1921 
1922 	if (!bundle) {
1923 		dm_error("Failed to allocate update bundle\n");
1924 		goto cleanup;
1925 	}
1926 
1927 	for (k = 0; k < dc_state->stream_count; k++) {
1928 		bundle->stream_update.stream = dc_state->streams[k];
1929 
1930 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1931 			bundle->surface_updates[m].surface =
1932 				dc_state->stream_status->plane_states[m];
1933 			bundle->surface_updates[m].surface->force_full_update =
1934 				true;
1935 		}
1936 		dc_commit_updates_for_stream(
1937 			dm->dc, bundle->surface_updates,
1938 			dc_state->stream_status->plane_count,
1939 			dc_state->streams[k], &bundle->stream_update);
1940 	}
1941 
1942 cleanup:
1943 	kfree(bundle);
1944 
1945 	return;
1946 }
1947 
1948 static void dm_set_dpms_off(struct dc_link *link)
1949 {
1950 	struct dc_stream_state *stream_state;
1951 	struct amdgpu_dm_connector *aconnector = link->priv;
1952 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1953 	struct dc_stream_update stream_update;
1954 	bool dpms_off = true;
1955 
1956 	memset(&stream_update, 0, sizeof(stream_update));
1957 	stream_update.dpms_off = &dpms_off;
1958 
1959 	mutex_lock(&adev->dm.dc_lock);
1960 	stream_state = dc_stream_find_from_link(link);
1961 
1962 	if (stream_state == NULL) {
1963 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1964 		mutex_unlock(&adev->dm.dc_lock);
1965 		return;
1966 	}
1967 
1968 	stream_update.stream = stream_state;
1969 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1970 				     stream_state, &stream_update);
1971 	mutex_unlock(&adev->dm.dc_lock);
1972 }
1973 
1974 static int dm_resume(void *handle)
1975 {
1976 	struct amdgpu_device *adev = handle;
1977 	struct drm_device *ddev = adev_to_drm(adev);
1978 	struct amdgpu_display_manager *dm = &adev->dm;
1979 	struct amdgpu_dm_connector *aconnector;
1980 	struct drm_connector *connector;
1981 	struct drm_connector_list_iter iter;
1982 	struct drm_crtc *crtc;
1983 	struct drm_crtc_state *new_crtc_state;
1984 	struct dm_crtc_state *dm_new_crtc_state;
1985 	struct drm_plane *plane;
1986 	struct drm_plane_state *new_plane_state;
1987 	struct dm_plane_state *dm_new_plane_state;
1988 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1989 	enum dc_connection_type new_connection_type = dc_connection_none;
1990 	struct dc_state *dc_state;
1991 	int i, r, j;
1992 
1993 	if (amdgpu_in_reset(adev)) {
1994 		dc_state = dm->cached_dc_state;
1995 
1996 		r = dm_dmub_hw_init(adev);
1997 		if (r)
1998 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1999 
2000 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2001 		dc_resume(dm->dc);
2002 
2003 		amdgpu_dm_irq_resume_early(adev);
2004 
2005 		for (i = 0; i < dc_state->stream_count; i++) {
2006 			dc_state->streams[i]->mode_changed = true;
2007 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2008 				dc_state->stream_status->plane_states[j]->update_flags.raw
2009 					= 0xffffffff;
2010 			}
2011 		}
2012 
2013 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2014 
2015 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2016 
2017 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2018 
2019 		dc_release_state(dm->cached_dc_state);
2020 		dm->cached_dc_state = NULL;
2021 
2022 		amdgpu_dm_irq_resume_late(adev);
2023 
2024 		mutex_unlock(&dm->dc_lock);
2025 
2026 		return 0;
2027 	}
2028 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2029 	dc_release_state(dm_state->context);
2030 	dm_state->context = dc_create_state(dm->dc);
2031 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2032 	dc_resource_state_construct(dm->dc, dm_state->context);
2033 
2034 	/* Before powering on DC we need to re-initialize DMUB. */
2035 	r = dm_dmub_hw_init(adev);
2036 	if (r)
2037 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2038 
2039 	/* power on hardware */
2040 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2041 
2042 	/* program HPD filter */
2043 	dc_resume(dm->dc);
2044 
2045 	/*
2046 	 * early enable HPD Rx IRQ, should be done before set mode as short
2047 	 * pulse interrupts are used for MST
2048 	 */
2049 	amdgpu_dm_irq_resume_early(adev);
2050 
2051 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2052 	s3_handle_mst(ddev, false);
2053 
2054 	/* Do detection*/
2055 	drm_connector_list_iter_begin(ddev, &iter);
2056 	drm_for_each_connector_iter(connector, &iter) {
2057 		aconnector = to_amdgpu_dm_connector(connector);
2058 
2059 		/*
2060 		 * this is the case when traversing through already created
2061 		 * MST connectors, should be skipped
2062 		 */
2063 		if (aconnector->mst_port)
2064 			continue;
2065 
2066 		mutex_lock(&aconnector->hpd_lock);
2067 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2068 			DRM_ERROR("KMS: Failed to detect connector\n");
2069 
2070 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2071 			emulated_link_detect(aconnector->dc_link);
2072 		else
2073 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2074 
2075 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2076 			aconnector->fake_enable = false;
2077 
2078 		if (aconnector->dc_sink)
2079 			dc_sink_release(aconnector->dc_sink);
2080 		aconnector->dc_sink = NULL;
2081 		amdgpu_dm_update_connector_after_detect(aconnector);
2082 		mutex_unlock(&aconnector->hpd_lock);
2083 	}
2084 	drm_connector_list_iter_end(&iter);
2085 
2086 	/* Force mode set in atomic commit */
2087 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2088 		new_crtc_state->active_changed = true;
2089 
2090 	/*
2091 	 * atomic_check is expected to create the dc states. We need to release
2092 	 * them here, since they were duplicated as part of the suspend
2093 	 * procedure.
2094 	 */
2095 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2096 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2097 		if (dm_new_crtc_state->stream) {
2098 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2099 			dc_stream_release(dm_new_crtc_state->stream);
2100 			dm_new_crtc_state->stream = NULL;
2101 		}
2102 	}
2103 
2104 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2105 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2106 		if (dm_new_plane_state->dc_state) {
2107 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2108 			dc_plane_state_release(dm_new_plane_state->dc_state);
2109 			dm_new_plane_state->dc_state = NULL;
2110 		}
2111 	}
2112 
2113 	drm_atomic_helper_resume(ddev, dm->cached_state);
2114 
2115 	dm->cached_state = NULL;
2116 
2117 	amdgpu_dm_irq_resume_late(adev);
2118 
2119 	amdgpu_dm_smu_write_watermarks_table(adev);
2120 
2121 	return 0;
2122 }
2123 
2124 /**
2125  * DOC: DM Lifecycle
2126  *
2127  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2128  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2129  * the base driver's device list to be initialized and torn down accordingly.
2130  *
2131  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2132  */
2133 
2134 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2135 	.name = "dm",
2136 	.early_init = dm_early_init,
2137 	.late_init = dm_late_init,
2138 	.sw_init = dm_sw_init,
2139 	.sw_fini = dm_sw_fini,
2140 	.hw_init = dm_hw_init,
2141 	.hw_fini = dm_hw_fini,
2142 	.suspend = dm_suspend,
2143 	.resume = dm_resume,
2144 	.is_idle = dm_is_idle,
2145 	.wait_for_idle = dm_wait_for_idle,
2146 	.check_soft_reset = dm_check_soft_reset,
2147 	.soft_reset = dm_soft_reset,
2148 	.set_clockgating_state = dm_set_clockgating_state,
2149 	.set_powergating_state = dm_set_powergating_state,
2150 };
2151 
2152 const struct amdgpu_ip_block_version dm_ip_block =
2153 {
2154 	.type = AMD_IP_BLOCK_TYPE_DCE,
2155 	.major = 1,
2156 	.minor = 0,
2157 	.rev = 0,
2158 	.funcs = &amdgpu_dm_funcs,
2159 };
2160 
2161 
2162 /**
2163  * DOC: atomic
2164  *
2165  * *WIP*
2166  */
2167 
2168 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2169 	.fb_create = amdgpu_display_user_framebuffer_create,
2170 	.get_format_info = amd_get_format_info,
2171 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2172 	.atomic_check = amdgpu_dm_atomic_check,
2173 	.atomic_commit = drm_atomic_helper_commit,
2174 };
2175 
2176 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2177 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2178 };
2179 
2180 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2181 {
2182 	u32 max_cll, min_cll, max, min, q, r;
2183 	struct amdgpu_dm_backlight_caps *caps;
2184 	struct amdgpu_display_manager *dm;
2185 	struct drm_connector *conn_base;
2186 	struct amdgpu_device *adev;
2187 	struct dc_link *link = NULL;
2188 	static const u8 pre_computed_values[] = {
2189 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2190 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2191 
2192 	if (!aconnector || !aconnector->dc_link)
2193 		return;
2194 
2195 	link = aconnector->dc_link;
2196 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2197 		return;
2198 
2199 	conn_base = &aconnector->base;
2200 	adev = drm_to_adev(conn_base->dev);
2201 	dm = &adev->dm;
2202 	caps = &dm->backlight_caps;
2203 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2204 	caps->aux_support = false;
2205 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2206 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2207 
2208 	if (caps->ext_caps->bits.oled == 1 ||
2209 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2210 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2211 		caps->aux_support = true;
2212 
2213 	/* From the specification (CTA-861-G), for calculating the maximum
2214 	 * luminance we need to use:
2215 	 *	Luminance = 50*2**(CV/32)
2216 	 * Where CV is a one-byte value.
2217 	 * For calculating this expression we may need float point precision;
2218 	 * to avoid this complexity level, we take advantage that CV is divided
2219 	 * by a constant. From the Euclids division algorithm, we know that CV
2220 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2221 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2222 	 * need to pre-compute the value of r/32. For pre-computing the values
2223 	 * We just used the following Ruby line:
2224 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2225 	 * The results of the above expressions can be verified at
2226 	 * pre_computed_values.
2227 	 */
2228 	q = max_cll >> 5;
2229 	r = max_cll % 32;
2230 	max = (1 << q) * pre_computed_values[r];
2231 
2232 	// min luminance: maxLum * (CV/255)^2 / 100
2233 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2234 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2235 
2236 	caps->aux_max_input_signal = max;
2237 	caps->aux_min_input_signal = min;
2238 }
2239 
2240 void amdgpu_dm_update_connector_after_detect(
2241 		struct amdgpu_dm_connector *aconnector)
2242 {
2243 	struct drm_connector *connector = &aconnector->base;
2244 	struct drm_device *dev = connector->dev;
2245 	struct dc_sink *sink;
2246 
2247 	/* MST handled by drm_mst framework */
2248 	if (aconnector->mst_mgr.mst_state == true)
2249 		return;
2250 
2251 	sink = aconnector->dc_link->local_sink;
2252 	if (sink)
2253 		dc_sink_retain(sink);
2254 
2255 	/*
2256 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2257 	 * the connector sink is set to either fake or physical sink depends on link status.
2258 	 * Skip if already done during boot.
2259 	 */
2260 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2261 			&& aconnector->dc_em_sink) {
2262 
2263 		/*
2264 		 * For S3 resume with headless use eml_sink to fake stream
2265 		 * because on resume connector->sink is set to NULL
2266 		 */
2267 		mutex_lock(&dev->mode_config.mutex);
2268 
2269 		if (sink) {
2270 			if (aconnector->dc_sink) {
2271 				amdgpu_dm_update_freesync_caps(connector, NULL);
2272 				/*
2273 				 * retain and release below are used to
2274 				 * bump up refcount for sink because the link doesn't point
2275 				 * to it anymore after disconnect, so on next crtc to connector
2276 				 * reshuffle by UMD we will get into unwanted dc_sink release
2277 				 */
2278 				dc_sink_release(aconnector->dc_sink);
2279 			}
2280 			aconnector->dc_sink = sink;
2281 			dc_sink_retain(aconnector->dc_sink);
2282 			amdgpu_dm_update_freesync_caps(connector,
2283 					aconnector->edid);
2284 		} else {
2285 			amdgpu_dm_update_freesync_caps(connector, NULL);
2286 			if (!aconnector->dc_sink) {
2287 				aconnector->dc_sink = aconnector->dc_em_sink;
2288 				dc_sink_retain(aconnector->dc_sink);
2289 			}
2290 		}
2291 
2292 		mutex_unlock(&dev->mode_config.mutex);
2293 
2294 		if (sink)
2295 			dc_sink_release(sink);
2296 		return;
2297 	}
2298 
2299 	/*
2300 	 * TODO: temporary guard to look for proper fix
2301 	 * if this sink is MST sink, we should not do anything
2302 	 */
2303 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2304 		dc_sink_release(sink);
2305 		return;
2306 	}
2307 
2308 	if (aconnector->dc_sink == sink) {
2309 		/*
2310 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2311 		 * Do nothing!!
2312 		 */
2313 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2314 				aconnector->connector_id);
2315 		if (sink)
2316 			dc_sink_release(sink);
2317 		return;
2318 	}
2319 
2320 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2321 		aconnector->connector_id, aconnector->dc_sink, sink);
2322 
2323 	mutex_lock(&dev->mode_config.mutex);
2324 
2325 	/*
2326 	 * 1. Update status of the drm connector
2327 	 * 2. Send an event and let userspace tell us what to do
2328 	 */
2329 	if (sink) {
2330 		/*
2331 		 * TODO: check if we still need the S3 mode update workaround.
2332 		 * If yes, put it here.
2333 		 */
2334 		if (aconnector->dc_sink) {
2335 			amdgpu_dm_update_freesync_caps(connector, NULL);
2336 			dc_sink_release(aconnector->dc_sink);
2337 		}
2338 
2339 		aconnector->dc_sink = sink;
2340 		dc_sink_retain(aconnector->dc_sink);
2341 		if (sink->dc_edid.length == 0) {
2342 			aconnector->edid = NULL;
2343 			if (aconnector->dc_link->aux_mode) {
2344 				drm_dp_cec_unset_edid(
2345 					&aconnector->dm_dp_aux.aux);
2346 			}
2347 		} else {
2348 			aconnector->edid =
2349 				(struct edid *)sink->dc_edid.raw_edid;
2350 
2351 			drm_connector_update_edid_property(connector,
2352 							   aconnector->edid);
2353 			if (aconnector->dc_link->aux_mode)
2354 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2355 						    aconnector->edid);
2356 		}
2357 
2358 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2359 		update_connector_ext_caps(aconnector);
2360 	} else {
2361 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2362 		amdgpu_dm_update_freesync_caps(connector, NULL);
2363 		drm_connector_update_edid_property(connector, NULL);
2364 		aconnector->num_modes = 0;
2365 		dc_sink_release(aconnector->dc_sink);
2366 		aconnector->dc_sink = NULL;
2367 		aconnector->edid = NULL;
2368 #ifdef CONFIG_DRM_AMD_DC_HDCP
2369 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2370 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2371 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2372 #endif
2373 	}
2374 
2375 	mutex_unlock(&dev->mode_config.mutex);
2376 
2377 	update_subconnector_property(aconnector);
2378 
2379 	if (sink)
2380 		dc_sink_release(sink);
2381 }
2382 
2383 static void handle_hpd_irq(void *param)
2384 {
2385 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2386 	struct drm_connector *connector = &aconnector->base;
2387 	struct drm_device *dev = connector->dev;
2388 	enum dc_connection_type new_connection_type = dc_connection_none;
2389 #ifdef CONFIG_DRM_AMD_DC_HDCP
2390 	struct amdgpu_device *adev = drm_to_adev(dev);
2391 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2392 #endif
2393 
2394 	/*
2395 	 * In case of failure or MST no need to update connector status or notify the OS
2396 	 * since (for MST case) MST does this in its own context.
2397 	 */
2398 	mutex_lock(&aconnector->hpd_lock);
2399 
2400 #ifdef CONFIG_DRM_AMD_DC_HDCP
2401 	if (adev->dm.hdcp_workqueue) {
2402 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2403 		dm_con_state->update_hdcp = true;
2404 	}
2405 #endif
2406 	if (aconnector->fake_enable)
2407 		aconnector->fake_enable = false;
2408 
2409 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2410 		DRM_ERROR("KMS: Failed to detect connector\n");
2411 
2412 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2413 		emulated_link_detect(aconnector->dc_link);
2414 
2415 
2416 		drm_modeset_lock_all(dev);
2417 		dm_restore_drm_connector_state(dev, connector);
2418 		drm_modeset_unlock_all(dev);
2419 
2420 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2421 			drm_kms_helper_hotplug_event(dev);
2422 
2423 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2424 		if (new_connection_type == dc_connection_none &&
2425 		    aconnector->dc_link->type == dc_connection_none)
2426 			dm_set_dpms_off(aconnector->dc_link);
2427 
2428 		amdgpu_dm_update_connector_after_detect(aconnector);
2429 
2430 		drm_modeset_lock_all(dev);
2431 		dm_restore_drm_connector_state(dev, connector);
2432 		drm_modeset_unlock_all(dev);
2433 
2434 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2435 			drm_kms_helper_hotplug_event(dev);
2436 	}
2437 	mutex_unlock(&aconnector->hpd_lock);
2438 
2439 }
2440 
2441 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2442 {
2443 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2444 	uint8_t dret;
2445 	bool new_irq_handled = false;
2446 	int dpcd_addr;
2447 	int dpcd_bytes_to_read;
2448 
2449 	const int max_process_count = 30;
2450 	int process_count = 0;
2451 
2452 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2453 
2454 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2455 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2456 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2457 		dpcd_addr = DP_SINK_COUNT;
2458 	} else {
2459 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2460 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2461 		dpcd_addr = DP_SINK_COUNT_ESI;
2462 	}
2463 
2464 	dret = drm_dp_dpcd_read(
2465 		&aconnector->dm_dp_aux.aux,
2466 		dpcd_addr,
2467 		esi,
2468 		dpcd_bytes_to_read);
2469 
2470 	while (dret == dpcd_bytes_to_read &&
2471 		process_count < max_process_count) {
2472 		uint8_t retry;
2473 		dret = 0;
2474 
2475 		process_count++;
2476 
2477 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2478 		/* handle HPD short pulse irq */
2479 		if (aconnector->mst_mgr.mst_state)
2480 			drm_dp_mst_hpd_irq(
2481 				&aconnector->mst_mgr,
2482 				esi,
2483 				&new_irq_handled);
2484 
2485 		if (new_irq_handled) {
2486 			/* ACK at DPCD to notify down stream */
2487 			const int ack_dpcd_bytes_to_write =
2488 				dpcd_bytes_to_read - 1;
2489 
2490 			for (retry = 0; retry < 3; retry++) {
2491 				uint8_t wret;
2492 
2493 				wret = drm_dp_dpcd_write(
2494 					&aconnector->dm_dp_aux.aux,
2495 					dpcd_addr + 1,
2496 					&esi[1],
2497 					ack_dpcd_bytes_to_write);
2498 				if (wret == ack_dpcd_bytes_to_write)
2499 					break;
2500 			}
2501 
2502 			/* check if there is new irq to be handled */
2503 			dret = drm_dp_dpcd_read(
2504 				&aconnector->dm_dp_aux.aux,
2505 				dpcd_addr,
2506 				esi,
2507 				dpcd_bytes_to_read);
2508 
2509 			new_irq_handled = false;
2510 		} else {
2511 			break;
2512 		}
2513 	}
2514 
2515 	if (process_count == max_process_count)
2516 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2517 }
2518 
2519 static void handle_hpd_rx_irq(void *param)
2520 {
2521 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2522 	struct drm_connector *connector = &aconnector->base;
2523 	struct drm_device *dev = connector->dev;
2524 	struct dc_link *dc_link = aconnector->dc_link;
2525 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2526 	bool result = false;
2527 	enum dc_connection_type new_connection_type = dc_connection_none;
2528 	struct amdgpu_device *adev = drm_to_adev(dev);
2529 	union hpd_irq_data hpd_irq_data;
2530 
2531 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2532 
2533 	/*
2534 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2535 	 * conflict, after implement i2c helper, this mutex should be
2536 	 * retired.
2537 	 */
2538 	if (dc_link->type != dc_connection_mst_branch)
2539 		mutex_lock(&aconnector->hpd_lock);
2540 
2541 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2542 
2543 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2544 		(dc_link->type == dc_connection_mst_branch)) {
2545 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2546 			result = true;
2547 			dm_handle_hpd_rx_irq(aconnector);
2548 			goto out;
2549 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2550 			result = false;
2551 			dm_handle_hpd_rx_irq(aconnector);
2552 			goto out;
2553 		}
2554 	}
2555 
2556 	mutex_lock(&adev->dm.dc_lock);
2557 #ifdef CONFIG_DRM_AMD_DC_HDCP
2558 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2559 #else
2560 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2561 #endif
2562 	mutex_unlock(&adev->dm.dc_lock);
2563 
2564 out:
2565 	if (result && !is_mst_root_connector) {
2566 		/* Downstream Port status changed. */
2567 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2568 			DRM_ERROR("KMS: Failed to detect connector\n");
2569 
2570 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2571 			emulated_link_detect(dc_link);
2572 
2573 			if (aconnector->fake_enable)
2574 				aconnector->fake_enable = false;
2575 
2576 			amdgpu_dm_update_connector_after_detect(aconnector);
2577 
2578 
2579 			drm_modeset_lock_all(dev);
2580 			dm_restore_drm_connector_state(dev, connector);
2581 			drm_modeset_unlock_all(dev);
2582 
2583 			drm_kms_helper_hotplug_event(dev);
2584 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2585 
2586 			if (aconnector->fake_enable)
2587 				aconnector->fake_enable = false;
2588 
2589 			amdgpu_dm_update_connector_after_detect(aconnector);
2590 
2591 
2592 			drm_modeset_lock_all(dev);
2593 			dm_restore_drm_connector_state(dev, connector);
2594 			drm_modeset_unlock_all(dev);
2595 
2596 			drm_kms_helper_hotplug_event(dev);
2597 		}
2598 	}
2599 #ifdef CONFIG_DRM_AMD_DC_HDCP
2600 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2601 		if (adev->dm.hdcp_workqueue)
2602 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2603 	}
2604 #endif
2605 
2606 	if (dc_link->type != dc_connection_mst_branch) {
2607 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2608 		mutex_unlock(&aconnector->hpd_lock);
2609 	}
2610 }
2611 
2612 static void register_hpd_handlers(struct amdgpu_device *adev)
2613 {
2614 	struct drm_device *dev = adev_to_drm(adev);
2615 	struct drm_connector *connector;
2616 	struct amdgpu_dm_connector *aconnector;
2617 	const struct dc_link *dc_link;
2618 	struct dc_interrupt_params int_params = {0};
2619 
2620 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2621 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2622 
2623 	list_for_each_entry(connector,
2624 			&dev->mode_config.connector_list, head)	{
2625 
2626 		aconnector = to_amdgpu_dm_connector(connector);
2627 		dc_link = aconnector->dc_link;
2628 
2629 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2630 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2631 			int_params.irq_source = dc_link->irq_source_hpd;
2632 
2633 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2634 					handle_hpd_irq,
2635 					(void *) aconnector);
2636 		}
2637 
2638 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2639 
2640 			/* Also register for DP short pulse (hpd_rx). */
2641 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2642 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2643 
2644 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2645 					handle_hpd_rx_irq,
2646 					(void *) aconnector);
2647 		}
2648 	}
2649 }
2650 
2651 #if defined(CONFIG_DRM_AMD_DC_SI)
2652 /* Register IRQ sources and initialize IRQ callbacks */
2653 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2654 {
2655 	struct dc *dc = adev->dm.dc;
2656 	struct common_irq_params *c_irq_params;
2657 	struct dc_interrupt_params int_params = {0};
2658 	int r;
2659 	int i;
2660 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2661 
2662 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2663 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2664 
2665 	/*
2666 	 * Actions of amdgpu_irq_add_id():
2667 	 * 1. Register a set() function with base driver.
2668 	 *    Base driver will call set() function to enable/disable an
2669 	 *    interrupt in DC hardware.
2670 	 * 2. Register amdgpu_dm_irq_handler().
2671 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2672 	 *    coming from DC hardware.
2673 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2674 	 *    for acknowledging and handling. */
2675 
2676 	/* Use VBLANK interrupt */
2677 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2678 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2679 		if (r) {
2680 			DRM_ERROR("Failed to add crtc irq id!\n");
2681 			return r;
2682 		}
2683 
2684 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2685 		int_params.irq_source =
2686 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2687 
2688 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2689 
2690 		c_irq_params->adev = adev;
2691 		c_irq_params->irq_src = int_params.irq_source;
2692 
2693 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2694 				dm_crtc_high_irq, c_irq_params);
2695 	}
2696 
2697 	/* Use GRPH_PFLIP interrupt */
2698 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2699 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2700 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2701 		if (r) {
2702 			DRM_ERROR("Failed to add page flip irq id!\n");
2703 			return r;
2704 		}
2705 
2706 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2707 		int_params.irq_source =
2708 			dc_interrupt_to_irq_source(dc, i, 0);
2709 
2710 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2711 
2712 		c_irq_params->adev = adev;
2713 		c_irq_params->irq_src = int_params.irq_source;
2714 
2715 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2716 				dm_pflip_high_irq, c_irq_params);
2717 
2718 	}
2719 
2720 	/* HPD */
2721 	r = amdgpu_irq_add_id(adev, client_id,
2722 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2723 	if (r) {
2724 		DRM_ERROR("Failed to add hpd irq id!\n");
2725 		return r;
2726 	}
2727 
2728 	register_hpd_handlers(adev);
2729 
2730 	return 0;
2731 }
2732 #endif
2733 
2734 /* Register IRQ sources and initialize IRQ callbacks */
2735 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2736 {
2737 	struct dc *dc = adev->dm.dc;
2738 	struct common_irq_params *c_irq_params;
2739 	struct dc_interrupt_params int_params = {0};
2740 	int r;
2741 	int i;
2742 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2743 
2744 	if (adev->asic_type >= CHIP_VEGA10)
2745 		client_id = SOC15_IH_CLIENTID_DCE;
2746 
2747 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2748 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2749 
2750 	/*
2751 	 * Actions of amdgpu_irq_add_id():
2752 	 * 1. Register a set() function with base driver.
2753 	 *    Base driver will call set() function to enable/disable an
2754 	 *    interrupt in DC hardware.
2755 	 * 2. Register amdgpu_dm_irq_handler().
2756 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2757 	 *    coming from DC hardware.
2758 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2759 	 *    for acknowledging and handling. */
2760 
2761 	/* Use VBLANK interrupt */
2762 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2763 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2764 		if (r) {
2765 			DRM_ERROR("Failed to add crtc irq id!\n");
2766 			return r;
2767 		}
2768 
2769 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2770 		int_params.irq_source =
2771 			dc_interrupt_to_irq_source(dc, i, 0);
2772 
2773 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2774 
2775 		c_irq_params->adev = adev;
2776 		c_irq_params->irq_src = int_params.irq_source;
2777 
2778 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2779 				dm_crtc_high_irq, c_irq_params);
2780 	}
2781 
2782 	/* Use VUPDATE interrupt */
2783 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2784 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2785 		if (r) {
2786 			DRM_ERROR("Failed to add vupdate irq id!\n");
2787 			return r;
2788 		}
2789 
2790 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2791 		int_params.irq_source =
2792 			dc_interrupt_to_irq_source(dc, i, 0);
2793 
2794 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2795 
2796 		c_irq_params->adev = adev;
2797 		c_irq_params->irq_src = int_params.irq_source;
2798 
2799 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2800 				dm_vupdate_high_irq, c_irq_params);
2801 	}
2802 
2803 	/* Use GRPH_PFLIP interrupt */
2804 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2805 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2806 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2807 		if (r) {
2808 			DRM_ERROR("Failed to add page flip irq id!\n");
2809 			return r;
2810 		}
2811 
2812 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2813 		int_params.irq_source =
2814 			dc_interrupt_to_irq_source(dc, i, 0);
2815 
2816 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2817 
2818 		c_irq_params->adev = adev;
2819 		c_irq_params->irq_src = int_params.irq_source;
2820 
2821 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2822 				dm_pflip_high_irq, c_irq_params);
2823 
2824 	}
2825 
2826 	/* HPD */
2827 	r = amdgpu_irq_add_id(adev, client_id,
2828 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2829 	if (r) {
2830 		DRM_ERROR("Failed to add hpd irq id!\n");
2831 		return r;
2832 	}
2833 
2834 	register_hpd_handlers(adev);
2835 
2836 	return 0;
2837 }
2838 
2839 #if defined(CONFIG_DRM_AMD_DC_DCN)
2840 /* Register IRQ sources and initialize IRQ callbacks */
2841 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2842 {
2843 	struct dc *dc = adev->dm.dc;
2844 	struct common_irq_params *c_irq_params;
2845 	struct dc_interrupt_params int_params = {0};
2846 	int r;
2847 	int i;
2848 
2849 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2850 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2851 
2852 	/*
2853 	 * Actions of amdgpu_irq_add_id():
2854 	 * 1. Register a set() function with base driver.
2855 	 *    Base driver will call set() function to enable/disable an
2856 	 *    interrupt in DC hardware.
2857 	 * 2. Register amdgpu_dm_irq_handler().
2858 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2859 	 *    coming from DC hardware.
2860 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2861 	 *    for acknowledging and handling.
2862 	 */
2863 
2864 	/* Use VSTARTUP interrupt */
2865 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2866 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2867 			i++) {
2868 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2869 
2870 		if (r) {
2871 			DRM_ERROR("Failed to add crtc irq id!\n");
2872 			return r;
2873 		}
2874 
2875 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2876 		int_params.irq_source =
2877 			dc_interrupt_to_irq_source(dc, i, 0);
2878 
2879 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2880 
2881 		c_irq_params->adev = adev;
2882 		c_irq_params->irq_src = int_params.irq_source;
2883 
2884 		amdgpu_dm_irq_register_interrupt(
2885 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2886 	}
2887 
2888 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2889 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2890 	 * to trigger at end of each vblank, regardless of state of the lock,
2891 	 * matching DCE behaviour.
2892 	 */
2893 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2894 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2895 	     i++) {
2896 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2897 
2898 		if (r) {
2899 			DRM_ERROR("Failed to add vupdate irq id!\n");
2900 			return r;
2901 		}
2902 
2903 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2904 		int_params.irq_source =
2905 			dc_interrupt_to_irq_source(dc, i, 0);
2906 
2907 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2908 
2909 		c_irq_params->adev = adev;
2910 		c_irq_params->irq_src = int_params.irq_source;
2911 
2912 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2913 				dm_vupdate_high_irq, c_irq_params);
2914 	}
2915 
2916 	/* Use GRPH_PFLIP interrupt */
2917 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2918 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2919 			i++) {
2920 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2921 		if (r) {
2922 			DRM_ERROR("Failed to add page flip irq id!\n");
2923 			return r;
2924 		}
2925 
2926 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2927 		int_params.irq_source =
2928 			dc_interrupt_to_irq_source(dc, i, 0);
2929 
2930 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2931 
2932 		c_irq_params->adev = adev;
2933 		c_irq_params->irq_src = int_params.irq_source;
2934 
2935 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2936 				dm_pflip_high_irq, c_irq_params);
2937 
2938 	}
2939 
2940 	/* HPD */
2941 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2942 			&adev->hpd_irq);
2943 	if (r) {
2944 		DRM_ERROR("Failed to add hpd irq id!\n");
2945 		return r;
2946 	}
2947 
2948 	register_hpd_handlers(adev);
2949 
2950 	return 0;
2951 }
2952 #endif
2953 
2954 /*
2955  * Acquires the lock for the atomic state object and returns
2956  * the new atomic state.
2957  *
2958  * This should only be called during atomic check.
2959  */
2960 static int dm_atomic_get_state(struct drm_atomic_state *state,
2961 			       struct dm_atomic_state **dm_state)
2962 {
2963 	struct drm_device *dev = state->dev;
2964 	struct amdgpu_device *adev = drm_to_adev(dev);
2965 	struct amdgpu_display_manager *dm = &adev->dm;
2966 	struct drm_private_state *priv_state;
2967 
2968 	if (*dm_state)
2969 		return 0;
2970 
2971 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2972 	if (IS_ERR(priv_state))
2973 		return PTR_ERR(priv_state);
2974 
2975 	*dm_state = to_dm_atomic_state(priv_state);
2976 
2977 	return 0;
2978 }
2979 
2980 static struct dm_atomic_state *
2981 dm_atomic_get_new_state(struct drm_atomic_state *state)
2982 {
2983 	struct drm_device *dev = state->dev;
2984 	struct amdgpu_device *adev = drm_to_adev(dev);
2985 	struct amdgpu_display_manager *dm = &adev->dm;
2986 	struct drm_private_obj *obj;
2987 	struct drm_private_state *new_obj_state;
2988 	int i;
2989 
2990 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2991 		if (obj->funcs == dm->atomic_obj.funcs)
2992 			return to_dm_atomic_state(new_obj_state);
2993 	}
2994 
2995 	return NULL;
2996 }
2997 
2998 static struct drm_private_state *
2999 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3000 {
3001 	struct dm_atomic_state *old_state, *new_state;
3002 
3003 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3004 	if (!new_state)
3005 		return NULL;
3006 
3007 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3008 
3009 	old_state = to_dm_atomic_state(obj->state);
3010 
3011 	if (old_state && old_state->context)
3012 		new_state->context = dc_copy_state(old_state->context);
3013 
3014 	if (!new_state->context) {
3015 		kfree(new_state);
3016 		return NULL;
3017 	}
3018 
3019 	return &new_state->base;
3020 }
3021 
3022 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3023 				    struct drm_private_state *state)
3024 {
3025 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3026 
3027 	if (dm_state && dm_state->context)
3028 		dc_release_state(dm_state->context);
3029 
3030 	kfree(dm_state);
3031 }
3032 
3033 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3034 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3035 	.atomic_destroy_state = dm_atomic_destroy_state,
3036 };
3037 
3038 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3039 {
3040 	struct dm_atomic_state *state;
3041 	int r;
3042 
3043 	adev->mode_info.mode_config_initialized = true;
3044 
3045 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3046 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3047 
3048 	adev_to_drm(adev)->mode_config.max_width = 16384;
3049 	adev_to_drm(adev)->mode_config.max_height = 16384;
3050 
3051 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3052 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3053 	/* indicates support for immediate flip */
3054 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3055 
3056 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3057 
3058 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3059 	if (!state)
3060 		return -ENOMEM;
3061 
3062 	state->context = dc_create_state(adev->dm.dc);
3063 	if (!state->context) {
3064 		kfree(state);
3065 		return -ENOMEM;
3066 	}
3067 
3068 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3069 
3070 	drm_atomic_private_obj_init(adev_to_drm(adev),
3071 				    &adev->dm.atomic_obj,
3072 				    &state->base,
3073 				    &dm_atomic_state_funcs);
3074 
3075 	r = amdgpu_display_modeset_create_props(adev);
3076 	if (r) {
3077 		dc_release_state(state->context);
3078 		kfree(state);
3079 		return r;
3080 	}
3081 
3082 	r = amdgpu_dm_audio_init(adev);
3083 	if (r) {
3084 		dc_release_state(state->context);
3085 		kfree(state);
3086 		return r;
3087 	}
3088 
3089 	return 0;
3090 }
3091 
3092 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3093 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3094 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3095 
3096 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3097 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3098 
3099 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3100 {
3101 #if defined(CONFIG_ACPI)
3102 	struct amdgpu_dm_backlight_caps caps;
3103 
3104 	memset(&caps, 0, sizeof(caps));
3105 
3106 	if (dm->backlight_caps.caps_valid)
3107 		return;
3108 
3109 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3110 	if (caps.caps_valid) {
3111 		dm->backlight_caps.caps_valid = true;
3112 		if (caps.aux_support)
3113 			return;
3114 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3115 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3116 	} else {
3117 		dm->backlight_caps.min_input_signal =
3118 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3119 		dm->backlight_caps.max_input_signal =
3120 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3121 	}
3122 #else
3123 	if (dm->backlight_caps.aux_support)
3124 		return;
3125 
3126 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3127 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3128 #endif
3129 }
3130 
3131 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3132 {
3133 	bool rc;
3134 
3135 	if (!link)
3136 		return 1;
3137 
3138 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3139 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3140 
3141 	return rc ? 0 : 1;
3142 }
3143 
3144 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3145 				unsigned *min, unsigned *max)
3146 {
3147 	if (!caps)
3148 		return 0;
3149 
3150 	if (caps->aux_support) {
3151 		// Firmware limits are in nits, DC API wants millinits.
3152 		*max = 1000 * caps->aux_max_input_signal;
3153 		*min = 1000 * caps->aux_min_input_signal;
3154 	} else {
3155 		// Firmware limits are 8-bit, PWM control is 16-bit.
3156 		*max = 0x101 * caps->max_input_signal;
3157 		*min = 0x101 * caps->min_input_signal;
3158 	}
3159 	return 1;
3160 }
3161 
3162 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3163 					uint32_t brightness)
3164 {
3165 	unsigned min, max;
3166 
3167 	if (!get_brightness_range(caps, &min, &max))
3168 		return brightness;
3169 
3170 	// Rescale 0..255 to min..max
3171 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3172 				       AMDGPU_MAX_BL_LEVEL);
3173 }
3174 
3175 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3176 				      uint32_t brightness)
3177 {
3178 	unsigned min, max;
3179 
3180 	if (!get_brightness_range(caps, &min, &max))
3181 		return brightness;
3182 
3183 	if (brightness < min)
3184 		return 0;
3185 	// Rescale min..max to 0..255
3186 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3187 				 max - min);
3188 }
3189 
3190 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3191 {
3192 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3193 	struct amdgpu_dm_backlight_caps caps;
3194 	struct dc_link *link = NULL;
3195 	u32 brightness;
3196 	bool rc;
3197 
3198 	amdgpu_dm_update_backlight_caps(dm);
3199 	caps = dm->backlight_caps;
3200 
3201 	link = (struct dc_link *)dm->backlight_link;
3202 
3203 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3204 	// Change brightness based on AUX property
3205 	if (caps.aux_support)
3206 		return set_backlight_via_aux(link, brightness);
3207 
3208 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3209 
3210 	return rc ? 0 : 1;
3211 }
3212 
3213 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3214 {
3215 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3216 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3217 
3218 	if (ret == DC_ERROR_UNEXPECTED)
3219 		return bd->props.brightness;
3220 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3221 }
3222 
3223 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3224 	.options = BL_CORE_SUSPENDRESUME,
3225 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3226 	.update_status	= amdgpu_dm_backlight_update_status,
3227 };
3228 
3229 static void
3230 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3231 {
3232 	char bl_name[16];
3233 	struct backlight_properties props = { 0 };
3234 
3235 	amdgpu_dm_update_backlight_caps(dm);
3236 
3237 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3238 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3239 	props.type = BACKLIGHT_RAW;
3240 
3241 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3242 		 adev_to_drm(dm->adev)->primary->index);
3243 
3244 	dm->backlight_dev = backlight_device_register(bl_name,
3245 						      adev_to_drm(dm->adev)->dev,
3246 						      dm,
3247 						      &amdgpu_dm_backlight_ops,
3248 						      &props);
3249 
3250 	if (IS_ERR(dm->backlight_dev))
3251 		DRM_ERROR("DM: Backlight registration failed!\n");
3252 	else
3253 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3254 }
3255 
3256 #endif
3257 
3258 static int initialize_plane(struct amdgpu_display_manager *dm,
3259 			    struct amdgpu_mode_info *mode_info, int plane_id,
3260 			    enum drm_plane_type plane_type,
3261 			    const struct dc_plane_cap *plane_cap)
3262 {
3263 	struct drm_plane *plane;
3264 	unsigned long possible_crtcs;
3265 	int ret = 0;
3266 
3267 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3268 	if (!plane) {
3269 		DRM_ERROR("KMS: Failed to allocate plane\n");
3270 		return -ENOMEM;
3271 	}
3272 	plane->type = plane_type;
3273 
3274 	/*
3275 	 * HACK: IGT tests expect that the primary plane for a CRTC
3276 	 * can only have one possible CRTC. Only expose support for
3277 	 * any CRTC if they're not going to be used as a primary plane
3278 	 * for a CRTC - like overlay or underlay planes.
3279 	 */
3280 	possible_crtcs = 1 << plane_id;
3281 	if (plane_id >= dm->dc->caps.max_streams)
3282 		possible_crtcs = 0xff;
3283 
3284 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3285 
3286 	if (ret) {
3287 		DRM_ERROR("KMS: Failed to initialize plane\n");
3288 		kfree(plane);
3289 		return ret;
3290 	}
3291 
3292 	if (mode_info)
3293 		mode_info->planes[plane_id] = plane;
3294 
3295 	return ret;
3296 }
3297 
3298 
3299 static void register_backlight_device(struct amdgpu_display_manager *dm,
3300 				      struct dc_link *link)
3301 {
3302 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3303 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3304 
3305 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3306 	    link->type != dc_connection_none) {
3307 		/*
3308 		 * Event if registration failed, we should continue with
3309 		 * DM initialization because not having a backlight control
3310 		 * is better then a black screen.
3311 		 */
3312 		amdgpu_dm_register_backlight_device(dm);
3313 
3314 		if (dm->backlight_dev)
3315 			dm->backlight_link = link;
3316 	}
3317 #endif
3318 }
3319 
3320 
3321 /*
3322  * In this architecture, the association
3323  * connector -> encoder -> crtc
3324  * id not really requried. The crtc and connector will hold the
3325  * display_index as an abstraction to use with DAL component
3326  *
3327  * Returns 0 on success
3328  */
3329 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3330 {
3331 	struct amdgpu_display_manager *dm = &adev->dm;
3332 	int32_t i;
3333 	struct amdgpu_dm_connector *aconnector = NULL;
3334 	struct amdgpu_encoder *aencoder = NULL;
3335 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3336 	uint32_t link_cnt;
3337 	int32_t primary_planes;
3338 	enum dc_connection_type new_connection_type = dc_connection_none;
3339 	const struct dc_plane_cap *plane;
3340 
3341 	dm->display_indexes_num = dm->dc->caps.max_streams;
3342 	/* Update the actual used number of crtc */
3343 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3344 
3345 	link_cnt = dm->dc->caps.max_links;
3346 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3347 		DRM_ERROR("DM: Failed to initialize mode config\n");
3348 		return -EINVAL;
3349 	}
3350 
3351 	/* There is one primary plane per CRTC */
3352 	primary_planes = dm->dc->caps.max_streams;
3353 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3354 
3355 	/*
3356 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3357 	 * Order is reversed to match iteration order in atomic check.
3358 	 */
3359 	for (i = (primary_planes - 1); i >= 0; i--) {
3360 		plane = &dm->dc->caps.planes[i];
3361 
3362 		if (initialize_plane(dm, mode_info, i,
3363 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3364 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3365 			goto fail;
3366 		}
3367 	}
3368 
3369 	/*
3370 	 * Initialize overlay planes, index starting after primary planes.
3371 	 * These planes have a higher DRM index than the primary planes since
3372 	 * they should be considered as having a higher z-order.
3373 	 * Order is reversed to match iteration order in atomic check.
3374 	 *
3375 	 * Only support DCN for now, and only expose one so we don't encourage
3376 	 * userspace to use up all the pipes.
3377 	 */
3378 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3379 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3380 
3381 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3382 			continue;
3383 
3384 		if (!plane->blends_with_above || !plane->blends_with_below)
3385 			continue;
3386 
3387 		if (!plane->pixel_format_support.argb8888)
3388 			continue;
3389 
3390 		if (initialize_plane(dm, NULL, primary_planes + i,
3391 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3392 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3393 			goto fail;
3394 		}
3395 
3396 		/* Only create one overlay plane. */
3397 		break;
3398 	}
3399 
3400 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3401 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3402 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3403 			goto fail;
3404 		}
3405 
3406 	/* loops over all connectors on the board */
3407 	for (i = 0; i < link_cnt; i++) {
3408 		struct dc_link *link = NULL;
3409 
3410 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3411 			DRM_ERROR(
3412 				"KMS: Cannot support more than %d display indexes\n",
3413 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3414 			continue;
3415 		}
3416 
3417 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3418 		if (!aconnector)
3419 			goto fail;
3420 
3421 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3422 		if (!aencoder)
3423 			goto fail;
3424 
3425 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3426 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3427 			goto fail;
3428 		}
3429 
3430 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3431 			DRM_ERROR("KMS: Failed to initialize connector\n");
3432 			goto fail;
3433 		}
3434 
3435 		link = dc_get_link_at_index(dm->dc, i);
3436 
3437 		if (!dc_link_detect_sink(link, &new_connection_type))
3438 			DRM_ERROR("KMS: Failed to detect connector\n");
3439 
3440 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3441 			emulated_link_detect(link);
3442 			amdgpu_dm_update_connector_after_detect(aconnector);
3443 
3444 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3445 			amdgpu_dm_update_connector_after_detect(aconnector);
3446 			register_backlight_device(dm, link);
3447 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3448 				amdgpu_dm_set_psr_caps(link);
3449 		}
3450 
3451 
3452 	}
3453 
3454 	/* Software is initialized. Now we can register interrupt handlers. */
3455 	switch (adev->asic_type) {
3456 #if defined(CONFIG_DRM_AMD_DC_SI)
3457 	case CHIP_TAHITI:
3458 	case CHIP_PITCAIRN:
3459 	case CHIP_VERDE:
3460 	case CHIP_OLAND:
3461 		if (dce60_register_irq_handlers(dm->adev)) {
3462 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3463 			goto fail;
3464 		}
3465 		break;
3466 #endif
3467 	case CHIP_BONAIRE:
3468 	case CHIP_HAWAII:
3469 	case CHIP_KAVERI:
3470 	case CHIP_KABINI:
3471 	case CHIP_MULLINS:
3472 	case CHIP_TONGA:
3473 	case CHIP_FIJI:
3474 	case CHIP_CARRIZO:
3475 	case CHIP_STONEY:
3476 	case CHIP_POLARIS11:
3477 	case CHIP_POLARIS10:
3478 	case CHIP_POLARIS12:
3479 	case CHIP_VEGAM:
3480 	case CHIP_VEGA10:
3481 	case CHIP_VEGA12:
3482 	case CHIP_VEGA20:
3483 		if (dce110_register_irq_handlers(dm->adev)) {
3484 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3485 			goto fail;
3486 		}
3487 		break;
3488 #if defined(CONFIG_DRM_AMD_DC_DCN)
3489 	case CHIP_RAVEN:
3490 	case CHIP_NAVI12:
3491 	case CHIP_NAVI10:
3492 	case CHIP_NAVI14:
3493 	case CHIP_RENOIR:
3494 	case CHIP_SIENNA_CICHLID:
3495 	case CHIP_NAVY_FLOUNDER:
3496 	case CHIP_DIMGREY_CAVEFISH:
3497 	case CHIP_VANGOGH:
3498 		if (dcn10_register_irq_handlers(dm->adev)) {
3499 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3500 			goto fail;
3501 		}
3502 		break;
3503 #endif
3504 	default:
3505 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3506 		goto fail;
3507 	}
3508 
3509 	return 0;
3510 fail:
3511 	kfree(aencoder);
3512 	kfree(aconnector);
3513 
3514 	return -EINVAL;
3515 }
3516 
3517 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3518 {
3519 	drm_mode_config_cleanup(dm->ddev);
3520 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3521 	return;
3522 }
3523 
3524 /******************************************************************************
3525  * amdgpu_display_funcs functions
3526  *****************************************************************************/
3527 
3528 /*
3529  * dm_bandwidth_update - program display watermarks
3530  *
3531  * @adev: amdgpu_device pointer
3532  *
3533  * Calculate and program the display watermarks and line buffer allocation.
3534  */
3535 static void dm_bandwidth_update(struct amdgpu_device *adev)
3536 {
3537 	/* TODO: implement later */
3538 }
3539 
3540 static const struct amdgpu_display_funcs dm_display_funcs = {
3541 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3542 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3543 	.backlight_set_level = NULL, /* never called for DC */
3544 	.backlight_get_level = NULL, /* never called for DC */
3545 	.hpd_sense = NULL,/* called unconditionally */
3546 	.hpd_set_polarity = NULL, /* called unconditionally */
3547 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3548 	.page_flip_get_scanoutpos =
3549 		dm_crtc_get_scanoutpos,/* called unconditionally */
3550 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3551 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3552 };
3553 
3554 #if defined(CONFIG_DEBUG_KERNEL_DC)
3555 
3556 static ssize_t s3_debug_store(struct device *device,
3557 			      struct device_attribute *attr,
3558 			      const char *buf,
3559 			      size_t count)
3560 {
3561 	int ret;
3562 	int s3_state;
3563 	struct drm_device *drm_dev = dev_get_drvdata(device);
3564 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3565 
3566 	ret = kstrtoint(buf, 0, &s3_state);
3567 
3568 	if (ret == 0) {
3569 		if (s3_state) {
3570 			dm_resume(adev);
3571 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3572 		} else
3573 			dm_suspend(adev);
3574 	}
3575 
3576 	return ret == 0 ? count : 0;
3577 }
3578 
3579 DEVICE_ATTR_WO(s3_debug);
3580 
3581 #endif
3582 
3583 static int dm_early_init(void *handle)
3584 {
3585 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3586 
3587 	switch (adev->asic_type) {
3588 #if defined(CONFIG_DRM_AMD_DC_SI)
3589 	case CHIP_TAHITI:
3590 	case CHIP_PITCAIRN:
3591 	case CHIP_VERDE:
3592 		adev->mode_info.num_crtc = 6;
3593 		adev->mode_info.num_hpd = 6;
3594 		adev->mode_info.num_dig = 6;
3595 		break;
3596 	case CHIP_OLAND:
3597 		adev->mode_info.num_crtc = 2;
3598 		adev->mode_info.num_hpd = 2;
3599 		adev->mode_info.num_dig = 2;
3600 		break;
3601 #endif
3602 	case CHIP_BONAIRE:
3603 	case CHIP_HAWAII:
3604 		adev->mode_info.num_crtc = 6;
3605 		adev->mode_info.num_hpd = 6;
3606 		adev->mode_info.num_dig = 6;
3607 		break;
3608 	case CHIP_KAVERI:
3609 		adev->mode_info.num_crtc = 4;
3610 		adev->mode_info.num_hpd = 6;
3611 		adev->mode_info.num_dig = 7;
3612 		break;
3613 	case CHIP_KABINI:
3614 	case CHIP_MULLINS:
3615 		adev->mode_info.num_crtc = 2;
3616 		adev->mode_info.num_hpd = 6;
3617 		adev->mode_info.num_dig = 6;
3618 		break;
3619 	case CHIP_FIJI:
3620 	case CHIP_TONGA:
3621 		adev->mode_info.num_crtc = 6;
3622 		adev->mode_info.num_hpd = 6;
3623 		adev->mode_info.num_dig = 7;
3624 		break;
3625 	case CHIP_CARRIZO:
3626 		adev->mode_info.num_crtc = 3;
3627 		adev->mode_info.num_hpd = 6;
3628 		adev->mode_info.num_dig = 9;
3629 		break;
3630 	case CHIP_STONEY:
3631 		adev->mode_info.num_crtc = 2;
3632 		adev->mode_info.num_hpd = 6;
3633 		adev->mode_info.num_dig = 9;
3634 		break;
3635 	case CHIP_POLARIS11:
3636 	case CHIP_POLARIS12:
3637 		adev->mode_info.num_crtc = 5;
3638 		adev->mode_info.num_hpd = 5;
3639 		adev->mode_info.num_dig = 5;
3640 		break;
3641 	case CHIP_POLARIS10:
3642 	case CHIP_VEGAM:
3643 		adev->mode_info.num_crtc = 6;
3644 		adev->mode_info.num_hpd = 6;
3645 		adev->mode_info.num_dig = 6;
3646 		break;
3647 	case CHIP_VEGA10:
3648 	case CHIP_VEGA12:
3649 	case CHIP_VEGA20:
3650 		adev->mode_info.num_crtc = 6;
3651 		adev->mode_info.num_hpd = 6;
3652 		adev->mode_info.num_dig = 6;
3653 		break;
3654 #if defined(CONFIG_DRM_AMD_DC_DCN)
3655 	case CHIP_RAVEN:
3656 	case CHIP_RENOIR:
3657 	case CHIP_VANGOGH:
3658 		adev->mode_info.num_crtc = 4;
3659 		adev->mode_info.num_hpd = 4;
3660 		adev->mode_info.num_dig = 4;
3661 		break;
3662 	case CHIP_NAVI10:
3663 	case CHIP_NAVI12:
3664 	case CHIP_SIENNA_CICHLID:
3665 	case CHIP_NAVY_FLOUNDER:
3666 		adev->mode_info.num_crtc = 6;
3667 		adev->mode_info.num_hpd = 6;
3668 		adev->mode_info.num_dig = 6;
3669 		break;
3670 	case CHIP_NAVI14:
3671 	case CHIP_DIMGREY_CAVEFISH:
3672 		adev->mode_info.num_crtc = 5;
3673 		adev->mode_info.num_hpd = 5;
3674 		adev->mode_info.num_dig = 5;
3675 		break;
3676 #endif
3677 	default:
3678 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3679 		return -EINVAL;
3680 	}
3681 
3682 	amdgpu_dm_set_irq_funcs(adev);
3683 
3684 	if (adev->mode_info.funcs == NULL)
3685 		adev->mode_info.funcs = &dm_display_funcs;
3686 
3687 	/*
3688 	 * Note: Do NOT change adev->audio_endpt_rreg and
3689 	 * adev->audio_endpt_wreg because they are initialised in
3690 	 * amdgpu_device_init()
3691 	 */
3692 #if defined(CONFIG_DEBUG_KERNEL_DC)
3693 	device_create_file(
3694 		adev_to_drm(adev)->dev,
3695 		&dev_attr_s3_debug);
3696 #endif
3697 
3698 	return 0;
3699 }
3700 
3701 static bool modeset_required(struct drm_crtc_state *crtc_state,
3702 			     struct dc_stream_state *new_stream,
3703 			     struct dc_stream_state *old_stream)
3704 {
3705 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3706 }
3707 
3708 static bool modereset_required(struct drm_crtc_state *crtc_state)
3709 {
3710 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3711 }
3712 
3713 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3714 {
3715 	drm_encoder_cleanup(encoder);
3716 	kfree(encoder);
3717 }
3718 
3719 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3720 	.destroy = amdgpu_dm_encoder_destroy,
3721 };
3722 
3723 
3724 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3725 					 struct drm_framebuffer *fb,
3726 					 int *min_downscale, int *max_upscale)
3727 {
3728 	struct amdgpu_device *adev = drm_to_adev(dev);
3729 	struct dc *dc = adev->dm.dc;
3730 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3731 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3732 
3733 	switch (fb->format->format) {
3734 	case DRM_FORMAT_P010:
3735 	case DRM_FORMAT_NV12:
3736 	case DRM_FORMAT_NV21:
3737 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3738 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3739 		break;
3740 
3741 	case DRM_FORMAT_XRGB16161616F:
3742 	case DRM_FORMAT_ARGB16161616F:
3743 	case DRM_FORMAT_XBGR16161616F:
3744 	case DRM_FORMAT_ABGR16161616F:
3745 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3746 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3747 		break;
3748 
3749 	default:
3750 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3751 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3752 		break;
3753 	}
3754 
3755 	/*
3756 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
3757 	 * scaling factor of 1.0 == 1000 units.
3758 	 */
3759 	if (*max_upscale == 1)
3760 		*max_upscale = 1000;
3761 
3762 	if (*min_downscale == 1)
3763 		*min_downscale = 1000;
3764 }
3765 
3766 
3767 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3768 				struct dc_scaling_info *scaling_info)
3769 {
3770 	int scale_w, scale_h, min_downscale, max_upscale;
3771 
3772 	memset(scaling_info, 0, sizeof(*scaling_info));
3773 
3774 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3775 	scaling_info->src_rect.x = state->src_x >> 16;
3776 	scaling_info->src_rect.y = state->src_y >> 16;
3777 
3778 	scaling_info->src_rect.width = state->src_w >> 16;
3779 	if (scaling_info->src_rect.width == 0)
3780 		return -EINVAL;
3781 
3782 	scaling_info->src_rect.height = state->src_h >> 16;
3783 	if (scaling_info->src_rect.height == 0)
3784 		return -EINVAL;
3785 
3786 	scaling_info->dst_rect.x = state->crtc_x;
3787 	scaling_info->dst_rect.y = state->crtc_y;
3788 
3789 	if (state->crtc_w == 0)
3790 		return -EINVAL;
3791 
3792 	scaling_info->dst_rect.width = state->crtc_w;
3793 
3794 	if (state->crtc_h == 0)
3795 		return -EINVAL;
3796 
3797 	scaling_info->dst_rect.height = state->crtc_h;
3798 
3799 	/* DRM doesn't specify clipping on destination output. */
3800 	scaling_info->clip_rect = scaling_info->dst_rect;
3801 
3802 	/* Validate scaling per-format with DC plane caps */
3803 	if (state->plane && state->plane->dev && state->fb) {
3804 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3805 					     &min_downscale, &max_upscale);
3806 	} else {
3807 		min_downscale = 250;
3808 		max_upscale = 16000;
3809 	}
3810 
3811 	scale_w = scaling_info->dst_rect.width * 1000 /
3812 		  scaling_info->src_rect.width;
3813 
3814 	if (scale_w < min_downscale || scale_w > max_upscale)
3815 		return -EINVAL;
3816 
3817 	scale_h = scaling_info->dst_rect.height * 1000 /
3818 		  scaling_info->src_rect.height;
3819 
3820 	if (scale_h < min_downscale || scale_h > max_upscale)
3821 		return -EINVAL;
3822 
3823 	/*
3824 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3825 	 * assume reasonable defaults based on the format.
3826 	 */
3827 
3828 	return 0;
3829 }
3830 
3831 static void
3832 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3833 				 uint64_t tiling_flags)
3834 {
3835 	/* Fill GFX8 params */
3836 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3837 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3838 
3839 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3840 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3841 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3842 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3843 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3844 
3845 		/* XXX fix me for VI */
3846 		tiling_info->gfx8.num_banks = num_banks;
3847 		tiling_info->gfx8.array_mode =
3848 				DC_ARRAY_2D_TILED_THIN1;
3849 		tiling_info->gfx8.tile_split = tile_split;
3850 		tiling_info->gfx8.bank_width = bankw;
3851 		tiling_info->gfx8.bank_height = bankh;
3852 		tiling_info->gfx8.tile_aspect = mtaspect;
3853 		tiling_info->gfx8.tile_mode =
3854 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3855 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3856 			== DC_ARRAY_1D_TILED_THIN1) {
3857 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3858 	}
3859 
3860 	tiling_info->gfx8.pipe_config =
3861 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3862 }
3863 
3864 static void
3865 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3866 				  union dc_tiling_info *tiling_info)
3867 {
3868 	tiling_info->gfx9.num_pipes =
3869 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3870 	tiling_info->gfx9.num_banks =
3871 		adev->gfx.config.gb_addr_config_fields.num_banks;
3872 	tiling_info->gfx9.pipe_interleave =
3873 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3874 	tiling_info->gfx9.num_shader_engines =
3875 		adev->gfx.config.gb_addr_config_fields.num_se;
3876 	tiling_info->gfx9.max_compressed_frags =
3877 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3878 	tiling_info->gfx9.num_rb_per_se =
3879 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3880 	tiling_info->gfx9.shaderEnable = 1;
3881 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3882 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3883 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3884 	    adev->asic_type == CHIP_VANGOGH)
3885 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3886 }
3887 
3888 static int
3889 validate_dcc(struct amdgpu_device *adev,
3890 	     const enum surface_pixel_format format,
3891 	     const enum dc_rotation_angle rotation,
3892 	     const union dc_tiling_info *tiling_info,
3893 	     const struct dc_plane_dcc_param *dcc,
3894 	     const struct dc_plane_address *address,
3895 	     const struct plane_size *plane_size)
3896 {
3897 	struct dc *dc = adev->dm.dc;
3898 	struct dc_dcc_surface_param input;
3899 	struct dc_surface_dcc_cap output;
3900 
3901 	memset(&input, 0, sizeof(input));
3902 	memset(&output, 0, sizeof(output));
3903 
3904 	if (!dcc->enable)
3905 		return 0;
3906 
3907 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3908 	    !dc->cap_funcs.get_dcc_compression_cap)
3909 		return -EINVAL;
3910 
3911 	input.format = format;
3912 	input.surface_size.width = plane_size->surface_size.width;
3913 	input.surface_size.height = plane_size->surface_size.height;
3914 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3915 
3916 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3917 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3918 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3919 		input.scan = SCAN_DIRECTION_VERTICAL;
3920 
3921 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3922 		return -EINVAL;
3923 
3924 	if (!output.capable)
3925 		return -EINVAL;
3926 
3927 	if (dcc->independent_64b_blks == 0 &&
3928 	    output.grph.rgb.independent_64b_blks != 0)
3929 		return -EINVAL;
3930 
3931 	return 0;
3932 }
3933 
3934 static bool
3935 modifier_has_dcc(uint64_t modifier)
3936 {
3937 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3938 }
3939 
3940 static unsigned
3941 modifier_gfx9_swizzle_mode(uint64_t modifier)
3942 {
3943 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3944 		return 0;
3945 
3946 	return AMD_FMT_MOD_GET(TILE, modifier);
3947 }
3948 
3949 static const struct drm_format_info *
3950 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3951 {
3952 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3953 }
3954 
3955 static void
3956 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3957 				    union dc_tiling_info *tiling_info,
3958 				    uint64_t modifier)
3959 {
3960 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3961 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3962 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3963 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3964 
3965 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
3966 
3967 	if (!IS_AMD_FMT_MOD(modifier))
3968 		return;
3969 
3970 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3971 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3972 
3973 	if (adev->family >= AMDGPU_FAMILY_NV) {
3974 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3975 	} else {
3976 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3977 
3978 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3979 	}
3980 }
3981 
3982 enum dm_micro_swizzle {
3983 	MICRO_SWIZZLE_Z = 0,
3984 	MICRO_SWIZZLE_S = 1,
3985 	MICRO_SWIZZLE_D = 2,
3986 	MICRO_SWIZZLE_R = 3
3987 };
3988 
3989 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3990 					  uint32_t format,
3991 					  uint64_t modifier)
3992 {
3993 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
3994 	const struct drm_format_info *info = drm_format_info(format);
3995 
3996 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3997 
3998 	if (!info)
3999 		return false;
4000 
4001 	/*
4002 	 * We always have to allow this modifier, because core DRM still
4003 	 * checks LINEAR support if userspace does not provide modifers.
4004 	 */
4005 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4006 		return true;
4007 
4008 	/*
4009 	 * The arbitrary tiling support for multiplane formats has not been hooked
4010 	 * up.
4011 	 */
4012 	if (info->num_planes > 1)
4013 		return false;
4014 
4015 	/*
4016 	 * For D swizzle the canonical modifier depends on the bpp, so check
4017 	 * it here.
4018 	 */
4019 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4020 	    adev->family >= AMDGPU_FAMILY_NV) {
4021 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4022 			return false;
4023 	}
4024 
4025 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4026 	    info->cpp[0] < 8)
4027 		return false;
4028 
4029 	if (modifier_has_dcc(modifier)) {
4030 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4031 		if (info->cpp[0] != 4)
4032 			return false;
4033 	}
4034 
4035 	return true;
4036 }
4037 
4038 static void
4039 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4040 {
4041 	if (!*mods)
4042 		return;
4043 
4044 	if (*cap - *size < 1) {
4045 		uint64_t new_cap = *cap * 2;
4046 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4047 
4048 		if (!new_mods) {
4049 			kfree(*mods);
4050 			*mods = NULL;
4051 			return;
4052 		}
4053 
4054 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4055 		kfree(*mods);
4056 		*mods = new_mods;
4057 		*cap = new_cap;
4058 	}
4059 
4060 	(*mods)[*size] = mod;
4061 	*size += 1;
4062 }
4063 
4064 static void
4065 add_gfx9_modifiers(const struct amdgpu_device *adev,
4066 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4067 {
4068 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4069 	int pipe_xor_bits = min(8, pipes +
4070 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4071 	int bank_xor_bits = min(8 - pipe_xor_bits,
4072 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4073 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4074 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4075 
4076 
4077 	if (adev->family == AMDGPU_FAMILY_RV) {
4078 		/* Raven2 and later */
4079 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4080 
4081 		/*
4082 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4083 		 * doesn't support _D on DCN
4084 		 */
4085 
4086 		if (has_constant_encode) {
4087 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4088 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4089 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4090 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4091 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4092 				    AMD_FMT_MOD_SET(DCC, 1) |
4093 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4094 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4095 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4096 		}
4097 
4098 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4099 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4100 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4101 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4102 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4103 			    AMD_FMT_MOD_SET(DCC, 1) |
4104 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4105 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4106 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4107 
4108 		if (has_constant_encode) {
4109 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4110 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4111 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4112 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4113 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4114 				    AMD_FMT_MOD_SET(DCC, 1) |
4115 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4116 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4117 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4118 
4119 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4120 				    AMD_FMT_MOD_SET(RB, rb) |
4121 				    AMD_FMT_MOD_SET(PIPE, pipes));
4122 		}
4123 
4124 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4125 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4126 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4127 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4128 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4129 			    AMD_FMT_MOD_SET(DCC, 1) |
4130 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4131 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4132 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4133 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4134 			    AMD_FMT_MOD_SET(RB, rb) |
4135 			    AMD_FMT_MOD_SET(PIPE, pipes));
4136 	}
4137 
4138 	/*
4139 	 * Only supported for 64bpp on Raven, will be filtered on format in
4140 	 * dm_plane_format_mod_supported.
4141 	 */
4142 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4143 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4144 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4145 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4146 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4147 
4148 	if (adev->family == AMDGPU_FAMILY_RV) {
4149 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4150 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4151 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4152 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4153 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4154 	}
4155 
4156 	/*
4157 	 * Only supported for 64bpp on Raven, will be filtered on format in
4158 	 * dm_plane_format_mod_supported.
4159 	 */
4160 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4161 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4162 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4163 
4164 	if (adev->family == AMDGPU_FAMILY_RV) {
4165 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4166 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4167 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4168 	}
4169 }
4170 
4171 static void
4172 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4173 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4174 {
4175 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4176 
4177 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4178 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4179 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4180 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4181 		    AMD_FMT_MOD_SET(DCC, 1) |
4182 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4183 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4184 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4185 
4186 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4187 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4188 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4189 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4190 		    AMD_FMT_MOD_SET(DCC, 1) |
4191 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4192 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4193 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4194 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4195 
4196 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4197 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4198 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4199 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4200 
4201 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4202 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4203 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4204 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4205 
4206 
4207 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4208 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4210 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4211 
4212 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4213 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4214 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4215 }
4216 
4217 static void
4218 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4219 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4220 {
4221 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4222 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4223 
4224 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4225 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4226 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4227 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4228 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4229 		    AMD_FMT_MOD_SET(DCC, 1) |
4230 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4231 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4232 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4233 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4234 
4235 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4236 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4237 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4238 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4239 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4240 		    AMD_FMT_MOD_SET(DCC, 1) |
4241 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4242 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4243 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4244 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4245 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4246 
4247 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4248 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4249 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4250 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4251 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4252 
4253 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4254 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4255 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4256 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4257 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4258 
4259 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4260 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4261 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4262 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4263 
4264 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4265 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4266 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4267 }
4268 
4269 static int
4270 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4271 {
4272 	uint64_t size = 0, capacity = 128;
4273 	*mods = NULL;
4274 
4275 	/* We have not hooked up any pre-GFX9 modifiers. */
4276 	if (adev->family < AMDGPU_FAMILY_AI)
4277 		return 0;
4278 
4279 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4280 
4281 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4282 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4283 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4284 		return *mods ? 0 : -ENOMEM;
4285 	}
4286 
4287 	switch (adev->family) {
4288 	case AMDGPU_FAMILY_AI:
4289 	case AMDGPU_FAMILY_RV:
4290 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4291 		break;
4292 	case AMDGPU_FAMILY_NV:
4293 	case AMDGPU_FAMILY_VGH:
4294 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4295 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4296 		else
4297 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4298 		break;
4299 	}
4300 
4301 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4302 
4303 	/* INVALID marks the end of the list. */
4304 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4305 
4306 	if (!*mods)
4307 		return -ENOMEM;
4308 
4309 	return 0;
4310 }
4311 
4312 static int
4313 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4314 					  const struct amdgpu_framebuffer *afb,
4315 					  const enum surface_pixel_format format,
4316 					  const enum dc_rotation_angle rotation,
4317 					  const struct plane_size *plane_size,
4318 					  union dc_tiling_info *tiling_info,
4319 					  struct dc_plane_dcc_param *dcc,
4320 					  struct dc_plane_address *address,
4321 					  const bool force_disable_dcc)
4322 {
4323 	const uint64_t modifier = afb->base.modifier;
4324 	int ret;
4325 
4326 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4327 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4328 
4329 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4330 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4331 
4332 		dcc->enable = 1;
4333 		dcc->meta_pitch = afb->base.pitches[1];
4334 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4335 
4336 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4337 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4338 	}
4339 
4340 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4341 	if (ret)
4342 		return ret;
4343 
4344 	return 0;
4345 }
4346 
4347 static int
4348 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4349 			     const struct amdgpu_framebuffer *afb,
4350 			     const enum surface_pixel_format format,
4351 			     const enum dc_rotation_angle rotation,
4352 			     const uint64_t tiling_flags,
4353 			     union dc_tiling_info *tiling_info,
4354 			     struct plane_size *plane_size,
4355 			     struct dc_plane_dcc_param *dcc,
4356 			     struct dc_plane_address *address,
4357 			     bool tmz_surface,
4358 			     bool force_disable_dcc)
4359 {
4360 	const struct drm_framebuffer *fb = &afb->base;
4361 	int ret;
4362 
4363 	memset(tiling_info, 0, sizeof(*tiling_info));
4364 	memset(plane_size, 0, sizeof(*plane_size));
4365 	memset(dcc, 0, sizeof(*dcc));
4366 	memset(address, 0, sizeof(*address));
4367 
4368 	address->tmz_surface = tmz_surface;
4369 
4370 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4371 		uint64_t addr = afb->address + fb->offsets[0];
4372 
4373 		plane_size->surface_size.x = 0;
4374 		plane_size->surface_size.y = 0;
4375 		plane_size->surface_size.width = fb->width;
4376 		plane_size->surface_size.height = fb->height;
4377 		plane_size->surface_pitch =
4378 			fb->pitches[0] / fb->format->cpp[0];
4379 
4380 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4381 		address->grph.addr.low_part = lower_32_bits(addr);
4382 		address->grph.addr.high_part = upper_32_bits(addr);
4383 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4384 		uint64_t luma_addr = afb->address + fb->offsets[0];
4385 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4386 
4387 		plane_size->surface_size.x = 0;
4388 		plane_size->surface_size.y = 0;
4389 		plane_size->surface_size.width = fb->width;
4390 		plane_size->surface_size.height = fb->height;
4391 		plane_size->surface_pitch =
4392 			fb->pitches[0] / fb->format->cpp[0];
4393 
4394 		plane_size->chroma_size.x = 0;
4395 		plane_size->chroma_size.y = 0;
4396 		/* TODO: set these based on surface format */
4397 		plane_size->chroma_size.width = fb->width / 2;
4398 		plane_size->chroma_size.height = fb->height / 2;
4399 
4400 		plane_size->chroma_pitch =
4401 			fb->pitches[1] / fb->format->cpp[1];
4402 
4403 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4404 		address->video_progressive.luma_addr.low_part =
4405 			lower_32_bits(luma_addr);
4406 		address->video_progressive.luma_addr.high_part =
4407 			upper_32_bits(luma_addr);
4408 		address->video_progressive.chroma_addr.low_part =
4409 			lower_32_bits(chroma_addr);
4410 		address->video_progressive.chroma_addr.high_part =
4411 			upper_32_bits(chroma_addr);
4412 	}
4413 
4414 	if (adev->family >= AMDGPU_FAMILY_AI) {
4415 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4416 								rotation, plane_size,
4417 								tiling_info, dcc,
4418 								address,
4419 								force_disable_dcc);
4420 		if (ret)
4421 			return ret;
4422 	} else {
4423 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4424 	}
4425 
4426 	return 0;
4427 }
4428 
4429 static void
4430 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4431 			       bool *per_pixel_alpha, bool *global_alpha,
4432 			       int *global_alpha_value)
4433 {
4434 	*per_pixel_alpha = false;
4435 	*global_alpha = false;
4436 	*global_alpha_value = 0xff;
4437 
4438 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4439 		return;
4440 
4441 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4442 		static const uint32_t alpha_formats[] = {
4443 			DRM_FORMAT_ARGB8888,
4444 			DRM_FORMAT_RGBA8888,
4445 			DRM_FORMAT_ABGR8888,
4446 		};
4447 		uint32_t format = plane_state->fb->format->format;
4448 		unsigned int i;
4449 
4450 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4451 			if (format == alpha_formats[i]) {
4452 				*per_pixel_alpha = true;
4453 				break;
4454 			}
4455 		}
4456 	}
4457 
4458 	if (plane_state->alpha < 0xffff) {
4459 		*global_alpha = true;
4460 		*global_alpha_value = plane_state->alpha >> 8;
4461 	}
4462 }
4463 
4464 static int
4465 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4466 			    const enum surface_pixel_format format,
4467 			    enum dc_color_space *color_space)
4468 {
4469 	bool full_range;
4470 
4471 	*color_space = COLOR_SPACE_SRGB;
4472 
4473 	/* DRM color properties only affect non-RGB formats. */
4474 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4475 		return 0;
4476 
4477 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4478 
4479 	switch (plane_state->color_encoding) {
4480 	case DRM_COLOR_YCBCR_BT601:
4481 		if (full_range)
4482 			*color_space = COLOR_SPACE_YCBCR601;
4483 		else
4484 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4485 		break;
4486 
4487 	case DRM_COLOR_YCBCR_BT709:
4488 		if (full_range)
4489 			*color_space = COLOR_SPACE_YCBCR709;
4490 		else
4491 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4492 		break;
4493 
4494 	case DRM_COLOR_YCBCR_BT2020:
4495 		if (full_range)
4496 			*color_space = COLOR_SPACE_2020_YCBCR;
4497 		else
4498 			return -EINVAL;
4499 		break;
4500 
4501 	default:
4502 		return -EINVAL;
4503 	}
4504 
4505 	return 0;
4506 }
4507 
4508 static int
4509 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4510 			    const struct drm_plane_state *plane_state,
4511 			    const uint64_t tiling_flags,
4512 			    struct dc_plane_info *plane_info,
4513 			    struct dc_plane_address *address,
4514 			    bool tmz_surface,
4515 			    bool force_disable_dcc)
4516 {
4517 	const struct drm_framebuffer *fb = plane_state->fb;
4518 	const struct amdgpu_framebuffer *afb =
4519 		to_amdgpu_framebuffer(plane_state->fb);
4520 	struct drm_format_name_buf format_name;
4521 	int ret;
4522 
4523 	memset(plane_info, 0, sizeof(*plane_info));
4524 
4525 	switch (fb->format->format) {
4526 	case DRM_FORMAT_C8:
4527 		plane_info->format =
4528 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4529 		break;
4530 	case DRM_FORMAT_RGB565:
4531 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4532 		break;
4533 	case DRM_FORMAT_XRGB8888:
4534 	case DRM_FORMAT_ARGB8888:
4535 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4536 		break;
4537 	case DRM_FORMAT_XRGB2101010:
4538 	case DRM_FORMAT_ARGB2101010:
4539 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4540 		break;
4541 	case DRM_FORMAT_XBGR2101010:
4542 	case DRM_FORMAT_ABGR2101010:
4543 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4544 		break;
4545 	case DRM_FORMAT_XBGR8888:
4546 	case DRM_FORMAT_ABGR8888:
4547 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4548 		break;
4549 	case DRM_FORMAT_NV21:
4550 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4551 		break;
4552 	case DRM_FORMAT_NV12:
4553 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4554 		break;
4555 	case DRM_FORMAT_P010:
4556 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4557 		break;
4558 	case DRM_FORMAT_XRGB16161616F:
4559 	case DRM_FORMAT_ARGB16161616F:
4560 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4561 		break;
4562 	case DRM_FORMAT_XBGR16161616F:
4563 	case DRM_FORMAT_ABGR16161616F:
4564 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4565 		break;
4566 	default:
4567 		DRM_ERROR(
4568 			"Unsupported screen format %s\n",
4569 			drm_get_format_name(fb->format->format, &format_name));
4570 		return -EINVAL;
4571 	}
4572 
4573 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4574 	case DRM_MODE_ROTATE_0:
4575 		plane_info->rotation = ROTATION_ANGLE_0;
4576 		break;
4577 	case DRM_MODE_ROTATE_90:
4578 		plane_info->rotation = ROTATION_ANGLE_90;
4579 		break;
4580 	case DRM_MODE_ROTATE_180:
4581 		plane_info->rotation = ROTATION_ANGLE_180;
4582 		break;
4583 	case DRM_MODE_ROTATE_270:
4584 		plane_info->rotation = ROTATION_ANGLE_270;
4585 		break;
4586 	default:
4587 		plane_info->rotation = ROTATION_ANGLE_0;
4588 		break;
4589 	}
4590 
4591 	plane_info->visible = true;
4592 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4593 
4594 	plane_info->layer_index = 0;
4595 
4596 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4597 					  &plane_info->color_space);
4598 	if (ret)
4599 		return ret;
4600 
4601 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4602 					   plane_info->rotation, tiling_flags,
4603 					   &plane_info->tiling_info,
4604 					   &plane_info->plane_size,
4605 					   &plane_info->dcc, address, tmz_surface,
4606 					   force_disable_dcc);
4607 	if (ret)
4608 		return ret;
4609 
4610 	fill_blending_from_plane_state(
4611 		plane_state, &plane_info->per_pixel_alpha,
4612 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4613 
4614 	return 0;
4615 }
4616 
4617 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4618 				    struct dc_plane_state *dc_plane_state,
4619 				    struct drm_plane_state *plane_state,
4620 				    struct drm_crtc_state *crtc_state)
4621 {
4622 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4623 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4624 	struct dc_scaling_info scaling_info;
4625 	struct dc_plane_info plane_info;
4626 	int ret;
4627 	bool force_disable_dcc = false;
4628 
4629 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4630 	if (ret)
4631 		return ret;
4632 
4633 	dc_plane_state->src_rect = scaling_info.src_rect;
4634 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4635 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4636 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4637 
4638 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4639 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4640 					  afb->tiling_flags,
4641 					  &plane_info,
4642 					  &dc_plane_state->address,
4643 					  afb->tmz_surface,
4644 					  force_disable_dcc);
4645 	if (ret)
4646 		return ret;
4647 
4648 	dc_plane_state->format = plane_info.format;
4649 	dc_plane_state->color_space = plane_info.color_space;
4650 	dc_plane_state->format = plane_info.format;
4651 	dc_plane_state->plane_size = plane_info.plane_size;
4652 	dc_plane_state->rotation = plane_info.rotation;
4653 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4654 	dc_plane_state->stereo_format = plane_info.stereo_format;
4655 	dc_plane_state->tiling_info = plane_info.tiling_info;
4656 	dc_plane_state->visible = plane_info.visible;
4657 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4658 	dc_plane_state->global_alpha = plane_info.global_alpha;
4659 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4660 	dc_plane_state->dcc = plane_info.dcc;
4661 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4662 
4663 	/*
4664 	 * Always set input transfer function, since plane state is refreshed
4665 	 * every time.
4666 	 */
4667 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4668 	if (ret)
4669 		return ret;
4670 
4671 	return 0;
4672 }
4673 
4674 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4675 					   const struct dm_connector_state *dm_state,
4676 					   struct dc_stream_state *stream)
4677 {
4678 	enum amdgpu_rmx_type rmx_type;
4679 
4680 	struct rect src = { 0 }; /* viewport in composition space*/
4681 	struct rect dst = { 0 }; /* stream addressable area */
4682 
4683 	/* no mode. nothing to be done */
4684 	if (!mode)
4685 		return;
4686 
4687 	/* Full screen scaling by default */
4688 	src.width = mode->hdisplay;
4689 	src.height = mode->vdisplay;
4690 	dst.width = stream->timing.h_addressable;
4691 	dst.height = stream->timing.v_addressable;
4692 
4693 	if (dm_state) {
4694 		rmx_type = dm_state->scaling;
4695 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4696 			if (src.width * dst.height <
4697 					src.height * dst.width) {
4698 				/* height needs less upscaling/more downscaling */
4699 				dst.width = src.width *
4700 						dst.height / src.height;
4701 			} else {
4702 				/* width needs less upscaling/more downscaling */
4703 				dst.height = src.height *
4704 						dst.width / src.width;
4705 			}
4706 		} else if (rmx_type == RMX_CENTER) {
4707 			dst = src;
4708 		}
4709 
4710 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4711 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4712 
4713 		if (dm_state->underscan_enable) {
4714 			dst.x += dm_state->underscan_hborder / 2;
4715 			dst.y += dm_state->underscan_vborder / 2;
4716 			dst.width -= dm_state->underscan_hborder;
4717 			dst.height -= dm_state->underscan_vborder;
4718 		}
4719 	}
4720 
4721 	stream->src = src;
4722 	stream->dst = dst;
4723 
4724 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4725 			dst.x, dst.y, dst.width, dst.height);
4726 
4727 }
4728 
4729 static enum dc_color_depth
4730 convert_color_depth_from_display_info(const struct drm_connector *connector,
4731 				      bool is_y420, int requested_bpc)
4732 {
4733 	uint8_t bpc;
4734 
4735 	if (is_y420) {
4736 		bpc = 8;
4737 
4738 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4739 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4740 			bpc = 16;
4741 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4742 			bpc = 12;
4743 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4744 			bpc = 10;
4745 	} else {
4746 		bpc = (uint8_t)connector->display_info.bpc;
4747 		/* Assume 8 bpc by default if no bpc is specified. */
4748 		bpc = bpc ? bpc : 8;
4749 	}
4750 
4751 	if (requested_bpc > 0) {
4752 		/*
4753 		 * Cap display bpc based on the user requested value.
4754 		 *
4755 		 * The value for state->max_bpc may not correctly updated
4756 		 * depending on when the connector gets added to the state
4757 		 * or if this was called outside of atomic check, so it
4758 		 * can't be used directly.
4759 		 */
4760 		bpc = min_t(u8, bpc, requested_bpc);
4761 
4762 		/* Round down to the nearest even number. */
4763 		bpc = bpc - (bpc & 1);
4764 	}
4765 
4766 	switch (bpc) {
4767 	case 0:
4768 		/*
4769 		 * Temporary Work around, DRM doesn't parse color depth for
4770 		 * EDID revision before 1.4
4771 		 * TODO: Fix edid parsing
4772 		 */
4773 		return COLOR_DEPTH_888;
4774 	case 6:
4775 		return COLOR_DEPTH_666;
4776 	case 8:
4777 		return COLOR_DEPTH_888;
4778 	case 10:
4779 		return COLOR_DEPTH_101010;
4780 	case 12:
4781 		return COLOR_DEPTH_121212;
4782 	case 14:
4783 		return COLOR_DEPTH_141414;
4784 	case 16:
4785 		return COLOR_DEPTH_161616;
4786 	default:
4787 		return COLOR_DEPTH_UNDEFINED;
4788 	}
4789 }
4790 
4791 static enum dc_aspect_ratio
4792 get_aspect_ratio(const struct drm_display_mode *mode_in)
4793 {
4794 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4795 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4796 }
4797 
4798 static enum dc_color_space
4799 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4800 {
4801 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4802 
4803 	switch (dc_crtc_timing->pixel_encoding)	{
4804 	case PIXEL_ENCODING_YCBCR422:
4805 	case PIXEL_ENCODING_YCBCR444:
4806 	case PIXEL_ENCODING_YCBCR420:
4807 	{
4808 		/*
4809 		 * 27030khz is the separation point between HDTV and SDTV
4810 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4811 		 * respectively
4812 		 */
4813 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4814 			if (dc_crtc_timing->flags.Y_ONLY)
4815 				color_space =
4816 					COLOR_SPACE_YCBCR709_LIMITED;
4817 			else
4818 				color_space = COLOR_SPACE_YCBCR709;
4819 		} else {
4820 			if (dc_crtc_timing->flags.Y_ONLY)
4821 				color_space =
4822 					COLOR_SPACE_YCBCR601_LIMITED;
4823 			else
4824 				color_space = COLOR_SPACE_YCBCR601;
4825 		}
4826 
4827 	}
4828 	break;
4829 	case PIXEL_ENCODING_RGB:
4830 		color_space = COLOR_SPACE_SRGB;
4831 		break;
4832 
4833 	default:
4834 		WARN_ON(1);
4835 		break;
4836 	}
4837 
4838 	return color_space;
4839 }
4840 
4841 static bool adjust_colour_depth_from_display_info(
4842 	struct dc_crtc_timing *timing_out,
4843 	const struct drm_display_info *info)
4844 {
4845 	enum dc_color_depth depth = timing_out->display_color_depth;
4846 	int normalized_clk;
4847 	do {
4848 		normalized_clk = timing_out->pix_clk_100hz / 10;
4849 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4850 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4851 			normalized_clk /= 2;
4852 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4853 		switch (depth) {
4854 		case COLOR_DEPTH_888:
4855 			break;
4856 		case COLOR_DEPTH_101010:
4857 			normalized_clk = (normalized_clk * 30) / 24;
4858 			break;
4859 		case COLOR_DEPTH_121212:
4860 			normalized_clk = (normalized_clk * 36) / 24;
4861 			break;
4862 		case COLOR_DEPTH_161616:
4863 			normalized_clk = (normalized_clk * 48) / 24;
4864 			break;
4865 		default:
4866 			/* The above depths are the only ones valid for HDMI. */
4867 			return false;
4868 		}
4869 		if (normalized_clk <= info->max_tmds_clock) {
4870 			timing_out->display_color_depth = depth;
4871 			return true;
4872 		}
4873 	} while (--depth > COLOR_DEPTH_666);
4874 	return false;
4875 }
4876 
4877 static void fill_stream_properties_from_drm_display_mode(
4878 	struct dc_stream_state *stream,
4879 	const struct drm_display_mode *mode_in,
4880 	const struct drm_connector *connector,
4881 	const struct drm_connector_state *connector_state,
4882 	const struct dc_stream_state *old_stream,
4883 	int requested_bpc)
4884 {
4885 	struct dc_crtc_timing *timing_out = &stream->timing;
4886 	const struct drm_display_info *info = &connector->display_info;
4887 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4888 	struct hdmi_vendor_infoframe hv_frame;
4889 	struct hdmi_avi_infoframe avi_frame;
4890 
4891 	memset(&hv_frame, 0, sizeof(hv_frame));
4892 	memset(&avi_frame, 0, sizeof(avi_frame));
4893 
4894 	timing_out->h_border_left = 0;
4895 	timing_out->h_border_right = 0;
4896 	timing_out->v_border_top = 0;
4897 	timing_out->v_border_bottom = 0;
4898 	/* TODO: un-hardcode */
4899 	if (drm_mode_is_420_only(info, mode_in)
4900 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4901 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4902 	else if (drm_mode_is_420_also(info, mode_in)
4903 			&& aconnector->force_yuv420_output)
4904 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4905 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4906 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4907 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4908 	else
4909 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4910 
4911 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4912 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4913 		connector,
4914 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4915 		requested_bpc);
4916 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4917 	timing_out->hdmi_vic = 0;
4918 
4919 	if(old_stream) {
4920 		timing_out->vic = old_stream->timing.vic;
4921 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4922 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4923 	} else {
4924 		timing_out->vic = drm_match_cea_mode(mode_in);
4925 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4926 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4927 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4928 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4929 	}
4930 
4931 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4932 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4933 		timing_out->vic = avi_frame.video_code;
4934 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4935 		timing_out->hdmi_vic = hv_frame.vic;
4936 	}
4937 
4938 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4939 	timing_out->h_total = mode_in->crtc_htotal;
4940 	timing_out->h_sync_width =
4941 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4942 	timing_out->h_front_porch =
4943 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4944 	timing_out->v_total = mode_in->crtc_vtotal;
4945 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4946 	timing_out->v_front_porch =
4947 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4948 	timing_out->v_sync_width =
4949 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4950 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4951 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4952 
4953 	stream->output_color_space = get_output_color_space(timing_out);
4954 
4955 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4956 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4957 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4958 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4959 		    drm_mode_is_420_also(info, mode_in) &&
4960 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4961 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4962 			adjust_colour_depth_from_display_info(timing_out, info);
4963 		}
4964 	}
4965 }
4966 
4967 static void fill_audio_info(struct audio_info *audio_info,
4968 			    const struct drm_connector *drm_connector,
4969 			    const struct dc_sink *dc_sink)
4970 {
4971 	int i = 0;
4972 	int cea_revision = 0;
4973 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4974 
4975 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4976 	audio_info->product_id = edid_caps->product_id;
4977 
4978 	cea_revision = drm_connector->display_info.cea_rev;
4979 
4980 	strscpy(audio_info->display_name,
4981 		edid_caps->display_name,
4982 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4983 
4984 	if (cea_revision >= 3) {
4985 		audio_info->mode_count = edid_caps->audio_mode_count;
4986 
4987 		for (i = 0; i < audio_info->mode_count; ++i) {
4988 			audio_info->modes[i].format_code =
4989 					(enum audio_format_code)
4990 					(edid_caps->audio_modes[i].format_code);
4991 			audio_info->modes[i].channel_count =
4992 					edid_caps->audio_modes[i].channel_count;
4993 			audio_info->modes[i].sample_rates.all =
4994 					edid_caps->audio_modes[i].sample_rate;
4995 			audio_info->modes[i].sample_size =
4996 					edid_caps->audio_modes[i].sample_size;
4997 		}
4998 	}
4999 
5000 	audio_info->flags.all = edid_caps->speaker_flags;
5001 
5002 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5003 	if (drm_connector->latency_present[0]) {
5004 		audio_info->video_latency = drm_connector->video_latency[0];
5005 		audio_info->audio_latency = drm_connector->audio_latency[0];
5006 	}
5007 
5008 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5009 
5010 }
5011 
5012 static void
5013 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5014 				      struct drm_display_mode *dst_mode)
5015 {
5016 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5017 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5018 	dst_mode->crtc_clock = src_mode->crtc_clock;
5019 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5020 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5021 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5022 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5023 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5024 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5025 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5026 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5027 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5028 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5029 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5030 }
5031 
5032 static void
5033 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5034 					const struct drm_display_mode *native_mode,
5035 					bool scale_enabled)
5036 {
5037 	if (scale_enabled) {
5038 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5039 	} else if (native_mode->clock == drm_mode->clock &&
5040 			native_mode->htotal == drm_mode->htotal &&
5041 			native_mode->vtotal == drm_mode->vtotal) {
5042 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5043 	} else {
5044 		/* no scaling nor amdgpu inserted, no need to patch */
5045 	}
5046 }
5047 
5048 static struct dc_sink *
5049 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5050 {
5051 	struct dc_sink_init_data sink_init_data = { 0 };
5052 	struct dc_sink *sink = NULL;
5053 	sink_init_data.link = aconnector->dc_link;
5054 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5055 
5056 	sink = dc_sink_create(&sink_init_data);
5057 	if (!sink) {
5058 		DRM_ERROR("Failed to create sink!\n");
5059 		return NULL;
5060 	}
5061 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5062 
5063 	return sink;
5064 }
5065 
5066 static void set_multisync_trigger_params(
5067 		struct dc_stream_state *stream)
5068 {
5069 	if (stream->triggered_crtc_reset.enabled) {
5070 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5071 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5072 	}
5073 }
5074 
5075 static void set_master_stream(struct dc_stream_state *stream_set[],
5076 			      int stream_count)
5077 {
5078 	int j, highest_rfr = 0, master_stream = 0;
5079 
5080 	for (j = 0;  j < stream_count; j++) {
5081 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5082 			int refresh_rate = 0;
5083 
5084 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5085 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5086 			if (refresh_rate > highest_rfr) {
5087 				highest_rfr = refresh_rate;
5088 				master_stream = j;
5089 			}
5090 		}
5091 	}
5092 	for (j = 0;  j < stream_count; j++) {
5093 		if (stream_set[j])
5094 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5095 	}
5096 }
5097 
5098 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5099 {
5100 	int i = 0;
5101 
5102 	if (context->stream_count < 2)
5103 		return;
5104 	for (i = 0; i < context->stream_count ; i++) {
5105 		if (!context->streams[i])
5106 			continue;
5107 		/*
5108 		 * TODO: add a function to read AMD VSDB bits and set
5109 		 * crtc_sync_master.multi_sync_enabled flag
5110 		 * For now it's set to false
5111 		 */
5112 		set_multisync_trigger_params(context->streams[i]);
5113 	}
5114 	set_master_stream(context->streams, context->stream_count);
5115 }
5116 
5117 static struct dc_stream_state *
5118 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5119 		       const struct drm_display_mode *drm_mode,
5120 		       const struct dm_connector_state *dm_state,
5121 		       const struct dc_stream_state *old_stream,
5122 		       int requested_bpc)
5123 {
5124 	struct drm_display_mode *preferred_mode = NULL;
5125 	struct drm_connector *drm_connector;
5126 	const struct drm_connector_state *con_state =
5127 		dm_state ? &dm_state->base : NULL;
5128 	struct dc_stream_state *stream = NULL;
5129 	struct drm_display_mode mode = *drm_mode;
5130 	bool native_mode_found = false;
5131 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5132 	int mode_refresh;
5133 	int preferred_refresh = 0;
5134 #if defined(CONFIG_DRM_AMD_DC_DCN)
5135 	struct dsc_dec_dpcd_caps dsc_caps;
5136 	uint32_t link_bandwidth_kbps;
5137 #endif
5138 	struct dc_sink *sink = NULL;
5139 	if (aconnector == NULL) {
5140 		DRM_ERROR("aconnector is NULL!\n");
5141 		return stream;
5142 	}
5143 
5144 	drm_connector = &aconnector->base;
5145 
5146 	if (!aconnector->dc_sink) {
5147 		sink = create_fake_sink(aconnector);
5148 		if (!sink)
5149 			return stream;
5150 	} else {
5151 		sink = aconnector->dc_sink;
5152 		dc_sink_retain(sink);
5153 	}
5154 
5155 	stream = dc_create_stream_for_sink(sink);
5156 
5157 	if (stream == NULL) {
5158 		DRM_ERROR("Failed to create stream for sink!\n");
5159 		goto finish;
5160 	}
5161 
5162 	stream->dm_stream_context = aconnector;
5163 
5164 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5165 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5166 
5167 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5168 		/* Search for preferred mode */
5169 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5170 			native_mode_found = true;
5171 			break;
5172 		}
5173 	}
5174 	if (!native_mode_found)
5175 		preferred_mode = list_first_entry_or_null(
5176 				&aconnector->base.modes,
5177 				struct drm_display_mode,
5178 				head);
5179 
5180 	mode_refresh = drm_mode_vrefresh(&mode);
5181 
5182 	if (preferred_mode == NULL) {
5183 		/*
5184 		 * This may not be an error, the use case is when we have no
5185 		 * usermode calls to reset and set mode upon hotplug. In this
5186 		 * case, we call set mode ourselves to restore the previous mode
5187 		 * and the modelist may not be filled in in time.
5188 		 */
5189 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5190 	} else {
5191 		decide_crtc_timing_for_drm_display_mode(
5192 				&mode, preferred_mode,
5193 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5194 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5195 	}
5196 
5197 	if (!dm_state)
5198 		drm_mode_set_crtcinfo(&mode, 0);
5199 
5200 	/*
5201 	* If scaling is enabled and refresh rate didn't change
5202 	* we copy the vic and polarities of the old timings
5203 	*/
5204 	if (!scale || mode_refresh != preferred_refresh)
5205 		fill_stream_properties_from_drm_display_mode(stream,
5206 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5207 	else
5208 		fill_stream_properties_from_drm_display_mode(stream,
5209 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5210 
5211 	stream->timing.flags.DSC = 0;
5212 
5213 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5214 #if defined(CONFIG_DRM_AMD_DC_DCN)
5215 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5216 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5217 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5218 				      &dsc_caps);
5219 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5220 							     dc_link_get_link_cap(aconnector->dc_link));
5221 
5222 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5223 			/* Set DSC policy according to dsc_clock_en */
5224 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5225 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5226 
5227 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5228 						  &dsc_caps,
5229 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5230 						  0,
5231 						  link_bandwidth_kbps,
5232 						  &stream->timing,
5233 						  &stream->timing.dsc_cfg))
5234 				stream->timing.flags.DSC = 1;
5235 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5236 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5237 				stream->timing.flags.DSC = 1;
5238 
5239 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5240 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5241 
5242 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5243 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5244 
5245 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5246 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5247 		}
5248 #endif
5249 	}
5250 
5251 	update_stream_scaling_settings(&mode, dm_state, stream);
5252 
5253 	fill_audio_info(
5254 		&stream->audio_info,
5255 		drm_connector,
5256 		sink);
5257 
5258 	update_stream_signal(stream, sink);
5259 
5260 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5261 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5262 
5263 	if (stream->link->psr_settings.psr_feature_enabled) {
5264 		//
5265 		// should decide stream support vsc sdp colorimetry capability
5266 		// before building vsc info packet
5267 		//
5268 		stream->use_vsc_sdp_for_colorimetry = false;
5269 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5270 			stream->use_vsc_sdp_for_colorimetry =
5271 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5272 		} else {
5273 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5274 				stream->use_vsc_sdp_for_colorimetry = true;
5275 		}
5276 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5277 	}
5278 finish:
5279 	dc_sink_release(sink);
5280 
5281 	return stream;
5282 }
5283 
5284 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5285 {
5286 	drm_crtc_cleanup(crtc);
5287 	kfree(crtc);
5288 }
5289 
5290 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5291 				  struct drm_crtc_state *state)
5292 {
5293 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5294 
5295 	/* TODO Destroy dc_stream objects are stream object is flattened */
5296 	if (cur->stream)
5297 		dc_stream_release(cur->stream);
5298 
5299 
5300 	__drm_atomic_helper_crtc_destroy_state(state);
5301 
5302 
5303 	kfree(state);
5304 }
5305 
5306 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5307 {
5308 	struct dm_crtc_state *state;
5309 
5310 	if (crtc->state)
5311 		dm_crtc_destroy_state(crtc, crtc->state);
5312 
5313 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5314 	if (WARN_ON(!state))
5315 		return;
5316 
5317 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5318 }
5319 
5320 static struct drm_crtc_state *
5321 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5322 {
5323 	struct dm_crtc_state *state, *cur;
5324 
5325 	cur = to_dm_crtc_state(crtc->state);
5326 
5327 	if (WARN_ON(!crtc->state))
5328 		return NULL;
5329 
5330 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5331 	if (!state)
5332 		return NULL;
5333 
5334 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5335 
5336 	if (cur->stream) {
5337 		state->stream = cur->stream;
5338 		dc_stream_retain(state->stream);
5339 	}
5340 
5341 	state->active_planes = cur->active_planes;
5342 	state->vrr_infopacket = cur->vrr_infopacket;
5343 	state->abm_level = cur->abm_level;
5344 	state->vrr_supported = cur->vrr_supported;
5345 	state->freesync_config = cur->freesync_config;
5346 	state->crc_src = cur->crc_src;
5347 	state->cm_has_degamma = cur->cm_has_degamma;
5348 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5349 
5350 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5351 
5352 	return &state->base;
5353 }
5354 
5355 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5356 {
5357 	enum dc_irq_source irq_source;
5358 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5359 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5360 	int rc;
5361 
5362 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5363 
5364 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5365 
5366 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5367 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5368 	return rc;
5369 }
5370 
5371 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5372 {
5373 	enum dc_irq_source irq_source;
5374 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5375 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5376 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5377 #if defined(CONFIG_DRM_AMD_DC_DCN)
5378 	struct amdgpu_display_manager *dm = &adev->dm;
5379 #endif
5380 	int rc = 0;
5381 
5382 	if (enable) {
5383 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5384 		if (amdgpu_dm_vrr_active(acrtc_state))
5385 			rc = dm_set_vupdate_irq(crtc, true);
5386 	} else {
5387 		/* vblank irq off -> vupdate irq off */
5388 		rc = dm_set_vupdate_irq(crtc, false);
5389 	}
5390 
5391 	if (rc)
5392 		return rc;
5393 
5394 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5395 
5396 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5397 		return -EBUSY;
5398 
5399 #if defined(CONFIG_DRM_AMD_DC_DCN)
5400 	if (amdgpu_in_reset(adev))
5401 		return 0;
5402 
5403 	mutex_lock(&dm->dc_lock);
5404 
5405 	if (enable)
5406 		dm->active_vblank_irq_count++;
5407 	else
5408 		dm->active_vblank_irq_count--;
5409 
5410 #if defined(CONFIG_DRM_AMD_DC_DCN)
5411 	dc_allow_idle_optimizations(
5412 		adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
5413 
5414 	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
5415 #endif
5416 
5417 	mutex_unlock(&dm->dc_lock);
5418 
5419 #endif
5420 	return 0;
5421 }
5422 
5423 static int dm_enable_vblank(struct drm_crtc *crtc)
5424 {
5425 	return dm_set_vblank(crtc, true);
5426 }
5427 
5428 static void dm_disable_vblank(struct drm_crtc *crtc)
5429 {
5430 	dm_set_vblank(crtc, false);
5431 }
5432 
5433 /* Implemented only the options currently availible for the driver */
5434 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5435 	.reset = dm_crtc_reset_state,
5436 	.destroy = amdgpu_dm_crtc_destroy,
5437 	.set_config = drm_atomic_helper_set_config,
5438 	.page_flip = drm_atomic_helper_page_flip,
5439 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5440 	.atomic_destroy_state = dm_crtc_destroy_state,
5441 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5442 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5443 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5444 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5445 	.enable_vblank = dm_enable_vblank,
5446 	.disable_vblank = dm_disable_vblank,
5447 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5448 };
5449 
5450 static enum drm_connector_status
5451 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5452 {
5453 	bool connected;
5454 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5455 
5456 	/*
5457 	 * Notes:
5458 	 * 1. This interface is NOT called in context of HPD irq.
5459 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5460 	 * makes it a bad place for *any* MST-related activity.
5461 	 */
5462 
5463 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5464 	    !aconnector->fake_enable)
5465 		connected = (aconnector->dc_sink != NULL);
5466 	else
5467 		connected = (aconnector->base.force == DRM_FORCE_ON);
5468 
5469 	update_subconnector_property(aconnector);
5470 
5471 	return (connected ? connector_status_connected :
5472 			connector_status_disconnected);
5473 }
5474 
5475 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5476 					    struct drm_connector_state *connector_state,
5477 					    struct drm_property *property,
5478 					    uint64_t val)
5479 {
5480 	struct drm_device *dev = connector->dev;
5481 	struct amdgpu_device *adev = drm_to_adev(dev);
5482 	struct dm_connector_state *dm_old_state =
5483 		to_dm_connector_state(connector->state);
5484 	struct dm_connector_state *dm_new_state =
5485 		to_dm_connector_state(connector_state);
5486 
5487 	int ret = -EINVAL;
5488 
5489 	if (property == dev->mode_config.scaling_mode_property) {
5490 		enum amdgpu_rmx_type rmx_type;
5491 
5492 		switch (val) {
5493 		case DRM_MODE_SCALE_CENTER:
5494 			rmx_type = RMX_CENTER;
5495 			break;
5496 		case DRM_MODE_SCALE_ASPECT:
5497 			rmx_type = RMX_ASPECT;
5498 			break;
5499 		case DRM_MODE_SCALE_FULLSCREEN:
5500 			rmx_type = RMX_FULL;
5501 			break;
5502 		case DRM_MODE_SCALE_NONE:
5503 		default:
5504 			rmx_type = RMX_OFF;
5505 			break;
5506 		}
5507 
5508 		if (dm_old_state->scaling == rmx_type)
5509 			return 0;
5510 
5511 		dm_new_state->scaling = rmx_type;
5512 		ret = 0;
5513 	} else if (property == adev->mode_info.underscan_hborder_property) {
5514 		dm_new_state->underscan_hborder = val;
5515 		ret = 0;
5516 	} else if (property == adev->mode_info.underscan_vborder_property) {
5517 		dm_new_state->underscan_vborder = val;
5518 		ret = 0;
5519 	} else if (property == adev->mode_info.underscan_property) {
5520 		dm_new_state->underscan_enable = val;
5521 		ret = 0;
5522 	} else if (property == adev->mode_info.abm_level_property) {
5523 		dm_new_state->abm_level = val;
5524 		ret = 0;
5525 	}
5526 
5527 	return ret;
5528 }
5529 
5530 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5531 					    const struct drm_connector_state *state,
5532 					    struct drm_property *property,
5533 					    uint64_t *val)
5534 {
5535 	struct drm_device *dev = connector->dev;
5536 	struct amdgpu_device *adev = drm_to_adev(dev);
5537 	struct dm_connector_state *dm_state =
5538 		to_dm_connector_state(state);
5539 	int ret = -EINVAL;
5540 
5541 	if (property == dev->mode_config.scaling_mode_property) {
5542 		switch (dm_state->scaling) {
5543 		case RMX_CENTER:
5544 			*val = DRM_MODE_SCALE_CENTER;
5545 			break;
5546 		case RMX_ASPECT:
5547 			*val = DRM_MODE_SCALE_ASPECT;
5548 			break;
5549 		case RMX_FULL:
5550 			*val = DRM_MODE_SCALE_FULLSCREEN;
5551 			break;
5552 		case RMX_OFF:
5553 		default:
5554 			*val = DRM_MODE_SCALE_NONE;
5555 			break;
5556 		}
5557 		ret = 0;
5558 	} else if (property == adev->mode_info.underscan_hborder_property) {
5559 		*val = dm_state->underscan_hborder;
5560 		ret = 0;
5561 	} else if (property == adev->mode_info.underscan_vborder_property) {
5562 		*val = dm_state->underscan_vborder;
5563 		ret = 0;
5564 	} else if (property == adev->mode_info.underscan_property) {
5565 		*val = dm_state->underscan_enable;
5566 		ret = 0;
5567 	} else if (property == adev->mode_info.abm_level_property) {
5568 		*val = dm_state->abm_level;
5569 		ret = 0;
5570 	}
5571 
5572 	return ret;
5573 }
5574 
5575 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5576 {
5577 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5578 
5579 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5580 }
5581 
5582 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5583 {
5584 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5585 	const struct dc_link *link = aconnector->dc_link;
5586 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5587 	struct amdgpu_display_manager *dm = &adev->dm;
5588 
5589 	/*
5590 	 * Call only if mst_mgr was iniitalized before since it's not done
5591 	 * for all connector types.
5592 	 */
5593 	if (aconnector->mst_mgr.dev)
5594 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5595 
5596 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5597 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5598 
5599 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5600 	    link->type != dc_connection_none &&
5601 	    dm->backlight_dev) {
5602 		backlight_device_unregister(dm->backlight_dev);
5603 		dm->backlight_dev = NULL;
5604 	}
5605 #endif
5606 
5607 	if (aconnector->dc_em_sink)
5608 		dc_sink_release(aconnector->dc_em_sink);
5609 	aconnector->dc_em_sink = NULL;
5610 	if (aconnector->dc_sink)
5611 		dc_sink_release(aconnector->dc_sink);
5612 	aconnector->dc_sink = NULL;
5613 
5614 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5615 	drm_connector_unregister(connector);
5616 	drm_connector_cleanup(connector);
5617 	if (aconnector->i2c) {
5618 		i2c_del_adapter(&aconnector->i2c->base);
5619 		kfree(aconnector->i2c);
5620 	}
5621 	kfree(aconnector->dm_dp_aux.aux.name);
5622 
5623 	kfree(connector);
5624 }
5625 
5626 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5627 {
5628 	struct dm_connector_state *state =
5629 		to_dm_connector_state(connector->state);
5630 
5631 	if (connector->state)
5632 		__drm_atomic_helper_connector_destroy_state(connector->state);
5633 
5634 	kfree(state);
5635 
5636 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5637 
5638 	if (state) {
5639 		state->scaling = RMX_OFF;
5640 		state->underscan_enable = false;
5641 		state->underscan_hborder = 0;
5642 		state->underscan_vborder = 0;
5643 		state->base.max_requested_bpc = 8;
5644 		state->vcpi_slots = 0;
5645 		state->pbn = 0;
5646 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5647 			state->abm_level = amdgpu_dm_abm_level;
5648 
5649 		__drm_atomic_helper_connector_reset(connector, &state->base);
5650 	}
5651 }
5652 
5653 struct drm_connector_state *
5654 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5655 {
5656 	struct dm_connector_state *state =
5657 		to_dm_connector_state(connector->state);
5658 
5659 	struct dm_connector_state *new_state =
5660 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5661 
5662 	if (!new_state)
5663 		return NULL;
5664 
5665 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5666 
5667 	new_state->freesync_capable = state->freesync_capable;
5668 	new_state->abm_level = state->abm_level;
5669 	new_state->scaling = state->scaling;
5670 	new_state->underscan_enable = state->underscan_enable;
5671 	new_state->underscan_hborder = state->underscan_hborder;
5672 	new_state->underscan_vborder = state->underscan_vborder;
5673 	new_state->vcpi_slots = state->vcpi_slots;
5674 	new_state->pbn = state->pbn;
5675 	return &new_state->base;
5676 }
5677 
5678 static int
5679 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5680 {
5681 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5682 		to_amdgpu_dm_connector(connector);
5683 	int r;
5684 
5685 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5686 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5687 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5688 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5689 		if (r)
5690 			return r;
5691 	}
5692 
5693 #if defined(CONFIG_DEBUG_FS)
5694 	connector_debugfs_init(amdgpu_dm_connector);
5695 #endif
5696 
5697 	return 0;
5698 }
5699 
5700 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5701 	.reset = amdgpu_dm_connector_funcs_reset,
5702 	.detect = amdgpu_dm_connector_detect,
5703 	.fill_modes = drm_helper_probe_single_connector_modes,
5704 	.destroy = amdgpu_dm_connector_destroy,
5705 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5706 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5707 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5708 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5709 	.late_register = amdgpu_dm_connector_late_register,
5710 	.early_unregister = amdgpu_dm_connector_unregister
5711 };
5712 
5713 static int get_modes(struct drm_connector *connector)
5714 {
5715 	return amdgpu_dm_connector_get_modes(connector);
5716 }
5717 
5718 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5719 {
5720 	struct dc_sink_init_data init_params = {
5721 			.link = aconnector->dc_link,
5722 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5723 	};
5724 	struct edid *edid;
5725 
5726 	if (!aconnector->base.edid_blob_ptr) {
5727 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5728 				aconnector->base.name);
5729 
5730 		aconnector->base.force = DRM_FORCE_OFF;
5731 		aconnector->base.override_edid = false;
5732 		return;
5733 	}
5734 
5735 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5736 
5737 	aconnector->edid = edid;
5738 
5739 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5740 		aconnector->dc_link,
5741 		(uint8_t *)edid,
5742 		(edid->extensions + 1) * EDID_LENGTH,
5743 		&init_params);
5744 
5745 	if (aconnector->base.force == DRM_FORCE_ON) {
5746 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5747 		aconnector->dc_link->local_sink :
5748 		aconnector->dc_em_sink;
5749 		dc_sink_retain(aconnector->dc_sink);
5750 	}
5751 }
5752 
5753 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5754 {
5755 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5756 
5757 	/*
5758 	 * In case of headless boot with force on for DP managed connector
5759 	 * Those settings have to be != 0 to get initial modeset
5760 	 */
5761 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5762 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5763 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5764 	}
5765 
5766 
5767 	aconnector->base.override_edid = true;
5768 	create_eml_sink(aconnector);
5769 }
5770 
5771 static struct dc_stream_state *
5772 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5773 				const struct drm_display_mode *drm_mode,
5774 				const struct dm_connector_state *dm_state,
5775 				const struct dc_stream_state *old_stream)
5776 {
5777 	struct drm_connector *connector = &aconnector->base;
5778 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5779 	struct dc_stream_state *stream;
5780 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5781 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5782 	enum dc_status dc_result = DC_OK;
5783 
5784 	do {
5785 		stream = create_stream_for_sink(aconnector, drm_mode,
5786 						dm_state, old_stream,
5787 						requested_bpc);
5788 		if (stream == NULL) {
5789 			DRM_ERROR("Failed to create stream for sink!\n");
5790 			break;
5791 		}
5792 
5793 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5794 
5795 		if (dc_result != DC_OK) {
5796 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5797 				      drm_mode->hdisplay,
5798 				      drm_mode->vdisplay,
5799 				      drm_mode->clock,
5800 				      dc_result,
5801 				      dc_status_to_str(dc_result));
5802 
5803 			dc_stream_release(stream);
5804 			stream = NULL;
5805 			requested_bpc -= 2; /* lower bpc to retry validation */
5806 		}
5807 
5808 	} while (stream == NULL && requested_bpc >= 6);
5809 
5810 	return stream;
5811 }
5812 
5813 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5814 				   struct drm_display_mode *mode)
5815 {
5816 	int result = MODE_ERROR;
5817 	struct dc_sink *dc_sink;
5818 	/* TODO: Unhardcode stream count */
5819 	struct dc_stream_state *stream;
5820 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5821 
5822 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5823 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5824 		return result;
5825 
5826 	/*
5827 	 * Only run this the first time mode_valid is called to initilialize
5828 	 * EDID mgmt
5829 	 */
5830 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5831 		!aconnector->dc_em_sink)
5832 		handle_edid_mgmt(aconnector);
5833 
5834 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5835 
5836 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5837 				aconnector->base.force != DRM_FORCE_ON) {
5838 		DRM_ERROR("dc_sink is NULL!\n");
5839 		goto fail;
5840 	}
5841 
5842 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5843 	if (stream) {
5844 		dc_stream_release(stream);
5845 		result = MODE_OK;
5846 	}
5847 
5848 fail:
5849 	/* TODO: error handling*/
5850 	return result;
5851 }
5852 
5853 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5854 				struct dc_info_packet *out)
5855 {
5856 	struct hdmi_drm_infoframe frame;
5857 	unsigned char buf[30]; /* 26 + 4 */
5858 	ssize_t len;
5859 	int ret, i;
5860 
5861 	memset(out, 0, sizeof(*out));
5862 
5863 	if (!state->hdr_output_metadata)
5864 		return 0;
5865 
5866 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5867 	if (ret)
5868 		return ret;
5869 
5870 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5871 	if (len < 0)
5872 		return (int)len;
5873 
5874 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5875 	if (len != 30)
5876 		return -EINVAL;
5877 
5878 	/* Prepare the infopacket for DC. */
5879 	switch (state->connector->connector_type) {
5880 	case DRM_MODE_CONNECTOR_HDMIA:
5881 		out->hb0 = 0x87; /* type */
5882 		out->hb1 = 0x01; /* version */
5883 		out->hb2 = 0x1A; /* length */
5884 		out->sb[0] = buf[3]; /* checksum */
5885 		i = 1;
5886 		break;
5887 
5888 	case DRM_MODE_CONNECTOR_DisplayPort:
5889 	case DRM_MODE_CONNECTOR_eDP:
5890 		out->hb0 = 0x00; /* sdp id, zero */
5891 		out->hb1 = 0x87; /* type */
5892 		out->hb2 = 0x1D; /* payload len - 1 */
5893 		out->hb3 = (0x13 << 2); /* sdp version */
5894 		out->sb[0] = 0x01; /* version */
5895 		out->sb[1] = 0x1A; /* length */
5896 		i = 2;
5897 		break;
5898 
5899 	default:
5900 		return -EINVAL;
5901 	}
5902 
5903 	memcpy(&out->sb[i], &buf[4], 26);
5904 	out->valid = true;
5905 
5906 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5907 		       sizeof(out->sb), false);
5908 
5909 	return 0;
5910 }
5911 
5912 static bool
5913 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5914 			  const struct drm_connector_state *new_state)
5915 {
5916 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5917 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5918 
5919 	if (old_blob != new_blob) {
5920 		if (old_blob && new_blob &&
5921 		    old_blob->length == new_blob->length)
5922 			return memcmp(old_blob->data, new_blob->data,
5923 				      old_blob->length);
5924 
5925 		return true;
5926 	}
5927 
5928 	return false;
5929 }
5930 
5931 static int
5932 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5933 				 struct drm_atomic_state *state)
5934 {
5935 	struct drm_connector_state *new_con_state =
5936 		drm_atomic_get_new_connector_state(state, conn);
5937 	struct drm_connector_state *old_con_state =
5938 		drm_atomic_get_old_connector_state(state, conn);
5939 	struct drm_crtc *crtc = new_con_state->crtc;
5940 	struct drm_crtc_state *new_crtc_state;
5941 	int ret;
5942 
5943 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5944 
5945 	if (!crtc)
5946 		return 0;
5947 
5948 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5949 		struct dc_info_packet hdr_infopacket;
5950 
5951 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5952 		if (ret)
5953 			return ret;
5954 
5955 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5956 		if (IS_ERR(new_crtc_state))
5957 			return PTR_ERR(new_crtc_state);
5958 
5959 		/*
5960 		 * DC considers the stream backends changed if the
5961 		 * static metadata changes. Forcing the modeset also
5962 		 * gives a simple way for userspace to switch from
5963 		 * 8bpc to 10bpc when setting the metadata to enter
5964 		 * or exit HDR.
5965 		 *
5966 		 * Changing the static metadata after it's been
5967 		 * set is permissible, however. So only force a
5968 		 * modeset if we're entering or exiting HDR.
5969 		 */
5970 		new_crtc_state->mode_changed =
5971 			!old_con_state->hdr_output_metadata ||
5972 			!new_con_state->hdr_output_metadata;
5973 	}
5974 
5975 	return 0;
5976 }
5977 
5978 static const struct drm_connector_helper_funcs
5979 amdgpu_dm_connector_helper_funcs = {
5980 	/*
5981 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5982 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5983 	 * are missing after user start lightdm. So we need to renew modes list.
5984 	 * in get_modes call back, not just return the modes count
5985 	 */
5986 	.get_modes = get_modes,
5987 	.mode_valid = amdgpu_dm_connector_mode_valid,
5988 	.atomic_check = amdgpu_dm_connector_atomic_check,
5989 };
5990 
5991 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5992 {
5993 }
5994 
5995 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5996 {
5997 	struct drm_atomic_state *state = new_crtc_state->state;
5998 	struct drm_plane *plane;
5999 	int num_active = 0;
6000 
6001 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6002 		struct drm_plane_state *new_plane_state;
6003 
6004 		/* Cursor planes are "fake". */
6005 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6006 			continue;
6007 
6008 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6009 
6010 		if (!new_plane_state) {
6011 			/*
6012 			 * The plane is enable on the CRTC and hasn't changed
6013 			 * state. This means that it previously passed
6014 			 * validation and is therefore enabled.
6015 			 */
6016 			num_active += 1;
6017 			continue;
6018 		}
6019 
6020 		/* We need a framebuffer to be considered enabled. */
6021 		num_active += (new_plane_state->fb != NULL);
6022 	}
6023 
6024 	return num_active;
6025 }
6026 
6027 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6028 					 struct drm_crtc_state *new_crtc_state)
6029 {
6030 	struct dm_crtc_state *dm_new_crtc_state =
6031 		to_dm_crtc_state(new_crtc_state);
6032 
6033 	dm_new_crtc_state->active_planes = 0;
6034 
6035 	if (!dm_new_crtc_state->stream)
6036 		return;
6037 
6038 	dm_new_crtc_state->active_planes =
6039 		count_crtc_active_planes(new_crtc_state);
6040 }
6041 
6042 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6043 				       struct drm_atomic_state *state)
6044 {
6045 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6046 									  crtc);
6047 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6048 	struct dc *dc = adev->dm.dc;
6049 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6050 	int ret = -EINVAL;
6051 
6052 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6053 
6054 	dm_update_crtc_active_planes(crtc, crtc_state);
6055 
6056 	if (unlikely(!dm_crtc_state->stream &&
6057 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6058 		WARN_ON(1);
6059 		return ret;
6060 	}
6061 
6062 	/*
6063 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6064 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6065 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6066 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6067 	 */
6068 	if (crtc_state->enable &&
6069 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6070 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6071 		return -EINVAL;
6072 	}
6073 
6074 	/* In some use cases, like reset, no stream is attached */
6075 	if (!dm_crtc_state->stream)
6076 		return 0;
6077 
6078 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6079 		return 0;
6080 
6081 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6082 	return ret;
6083 }
6084 
6085 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6086 				      const struct drm_display_mode *mode,
6087 				      struct drm_display_mode *adjusted_mode)
6088 {
6089 	return true;
6090 }
6091 
6092 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6093 	.disable = dm_crtc_helper_disable,
6094 	.atomic_check = dm_crtc_helper_atomic_check,
6095 	.mode_fixup = dm_crtc_helper_mode_fixup,
6096 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6097 };
6098 
6099 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6100 {
6101 
6102 }
6103 
6104 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6105 {
6106 	switch (display_color_depth) {
6107 		case COLOR_DEPTH_666:
6108 			return 6;
6109 		case COLOR_DEPTH_888:
6110 			return 8;
6111 		case COLOR_DEPTH_101010:
6112 			return 10;
6113 		case COLOR_DEPTH_121212:
6114 			return 12;
6115 		case COLOR_DEPTH_141414:
6116 			return 14;
6117 		case COLOR_DEPTH_161616:
6118 			return 16;
6119 		default:
6120 			break;
6121 		}
6122 	return 0;
6123 }
6124 
6125 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6126 					  struct drm_crtc_state *crtc_state,
6127 					  struct drm_connector_state *conn_state)
6128 {
6129 	struct drm_atomic_state *state = crtc_state->state;
6130 	struct drm_connector *connector = conn_state->connector;
6131 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6132 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6133 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6134 	struct drm_dp_mst_topology_mgr *mst_mgr;
6135 	struct drm_dp_mst_port *mst_port;
6136 	enum dc_color_depth color_depth;
6137 	int clock, bpp = 0;
6138 	bool is_y420 = false;
6139 
6140 	if (!aconnector->port || !aconnector->dc_sink)
6141 		return 0;
6142 
6143 	mst_port = aconnector->port;
6144 	mst_mgr = &aconnector->mst_port->mst_mgr;
6145 
6146 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6147 		return 0;
6148 
6149 	if (!state->duplicated) {
6150 		int max_bpc = conn_state->max_requested_bpc;
6151 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6152 				aconnector->force_yuv420_output;
6153 		color_depth = convert_color_depth_from_display_info(connector,
6154 								    is_y420,
6155 								    max_bpc);
6156 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6157 		clock = adjusted_mode->clock;
6158 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6159 	}
6160 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6161 									   mst_mgr,
6162 									   mst_port,
6163 									   dm_new_connector_state->pbn,
6164 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6165 	if (dm_new_connector_state->vcpi_slots < 0) {
6166 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6167 		return dm_new_connector_state->vcpi_slots;
6168 	}
6169 	return 0;
6170 }
6171 
6172 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6173 	.disable = dm_encoder_helper_disable,
6174 	.atomic_check = dm_encoder_helper_atomic_check
6175 };
6176 
6177 #if defined(CONFIG_DRM_AMD_DC_DCN)
6178 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6179 					    struct dc_state *dc_state)
6180 {
6181 	struct dc_stream_state *stream = NULL;
6182 	struct drm_connector *connector;
6183 	struct drm_connector_state *new_con_state, *old_con_state;
6184 	struct amdgpu_dm_connector *aconnector;
6185 	struct dm_connector_state *dm_conn_state;
6186 	int i, j, clock, bpp;
6187 	int vcpi, pbn_div, pbn = 0;
6188 
6189 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6190 
6191 		aconnector = to_amdgpu_dm_connector(connector);
6192 
6193 		if (!aconnector->port)
6194 			continue;
6195 
6196 		if (!new_con_state || !new_con_state->crtc)
6197 			continue;
6198 
6199 		dm_conn_state = to_dm_connector_state(new_con_state);
6200 
6201 		for (j = 0; j < dc_state->stream_count; j++) {
6202 			stream = dc_state->streams[j];
6203 			if (!stream)
6204 				continue;
6205 
6206 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6207 				break;
6208 
6209 			stream = NULL;
6210 		}
6211 
6212 		if (!stream)
6213 			continue;
6214 
6215 		if (stream->timing.flags.DSC != 1) {
6216 			drm_dp_mst_atomic_enable_dsc(state,
6217 						     aconnector->port,
6218 						     dm_conn_state->pbn,
6219 						     0,
6220 						     false);
6221 			continue;
6222 		}
6223 
6224 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6225 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6226 		clock = stream->timing.pix_clk_100hz / 10;
6227 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6228 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6229 						    aconnector->port,
6230 						    pbn, pbn_div,
6231 						    true);
6232 		if (vcpi < 0)
6233 			return vcpi;
6234 
6235 		dm_conn_state->pbn = pbn;
6236 		dm_conn_state->vcpi_slots = vcpi;
6237 	}
6238 	return 0;
6239 }
6240 #endif
6241 
6242 static void dm_drm_plane_reset(struct drm_plane *plane)
6243 {
6244 	struct dm_plane_state *amdgpu_state = NULL;
6245 
6246 	if (plane->state)
6247 		plane->funcs->atomic_destroy_state(plane, plane->state);
6248 
6249 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6250 	WARN_ON(amdgpu_state == NULL);
6251 
6252 	if (amdgpu_state)
6253 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6254 }
6255 
6256 static struct drm_plane_state *
6257 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6258 {
6259 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6260 
6261 	old_dm_plane_state = to_dm_plane_state(plane->state);
6262 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6263 	if (!dm_plane_state)
6264 		return NULL;
6265 
6266 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6267 
6268 	if (old_dm_plane_state->dc_state) {
6269 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6270 		dc_plane_state_retain(dm_plane_state->dc_state);
6271 	}
6272 
6273 	return &dm_plane_state->base;
6274 }
6275 
6276 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6277 				struct drm_plane_state *state)
6278 {
6279 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6280 
6281 	if (dm_plane_state->dc_state)
6282 		dc_plane_state_release(dm_plane_state->dc_state);
6283 
6284 	drm_atomic_helper_plane_destroy_state(plane, state);
6285 }
6286 
6287 static const struct drm_plane_funcs dm_plane_funcs = {
6288 	.update_plane	= drm_atomic_helper_update_plane,
6289 	.disable_plane	= drm_atomic_helper_disable_plane,
6290 	.destroy	= drm_primary_helper_destroy,
6291 	.reset = dm_drm_plane_reset,
6292 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6293 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6294 	.format_mod_supported = dm_plane_format_mod_supported,
6295 };
6296 
6297 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6298 				      struct drm_plane_state *new_state)
6299 {
6300 	struct amdgpu_framebuffer *afb;
6301 	struct drm_gem_object *obj;
6302 	struct amdgpu_device *adev;
6303 	struct amdgpu_bo *rbo;
6304 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6305 	struct list_head list;
6306 	struct ttm_validate_buffer tv;
6307 	struct ww_acquire_ctx ticket;
6308 	uint32_t domain;
6309 	int r;
6310 
6311 	if (!new_state->fb) {
6312 		DRM_DEBUG_DRIVER("No FB bound\n");
6313 		return 0;
6314 	}
6315 
6316 	afb = to_amdgpu_framebuffer(new_state->fb);
6317 	obj = new_state->fb->obj[0];
6318 	rbo = gem_to_amdgpu_bo(obj);
6319 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6320 	INIT_LIST_HEAD(&list);
6321 
6322 	tv.bo = &rbo->tbo;
6323 	tv.num_shared = 1;
6324 	list_add(&tv.head, &list);
6325 
6326 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6327 	if (r) {
6328 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6329 		return r;
6330 	}
6331 
6332 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6333 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6334 	else
6335 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6336 
6337 	r = amdgpu_bo_pin(rbo, domain);
6338 	if (unlikely(r != 0)) {
6339 		if (r != -ERESTARTSYS)
6340 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6341 		ttm_eu_backoff_reservation(&ticket, &list);
6342 		return r;
6343 	}
6344 
6345 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6346 	if (unlikely(r != 0)) {
6347 		amdgpu_bo_unpin(rbo);
6348 		ttm_eu_backoff_reservation(&ticket, &list);
6349 		DRM_ERROR("%p bind failed\n", rbo);
6350 		return r;
6351 	}
6352 
6353 	ttm_eu_backoff_reservation(&ticket, &list);
6354 
6355 	afb->address = amdgpu_bo_gpu_offset(rbo);
6356 
6357 	amdgpu_bo_ref(rbo);
6358 
6359 	/**
6360 	 * We don't do surface updates on planes that have been newly created,
6361 	 * but we also don't have the afb->address during atomic check.
6362 	 *
6363 	 * Fill in buffer attributes depending on the address here, but only on
6364 	 * newly created planes since they're not being used by DC yet and this
6365 	 * won't modify global state.
6366 	 */
6367 	dm_plane_state_old = to_dm_plane_state(plane->state);
6368 	dm_plane_state_new = to_dm_plane_state(new_state);
6369 
6370 	if (dm_plane_state_new->dc_state &&
6371 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6372 		struct dc_plane_state *plane_state =
6373 			dm_plane_state_new->dc_state;
6374 		bool force_disable_dcc = !plane_state->dcc.enable;
6375 
6376 		fill_plane_buffer_attributes(
6377 			adev, afb, plane_state->format, plane_state->rotation,
6378 			afb->tiling_flags,
6379 			&plane_state->tiling_info, &plane_state->plane_size,
6380 			&plane_state->dcc, &plane_state->address,
6381 			afb->tmz_surface, force_disable_dcc);
6382 	}
6383 
6384 	return 0;
6385 }
6386 
6387 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6388 				       struct drm_plane_state *old_state)
6389 {
6390 	struct amdgpu_bo *rbo;
6391 	int r;
6392 
6393 	if (!old_state->fb)
6394 		return;
6395 
6396 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6397 	r = amdgpu_bo_reserve(rbo, false);
6398 	if (unlikely(r)) {
6399 		DRM_ERROR("failed to reserve rbo before unpin\n");
6400 		return;
6401 	}
6402 
6403 	amdgpu_bo_unpin(rbo);
6404 	amdgpu_bo_unreserve(rbo);
6405 	amdgpu_bo_unref(&rbo);
6406 }
6407 
6408 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6409 				       struct drm_crtc_state *new_crtc_state)
6410 {
6411 	struct drm_framebuffer *fb = state->fb;
6412 	int min_downscale, max_upscale;
6413 	int min_scale = 0;
6414 	int max_scale = INT_MAX;
6415 
6416 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6417 	if (fb && state->crtc) {
6418 		/* Validate viewport to cover the case when only the position changes */
6419 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6420 			int viewport_width = state->crtc_w;
6421 			int viewport_height = state->crtc_h;
6422 
6423 			if (state->crtc_x < 0)
6424 				viewport_width += state->crtc_x;
6425 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6426 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6427 
6428 			if (state->crtc_y < 0)
6429 				viewport_height += state->crtc_y;
6430 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6431 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6432 
6433 			/* If completely outside of screen, viewport_width and/or viewport_height will be negative,
6434 			 * which is still OK to satisfy the condition below, thereby also covering these cases
6435 			 * (when plane is completely outside of screen).
6436 			 * x2 for width is because of pipe-split.
6437 			 */
6438 			if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6439 				return -EINVAL;
6440 		}
6441 
6442 		/* Get min/max allowed scaling factors from plane caps. */
6443 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6444 					     &min_downscale, &max_upscale);
6445 		/*
6446 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6447 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6448 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6449 		 */
6450 		min_scale = (1000 << 16) / max_upscale;
6451 		max_scale = (1000 << 16) / min_downscale;
6452 	}
6453 
6454 	return drm_atomic_helper_check_plane_state(
6455 		state, new_crtc_state, min_scale, max_scale, true, true);
6456 }
6457 
6458 static int dm_plane_atomic_check(struct drm_plane *plane,
6459 				 struct drm_plane_state *state)
6460 {
6461 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6462 	struct dc *dc = adev->dm.dc;
6463 	struct dm_plane_state *dm_plane_state;
6464 	struct dc_scaling_info scaling_info;
6465 	struct drm_crtc_state *new_crtc_state;
6466 	int ret;
6467 
6468 	trace_amdgpu_dm_plane_atomic_check(state);
6469 
6470 	dm_plane_state = to_dm_plane_state(state);
6471 
6472 	if (!dm_plane_state->dc_state)
6473 		return 0;
6474 
6475 	new_crtc_state =
6476 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6477 	if (!new_crtc_state)
6478 		return -EINVAL;
6479 
6480 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6481 	if (ret)
6482 		return ret;
6483 
6484 	ret = fill_dc_scaling_info(state, &scaling_info);
6485 	if (ret)
6486 		return ret;
6487 
6488 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6489 		return 0;
6490 
6491 	return -EINVAL;
6492 }
6493 
6494 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6495 				       struct drm_plane_state *new_plane_state)
6496 {
6497 	/* Only support async updates on cursor planes. */
6498 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6499 		return -EINVAL;
6500 
6501 	return 0;
6502 }
6503 
6504 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6505 					 struct drm_plane_state *new_state)
6506 {
6507 	struct drm_plane_state *old_state =
6508 		drm_atomic_get_old_plane_state(new_state->state, plane);
6509 
6510 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6511 
6512 	swap(plane->state->fb, new_state->fb);
6513 
6514 	plane->state->src_x = new_state->src_x;
6515 	plane->state->src_y = new_state->src_y;
6516 	plane->state->src_w = new_state->src_w;
6517 	plane->state->src_h = new_state->src_h;
6518 	plane->state->crtc_x = new_state->crtc_x;
6519 	plane->state->crtc_y = new_state->crtc_y;
6520 	plane->state->crtc_w = new_state->crtc_w;
6521 	plane->state->crtc_h = new_state->crtc_h;
6522 
6523 	handle_cursor_update(plane, old_state);
6524 }
6525 
6526 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6527 	.prepare_fb = dm_plane_helper_prepare_fb,
6528 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6529 	.atomic_check = dm_plane_atomic_check,
6530 	.atomic_async_check = dm_plane_atomic_async_check,
6531 	.atomic_async_update = dm_plane_atomic_async_update
6532 };
6533 
6534 /*
6535  * TODO: these are currently initialized to rgb formats only.
6536  * For future use cases we should either initialize them dynamically based on
6537  * plane capabilities, or initialize this array to all formats, so internal drm
6538  * check will succeed, and let DC implement proper check
6539  */
6540 static const uint32_t rgb_formats[] = {
6541 	DRM_FORMAT_XRGB8888,
6542 	DRM_FORMAT_ARGB8888,
6543 	DRM_FORMAT_RGBA8888,
6544 	DRM_FORMAT_XRGB2101010,
6545 	DRM_FORMAT_XBGR2101010,
6546 	DRM_FORMAT_ARGB2101010,
6547 	DRM_FORMAT_ABGR2101010,
6548 	DRM_FORMAT_XBGR8888,
6549 	DRM_FORMAT_ABGR8888,
6550 	DRM_FORMAT_RGB565,
6551 };
6552 
6553 static const uint32_t overlay_formats[] = {
6554 	DRM_FORMAT_XRGB8888,
6555 	DRM_FORMAT_ARGB8888,
6556 	DRM_FORMAT_RGBA8888,
6557 	DRM_FORMAT_XBGR8888,
6558 	DRM_FORMAT_ABGR8888,
6559 	DRM_FORMAT_RGB565
6560 };
6561 
6562 static const u32 cursor_formats[] = {
6563 	DRM_FORMAT_ARGB8888
6564 };
6565 
6566 static int get_plane_formats(const struct drm_plane *plane,
6567 			     const struct dc_plane_cap *plane_cap,
6568 			     uint32_t *formats, int max_formats)
6569 {
6570 	int i, num_formats = 0;
6571 
6572 	/*
6573 	 * TODO: Query support for each group of formats directly from
6574 	 * DC plane caps. This will require adding more formats to the
6575 	 * caps list.
6576 	 */
6577 
6578 	switch (plane->type) {
6579 	case DRM_PLANE_TYPE_PRIMARY:
6580 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6581 			if (num_formats >= max_formats)
6582 				break;
6583 
6584 			formats[num_formats++] = rgb_formats[i];
6585 		}
6586 
6587 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6588 			formats[num_formats++] = DRM_FORMAT_NV12;
6589 		if (plane_cap && plane_cap->pixel_format_support.p010)
6590 			formats[num_formats++] = DRM_FORMAT_P010;
6591 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6592 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6593 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6594 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6595 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6596 		}
6597 		break;
6598 
6599 	case DRM_PLANE_TYPE_OVERLAY:
6600 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6601 			if (num_formats >= max_formats)
6602 				break;
6603 
6604 			formats[num_formats++] = overlay_formats[i];
6605 		}
6606 		break;
6607 
6608 	case DRM_PLANE_TYPE_CURSOR:
6609 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6610 			if (num_formats >= max_formats)
6611 				break;
6612 
6613 			formats[num_formats++] = cursor_formats[i];
6614 		}
6615 		break;
6616 	}
6617 
6618 	return num_formats;
6619 }
6620 
6621 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6622 				struct drm_plane *plane,
6623 				unsigned long possible_crtcs,
6624 				const struct dc_plane_cap *plane_cap)
6625 {
6626 	uint32_t formats[32];
6627 	int num_formats;
6628 	int res = -EPERM;
6629 	unsigned int supported_rotations;
6630 	uint64_t *modifiers = NULL;
6631 
6632 	num_formats = get_plane_formats(plane, plane_cap, formats,
6633 					ARRAY_SIZE(formats));
6634 
6635 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6636 	if (res)
6637 		return res;
6638 
6639 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6640 				       &dm_plane_funcs, formats, num_formats,
6641 				       modifiers, plane->type, NULL);
6642 	kfree(modifiers);
6643 	if (res)
6644 		return res;
6645 
6646 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6647 	    plane_cap && plane_cap->per_pixel_alpha) {
6648 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6649 					  BIT(DRM_MODE_BLEND_PREMULTI);
6650 
6651 		drm_plane_create_alpha_property(plane);
6652 		drm_plane_create_blend_mode_property(plane, blend_caps);
6653 	}
6654 
6655 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6656 	    plane_cap &&
6657 	    (plane_cap->pixel_format_support.nv12 ||
6658 	     plane_cap->pixel_format_support.p010)) {
6659 		/* This only affects YUV formats. */
6660 		drm_plane_create_color_properties(
6661 			plane,
6662 			BIT(DRM_COLOR_YCBCR_BT601) |
6663 			BIT(DRM_COLOR_YCBCR_BT709) |
6664 			BIT(DRM_COLOR_YCBCR_BT2020),
6665 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6666 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6667 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6668 	}
6669 
6670 	supported_rotations =
6671 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6672 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6673 
6674 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6675 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6676 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6677 						   supported_rotations);
6678 
6679 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6680 
6681 	/* Create (reset) the plane state */
6682 	if (plane->funcs->reset)
6683 		plane->funcs->reset(plane);
6684 
6685 	return 0;
6686 }
6687 
6688 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6689 			       struct drm_plane *plane,
6690 			       uint32_t crtc_index)
6691 {
6692 	struct amdgpu_crtc *acrtc = NULL;
6693 	struct drm_plane *cursor_plane;
6694 
6695 	int res = -ENOMEM;
6696 
6697 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6698 	if (!cursor_plane)
6699 		goto fail;
6700 
6701 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6702 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6703 
6704 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6705 	if (!acrtc)
6706 		goto fail;
6707 
6708 	res = drm_crtc_init_with_planes(
6709 			dm->ddev,
6710 			&acrtc->base,
6711 			plane,
6712 			cursor_plane,
6713 			&amdgpu_dm_crtc_funcs, NULL);
6714 
6715 	if (res)
6716 		goto fail;
6717 
6718 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6719 
6720 	/* Create (reset) the plane state */
6721 	if (acrtc->base.funcs->reset)
6722 		acrtc->base.funcs->reset(&acrtc->base);
6723 
6724 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6725 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6726 
6727 	acrtc->crtc_id = crtc_index;
6728 	acrtc->base.enabled = false;
6729 	acrtc->otg_inst = -1;
6730 
6731 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6732 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6733 				   true, MAX_COLOR_LUT_ENTRIES);
6734 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6735 
6736 	return 0;
6737 
6738 fail:
6739 	kfree(acrtc);
6740 	kfree(cursor_plane);
6741 	return res;
6742 }
6743 
6744 
6745 static int to_drm_connector_type(enum signal_type st)
6746 {
6747 	switch (st) {
6748 	case SIGNAL_TYPE_HDMI_TYPE_A:
6749 		return DRM_MODE_CONNECTOR_HDMIA;
6750 	case SIGNAL_TYPE_EDP:
6751 		return DRM_MODE_CONNECTOR_eDP;
6752 	case SIGNAL_TYPE_LVDS:
6753 		return DRM_MODE_CONNECTOR_LVDS;
6754 	case SIGNAL_TYPE_RGB:
6755 		return DRM_MODE_CONNECTOR_VGA;
6756 	case SIGNAL_TYPE_DISPLAY_PORT:
6757 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6758 		return DRM_MODE_CONNECTOR_DisplayPort;
6759 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6760 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6761 		return DRM_MODE_CONNECTOR_DVID;
6762 	case SIGNAL_TYPE_VIRTUAL:
6763 		return DRM_MODE_CONNECTOR_VIRTUAL;
6764 
6765 	default:
6766 		return DRM_MODE_CONNECTOR_Unknown;
6767 	}
6768 }
6769 
6770 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6771 {
6772 	struct drm_encoder *encoder;
6773 
6774 	/* There is only one encoder per connector */
6775 	drm_connector_for_each_possible_encoder(connector, encoder)
6776 		return encoder;
6777 
6778 	return NULL;
6779 }
6780 
6781 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6782 {
6783 	struct drm_encoder *encoder;
6784 	struct amdgpu_encoder *amdgpu_encoder;
6785 
6786 	encoder = amdgpu_dm_connector_to_encoder(connector);
6787 
6788 	if (encoder == NULL)
6789 		return;
6790 
6791 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6792 
6793 	amdgpu_encoder->native_mode.clock = 0;
6794 
6795 	if (!list_empty(&connector->probed_modes)) {
6796 		struct drm_display_mode *preferred_mode = NULL;
6797 
6798 		list_for_each_entry(preferred_mode,
6799 				    &connector->probed_modes,
6800 				    head) {
6801 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6802 				amdgpu_encoder->native_mode = *preferred_mode;
6803 
6804 			break;
6805 		}
6806 
6807 	}
6808 }
6809 
6810 static struct drm_display_mode *
6811 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6812 			     char *name,
6813 			     int hdisplay, int vdisplay)
6814 {
6815 	struct drm_device *dev = encoder->dev;
6816 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6817 	struct drm_display_mode *mode = NULL;
6818 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6819 
6820 	mode = drm_mode_duplicate(dev, native_mode);
6821 
6822 	if (mode == NULL)
6823 		return NULL;
6824 
6825 	mode->hdisplay = hdisplay;
6826 	mode->vdisplay = vdisplay;
6827 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6828 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6829 
6830 	return mode;
6831 
6832 }
6833 
6834 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6835 						 struct drm_connector *connector)
6836 {
6837 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6838 	struct drm_display_mode *mode = NULL;
6839 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6840 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6841 				to_amdgpu_dm_connector(connector);
6842 	int i;
6843 	int n;
6844 	struct mode_size {
6845 		char name[DRM_DISPLAY_MODE_LEN];
6846 		int w;
6847 		int h;
6848 	} common_modes[] = {
6849 		{  "640x480",  640,  480},
6850 		{  "800x600",  800,  600},
6851 		{ "1024x768", 1024,  768},
6852 		{ "1280x720", 1280,  720},
6853 		{ "1280x800", 1280,  800},
6854 		{"1280x1024", 1280, 1024},
6855 		{ "1440x900", 1440,  900},
6856 		{"1680x1050", 1680, 1050},
6857 		{"1600x1200", 1600, 1200},
6858 		{"1920x1080", 1920, 1080},
6859 		{"1920x1200", 1920, 1200}
6860 	};
6861 
6862 	n = ARRAY_SIZE(common_modes);
6863 
6864 	for (i = 0; i < n; i++) {
6865 		struct drm_display_mode *curmode = NULL;
6866 		bool mode_existed = false;
6867 
6868 		if (common_modes[i].w > native_mode->hdisplay ||
6869 		    common_modes[i].h > native_mode->vdisplay ||
6870 		   (common_modes[i].w == native_mode->hdisplay &&
6871 		    common_modes[i].h == native_mode->vdisplay))
6872 			continue;
6873 
6874 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6875 			if (common_modes[i].w == curmode->hdisplay &&
6876 			    common_modes[i].h == curmode->vdisplay) {
6877 				mode_existed = true;
6878 				break;
6879 			}
6880 		}
6881 
6882 		if (mode_existed)
6883 			continue;
6884 
6885 		mode = amdgpu_dm_create_common_mode(encoder,
6886 				common_modes[i].name, common_modes[i].w,
6887 				common_modes[i].h);
6888 		drm_mode_probed_add(connector, mode);
6889 		amdgpu_dm_connector->num_modes++;
6890 	}
6891 }
6892 
6893 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6894 					      struct edid *edid)
6895 {
6896 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6897 			to_amdgpu_dm_connector(connector);
6898 
6899 	if (edid) {
6900 		/* empty probed_modes */
6901 		INIT_LIST_HEAD(&connector->probed_modes);
6902 		amdgpu_dm_connector->num_modes =
6903 				drm_add_edid_modes(connector, edid);
6904 
6905 		/* sorting the probed modes before calling function
6906 		 * amdgpu_dm_get_native_mode() since EDID can have
6907 		 * more than one preferred mode. The modes that are
6908 		 * later in the probed mode list could be of higher
6909 		 * and preferred resolution. For example, 3840x2160
6910 		 * resolution in base EDID preferred timing and 4096x2160
6911 		 * preferred resolution in DID extension block later.
6912 		 */
6913 		drm_mode_sort(&connector->probed_modes);
6914 		amdgpu_dm_get_native_mode(connector);
6915 	} else {
6916 		amdgpu_dm_connector->num_modes = 0;
6917 	}
6918 }
6919 
6920 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6921 {
6922 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6923 			to_amdgpu_dm_connector(connector);
6924 	struct drm_encoder *encoder;
6925 	struct edid *edid = amdgpu_dm_connector->edid;
6926 
6927 	encoder = amdgpu_dm_connector_to_encoder(connector);
6928 
6929 	if (!drm_edid_is_valid(edid)) {
6930 		amdgpu_dm_connector->num_modes =
6931 				drm_add_modes_noedid(connector, 640, 480);
6932 	} else {
6933 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6934 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6935 	}
6936 	amdgpu_dm_fbc_init(connector);
6937 
6938 	return amdgpu_dm_connector->num_modes;
6939 }
6940 
6941 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6942 				     struct amdgpu_dm_connector *aconnector,
6943 				     int connector_type,
6944 				     struct dc_link *link,
6945 				     int link_index)
6946 {
6947 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6948 
6949 	/*
6950 	 * Some of the properties below require access to state, like bpc.
6951 	 * Allocate some default initial connector state with our reset helper.
6952 	 */
6953 	if (aconnector->base.funcs->reset)
6954 		aconnector->base.funcs->reset(&aconnector->base);
6955 
6956 	aconnector->connector_id = link_index;
6957 	aconnector->dc_link = link;
6958 	aconnector->base.interlace_allowed = false;
6959 	aconnector->base.doublescan_allowed = false;
6960 	aconnector->base.stereo_allowed = false;
6961 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6962 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6963 	aconnector->audio_inst = -1;
6964 	mutex_init(&aconnector->hpd_lock);
6965 
6966 	/*
6967 	 * configure support HPD hot plug connector_>polled default value is 0
6968 	 * which means HPD hot plug not supported
6969 	 */
6970 	switch (connector_type) {
6971 	case DRM_MODE_CONNECTOR_HDMIA:
6972 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6973 		aconnector->base.ycbcr_420_allowed =
6974 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6975 		break;
6976 	case DRM_MODE_CONNECTOR_DisplayPort:
6977 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6978 		aconnector->base.ycbcr_420_allowed =
6979 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6980 		break;
6981 	case DRM_MODE_CONNECTOR_DVID:
6982 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6983 		break;
6984 	default:
6985 		break;
6986 	}
6987 
6988 	drm_object_attach_property(&aconnector->base.base,
6989 				dm->ddev->mode_config.scaling_mode_property,
6990 				DRM_MODE_SCALE_NONE);
6991 
6992 	drm_object_attach_property(&aconnector->base.base,
6993 				adev->mode_info.underscan_property,
6994 				UNDERSCAN_OFF);
6995 	drm_object_attach_property(&aconnector->base.base,
6996 				adev->mode_info.underscan_hborder_property,
6997 				0);
6998 	drm_object_attach_property(&aconnector->base.base,
6999 				adev->mode_info.underscan_vborder_property,
7000 				0);
7001 
7002 	if (!aconnector->mst_port)
7003 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7004 
7005 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7006 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7007 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7008 
7009 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7010 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7011 		drm_object_attach_property(&aconnector->base.base,
7012 				adev->mode_info.abm_level_property, 0);
7013 	}
7014 
7015 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7016 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7017 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7018 		drm_object_attach_property(
7019 			&aconnector->base.base,
7020 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7021 
7022 		if (!aconnector->mst_port)
7023 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7024 
7025 #ifdef CONFIG_DRM_AMD_DC_HDCP
7026 		if (adev->dm.hdcp_workqueue)
7027 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7028 #endif
7029 	}
7030 }
7031 
7032 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7033 			      struct i2c_msg *msgs, int num)
7034 {
7035 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7036 	struct ddc_service *ddc_service = i2c->ddc_service;
7037 	struct i2c_command cmd;
7038 	int i;
7039 	int result = -EIO;
7040 
7041 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7042 
7043 	if (!cmd.payloads)
7044 		return result;
7045 
7046 	cmd.number_of_payloads = num;
7047 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7048 	cmd.speed = 100;
7049 
7050 	for (i = 0; i < num; i++) {
7051 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7052 		cmd.payloads[i].address = msgs[i].addr;
7053 		cmd.payloads[i].length = msgs[i].len;
7054 		cmd.payloads[i].data = msgs[i].buf;
7055 	}
7056 
7057 	if (dc_submit_i2c(
7058 			ddc_service->ctx->dc,
7059 			ddc_service->ddc_pin->hw_info.ddc_channel,
7060 			&cmd))
7061 		result = num;
7062 
7063 	kfree(cmd.payloads);
7064 	return result;
7065 }
7066 
7067 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7068 {
7069 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7070 }
7071 
7072 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7073 	.master_xfer = amdgpu_dm_i2c_xfer,
7074 	.functionality = amdgpu_dm_i2c_func,
7075 };
7076 
7077 static struct amdgpu_i2c_adapter *
7078 create_i2c(struct ddc_service *ddc_service,
7079 	   int link_index,
7080 	   int *res)
7081 {
7082 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7083 	struct amdgpu_i2c_adapter *i2c;
7084 
7085 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7086 	if (!i2c)
7087 		return NULL;
7088 	i2c->base.owner = THIS_MODULE;
7089 	i2c->base.class = I2C_CLASS_DDC;
7090 	i2c->base.dev.parent = &adev->pdev->dev;
7091 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7092 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7093 	i2c_set_adapdata(&i2c->base, i2c);
7094 	i2c->ddc_service = ddc_service;
7095 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7096 
7097 	return i2c;
7098 }
7099 
7100 
7101 /*
7102  * Note: this function assumes that dc_link_detect() was called for the
7103  * dc_link which will be represented by this aconnector.
7104  */
7105 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7106 				    struct amdgpu_dm_connector *aconnector,
7107 				    uint32_t link_index,
7108 				    struct amdgpu_encoder *aencoder)
7109 {
7110 	int res = 0;
7111 	int connector_type;
7112 	struct dc *dc = dm->dc;
7113 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7114 	struct amdgpu_i2c_adapter *i2c;
7115 
7116 	link->priv = aconnector;
7117 
7118 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7119 
7120 	i2c = create_i2c(link->ddc, link->link_index, &res);
7121 	if (!i2c) {
7122 		DRM_ERROR("Failed to create i2c adapter data\n");
7123 		return -ENOMEM;
7124 	}
7125 
7126 	aconnector->i2c = i2c;
7127 	res = i2c_add_adapter(&i2c->base);
7128 
7129 	if (res) {
7130 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7131 		goto out_free;
7132 	}
7133 
7134 	connector_type = to_drm_connector_type(link->connector_signal);
7135 
7136 	res = drm_connector_init_with_ddc(
7137 			dm->ddev,
7138 			&aconnector->base,
7139 			&amdgpu_dm_connector_funcs,
7140 			connector_type,
7141 			&i2c->base);
7142 
7143 	if (res) {
7144 		DRM_ERROR("connector_init failed\n");
7145 		aconnector->connector_id = -1;
7146 		goto out_free;
7147 	}
7148 
7149 	drm_connector_helper_add(
7150 			&aconnector->base,
7151 			&amdgpu_dm_connector_helper_funcs);
7152 
7153 	amdgpu_dm_connector_init_helper(
7154 		dm,
7155 		aconnector,
7156 		connector_type,
7157 		link,
7158 		link_index);
7159 
7160 	drm_connector_attach_encoder(
7161 		&aconnector->base, &aencoder->base);
7162 
7163 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7164 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7165 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7166 
7167 out_free:
7168 	if (res) {
7169 		kfree(i2c);
7170 		aconnector->i2c = NULL;
7171 	}
7172 	return res;
7173 }
7174 
7175 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7176 {
7177 	switch (adev->mode_info.num_crtc) {
7178 	case 1:
7179 		return 0x1;
7180 	case 2:
7181 		return 0x3;
7182 	case 3:
7183 		return 0x7;
7184 	case 4:
7185 		return 0xf;
7186 	case 5:
7187 		return 0x1f;
7188 	case 6:
7189 	default:
7190 		return 0x3f;
7191 	}
7192 }
7193 
7194 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7195 				  struct amdgpu_encoder *aencoder,
7196 				  uint32_t link_index)
7197 {
7198 	struct amdgpu_device *adev = drm_to_adev(dev);
7199 
7200 	int res = drm_encoder_init(dev,
7201 				   &aencoder->base,
7202 				   &amdgpu_dm_encoder_funcs,
7203 				   DRM_MODE_ENCODER_TMDS,
7204 				   NULL);
7205 
7206 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7207 
7208 	if (!res)
7209 		aencoder->encoder_id = link_index;
7210 	else
7211 		aencoder->encoder_id = -1;
7212 
7213 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7214 
7215 	return res;
7216 }
7217 
7218 static void manage_dm_interrupts(struct amdgpu_device *adev,
7219 				 struct amdgpu_crtc *acrtc,
7220 				 bool enable)
7221 {
7222 	/*
7223 	 * We have no guarantee that the frontend index maps to the same
7224 	 * backend index - some even map to more than one.
7225 	 *
7226 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7227 	 */
7228 	int irq_type =
7229 		amdgpu_display_crtc_idx_to_irq_type(
7230 			adev,
7231 			acrtc->crtc_id);
7232 
7233 	if (enable) {
7234 		drm_crtc_vblank_on(&acrtc->base);
7235 		amdgpu_irq_get(
7236 			adev,
7237 			&adev->pageflip_irq,
7238 			irq_type);
7239 	} else {
7240 
7241 		amdgpu_irq_put(
7242 			adev,
7243 			&adev->pageflip_irq,
7244 			irq_type);
7245 		drm_crtc_vblank_off(&acrtc->base);
7246 	}
7247 }
7248 
7249 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7250 				      struct amdgpu_crtc *acrtc)
7251 {
7252 	int irq_type =
7253 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7254 
7255 	/**
7256 	 * This reads the current state for the IRQ and force reapplies
7257 	 * the setting to hardware.
7258 	 */
7259 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7260 }
7261 
7262 static bool
7263 is_scaling_state_different(const struct dm_connector_state *dm_state,
7264 			   const struct dm_connector_state *old_dm_state)
7265 {
7266 	if (dm_state->scaling != old_dm_state->scaling)
7267 		return true;
7268 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7269 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7270 			return true;
7271 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7272 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7273 			return true;
7274 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7275 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7276 		return true;
7277 	return false;
7278 }
7279 
7280 #ifdef CONFIG_DRM_AMD_DC_HDCP
7281 static bool is_content_protection_different(struct drm_connector_state *state,
7282 					    const struct drm_connector_state *old_state,
7283 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7284 {
7285 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7286 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7287 
7288 	/* Handle: Type0/1 change */
7289 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7290 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7291 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7292 		return true;
7293 	}
7294 
7295 	/* CP is being re enabled, ignore this
7296 	 *
7297 	 * Handles:	ENABLED -> DESIRED
7298 	 */
7299 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7300 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7301 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7302 		return false;
7303 	}
7304 
7305 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7306 	 *
7307 	 * Handles:	UNDESIRED -> ENABLED
7308 	 */
7309 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7310 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7311 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7312 
7313 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7314 	 * hot-plug, headless s3, dpms
7315 	 *
7316 	 * Handles:	DESIRED -> DESIRED (Special case)
7317 	 */
7318 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7319 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7320 		dm_con_state->update_hdcp = false;
7321 		return true;
7322 	}
7323 
7324 	/*
7325 	 * Handles:	UNDESIRED -> UNDESIRED
7326 	 *		DESIRED -> DESIRED
7327 	 *		ENABLED -> ENABLED
7328 	 */
7329 	if (old_state->content_protection == state->content_protection)
7330 		return false;
7331 
7332 	/*
7333 	 * Handles:	UNDESIRED -> DESIRED
7334 	 *		DESIRED -> UNDESIRED
7335 	 *		ENABLED -> UNDESIRED
7336 	 */
7337 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7338 		return true;
7339 
7340 	/*
7341 	 * Handles:	DESIRED -> ENABLED
7342 	 */
7343 	return false;
7344 }
7345 
7346 #endif
7347 static void remove_stream(struct amdgpu_device *adev,
7348 			  struct amdgpu_crtc *acrtc,
7349 			  struct dc_stream_state *stream)
7350 {
7351 	/* this is the update mode case */
7352 
7353 	acrtc->otg_inst = -1;
7354 	acrtc->enabled = false;
7355 }
7356 
7357 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7358 			       struct dc_cursor_position *position)
7359 {
7360 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7361 	int x, y;
7362 	int xorigin = 0, yorigin = 0;
7363 
7364 	position->enable = false;
7365 	position->x = 0;
7366 	position->y = 0;
7367 
7368 	if (!crtc || !plane->state->fb)
7369 		return 0;
7370 
7371 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7372 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7373 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7374 			  __func__,
7375 			  plane->state->crtc_w,
7376 			  plane->state->crtc_h);
7377 		return -EINVAL;
7378 	}
7379 
7380 	x = plane->state->crtc_x;
7381 	y = plane->state->crtc_y;
7382 
7383 	if (x <= -amdgpu_crtc->max_cursor_width ||
7384 	    y <= -amdgpu_crtc->max_cursor_height)
7385 		return 0;
7386 
7387 	if (x < 0) {
7388 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7389 		x = 0;
7390 	}
7391 	if (y < 0) {
7392 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7393 		y = 0;
7394 	}
7395 	position->enable = true;
7396 	position->translate_by_source = true;
7397 	position->x = x;
7398 	position->y = y;
7399 	position->x_hotspot = xorigin;
7400 	position->y_hotspot = yorigin;
7401 
7402 	return 0;
7403 }
7404 
7405 static void handle_cursor_update(struct drm_plane *plane,
7406 				 struct drm_plane_state *old_plane_state)
7407 {
7408 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7409 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7410 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7411 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7412 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7413 	uint64_t address = afb ? afb->address : 0;
7414 	struct dc_cursor_position position;
7415 	struct dc_cursor_attributes attributes;
7416 	int ret;
7417 
7418 	if (!plane->state->fb && !old_plane_state->fb)
7419 		return;
7420 
7421 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7422 			 __func__,
7423 			 amdgpu_crtc->crtc_id,
7424 			 plane->state->crtc_w,
7425 			 plane->state->crtc_h);
7426 
7427 	ret = get_cursor_position(plane, crtc, &position);
7428 	if (ret)
7429 		return;
7430 
7431 	if (!position.enable) {
7432 		/* turn off cursor */
7433 		if (crtc_state && crtc_state->stream) {
7434 			mutex_lock(&adev->dm.dc_lock);
7435 			dc_stream_set_cursor_position(crtc_state->stream,
7436 						      &position);
7437 			mutex_unlock(&adev->dm.dc_lock);
7438 		}
7439 		return;
7440 	}
7441 
7442 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7443 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7444 
7445 	memset(&attributes, 0, sizeof(attributes));
7446 	attributes.address.high_part = upper_32_bits(address);
7447 	attributes.address.low_part  = lower_32_bits(address);
7448 	attributes.width             = plane->state->crtc_w;
7449 	attributes.height            = plane->state->crtc_h;
7450 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7451 	attributes.rotation_angle    = 0;
7452 	attributes.attribute_flags.value = 0;
7453 
7454 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7455 
7456 	if (crtc_state->stream) {
7457 		mutex_lock(&adev->dm.dc_lock);
7458 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7459 							 &attributes))
7460 			DRM_ERROR("DC failed to set cursor attributes\n");
7461 
7462 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7463 						   &position))
7464 			DRM_ERROR("DC failed to set cursor position\n");
7465 		mutex_unlock(&adev->dm.dc_lock);
7466 	}
7467 }
7468 
7469 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7470 {
7471 
7472 	assert_spin_locked(&acrtc->base.dev->event_lock);
7473 	WARN_ON(acrtc->event);
7474 
7475 	acrtc->event = acrtc->base.state->event;
7476 
7477 	/* Set the flip status */
7478 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7479 
7480 	/* Mark this event as consumed */
7481 	acrtc->base.state->event = NULL;
7482 
7483 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7484 						 acrtc->crtc_id);
7485 }
7486 
7487 static void update_freesync_state_on_stream(
7488 	struct amdgpu_display_manager *dm,
7489 	struct dm_crtc_state *new_crtc_state,
7490 	struct dc_stream_state *new_stream,
7491 	struct dc_plane_state *surface,
7492 	u32 flip_timestamp_in_us)
7493 {
7494 	struct mod_vrr_params vrr_params;
7495 	struct dc_info_packet vrr_infopacket = {0};
7496 	struct amdgpu_device *adev = dm->adev;
7497 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7498 	unsigned long flags;
7499 
7500 	if (!new_stream)
7501 		return;
7502 
7503 	/*
7504 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7505 	 * For now it's sufficient to just guard against these conditions.
7506 	 */
7507 
7508 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7509 		return;
7510 
7511 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7512         vrr_params = acrtc->dm_irq_params.vrr_params;
7513 
7514 	if (surface) {
7515 		mod_freesync_handle_preflip(
7516 			dm->freesync_module,
7517 			surface,
7518 			new_stream,
7519 			flip_timestamp_in_us,
7520 			&vrr_params);
7521 
7522 		if (adev->family < AMDGPU_FAMILY_AI &&
7523 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7524 			mod_freesync_handle_v_update(dm->freesync_module,
7525 						     new_stream, &vrr_params);
7526 
7527 			/* Need to call this before the frame ends. */
7528 			dc_stream_adjust_vmin_vmax(dm->dc,
7529 						   new_crtc_state->stream,
7530 						   &vrr_params.adjust);
7531 		}
7532 	}
7533 
7534 	mod_freesync_build_vrr_infopacket(
7535 		dm->freesync_module,
7536 		new_stream,
7537 		&vrr_params,
7538 		PACKET_TYPE_VRR,
7539 		TRANSFER_FUNC_UNKNOWN,
7540 		&vrr_infopacket);
7541 
7542 	new_crtc_state->freesync_timing_changed |=
7543 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7544 			&vrr_params.adjust,
7545 			sizeof(vrr_params.adjust)) != 0);
7546 
7547 	new_crtc_state->freesync_vrr_info_changed |=
7548 		(memcmp(&new_crtc_state->vrr_infopacket,
7549 			&vrr_infopacket,
7550 			sizeof(vrr_infopacket)) != 0);
7551 
7552 	acrtc->dm_irq_params.vrr_params = vrr_params;
7553 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7554 
7555 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7556 	new_stream->vrr_infopacket = vrr_infopacket;
7557 
7558 	if (new_crtc_state->freesync_vrr_info_changed)
7559 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7560 			      new_crtc_state->base.crtc->base.id,
7561 			      (int)new_crtc_state->base.vrr_enabled,
7562 			      (int)vrr_params.state);
7563 
7564 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7565 }
7566 
7567 static void update_stream_irq_parameters(
7568 	struct amdgpu_display_manager *dm,
7569 	struct dm_crtc_state *new_crtc_state)
7570 {
7571 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7572 	struct mod_vrr_params vrr_params;
7573 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7574 	struct amdgpu_device *adev = dm->adev;
7575 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7576 	unsigned long flags;
7577 
7578 	if (!new_stream)
7579 		return;
7580 
7581 	/*
7582 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7583 	 * For now it's sufficient to just guard against these conditions.
7584 	 */
7585 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7586 		return;
7587 
7588 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7589 	vrr_params = acrtc->dm_irq_params.vrr_params;
7590 
7591 	if (new_crtc_state->vrr_supported &&
7592 	    config.min_refresh_in_uhz &&
7593 	    config.max_refresh_in_uhz) {
7594 		config.state = new_crtc_state->base.vrr_enabled ?
7595 			VRR_STATE_ACTIVE_VARIABLE :
7596 			VRR_STATE_INACTIVE;
7597 	} else {
7598 		config.state = VRR_STATE_UNSUPPORTED;
7599 	}
7600 
7601 	mod_freesync_build_vrr_params(dm->freesync_module,
7602 				      new_stream,
7603 				      &config, &vrr_params);
7604 
7605 	new_crtc_state->freesync_timing_changed |=
7606 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7607 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7608 
7609 	new_crtc_state->freesync_config = config;
7610 	/* Copy state for access from DM IRQ handler */
7611 	acrtc->dm_irq_params.freesync_config = config;
7612 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7613 	acrtc->dm_irq_params.vrr_params = vrr_params;
7614 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7615 }
7616 
7617 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7618 					    struct dm_crtc_state *new_state)
7619 {
7620 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7621 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7622 
7623 	if (!old_vrr_active && new_vrr_active) {
7624 		/* Transition VRR inactive -> active:
7625 		 * While VRR is active, we must not disable vblank irq, as a
7626 		 * reenable after disable would compute bogus vblank/pflip
7627 		 * timestamps if it likely happened inside display front-porch.
7628 		 *
7629 		 * We also need vupdate irq for the actual core vblank handling
7630 		 * at end of vblank.
7631 		 */
7632 		dm_set_vupdate_irq(new_state->base.crtc, true);
7633 		drm_crtc_vblank_get(new_state->base.crtc);
7634 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7635 				 __func__, new_state->base.crtc->base.id);
7636 	} else if (old_vrr_active && !new_vrr_active) {
7637 		/* Transition VRR active -> inactive:
7638 		 * Allow vblank irq disable again for fixed refresh rate.
7639 		 */
7640 		dm_set_vupdate_irq(new_state->base.crtc, false);
7641 		drm_crtc_vblank_put(new_state->base.crtc);
7642 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7643 				 __func__, new_state->base.crtc->base.id);
7644 	}
7645 }
7646 
7647 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7648 {
7649 	struct drm_plane *plane;
7650 	struct drm_plane_state *old_plane_state, *new_plane_state;
7651 	int i;
7652 
7653 	/*
7654 	 * TODO: Make this per-stream so we don't issue redundant updates for
7655 	 * commits with multiple streams.
7656 	 */
7657 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7658 				       new_plane_state, i)
7659 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7660 			handle_cursor_update(plane, old_plane_state);
7661 }
7662 
7663 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7664 				    struct dc_state *dc_state,
7665 				    struct drm_device *dev,
7666 				    struct amdgpu_display_manager *dm,
7667 				    struct drm_crtc *pcrtc,
7668 				    bool wait_for_vblank)
7669 {
7670 	int i;
7671 	uint64_t timestamp_ns;
7672 	struct drm_plane *plane;
7673 	struct drm_plane_state *old_plane_state, *new_plane_state;
7674 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7675 	struct drm_crtc_state *new_pcrtc_state =
7676 			drm_atomic_get_new_crtc_state(state, pcrtc);
7677 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7678 	struct dm_crtc_state *dm_old_crtc_state =
7679 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7680 	int planes_count = 0, vpos, hpos;
7681 	long r;
7682 	unsigned long flags;
7683 	struct amdgpu_bo *abo;
7684 	uint32_t target_vblank, last_flip_vblank;
7685 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7686 	bool pflip_present = false;
7687 	struct {
7688 		struct dc_surface_update surface_updates[MAX_SURFACES];
7689 		struct dc_plane_info plane_infos[MAX_SURFACES];
7690 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7691 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7692 		struct dc_stream_update stream_update;
7693 	} *bundle;
7694 
7695 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7696 
7697 	if (!bundle) {
7698 		dm_error("Failed to allocate update bundle\n");
7699 		goto cleanup;
7700 	}
7701 
7702 	/*
7703 	 * Disable the cursor first if we're disabling all the planes.
7704 	 * It'll remain on the screen after the planes are re-enabled
7705 	 * if we don't.
7706 	 */
7707 	if (acrtc_state->active_planes == 0)
7708 		amdgpu_dm_commit_cursors(state);
7709 
7710 	/* update planes when needed */
7711 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
7712 		struct drm_crtc *crtc = new_plane_state->crtc;
7713 		struct drm_crtc_state *new_crtc_state;
7714 		struct drm_framebuffer *fb = new_plane_state->fb;
7715 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7716 		bool plane_needs_flip;
7717 		struct dc_plane_state *dc_plane;
7718 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7719 
7720 		/* Cursor plane is handled after stream updates */
7721 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7722 			continue;
7723 
7724 		if (!fb || !crtc || pcrtc != crtc)
7725 			continue;
7726 
7727 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7728 		if (!new_crtc_state->active)
7729 			continue;
7730 
7731 		dc_plane = dm_new_plane_state->dc_state;
7732 
7733 		bundle->surface_updates[planes_count].surface = dc_plane;
7734 		if (new_pcrtc_state->color_mgmt_changed) {
7735 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7736 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7737 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7738 		}
7739 
7740 		fill_dc_scaling_info(new_plane_state,
7741 				     &bundle->scaling_infos[planes_count]);
7742 
7743 		bundle->surface_updates[planes_count].scaling_info =
7744 			&bundle->scaling_infos[planes_count];
7745 
7746 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7747 
7748 		pflip_present = pflip_present || plane_needs_flip;
7749 
7750 		if (!plane_needs_flip) {
7751 			planes_count += 1;
7752 			continue;
7753 		}
7754 
7755 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7756 
7757 		/*
7758 		 * Wait for all fences on this FB. Do limited wait to avoid
7759 		 * deadlock during GPU reset when this fence will not signal
7760 		 * but we hold reservation lock for the BO.
7761 		 */
7762 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7763 							false,
7764 							msecs_to_jiffies(5000));
7765 		if (unlikely(r <= 0))
7766 			DRM_ERROR("Waiting for fences timed out!");
7767 
7768 		fill_dc_plane_info_and_addr(
7769 			dm->adev, new_plane_state,
7770 			afb->tiling_flags,
7771 			&bundle->plane_infos[planes_count],
7772 			&bundle->flip_addrs[planes_count].address,
7773 			afb->tmz_surface, false);
7774 
7775 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7776 				 new_plane_state->plane->index,
7777 				 bundle->plane_infos[planes_count].dcc.enable);
7778 
7779 		bundle->surface_updates[planes_count].plane_info =
7780 			&bundle->plane_infos[planes_count];
7781 
7782 		/*
7783 		 * Only allow immediate flips for fast updates that don't
7784 		 * change FB pitch, DCC state, rotation or mirroing.
7785 		 */
7786 		bundle->flip_addrs[planes_count].flip_immediate =
7787 			crtc->state->async_flip &&
7788 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7789 
7790 		timestamp_ns = ktime_get_ns();
7791 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7792 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7793 		bundle->surface_updates[planes_count].surface = dc_plane;
7794 
7795 		if (!bundle->surface_updates[planes_count].surface) {
7796 			DRM_ERROR("No surface for CRTC: id=%d\n",
7797 					acrtc_attach->crtc_id);
7798 			continue;
7799 		}
7800 
7801 		if (plane == pcrtc->primary)
7802 			update_freesync_state_on_stream(
7803 				dm,
7804 				acrtc_state,
7805 				acrtc_state->stream,
7806 				dc_plane,
7807 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7808 
7809 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7810 				 __func__,
7811 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7812 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7813 
7814 		planes_count += 1;
7815 
7816 	}
7817 
7818 	if (pflip_present) {
7819 		if (!vrr_active) {
7820 			/* Use old throttling in non-vrr fixed refresh rate mode
7821 			 * to keep flip scheduling based on target vblank counts
7822 			 * working in a backwards compatible way, e.g., for
7823 			 * clients using the GLX_OML_sync_control extension or
7824 			 * DRI3/Present extension with defined target_msc.
7825 			 */
7826 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7827 		}
7828 		else {
7829 			/* For variable refresh rate mode only:
7830 			 * Get vblank of last completed flip to avoid > 1 vrr
7831 			 * flips per video frame by use of throttling, but allow
7832 			 * flip programming anywhere in the possibly large
7833 			 * variable vrr vblank interval for fine-grained flip
7834 			 * timing control and more opportunity to avoid stutter
7835 			 * on late submission of flips.
7836 			 */
7837 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7838 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7839 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7840 		}
7841 
7842 		target_vblank = last_flip_vblank + wait_for_vblank;
7843 
7844 		/*
7845 		 * Wait until we're out of the vertical blank period before the one
7846 		 * targeted by the flip
7847 		 */
7848 		while ((acrtc_attach->enabled &&
7849 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7850 							    0, &vpos, &hpos, NULL,
7851 							    NULL, &pcrtc->hwmode)
7852 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7853 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7854 			(int)(target_vblank -
7855 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7856 			usleep_range(1000, 1100);
7857 		}
7858 
7859 		/**
7860 		 * Prepare the flip event for the pageflip interrupt to handle.
7861 		 *
7862 		 * This only works in the case where we've already turned on the
7863 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7864 		 * from 0 -> n planes we have to skip a hardware generated event
7865 		 * and rely on sending it from software.
7866 		 */
7867 		if (acrtc_attach->base.state->event &&
7868 		    acrtc_state->active_planes > 0) {
7869 			drm_crtc_vblank_get(pcrtc);
7870 
7871 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7872 
7873 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7874 			prepare_flip_isr(acrtc_attach);
7875 
7876 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7877 		}
7878 
7879 		if (acrtc_state->stream) {
7880 			if (acrtc_state->freesync_vrr_info_changed)
7881 				bundle->stream_update.vrr_infopacket =
7882 					&acrtc_state->stream->vrr_infopacket;
7883 		}
7884 	}
7885 
7886 	/* Update the planes if changed or disable if we don't have any. */
7887 	if ((planes_count || acrtc_state->active_planes == 0) &&
7888 		acrtc_state->stream) {
7889 		bundle->stream_update.stream = acrtc_state->stream;
7890 		if (new_pcrtc_state->mode_changed) {
7891 			bundle->stream_update.src = acrtc_state->stream->src;
7892 			bundle->stream_update.dst = acrtc_state->stream->dst;
7893 		}
7894 
7895 		if (new_pcrtc_state->color_mgmt_changed) {
7896 			/*
7897 			 * TODO: This isn't fully correct since we've actually
7898 			 * already modified the stream in place.
7899 			 */
7900 			bundle->stream_update.gamut_remap =
7901 				&acrtc_state->stream->gamut_remap_matrix;
7902 			bundle->stream_update.output_csc_transform =
7903 				&acrtc_state->stream->csc_color_matrix;
7904 			bundle->stream_update.out_transfer_func =
7905 				acrtc_state->stream->out_transfer_func;
7906 		}
7907 
7908 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7909 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7910 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7911 
7912 		/*
7913 		 * If FreeSync state on the stream has changed then we need to
7914 		 * re-adjust the min/max bounds now that DC doesn't handle this
7915 		 * as part of commit.
7916 		 */
7917 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7918 		    amdgpu_dm_vrr_active(acrtc_state)) {
7919 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7920 			dc_stream_adjust_vmin_vmax(
7921 				dm->dc, acrtc_state->stream,
7922 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7923 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7924 		}
7925 		mutex_lock(&dm->dc_lock);
7926 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7927 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7928 			amdgpu_dm_psr_disable(acrtc_state->stream);
7929 
7930 		dc_commit_updates_for_stream(dm->dc,
7931 						     bundle->surface_updates,
7932 						     planes_count,
7933 						     acrtc_state->stream,
7934 						     &bundle->stream_update);
7935 
7936 		/**
7937 		 * Enable or disable the interrupts on the backend.
7938 		 *
7939 		 * Most pipes are put into power gating when unused.
7940 		 *
7941 		 * When power gating is enabled on a pipe we lose the
7942 		 * interrupt enablement state when power gating is disabled.
7943 		 *
7944 		 * So we need to update the IRQ control state in hardware
7945 		 * whenever the pipe turns on (since it could be previously
7946 		 * power gated) or off (since some pipes can't be power gated
7947 		 * on some ASICs).
7948 		 */
7949 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7950 			dm_update_pflip_irq_state(drm_to_adev(dev),
7951 						  acrtc_attach);
7952 
7953 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7954 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7955 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7956 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7957 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7958 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7959 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7960 			amdgpu_dm_psr_enable(acrtc_state->stream);
7961 		}
7962 
7963 		mutex_unlock(&dm->dc_lock);
7964 	}
7965 
7966 	/*
7967 	 * Update cursor state *after* programming all the planes.
7968 	 * This avoids redundant programming in the case where we're going
7969 	 * to be disabling a single plane - those pipes are being disabled.
7970 	 */
7971 	if (acrtc_state->active_planes)
7972 		amdgpu_dm_commit_cursors(state);
7973 
7974 cleanup:
7975 	kfree(bundle);
7976 }
7977 
7978 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7979 				   struct drm_atomic_state *state)
7980 {
7981 	struct amdgpu_device *adev = drm_to_adev(dev);
7982 	struct amdgpu_dm_connector *aconnector;
7983 	struct drm_connector *connector;
7984 	struct drm_connector_state *old_con_state, *new_con_state;
7985 	struct drm_crtc_state *new_crtc_state;
7986 	struct dm_crtc_state *new_dm_crtc_state;
7987 	const struct dc_stream_status *status;
7988 	int i, inst;
7989 
7990 	/* Notify device removals. */
7991 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7992 		if (old_con_state->crtc != new_con_state->crtc) {
7993 			/* CRTC changes require notification. */
7994 			goto notify;
7995 		}
7996 
7997 		if (!new_con_state->crtc)
7998 			continue;
7999 
8000 		new_crtc_state = drm_atomic_get_new_crtc_state(
8001 			state, new_con_state->crtc);
8002 
8003 		if (!new_crtc_state)
8004 			continue;
8005 
8006 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8007 			continue;
8008 
8009 	notify:
8010 		aconnector = to_amdgpu_dm_connector(connector);
8011 
8012 		mutex_lock(&adev->dm.audio_lock);
8013 		inst = aconnector->audio_inst;
8014 		aconnector->audio_inst = -1;
8015 		mutex_unlock(&adev->dm.audio_lock);
8016 
8017 		amdgpu_dm_audio_eld_notify(adev, inst);
8018 	}
8019 
8020 	/* Notify audio device additions. */
8021 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8022 		if (!new_con_state->crtc)
8023 			continue;
8024 
8025 		new_crtc_state = drm_atomic_get_new_crtc_state(
8026 			state, new_con_state->crtc);
8027 
8028 		if (!new_crtc_state)
8029 			continue;
8030 
8031 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8032 			continue;
8033 
8034 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8035 		if (!new_dm_crtc_state->stream)
8036 			continue;
8037 
8038 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8039 		if (!status)
8040 			continue;
8041 
8042 		aconnector = to_amdgpu_dm_connector(connector);
8043 
8044 		mutex_lock(&adev->dm.audio_lock);
8045 		inst = status->audio_inst;
8046 		aconnector->audio_inst = inst;
8047 		mutex_unlock(&adev->dm.audio_lock);
8048 
8049 		amdgpu_dm_audio_eld_notify(adev, inst);
8050 	}
8051 }
8052 
8053 /*
8054  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8055  * @crtc_state: the DRM CRTC state
8056  * @stream_state: the DC stream state.
8057  *
8058  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8059  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8060  */
8061 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8062 						struct dc_stream_state *stream_state)
8063 {
8064 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8065 }
8066 
8067 /**
8068  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8069  * @state: The atomic state to commit
8070  *
8071  * This will tell DC to commit the constructed DC state from atomic_check,
8072  * programming the hardware. Any failures here implies a hardware failure, since
8073  * atomic check should have filtered anything non-kosher.
8074  */
8075 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8076 {
8077 	struct drm_device *dev = state->dev;
8078 	struct amdgpu_device *adev = drm_to_adev(dev);
8079 	struct amdgpu_display_manager *dm = &adev->dm;
8080 	struct dm_atomic_state *dm_state;
8081 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8082 	uint32_t i, j;
8083 	struct drm_crtc *crtc;
8084 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8085 	unsigned long flags;
8086 	bool wait_for_vblank = true;
8087 	struct drm_connector *connector;
8088 	struct drm_connector_state *old_con_state, *new_con_state;
8089 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8090 	int crtc_disable_count = 0;
8091 	bool mode_set_reset_required = false;
8092 
8093 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8094 
8095 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8096 
8097 	dm_state = dm_atomic_get_new_state(state);
8098 	if (dm_state && dm_state->context) {
8099 		dc_state = dm_state->context;
8100 	} else {
8101 		/* No state changes, retain current state. */
8102 		dc_state_temp = dc_create_state(dm->dc);
8103 		ASSERT(dc_state_temp);
8104 		dc_state = dc_state_temp;
8105 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8106 	}
8107 
8108 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8109 				       new_crtc_state, i) {
8110 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8111 
8112 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8113 
8114 		if (old_crtc_state->active &&
8115 		    (!new_crtc_state->active ||
8116 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8117 			manage_dm_interrupts(adev, acrtc, false);
8118 			dc_stream_release(dm_old_crtc_state->stream);
8119 		}
8120 	}
8121 
8122 	drm_atomic_helper_calc_timestamping_constants(state);
8123 
8124 	/* update changed items */
8125 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8126 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8127 
8128 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8129 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8130 
8131 		DRM_DEBUG_DRIVER(
8132 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8133 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8134 			"connectors_changed:%d\n",
8135 			acrtc->crtc_id,
8136 			new_crtc_state->enable,
8137 			new_crtc_state->active,
8138 			new_crtc_state->planes_changed,
8139 			new_crtc_state->mode_changed,
8140 			new_crtc_state->active_changed,
8141 			new_crtc_state->connectors_changed);
8142 
8143 		/* Disable cursor if disabling crtc */
8144 		if (old_crtc_state->active && !new_crtc_state->active) {
8145 			struct dc_cursor_position position;
8146 
8147 			memset(&position, 0, sizeof(position));
8148 			mutex_lock(&dm->dc_lock);
8149 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8150 			mutex_unlock(&dm->dc_lock);
8151 		}
8152 
8153 		/* Copy all transient state flags into dc state */
8154 		if (dm_new_crtc_state->stream) {
8155 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8156 							    dm_new_crtc_state->stream);
8157 		}
8158 
8159 		/* handles headless hotplug case, updating new_state and
8160 		 * aconnector as needed
8161 		 */
8162 
8163 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8164 
8165 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8166 
8167 			if (!dm_new_crtc_state->stream) {
8168 				/*
8169 				 * this could happen because of issues with
8170 				 * userspace notifications delivery.
8171 				 * In this case userspace tries to set mode on
8172 				 * display which is disconnected in fact.
8173 				 * dc_sink is NULL in this case on aconnector.
8174 				 * We expect reset mode will come soon.
8175 				 *
8176 				 * This can also happen when unplug is done
8177 				 * during resume sequence ended
8178 				 *
8179 				 * In this case, we want to pretend we still
8180 				 * have a sink to keep the pipe running so that
8181 				 * hw state is consistent with the sw state
8182 				 */
8183 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8184 						__func__, acrtc->base.base.id);
8185 				continue;
8186 			}
8187 
8188 			if (dm_old_crtc_state->stream)
8189 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8190 
8191 			pm_runtime_get_noresume(dev->dev);
8192 
8193 			acrtc->enabled = true;
8194 			acrtc->hw_mode = new_crtc_state->mode;
8195 			crtc->hwmode = new_crtc_state->mode;
8196 			mode_set_reset_required = true;
8197 		} else if (modereset_required(new_crtc_state)) {
8198 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8199 			/* i.e. reset mode */
8200 			if (dm_old_crtc_state->stream)
8201 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8202 			mode_set_reset_required = true;
8203 		}
8204 	} /* for_each_crtc_in_state() */
8205 
8206 	if (dc_state) {
8207 		/* if there mode set or reset, disable eDP PSR */
8208 		if (mode_set_reset_required)
8209 			amdgpu_dm_psr_disable_all(dm);
8210 
8211 		dm_enable_per_frame_crtc_master_sync(dc_state);
8212 		mutex_lock(&dm->dc_lock);
8213 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8214 		mutex_unlock(&dm->dc_lock);
8215 	}
8216 
8217 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8218 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8219 
8220 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8221 
8222 		if (dm_new_crtc_state->stream != NULL) {
8223 			const struct dc_stream_status *status =
8224 					dc_stream_get_status(dm_new_crtc_state->stream);
8225 
8226 			if (!status)
8227 				status = dc_stream_get_status_from_state(dc_state,
8228 									 dm_new_crtc_state->stream);
8229 			if (!status)
8230 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8231 			else
8232 				acrtc->otg_inst = status->primary_otg_inst;
8233 		}
8234 	}
8235 #ifdef CONFIG_DRM_AMD_DC_HDCP
8236 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8237 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8238 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8239 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8240 
8241 		new_crtc_state = NULL;
8242 
8243 		if (acrtc)
8244 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8245 
8246 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8247 
8248 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8249 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8250 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8251 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8252 			dm_new_con_state->update_hdcp = true;
8253 			continue;
8254 		}
8255 
8256 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8257 			hdcp_update_display(
8258 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8259 				new_con_state->hdcp_content_type,
8260 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8261 													 : false);
8262 	}
8263 #endif
8264 
8265 	/* Handle connector state changes */
8266 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8267 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8268 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8269 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8270 		struct dc_surface_update surface_updates[MAX_SURFACES];
8271 		struct dc_stream_update stream_update;
8272 		struct dc_info_packet hdr_packet;
8273 		struct dc_stream_status *status = NULL;
8274 		bool abm_changed, hdr_changed, scaling_changed;
8275 
8276 		memset(&surface_updates, 0, sizeof(surface_updates));
8277 		memset(&stream_update, 0, sizeof(stream_update));
8278 
8279 		if (acrtc) {
8280 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8281 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8282 		}
8283 
8284 		/* Skip any modesets/resets */
8285 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8286 			continue;
8287 
8288 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8289 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8290 
8291 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8292 							     dm_old_con_state);
8293 
8294 		abm_changed = dm_new_crtc_state->abm_level !=
8295 			      dm_old_crtc_state->abm_level;
8296 
8297 		hdr_changed =
8298 			is_hdr_metadata_different(old_con_state, new_con_state);
8299 
8300 		if (!scaling_changed && !abm_changed && !hdr_changed)
8301 			continue;
8302 
8303 		stream_update.stream = dm_new_crtc_state->stream;
8304 		if (scaling_changed) {
8305 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8306 					dm_new_con_state, dm_new_crtc_state->stream);
8307 
8308 			stream_update.src = dm_new_crtc_state->stream->src;
8309 			stream_update.dst = dm_new_crtc_state->stream->dst;
8310 		}
8311 
8312 		if (abm_changed) {
8313 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8314 
8315 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8316 		}
8317 
8318 		if (hdr_changed) {
8319 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8320 			stream_update.hdr_static_metadata = &hdr_packet;
8321 		}
8322 
8323 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8324 		WARN_ON(!status);
8325 		WARN_ON(!status->plane_count);
8326 
8327 		/*
8328 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8329 		 * Here we create an empty update on each plane.
8330 		 * To fix this, DC should permit updating only stream properties.
8331 		 */
8332 		for (j = 0; j < status->plane_count; j++)
8333 			surface_updates[j].surface = status->plane_states[j];
8334 
8335 
8336 		mutex_lock(&dm->dc_lock);
8337 		dc_commit_updates_for_stream(dm->dc,
8338 						surface_updates,
8339 						     status->plane_count,
8340 						     dm_new_crtc_state->stream,
8341 						     &stream_update);
8342 		mutex_unlock(&dm->dc_lock);
8343 	}
8344 
8345 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8346 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8347 				      new_crtc_state, i) {
8348 		if (old_crtc_state->active && !new_crtc_state->active)
8349 			crtc_disable_count++;
8350 
8351 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8352 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8353 
8354 		/* For freesync config update on crtc state and params for irq */
8355 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8356 
8357 		/* Handle vrr on->off / off->on transitions */
8358 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8359 						dm_new_crtc_state);
8360 	}
8361 
8362 	/**
8363 	 * Enable interrupts for CRTCs that are newly enabled or went through
8364 	 * a modeset. It was intentionally deferred until after the front end
8365 	 * state was modified to wait until the OTG was on and so the IRQ
8366 	 * handlers didn't access stale or invalid state.
8367 	 */
8368 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8369 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8370 
8371 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8372 
8373 		if (new_crtc_state->active &&
8374 		    (!old_crtc_state->active ||
8375 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8376 			dc_stream_retain(dm_new_crtc_state->stream);
8377 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8378 			manage_dm_interrupts(adev, acrtc, true);
8379 
8380 #ifdef CONFIG_DEBUG_FS
8381 			/**
8382 			 * Frontend may have changed so reapply the CRC capture
8383 			 * settings for the stream.
8384 			 */
8385 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8386 
8387 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8388 				amdgpu_dm_crtc_configure_crc_source(
8389 					crtc, dm_new_crtc_state,
8390 					dm_new_crtc_state->crc_src);
8391 			}
8392 #endif
8393 		}
8394 	}
8395 
8396 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8397 		if (new_crtc_state->async_flip)
8398 			wait_for_vblank = false;
8399 
8400 	/* update planes when needed per crtc*/
8401 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8402 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8403 
8404 		if (dm_new_crtc_state->stream)
8405 			amdgpu_dm_commit_planes(state, dc_state, dev,
8406 						dm, crtc, wait_for_vblank);
8407 	}
8408 
8409 	/* Update audio instances for each connector. */
8410 	amdgpu_dm_commit_audio(dev, state);
8411 
8412 	/*
8413 	 * send vblank event on all events not handled in flip and
8414 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8415 	 */
8416 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8417 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8418 
8419 		if (new_crtc_state->event)
8420 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8421 
8422 		new_crtc_state->event = NULL;
8423 	}
8424 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8425 
8426 	/* Signal HW programming completion */
8427 	drm_atomic_helper_commit_hw_done(state);
8428 
8429 	if (wait_for_vblank)
8430 		drm_atomic_helper_wait_for_flip_done(dev, state);
8431 
8432 	drm_atomic_helper_cleanup_planes(dev, state);
8433 
8434 	/* return the stolen vga memory back to VRAM */
8435 	if (!adev->mman.keep_stolen_vga_memory)
8436 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8437 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8438 
8439 	/*
8440 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8441 	 * so we can put the GPU into runtime suspend if we're not driving any
8442 	 * displays anymore
8443 	 */
8444 	for (i = 0; i < crtc_disable_count; i++)
8445 		pm_runtime_put_autosuspend(dev->dev);
8446 	pm_runtime_mark_last_busy(dev->dev);
8447 
8448 	if (dc_state_temp)
8449 		dc_release_state(dc_state_temp);
8450 }
8451 
8452 
8453 static int dm_force_atomic_commit(struct drm_connector *connector)
8454 {
8455 	int ret = 0;
8456 	struct drm_device *ddev = connector->dev;
8457 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8458 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8459 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8460 	struct drm_connector_state *conn_state;
8461 	struct drm_crtc_state *crtc_state;
8462 	struct drm_plane_state *plane_state;
8463 
8464 	if (!state)
8465 		return -ENOMEM;
8466 
8467 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8468 
8469 	/* Construct an atomic state to restore previous display setting */
8470 
8471 	/*
8472 	 * Attach connectors to drm_atomic_state
8473 	 */
8474 	conn_state = drm_atomic_get_connector_state(state, connector);
8475 
8476 	ret = PTR_ERR_OR_ZERO(conn_state);
8477 	if (ret)
8478 		goto out;
8479 
8480 	/* Attach crtc to drm_atomic_state*/
8481 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8482 
8483 	ret = PTR_ERR_OR_ZERO(crtc_state);
8484 	if (ret)
8485 		goto out;
8486 
8487 	/* force a restore */
8488 	crtc_state->mode_changed = true;
8489 
8490 	/* Attach plane to drm_atomic_state */
8491 	plane_state = drm_atomic_get_plane_state(state, plane);
8492 
8493 	ret = PTR_ERR_OR_ZERO(plane_state);
8494 	if (ret)
8495 		goto out;
8496 
8497 	/* Call commit internally with the state we just constructed */
8498 	ret = drm_atomic_commit(state);
8499 
8500 out:
8501 	drm_atomic_state_put(state);
8502 	if (ret)
8503 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8504 
8505 	return ret;
8506 }
8507 
8508 /*
8509  * This function handles all cases when set mode does not come upon hotplug.
8510  * This includes when a display is unplugged then plugged back into the
8511  * same port and when running without usermode desktop manager supprot
8512  */
8513 void dm_restore_drm_connector_state(struct drm_device *dev,
8514 				    struct drm_connector *connector)
8515 {
8516 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8517 	struct amdgpu_crtc *disconnected_acrtc;
8518 	struct dm_crtc_state *acrtc_state;
8519 
8520 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8521 		return;
8522 
8523 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8524 	if (!disconnected_acrtc)
8525 		return;
8526 
8527 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8528 	if (!acrtc_state->stream)
8529 		return;
8530 
8531 	/*
8532 	 * If the previous sink is not released and different from the current,
8533 	 * we deduce we are in a state where we can not rely on usermode call
8534 	 * to turn on the display, so we do it here
8535 	 */
8536 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8537 		dm_force_atomic_commit(&aconnector->base);
8538 }
8539 
8540 /*
8541  * Grabs all modesetting locks to serialize against any blocking commits,
8542  * Waits for completion of all non blocking commits.
8543  */
8544 static int do_aquire_global_lock(struct drm_device *dev,
8545 				 struct drm_atomic_state *state)
8546 {
8547 	struct drm_crtc *crtc;
8548 	struct drm_crtc_commit *commit;
8549 	long ret;
8550 
8551 	/*
8552 	 * Adding all modeset locks to aquire_ctx will
8553 	 * ensure that when the framework release it the
8554 	 * extra locks we are locking here will get released to
8555 	 */
8556 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8557 	if (ret)
8558 		return ret;
8559 
8560 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8561 		spin_lock(&crtc->commit_lock);
8562 		commit = list_first_entry_or_null(&crtc->commit_list,
8563 				struct drm_crtc_commit, commit_entry);
8564 		if (commit)
8565 			drm_crtc_commit_get(commit);
8566 		spin_unlock(&crtc->commit_lock);
8567 
8568 		if (!commit)
8569 			continue;
8570 
8571 		/*
8572 		 * Make sure all pending HW programming completed and
8573 		 * page flips done
8574 		 */
8575 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8576 
8577 		if (ret > 0)
8578 			ret = wait_for_completion_interruptible_timeout(
8579 					&commit->flip_done, 10*HZ);
8580 
8581 		if (ret == 0)
8582 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8583 				  "timed out\n", crtc->base.id, crtc->name);
8584 
8585 		drm_crtc_commit_put(commit);
8586 	}
8587 
8588 	return ret < 0 ? ret : 0;
8589 }
8590 
8591 static void get_freesync_config_for_crtc(
8592 	struct dm_crtc_state *new_crtc_state,
8593 	struct dm_connector_state *new_con_state)
8594 {
8595 	struct mod_freesync_config config = {0};
8596 	struct amdgpu_dm_connector *aconnector =
8597 			to_amdgpu_dm_connector(new_con_state->base.connector);
8598 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8599 	int vrefresh = drm_mode_vrefresh(mode);
8600 
8601 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8602 					vrefresh >= aconnector->min_vfreq &&
8603 					vrefresh <= aconnector->max_vfreq;
8604 
8605 	if (new_crtc_state->vrr_supported) {
8606 		new_crtc_state->stream->ignore_msa_timing_param = true;
8607 		config.state = new_crtc_state->base.vrr_enabled ?
8608 				VRR_STATE_ACTIVE_VARIABLE :
8609 				VRR_STATE_INACTIVE;
8610 		config.min_refresh_in_uhz =
8611 				aconnector->min_vfreq * 1000000;
8612 		config.max_refresh_in_uhz =
8613 				aconnector->max_vfreq * 1000000;
8614 		config.vsif_supported = true;
8615 		config.btr = true;
8616 	}
8617 
8618 	new_crtc_state->freesync_config = config;
8619 }
8620 
8621 static void reset_freesync_config_for_crtc(
8622 	struct dm_crtc_state *new_crtc_state)
8623 {
8624 	new_crtc_state->vrr_supported = false;
8625 
8626 	memset(&new_crtc_state->vrr_infopacket, 0,
8627 	       sizeof(new_crtc_state->vrr_infopacket));
8628 }
8629 
8630 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8631 				struct drm_atomic_state *state,
8632 				struct drm_crtc *crtc,
8633 				struct drm_crtc_state *old_crtc_state,
8634 				struct drm_crtc_state *new_crtc_state,
8635 				bool enable,
8636 				bool *lock_and_validation_needed)
8637 {
8638 	struct dm_atomic_state *dm_state = NULL;
8639 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8640 	struct dc_stream_state *new_stream;
8641 	int ret = 0;
8642 
8643 	/*
8644 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8645 	 * update changed items
8646 	 */
8647 	struct amdgpu_crtc *acrtc = NULL;
8648 	struct amdgpu_dm_connector *aconnector = NULL;
8649 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8650 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8651 
8652 	new_stream = NULL;
8653 
8654 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8655 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8656 	acrtc = to_amdgpu_crtc(crtc);
8657 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8658 
8659 	/* TODO This hack should go away */
8660 	if (aconnector && enable) {
8661 		/* Make sure fake sink is created in plug-in scenario */
8662 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8663 							    &aconnector->base);
8664 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8665 							    &aconnector->base);
8666 
8667 		if (IS_ERR(drm_new_conn_state)) {
8668 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8669 			goto fail;
8670 		}
8671 
8672 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8673 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8674 
8675 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8676 			goto skip_modeset;
8677 
8678 		new_stream = create_validate_stream_for_sink(aconnector,
8679 							     &new_crtc_state->mode,
8680 							     dm_new_conn_state,
8681 							     dm_old_crtc_state->stream);
8682 
8683 		/*
8684 		 * we can have no stream on ACTION_SET if a display
8685 		 * was disconnected during S3, in this case it is not an
8686 		 * error, the OS will be updated after detection, and
8687 		 * will do the right thing on next atomic commit
8688 		 */
8689 
8690 		if (!new_stream) {
8691 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8692 					__func__, acrtc->base.base.id);
8693 			ret = -ENOMEM;
8694 			goto fail;
8695 		}
8696 
8697 		/*
8698 		 * TODO: Check VSDB bits to decide whether this should
8699 		 * be enabled or not.
8700 		 */
8701 		new_stream->triggered_crtc_reset.enabled =
8702 			dm->force_timing_sync;
8703 
8704 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8705 
8706 		ret = fill_hdr_info_packet(drm_new_conn_state,
8707 					   &new_stream->hdr_static_metadata);
8708 		if (ret)
8709 			goto fail;
8710 
8711 		/*
8712 		 * If we already removed the old stream from the context
8713 		 * (and set the new stream to NULL) then we can't reuse
8714 		 * the old stream even if the stream and scaling are unchanged.
8715 		 * We'll hit the BUG_ON and black screen.
8716 		 *
8717 		 * TODO: Refactor this function to allow this check to work
8718 		 * in all conditions.
8719 		 */
8720 		if (dm_new_crtc_state->stream &&
8721 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8722 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8723 			new_crtc_state->mode_changed = false;
8724 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8725 					 new_crtc_state->mode_changed);
8726 		}
8727 	}
8728 
8729 	/* mode_changed flag may get updated above, need to check again */
8730 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8731 		goto skip_modeset;
8732 
8733 	DRM_DEBUG_DRIVER(
8734 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8735 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8736 		"connectors_changed:%d\n",
8737 		acrtc->crtc_id,
8738 		new_crtc_state->enable,
8739 		new_crtc_state->active,
8740 		new_crtc_state->planes_changed,
8741 		new_crtc_state->mode_changed,
8742 		new_crtc_state->active_changed,
8743 		new_crtc_state->connectors_changed);
8744 
8745 	/* Remove stream for any changed/disabled CRTC */
8746 	if (!enable) {
8747 
8748 		if (!dm_old_crtc_state->stream)
8749 			goto skip_modeset;
8750 
8751 		ret = dm_atomic_get_state(state, &dm_state);
8752 		if (ret)
8753 			goto fail;
8754 
8755 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8756 				crtc->base.id);
8757 
8758 		/* i.e. reset mode */
8759 		if (dc_remove_stream_from_ctx(
8760 				dm->dc,
8761 				dm_state->context,
8762 				dm_old_crtc_state->stream) != DC_OK) {
8763 			ret = -EINVAL;
8764 			goto fail;
8765 		}
8766 
8767 		dc_stream_release(dm_old_crtc_state->stream);
8768 		dm_new_crtc_state->stream = NULL;
8769 
8770 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8771 
8772 		*lock_and_validation_needed = true;
8773 
8774 	} else {/* Add stream for any updated/enabled CRTC */
8775 		/*
8776 		 * Quick fix to prevent NULL pointer on new_stream when
8777 		 * added MST connectors not found in existing crtc_state in the chained mode
8778 		 * TODO: need to dig out the root cause of that
8779 		 */
8780 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8781 			goto skip_modeset;
8782 
8783 		if (modereset_required(new_crtc_state))
8784 			goto skip_modeset;
8785 
8786 		if (modeset_required(new_crtc_state, new_stream,
8787 				     dm_old_crtc_state->stream)) {
8788 
8789 			WARN_ON(dm_new_crtc_state->stream);
8790 
8791 			ret = dm_atomic_get_state(state, &dm_state);
8792 			if (ret)
8793 				goto fail;
8794 
8795 			dm_new_crtc_state->stream = new_stream;
8796 
8797 			dc_stream_retain(new_stream);
8798 
8799 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8800 						crtc->base.id);
8801 
8802 			if (dc_add_stream_to_ctx(
8803 					dm->dc,
8804 					dm_state->context,
8805 					dm_new_crtc_state->stream) != DC_OK) {
8806 				ret = -EINVAL;
8807 				goto fail;
8808 			}
8809 
8810 			*lock_and_validation_needed = true;
8811 		}
8812 	}
8813 
8814 skip_modeset:
8815 	/* Release extra reference */
8816 	if (new_stream)
8817 		 dc_stream_release(new_stream);
8818 
8819 	/*
8820 	 * We want to do dc stream updates that do not require a
8821 	 * full modeset below.
8822 	 */
8823 	if (!(enable && aconnector && new_crtc_state->active))
8824 		return 0;
8825 	/*
8826 	 * Given above conditions, the dc state cannot be NULL because:
8827 	 * 1. We're in the process of enabling CRTCs (just been added
8828 	 *    to the dc context, or already is on the context)
8829 	 * 2. Has a valid connector attached, and
8830 	 * 3. Is currently active and enabled.
8831 	 * => The dc stream state currently exists.
8832 	 */
8833 	BUG_ON(dm_new_crtc_state->stream == NULL);
8834 
8835 	/* Scaling or underscan settings */
8836 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8837 		update_stream_scaling_settings(
8838 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8839 
8840 	/* ABM settings */
8841 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8842 
8843 	/*
8844 	 * Color management settings. We also update color properties
8845 	 * when a modeset is needed, to ensure it gets reprogrammed.
8846 	 */
8847 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8848 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8849 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8850 		if (ret)
8851 			goto fail;
8852 	}
8853 
8854 	/* Update Freesync settings. */
8855 	get_freesync_config_for_crtc(dm_new_crtc_state,
8856 				     dm_new_conn_state);
8857 
8858 	return ret;
8859 
8860 fail:
8861 	if (new_stream)
8862 		dc_stream_release(new_stream);
8863 	return ret;
8864 }
8865 
8866 static bool should_reset_plane(struct drm_atomic_state *state,
8867 			       struct drm_plane *plane,
8868 			       struct drm_plane_state *old_plane_state,
8869 			       struct drm_plane_state *new_plane_state)
8870 {
8871 	struct drm_plane *other;
8872 	struct drm_plane_state *old_other_state, *new_other_state;
8873 	struct drm_crtc_state *new_crtc_state;
8874 	int i;
8875 
8876 	/*
8877 	 * TODO: Remove this hack once the checks below are sufficient
8878 	 * enough to determine when we need to reset all the planes on
8879 	 * the stream.
8880 	 */
8881 	if (state->allow_modeset)
8882 		return true;
8883 
8884 	/* Exit early if we know that we're adding or removing the plane. */
8885 	if (old_plane_state->crtc != new_plane_state->crtc)
8886 		return true;
8887 
8888 	/* old crtc == new_crtc == NULL, plane not in context. */
8889 	if (!new_plane_state->crtc)
8890 		return false;
8891 
8892 	new_crtc_state =
8893 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8894 
8895 	if (!new_crtc_state)
8896 		return true;
8897 
8898 	/* CRTC Degamma changes currently require us to recreate planes. */
8899 	if (new_crtc_state->color_mgmt_changed)
8900 		return true;
8901 
8902 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8903 		return true;
8904 
8905 	/*
8906 	 * If there are any new primary or overlay planes being added or
8907 	 * removed then the z-order can potentially change. To ensure
8908 	 * correct z-order and pipe acquisition the current DC architecture
8909 	 * requires us to remove and recreate all existing planes.
8910 	 *
8911 	 * TODO: Come up with a more elegant solution for this.
8912 	 */
8913 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8914 		struct amdgpu_framebuffer *old_afb, *new_afb;
8915 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8916 			continue;
8917 
8918 		if (old_other_state->crtc != new_plane_state->crtc &&
8919 		    new_other_state->crtc != new_plane_state->crtc)
8920 			continue;
8921 
8922 		if (old_other_state->crtc != new_other_state->crtc)
8923 			return true;
8924 
8925 		/* Src/dst size and scaling updates. */
8926 		if (old_other_state->src_w != new_other_state->src_w ||
8927 		    old_other_state->src_h != new_other_state->src_h ||
8928 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8929 		    old_other_state->crtc_h != new_other_state->crtc_h)
8930 			return true;
8931 
8932 		/* Rotation / mirroring updates. */
8933 		if (old_other_state->rotation != new_other_state->rotation)
8934 			return true;
8935 
8936 		/* Blending updates. */
8937 		if (old_other_state->pixel_blend_mode !=
8938 		    new_other_state->pixel_blend_mode)
8939 			return true;
8940 
8941 		/* Alpha updates. */
8942 		if (old_other_state->alpha != new_other_state->alpha)
8943 			return true;
8944 
8945 		/* Colorspace changes. */
8946 		if (old_other_state->color_range != new_other_state->color_range ||
8947 		    old_other_state->color_encoding != new_other_state->color_encoding)
8948 			return true;
8949 
8950 		/* Framebuffer checks fall at the end. */
8951 		if (!old_other_state->fb || !new_other_state->fb)
8952 			continue;
8953 
8954 		/* Pixel format changes can require bandwidth updates. */
8955 		if (old_other_state->fb->format != new_other_state->fb->format)
8956 			return true;
8957 
8958 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8959 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8960 
8961 		/* Tiling and DCC changes also require bandwidth updates. */
8962 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8963 		    old_afb->base.modifier != new_afb->base.modifier)
8964 			return true;
8965 	}
8966 
8967 	return false;
8968 }
8969 
8970 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8971 			      struct drm_plane_state *new_plane_state,
8972 			      struct drm_framebuffer *fb)
8973 {
8974 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8975 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8976 	unsigned int pitch;
8977 	bool linear;
8978 
8979 	if (fb->width > new_acrtc->max_cursor_width ||
8980 	    fb->height > new_acrtc->max_cursor_height) {
8981 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8982 				 new_plane_state->fb->width,
8983 				 new_plane_state->fb->height);
8984 		return -EINVAL;
8985 	}
8986 	if (new_plane_state->src_w != fb->width << 16 ||
8987 	    new_plane_state->src_h != fb->height << 16) {
8988 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8989 		return -EINVAL;
8990 	}
8991 
8992 	/* Pitch in pixels */
8993 	pitch = fb->pitches[0] / fb->format->cpp[0];
8994 
8995 	if (fb->width != pitch) {
8996 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
8997 				 fb->width, pitch);
8998 		return -EINVAL;
8999 	}
9000 
9001 	switch (pitch) {
9002 	case 64:
9003 	case 128:
9004 	case 256:
9005 		/* FB pitch is supported by cursor plane */
9006 		break;
9007 	default:
9008 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9009 		return -EINVAL;
9010 	}
9011 
9012 	/* Core DRM takes care of checking FB modifiers, so we only need to
9013 	 * check tiling flags when the FB doesn't have a modifier. */
9014 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9015 		if (adev->family < AMDGPU_FAMILY_AI) {
9016 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9017 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9018 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9019 		} else {
9020 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9021 		}
9022 		if (!linear) {
9023 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9024 			return -EINVAL;
9025 		}
9026 	}
9027 
9028 	return 0;
9029 }
9030 
9031 static int dm_update_plane_state(struct dc *dc,
9032 				 struct drm_atomic_state *state,
9033 				 struct drm_plane *plane,
9034 				 struct drm_plane_state *old_plane_state,
9035 				 struct drm_plane_state *new_plane_state,
9036 				 bool enable,
9037 				 bool *lock_and_validation_needed)
9038 {
9039 
9040 	struct dm_atomic_state *dm_state = NULL;
9041 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9042 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9043 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9044 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9045 	struct amdgpu_crtc *new_acrtc;
9046 	bool needs_reset;
9047 	int ret = 0;
9048 
9049 
9050 	new_plane_crtc = new_plane_state->crtc;
9051 	old_plane_crtc = old_plane_state->crtc;
9052 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9053 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9054 
9055 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9056 		if (!enable || !new_plane_crtc ||
9057 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9058 			return 0;
9059 
9060 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9061 
9062 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9063 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9064 			return -EINVAL;
9065 		}
9066 
9067 		if (new_plane_state->fb) {
9068 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9069 						 new_plane_state->fb);
9070 			if (ret)
9071 				return ret;
9072 		}
9073 
9074 		return 0;
9075 	}
9076 
9077 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9078 					 new_plane_state);
9079 
9080 	/* Remove any changed/removed planes */
9081 	if (!enable) {
9082 		if (!needs_reset)
9083 			return 0;
9084 
9085 		if (!old_plane_crtc)
9086 			return 0;
9087 
9088 		old_crtc_state = drm_atomic_get_old_crtc_state(
9089 				state, old_plane_crtc);
9090 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9091 
9092 		if (!dm_old_crtc_state->stream)
9093 			return 0;
9094 
9095 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9096 				plane->base.id, old_plane_crtc->base.id);
9097 
9098 		ret = dm_atomic_get_state(state, &dm_state);
9099 		if (ret)
9100 			return ret;
9101 
9102 		if (!dc_remove_plane_from_context(
9103 				dc,
9104 				dm_old_crtc_state->stream,
9105 				dm_old_plane_state->dc_state,
9106 				dm_state->context)) {
9107 
9108 			return -EINVAL;
9109 		}
9110 
9111 
9112 		dc_plane_state_release(dm_old_plane_state->dc_state);
9113 		dm_new_plane_state->dc_state = NULL;
9114 
9115 		*lock_and_validation_needed = true;
9116 
9117 	} else { /* Add new planes */
9118 		struct dc_plane_state *dc_new_plane_state;
9119 
9120 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9121 			return 0;
9122 
9123 		if (!new_plane_crtc)
9124 			return 0;
9125 
9126 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9127 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9128 
9129 		if (!dm_new_crtc_state->stream)
9130 			return 0;
9131 
9132 		if (!needs_reset)
9133 			return 0;
9134 
9135 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9136 		if (ret)
9137 			return ret;
9138 
9139 		WARN_ON(dm_new_plane_state->dc_state);
9140 
9141 		dc_new_plane_state = dc_create_plane_state(dc);
9142 		if (!dc_new_plane_state)
9143 			return -ENOMEM;
9144 
9145 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9146 				plane->base.id, new_plane_crtc->base.id);
9147 
9148 		ret = fill_dc_plane_attributes(
9149 			drm_to_adev(new_plane_crtc->dev),
9150 			dc_new_plane_state,
9151 			new_plane_state,
9152 			new_crtc_state);
9153 		if (ret) {
9154 			dc_plane_state_release(dc_new_plane_state);
9155 			return ret;
9156 		}
9157 
9158 		ret = dm_atomic_get_state(state, &dm_state);
9159 		if (ret) {
9160 			dc_plane_state_release(dc_new_plane_state);
9161 			return ret;
9162 		}
9163 
9164 		/*
9165 		 * Any atomic check errors that occur after this will
9166 		 * not need a release. The plane state will be attached
9167 		 * to the stream, and therefore part of the atomic
9168 		 * state. It'll be released when the atomic state is
9169 		 * cleaned.
9170 		 */
9171 		if (!dc_add_plane_to_context(
9172 				dc,
9173 				dm_new_crtc_state->stream,
9174 				dc_new_plane_state,
9175 				dm_state->context)) {
9176 
9177 			dc_plane_state_release(dc_new_plane_state);
9178 			return -EINVAL;
9179 		}
9180 
9181 		dm_new_plane_state->dc_state = dc_new_plane_state;
9182 
9183 		/* Tell DC to do a full surface update every time there
9184 		 * is a plane change. Inefficient, but works for now.
9185 		 */
9186 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9187 
9188 		*lock_and_validation_needed = true;
9189 	}
9190 
9191 
9192 	return ret;
9193 }
9194 
9195 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9196 				struct drm_crtc *crtc,
9197 				struct drm_crtc_state *new_crtc_state)
9198 {
9199 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9200 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9201 
9202 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9203 	 * cursor per pipe but it's going to inherit the scaling and
9204 	 * positioning from the underlying pipe. Check the cursor plane's
9205 	 * blending properties match the primary plane's. */
9206 
9207 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9208 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9209 	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9210 		return 0;
9211 	}
9212 
9213 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9214 			 (new_cursor_state->src_w >> 16);
9215 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9216 			 (new_cursor_state->src_h >> 16);
9217 
9218 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9219 			 (new_primary_state->src_w >> 16);
9220 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9221 			 (new_primary_state->src_h >> 16);
9222 
9223 	if (cursor_scale_w != primary_scale_w ||
9224 	    cursor_scale_h != primary_scale_h) {
9225 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9226 		return -EINVAL;
9227 	}
9228 
9229 	return 0;
9230 }
9231 
9232 #if defined(CONFIG_DRM_AMD_DC_DCN)
9233 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9234 {
9235 	struct drm_connector *connector;
9236 	struct drm_connector_state *conn_state;
9237 	struct amdgpu_dm_connector *aconnector = NULL;
9238 	int i;
9239 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9240 		if (conn_state->crtc != crtc)
9241 			continue;
9242 
9243 		aconnector = to_amdgpu_dm_connector(connector);
9244 		if (!aconnector->port || !aconnector->mst_port)
9245 			aconnector = NULL;
9246 		else
9247 			break;
9248 	}
9249 
9250 	if (!aconnector)
9251 		return 0;
9252 
9253 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9254 }
9255 #endif
9256 
9257 /**
9258  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9259  * @dev: The DRM device
9260  * @state: The atomic state to commit
9261  *
9262  * Validate that the given atomic state is programmable by DC into hardware.
9263  * This involves constructing a &struct dc_state reflecting the new hardware
9264  * state we wish to commit, then querying DC to see if it is programmable. It's
9265  * important not to modify the existing DC state. Otherwise, atomic_check
9266  * may unexpectedly commit hardware changes.
9267  *
9268  * When validating the DC state, it's important that the right locks are
9269  * acquired. For full updates case which removes/adds/updates streams on one
9270  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9271  * that any such full update commit will wait for completion of any outstanding
9272  * flip using DRMs synchronization events.
9273  *
9274  * Note that DM adds the affected connectors for all CRTCs in state, when that
9275  * might not seem necessary. This is because DC stream creation requires the
9276  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9277  * be possible but non-trivial - a possible TODO item.
9278  *
9279  * Return: -Error code if validation failed.
9280  */
9281 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9282 				  struct drm_atomic_state *state)
9283 {
9284 	struct amdgpu_device *adev = drm_to_adev(dev);
9285 	struct dm_atomic_state *dm_state = NULL;
9286 	struct dc *dc = adev->dm.dc;
9287 	struct drm_connector *connector;
9288 	struct drm_connector_state *old_con_state, *new_con_state;
9289 	struct drm_crtc *crtc;
9290 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9291 	struct drm_plane *plane;
9292 	struct drm_plane_state *old_plane_state, *new_plane_state;
9293 	enum dc_status status;
9294 	int ret, i;
9295 	bool lock_and_validation_needed = false;
9296 	struct dm_crtc_state *dm_old_crtc_state;
9297 
9298 	trace_amdgpu_dm_atomic_check_begin(state);
9299 
9300 	ret = drm_atomic_helper_check_modeset(dev, state);
9301 	if (ret)
9302 		goto fail;
9303 
9304 	/* Check connector changes */
9305 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9306 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9307 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9308 
9309 		/* Skip connectors that are disabled or part of modeset already. */
9310 		if (!old_con_state->crtc && !new_con_state->crtc)
9311 			continue;
9312 
9313 		if (!new_con_state->crtc)
9314 			continue;
9315 
9316 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9317 		if (IS_ERR(new_crtc_state)) {
9318 			ret = PTR_ERR(new_crtc_state);
9319 			goto fail;
9320 		}
9321 
9322 		if (dm_old_con_state->abm_level !=
9323 		    dm_new_con_state->abm_level)
9324 			new_crtc_state->connectors_changed = true;
9325 	}
9326 
9327 #if defined(CONFIG_DRM_AMD_DC_DCN)
9328 	if (adev->asic_type >= CHIP_NAVI10) {
9329 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9330 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9331 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9332 				if (ret)
9333 					goto fail;
9334 			}
9335 		}
9336 	}
9337 #endif
9338 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9339 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9340 
9341 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9342 		    !new_crtc_state->color_mgmt_changed &&
9343 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9344 			dm_old_crtc_state->dsc_force_changed == false)
9345 			continue;
9346 
9347 		if (!new_crtc_state->enable)
9348 			continue;
9349 
9350 		ret = drm_atomic_add_affected_connectors(state, crtc);
9351 		if (ret)
9352 			return ret;
9353 
9354 		ret = drm_atomic_add_affected_planes(state, crtc);
9355 		if (ret)
9356 			goto fail;
9357 
9358 		if (dm_old_crtc_state->dsc_force_changed)
9359 			new_crtc_state->mode_changed = true;
9360 	}
9361 
9362 	/*
9363 	 * Add all primary and overlay planes on the CRTC to the state
9364 	 * whenever a plane is enabled to maintain correct z-ordering
9365 	 * and to enable fast surface updates.
9366 	 */
9367 	drm_for_each_crtc(crtc, dev) {
9368 		bool modified = false;
9369 
9370 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9371 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9372 				continue;
9373 
9374 			if (new_plane_state->crtc == crtc ||
9375 			    old_plane_state->crtc == crtc) {
9376 				modified = true;
9377 				break;
9378 			}
9379 		}
9380 
9381 		if (!modified)
9382 			continue;
9383 
9384 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9385 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9386 				continue;
9387 
9388 			new_plane_state =
9389 				drm_atomic_get_plane_state(state, plane);
9390 
9391 			if (IS_ERR(new_plane_state)) {
9392 				ret = PTR_ERR(new_plane_state);
9393 				goto fail;
9394 			}
9395 		}
9396 	}
9397 
9398 	/* Remove exiting planes if they are modified */
9399 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9400 		ret = dm_update_plane_state(dc, state, plane,
9401 					    old_plane_state,
9402 					    new_plane_state,
9403 					    false,
9404 					    &lock_and_validation_needed);
9405 		if (ret)
9406 			goto fail;
9407 	}
9408 
9409 	/* Disable all crtcs which require disable */
9410 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9411 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9412 					   old_crtc_state,
9413 					   new_crtc_state,
9414 					   false,
9415 					   &lock_and_validation_needed);
9416 		if (ret)
9417 			goto fail;
9418 	}
9419 
9420 	/* Enable all crtcs which require enable */
9421 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9422 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9423 					   old_crtc_state,
9424 					   new_crtc_state,
9425 					   true,
9426 					   &lock_and_validation_needed);
9427 		if (ret)
9428 			goto fail;
9429 	}
9430 
9431 	/* Add new/modified planes */
9432 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9433 		ret = dm_update_plane_state(dc, state, plane,
9434 					    old_plane_state,
9435 					    new_plane_state,
9436 					    true,
9437 					    &lock_and_validation_needed);
9438 		if (ret)
9439 			goto fail;
9440 	}
9441 
9442 	/* Run this here since we want to validate the streams we created */
9443 	ret = drm_atomic_helper_check_planes(dev, state);
9444 	if (ret)
9445 		goto fail;
9446 
9447 	/* Check cursor planes scaling */
9448 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9449 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9450 		if (ret)
9451 			goto fail;
9452 	}
9453 
9454 	if (state->legacy_cursor_update) {
9455 		/*
9456 		 * This is a fast cursor update coming from the plane update
9457 		 * helper, check if it can be done asynchronously for better
9458 		 * performance.
9459 		 */
9460 		state->async_update =
9461 			!drm_atomic_helper_async_check(dev, state);
9462 
9463 		/*
9464 		 * Skip the remaining global validation if this is an async
9465 		 * update. Cursor updates can be done without affecting
9466 		 * state or bandwidth calcs and this avoids the performance
9467 		 * penalty of locking the private state object and
9468 		 * allocating a new dc_state.
9469 		 */
9470 		if (state->async_update)
9471 			return 0;
9472 	}
9473 
9474 	/* Check scaling and underscan changes*/
9475 	/* TODO Removed scaling changes validation due to inability to commit
9476 	 * new stream into context w\o causing full reset. Need to
9477 	 * decide how to handle.
9478 	 */
9479 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9480 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9481 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9482 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9483 
9484 		/* Skip any modesets/resets */
9485 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9486 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9487 			continue;
9488 
9489 		/* Skip any thing not scale or underscan changes */
9490 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9491 			continue;
9492 
9493 		lock_and_validation_needed = true;
9494 	}
9495 
9496 	/**
9497 	 * Streams and planes are reset when there are changes that affect
9498 	 * bandwidth. Anything that affects bandwidth needs to go through
9499 	 * DC global validation to ensure that the configuration can be applied
9500 	 * to hardware.
9501 	 *
9502 	 * We have to currently stall out here in atomic_check for outstanding
9503 	 * commits to finish in this case because our IRQ handlers reference
9504 	 * DRM state directly - we can end up disabling interrupts too early
9505 	 * if we don't.
9506 	 *
9507 	 * TODO: Remove this stall and drop DM state private objects.
9508 	 */
9509 	if (lock_and_validation_needed) {
9510 		ret = dm_atomic_get_state(state, &dm_state);
9511 		if (ret)
9512 			goto fail;
9513 
9514 		ret = do_aquire_global_lock(dev, state);
9515 		if (ret)
9516 			goto fail;
9517 
9518 #if defined(CONFIG_DRM_AMD_DC_DCN)
9519 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9520 			goto fail;
9521 
9522 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9523 		if (ret)
9524 			goto fail;
9525 #endif
9526 
9527 		/*
9528 		 * Perform validation of MST topology in the state:
9529 		 * We need to perform MST atomic check before calling
9530 		 * dc_validate_global_state(), or there is a chance
9531 		 * to get stuck in an infinite loop and hang eventually.
9532 		 */
9533 		ret = drm_dp_mst_atomic_check(state);
9534 		if (ret)
9535 			goto fail;
9536 		status = dc_validate_global_state(dc, dm_state->context, false);
9537 		if (status != DC_OK) {
9538 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9539 				       dc_status_to_str(status), status);
9540 			ret = -EINVAL;
9541 			goto fail;
9542 		}
9543 	} else {
9544 		/*
9545 		 * The commit is a fast update. Fast updates shouldn't change
9546 		 * the DC context, affect global validation, and can have their
9547 		 * commit work done in parallel with other commits not touching
9548 		 * the same resource. If we have a new DC context as part of
9549 		 * the DM atomic state from validation we need to free it and
9550 		 * retain the existing one instead.
9551 		 *
9552 		 * Furthermore, since the DM atomic state only contains the DC
9553 		 * context and can safely be annulled, we can free the state
9554 		 * and clear the associated private object now to free
9555 		 * some memory and avoid a possible use-after-free later.
9556 		 */
9557 
9558 		for (i = 0; i < state->num_private_objs; i++) {
9559 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9560 
9561 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9562 				int j = state->num_private_objs-1;
9563 
9564 				dm_atomic_destroy_state(obj,
9565 						state->private_objs[i].state);
9566 
9567 				/* If i is not at the end of the array then the
9568 				 * last element needs to be moved to where i was
9569 				 * before the array can safely be truncated.
9570 				 */
9571 				if (i != j)
9572 					state->private_objs[i] =
9573 						state->private_objs[j];
9574 
9575 				state->private_objs[j].ptr = NULL;
9576 				state->private_objs[j].state = NULL;
9577 				state->private_objs[j].old_state = NULL;
9578 				state->private_objs[j].new_state = NULL;
9579 
9580 				state->num_private_objs = j;
9581 				break;
9582 			}
9583 		}
9584 	}
9585 
9586 	/* Store the overall update type for use later in atomic check. */
9587 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9588 		struct dm_crtc_state *dm_new_crtc_state =
9589 			to_dm_crtc_state(new_crtc_state);
9590 
9591 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9592 							 UPDATE_TYPE_FULL :
9593 							 UPDATE_TYPE_FAST;
9594 	}
9595 
9596 	/* Must be success */
9597 	WARN_ON(ret);
9598 
9599 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9600 
9601 	return ret;
9602 
9603 fail:
9604 	if (ret == -EDEADLK)
9605 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9606 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9607 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9608 	else
9609 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9610 
9611 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9612 
9613 	return ret;
9614 }
9615 
9616 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9617 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9618 {
9619 	uint8_t dpcd_data;
9620 	bool capable = false;
9621 
9622 	if (amdgpu_dm_connector->dc_link &&
9623 		dm_helpers_dp_read_dpcd(
9624 				NULL,
9625 				amdgpu_dm_connector->dc_link,
9626 				DP_DOWN_STREAM_PORT_COUNT,
9627 				&dpcd_data,
9628 				sizeof(dpcd_data))) {
9629 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9630 	}
9631 
9632 	return capable;
9633 }
9634 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9635 					struct edid *edid)
9636 {
9637 	int i;
9638 	bool edid_check_required;
9639 	struct detailed_timing *timing;
9640 	struct detailed_non_pixel *data;
9641 	struct detailed_data_monitor_range *range;
9642 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9643 			to_amdgpu_dm_connector(connector);
9644 	struct dm_connector_state *dm_con_state = NULL;
9645 
9646 	struct drm_device *dev = connector->dev;
9647 	struct amdgpu_device *adev = drm_to_adev(dev);
9648 	bool freesync_capable = false;
9649 
9650 	if (!connector->state) {
9651 		DRM_ERROR("%s - Connector has no state", __func__);
9652 		goto update;
9653 	}
9654 
9655 	if (!edid) {
9656 		dm_con_state = to_dm_connector_state(connector->state);
9657 
9658 		amdgpu_dm_connector->min_vfreq = 0;
9659 		amdgpu_dm_connector->max_vfreq = 0;
9660 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9661 
9662 		goto update;
9663 	}
9664 
9665 	dm_con_state = to_dm_connector_state(connector->state);
9666 
9667 	edid_check_required = false;
9668 	if (!amdgpu_dm_connector->dc_sink) {
9669 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9670 		goto update;
9671 	}
9672 	if (!adev->dm.freesync_module)
9673 		goto update;
9674 	/*
9675 	 * if edid non zero restrict freesync only for dp and edp
9676 	 */
9677 	if (edid) {
9678 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9679 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9680 			edid_check_required = is_dp_capable_without_timing_msa(
9681 						adev->dm.dc,
9682 						amdgpu_dm_connector);
9683 		}
9684 	}
9685 	if (edid_check_required == true && (edid->version > 1 ||
9686 	   (edid->version == 1 && edid->revision > 1))) {
9687 		for (i = 0; i < 4; i++) {
9688 
9689 			timing	= &edid->detailed_timings[i];
9690 			data	= &timing->data.other_data;
9691 			range	= &data->data.range;
9692 			/*
9693 			 * Check if monitor has continuous frequency mode
9694 			 */
9695 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9696 				continue;
9697 			/*
9698 			 * Check for flag range limits only. If flag == 1 then
9699 			 * no additional timing information provided.
9700 			 * Default GTF, GTF Secondary curve and CVT are not
9701 			 * supported
9702 			 */
9703 			if (range->flags != 1)
9704 				continue;
9705 
9706 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9707 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9708 			amdgpu_dm_connector->pixel_clock_mhz =
9709 				range->pixel_clock_mhz * 10;
9710 
9711 			connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9712 			connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9713 
9714 			break;
9715 		}
9716 
9717 		if (amdgpu_dm_connector->max_vfreq -
9718 		    amdgpu_dm_connector->min_vfreq > 10) {
9719 
9720 			freesync_capable = true;
9721 		}
9722 	}
9723 
9724 update:
9725 	if (dm_con_state)
9726 		dm_con_state->freesync_capable = freesync_capable;
9727 
9728 	if (connector->vrr_capable_property)
9729 		drm_connector_set_vrr_capable_property(connector,
9730 						       freesync_capable);
9731 }
9732 
9733 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9734 {
9735 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9736 
9737 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9738 		return;
9739 	if (link->type == dc_connection_none)
9740 		return;
9741 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9742 					dpcd_data, sizeof(dpcd_data))) {
9743 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9744 
9745 		if (dpcd_data[0] == 0) {
9746 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9747 			link->psr_settings.psr_feature_enabled = false;
9748 		} else {
9749 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9750 			link->psr_settings.psr_feature_enabled = true;
9751 		}
9752 
9753 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9754 	}
9755 }
9756 
9757 /*
9758  * amdgpu_dm_link_setup_psr() - configure psr link
9759  * @stream: stream state
9760  *
9761  * Return: true if success
9762  */
9763 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9764 {
9765 	struct dc_link *link = NULL;
9766 	struct psr_config psr_config = {0};
9767 	struct psr_context psr_context = {0};
9768 	bool ret = false;
9769 
9770 	if (stream == NULL)
9771 		return false;
9772 
9773 	link = stream->link;
9774 
9775 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9776 
9777 	if (psr_config.psr_version > 0) {
9778 		psr_config.psr_exit_link_training_required = 0x1;
9779 		psr_config.psr_frame_capture_indication_req = 0;
9780 		psr_config.psr_rfb_setup_time = 0x37;
9781 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9782 		psr_config.allow_smu_optimizations = 0x0;
9783 
9784 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9785 
9786 	}
9787 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9788 
9789 	return ret;
9790 }
9791 
9792 /*
9793  * amdgpu_dm_psr_enable() - enable psr f/w
9794  * @stream: stream state
9795  *
9796  * Return: true if success
9797  */
9798 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9799 {
9800 	struct dc_link *link = stream->link;
9801 	unsigned int vsync_rate_hz = 0;
9802 	struct dc_static_screen_params params = {0};
9803 	/* Calculate number of static frames before generating interrupt to
9804 	 * enter PSR.
9805 	 */
9806 	// Init fail safe of 2 frames static
9807 	unsigned int num_frames_static = 2;
9808 
9809 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9810 
9811 	vsync_rate_hz = div64_u64(div64_u64((
9812 			stream->timing.pix_clk_100hz * 100),
9813 			stream->timing.v_total),
9814 			stream->timing.h_total);
9815 
9816 	/* Round up
9817 	 * Calculate number of frames such that at least 30 ms of time has
9818 	 * passed.
9819 	 */
9820 	if (vsync_rate_hz != 0) {
9821 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9822 		num_frames_static = (30000 / frame_time_microsec) + 1;
9823 	}
9824 
9825 	params.triggers.cursor_update = true;
9826 	params.triggers.overlay_update = true;
9827 	params.triggers.surface_update = true;
9828 	params.num_frames = num_frames_static;
9829 
9830 	dc_stream_set_static_screen_params(link->ctx->dc,
9831 					   &stream, 1,
9832 					   &params);
9833 
9834 	return dc_link_set_psr_allow_active(link, true, false, false);
9835 }
9836 
9837 /*
9838  * amdgpu_dm_psr_disable() - disable psr f/w
9839  * @stream:  stream state
9840  *
9841  * Return: true if success
9842  */
9843 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9844 {
9845 
9846 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9847 
9848 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9849 }
9850 
9851 /*
9852  * amdgpu_dm_psr_disable() - disable psr f/w
9853  * if psr is enabled on any stream
9854  *
9855  * Return: true if success
9856  */
9857 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9858 {
9859 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9860 	return dc_set_psr_allow_active(dm->dc, false);
9861 }
9862 
9863 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9864 {
9865 	struct amdgpu_device *adev = drm_to_adev(dev);
9866 	struct dc *dc = adev->dm.dc;
9867 	int i;
9868 
9869 	mutex_lock(&adev->dm.dc_lock);
9870 	if (dc->current_state) {
9871 		for (i = 0; i < dc->current_state->stream_count; ++i)
9872 			dc->current_state->streams[i]
9873 				->triggered_crtc_reset.enabled =
9874 				adev->dm.force_timing_sync;
9875 
9876 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9877 		dc_trigger_sync(dc, dc->current_state);
9878 	}
9879 	mutex_unlock(&adev->dm.dc_lock);
9880 }
9881 
9882 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9883 		       uint32_t value, const char *func_name)
9884 {
9885 #ifdef DM_CHECK_ADDR_0
9886 	if (address == 0) {
9887 		DC_ERR("invalid register write. address = 0");
9888 		return;
9889 	}
9890 #endif
9891 	cgs_write_register(ctx->cgs_device, address, value);
9892 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9893 }
9894 
9895 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9896 			  const char *func_name)
9897 {
9898 	uint32_t value;
9899 #ifdef DM_CHECK_ADDR_0
9900 	if (address == 0) {
9901 		DC_ERR("invalid register read; address = 0\n");
9902 		return 0;
9903 	}
9904 #endif
9905 
9906 	if (ctx->dmub_srv &&
9907 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9908 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9909 		ASSERT(false);
9910 		return 0;
9911 	}
9912 
9913 	value = cgs_read_register(ctx->cgs_device, address);
9914 
9915 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9916 
9917 	return value;
9918 }
9919