1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
131 {
132 	switch (link->dpcd_caps.dongle_type) {
133 	case DISPLAY_DONGLE_NONE:
134 		return DRM_MODE_SUBCONNECTOR_Native;
135 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
136 		return DRM_MODE_SUBCONNECTOR_VGA;
137 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
138 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
139 		return DRM_MODE_SUBCONNECTOR_DVID;
140 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
141 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
142 		return DRM_MODE_SUBCONNECTOR_HDMIA;
143 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
144 	default:
145 		return DRM_MODE_SUBCONNECTOR_Unknown;
146 	}
147 }
148 
149 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
150 {
151 	struct dc_link *link = aconnector->dc_link;
152 	struct drm_connector *connector = &aconnector->base;
153 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
154 
155 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
156 		return;
157 
158 	if (aconnector->dc_sink)
159 		subconnector = get_subconnector_type(link);
160 
161 	drm_object_property_set_value(&connector->base,
162 			connector->dev->mode_config.dp_subconnector_property,
163 			subconnector);
164 }
165 
166 /*
167  * initializes drm_device display related structures, based on the information
168  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
169  * drm_encoder, drm_mode_config
170  *
171  * Returns 0 on success
172  */
173 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
174 /* removes and deallocates the drm structures, created by the above function */
175 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
176 
177 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
178 				struct drm_plane *plane,
179 				unsigned long possible_crtcs,
180 				const struct dc_plane_cap *plane_cap);
181 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
182 			       struct drm_plane *plane,
183 			       uint32_t link_index);
184 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
185 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
186 				    uint32_t link_index,
187 				    struct amdgpu_encoder *amdgpu_encoder);
188 static int amdgpu_dm_encoder_init(struct drm_device *dev,
189 				  struct amdgpu_encoder *aencoder,
190 				  uint32_t link_index);
191 
192 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
193 
194 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
195 				   struct drm_atomic_state *state,
196 				   bool nonblock);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 
211 
212 /*
213  * dm_vblank_get_counter
214  *
215  * @brief
216  * Get counter for number of vertical blanks
217  *
218  * @param
219  * struct amdgpu_device *adev - [in] desired amdgpu device
220  * int disp_idx - [in] which CRTC to get the counter from
221  *
222  * @return
223  * Counter for vertical blanks
224  */
225 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
226 {
227 	if (crtc >= adev->mode_info.num_crtc)
228 		return 0;
229 	else {
230 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
231 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
232 				acrtc->base.state);
233 
234 
235 		if (acrtc_state->stream == NULL) {
236 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237 				  crtc);
238 			return 0;
239 		}
240 
241 		return dc_stream_get_vblank_counter(acrtc_state->stream);
242 	}
243 }
244 
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 				  u32 *vbl, u32 *position)
247 {
248 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
249 
250 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251 		return -EINVAL;
252 	else {
253 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
255 						acrtc->base.state);
256 
257 		if (acrtc_state->stream ==  NULL) {
258 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
259 				  crtc);
260 			return 0;
261 		}
262 
263 		/*
264 		 * TODO rework base driver to use values directly.
265 		 * for now parse it back into reg-format
266 		 */
267 		dc_stream_get_scanoutpos(acrtc_state->stream,
268 					 &v_blank_start,
269 					 &v_blank_end,
270 					 &h_position,
271 					 &v_position);
272 
273 		*position = v_position | (h_position << 16);
274 		*vbl = v_blank_start | (v_blank_end << 16);
275 	}
276 
277 	return 0;
278 }
279 
280 static bool dm_is_idle(void *handle)
281 {
282 	/* XXX todo */
283 	return true;
284 }
285 
286 static int dm_wait_for_idle(void *handle)
287 {
288 	/* XXX todo */
289 	return 0;
290 }
291 
292 static bool dm_check_soft_reset(void *handle)
293 {
294 	return false;
295 }
296 
297 static int dm_soft_reset(void *handle)
298 {
299 	/* XXX todo */
300 	return 0;
301 }
302 
303 static struct amdgpu_crtc *
304 get_crtc_by_otg_inst(struct amdgpu_device *adev,
305 		     int otg_inst)
306 {
307 	struct drm_device *dev = adev->ddev;
308 	struct drm_crtc *crtc;
309 	struct amdgpu_crtc *amdgpu_crtc;
310 
311 	if (otg_inst == -1) {
312 		WARN_ON(1);
313 		return adev->mode_info.crtcs[0];
314 	}
315 
316 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
317 		amdgpu_crtc = to_amdgpu_crtc(crtc);
318 
319 		if (amdgpu_crtc->otg_inst == otg_inst)
320 			return amdgpu_crtc;
321 	}
322 
323 	return NULL;
324 }
325 
326 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
327 {
328 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
329 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
330 }
331 
332 /**
333  * dm_pflip_high_irq() - Handle pageflip interrupt
334  * @interrupt_params: ignored
335  *
336  * Handles the pageflip interrupt by notifying all interested parties
337  * that the pageflip has been completed.
338  */
339 static void dm_pflip_high_irq(void *interrupt_params)
340 {
341 	struct amdgpu_crtc *amdgpu_crtc;
342 	struct common_irq_params *irq_params = interrupt_params;
343 	struct amdgpu_device *adev = irq_params->adev;
344 	unsigned long flags;
345 	struct drm_pending_vblank_event *e;
346 	struct dm_crtc_state *acrtc_state;
347 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
348 	bool vrr_active;
349 
350 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
351 
352 	/* IRQ could occur when in initial stage */
353 	/* TODO work and BO cleanup */
354 	if (amdgpu_crtc == NULL) {
355 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
356 		return;
357 	}
358 
359 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
360 
361 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
362 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
363 						 amdgpu_crtc->pflip_status,
364 						 AMDGPU_FLIP_SUBMITTED,
365 						 amdgpu_crtc->crtc_id,
366 						 amdgpu_crtc);
367 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
368 		return;
369 	}
370 
371 	/* page flip completed. */
372 	e = amdgpu_crtc->event;
373 	amdgpu_crtc->event = NULL;
374 
375 	if (!e)
376 		WARN_ON(1);
377 
378 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
379 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
380 
381 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
382 	if (!vrr_active ||
383 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
384 				      &v_blank_end, &hpos, &vpos) ||
385 	    (vpos < v_blank_start)) {
386 		/* Update to correct count and vblank timestamp if racing with
387 		 * vblank irq. This also updates to the correct vblank timestamp
388 		 * even in VRR mode, as scanout is past the front-porch atm.
389 		 */
390 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
391 
392 		/* Wake up userspace by sending the pageflip event with proper
393 		 * count and timestamp of vblank of flip completion.
394 		 */
395 		if (e) {
396 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
397 
398 			/* Event sent, so done with vblank for this flip */
399 			drm_crtc_vblank_put(&amdgpu_crtc->base);
400 		}
401 	} else if (e) {
402 		/* VRR active and inside front-porch: vblank count and
403 		 * timestamp for pageflip event will only be up to date after
404 		 * drm_crtc_handle_vblank() has been executed from late vblank
405 		 * irq handler after start of back-porch (vline 0). We queue the
406 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
407 		 * updated timestamp and count, once it runs after us.
408 		 *
409 		 * We need to open-code this instead of using the helper
410 		 * drm_crtc_arm_vblank_event(), as that helper would
411 		 * call drm_crtc_accurate_vblank_count(), which we must
412 		 * not call in VRR mode while we are in front-porch!
413 		 */
414 
415 		/* sequence will be replaced by real count during send-out. */
416 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
417 		e->pipe = amdgpu_crtc->crtc_id;
418 
419 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
420 		e = NULL;
421 	}
422 
423 	/* Keep track of vblank of this flip for flip throttling. We use the
424 	 * cooked hw counter, as that one incremented at start of this vblank
425 	 * of pageflip completion, so last_flip_vblank is the forbidden count
426 	 * for queueing new pageflips if vsync + VRR is enabled.
427 	 */
428 	amdgpu_crtc->last_flip_vblank =
429 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
430 
431 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
432 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
433 
434 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
435 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
436 			 vrr_active, (int) !e);
437 }
438 
439 static void dm_vupdate_high_irq(void *interrupt_params)
440 {
441 	struct common_irq_params *irq_params = interrupt_params;
442 	struct amdgpu_device *adev = irq_params->adev;
443 	struct amdgpu_crtc *acrtc;
444 	struct dm_crtc_state *acrtc_state;
445 	unsigned long flags;
446 
447 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
448 
449 	if (acrtc) {
450 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
451 
452 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
453 			      acrtc->crtc_id,
454 			      amdgpu_dm_vrr_active(acrtc_state));
455 
456 		/* Core vblank handling is done here after end of front-porch in
457 		 * vrr mode, as vblank timestamping will give valid results
458 		 * while now done after front-porch. This will also deliver
459 		 * page-flip completion events that have been queued to us
460 		 * if a pageflip happened inside front-porch.
461 		 */
462 		if (amdgpu_dm_vrr_active(acrtc_state)) {
463 			drm_crtc_handle_vblank(&acrtc->base);
464 
465 			/* BTR processing for pre-DCE12 ASICs */
466 			if (acrtc_state->stream &&
467 			    adev->family < AMDGPU_FAMILY_AI) {
468 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
469 				mod_freesync_handle_v_update(
470 				    adev->dm.freesync_module,
471 				    acrtc_state->stream,
472 				    &acrtc_state->vrr_params);
473 
474 				dc_stream_adjust_vmin_vmax(
475 				    adev->dm.dc,
476 				    acrtc_state->stream,
477 				    &acrtc_state->vrr_params.adjust);
478 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
479 			}
480 		}
481 	}
482 }
483 
484 /**
485  * dm_crtc_high_irq() - Handles CRTC interrupt
486  * @interrupt_params: used for determining the CRTC instance
487  *
488  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
489  * event handler.
490  */
491 static void dm_crtc_high_irq(void *interrupt_params)
492 {
493 	struct common_irq_params *irq_params = interrupt_params;
494 	struct amdgpu_device *adev = irq_params->adev;
495 	struct amdgpu_crtc *acrtc;
496 	struct dm_crtc_state *acrtc_state;
497 	unsigned long flags;
498 
499 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
500 	if (!acrtc)
501 		return;
502 
503 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
504 
505 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
506 			 amdgpu_dm_vrr_active(acrtc_state),
507 			 acrtc_state->active_planes);
508 
509 	/**
510 	 * Core vblank handling at start of front-porch is only possible
511 	 * in non-vrr mode, as only there vblank timestamping will give
512 	 * valid results while done in front-porch. Otherwise defer it
513 	 * to dm_vupdate_high_irq after end of front-porch.
514 	 */
515 	if (!amdgpu_dm_vrr_active(acrtc_state))
516 		drm_crtc_handle_vblank(&acrtc->base);
517 
518 	/**
519 	 * Following stuff must happen at start of vblank, for crc
520 	 * computation and below-the-range btr support in vrr mode.
521 	 */
522 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
523 
524 	/* BTR updates need to happen before VUPDATE on Vega and above. */
525 	if (adev->family < AMDGPU_FAMILY_AI)
526 		return;
527 
528 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
529 
530 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
531 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
532 		mod_freesync_handle_v_update(adev->dm.freesync_module,
533 					     acrtc_state->stream,
534 					     &acrtc_state->vrr_params);
535 
536 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
537 					   &acrtc_state->vrr_params.adjust);
538 	}
539 
540 	/*
541 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
542 	 * In that case, pageflip completion interrupts won't fire and pageflip
543 	 * completion events won't get delivered. Prevent this by sending
544 	 * pending pageflip events from here if a flip is still pending.
545 	 *
546 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
547 	 * avoid race conditions between flip programming and completion,
548 	 * which could cause too early flip completion events.
549 	 */
550 	if (adev->family >= AMDGPU_FAMILY_RV &&
551 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
552 	    acrtc_state->active_planes == 0) {
553 		if (acrtc->event) {
554 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
555 			acrtc->event = NULL;
556 			drm_crtc_vblank_put(&acrtc->base);
557 		}
558 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
559 	}
560 
561 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
562 }
563 
564 static int dm_set_clockgating_state(void *handle,
565 		  enum amd_clockgating_state state)
566 {
567 	return 0;
568 }
569 
570 static int dm_set_powergating_state(void *handle,
571 		  enum amd_powergating_state state)
572 {
573 	return 0;
574 }
575 
576 /* Prototypes of private functions */
577 static int dm_early_init(void* handle);
578 
579 /* Allocate memory for FBC compressed data  */
580 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
581 {
582 	struct drm_device *dev = connector->dev;
583 	struct amdgpu_device *adev = dev->dev_private;
584 	struct dm_comressor_info *compressor = &adev->dm.compressor;
585 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
586 	struct drm_display_mode *mode;
587 	unsigned long max_size = 0;
588 
589 	if (adev->dm.dc->fbc_compressor == NULL)
590 		return;
591 
592 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
593 		return;
594 
595 	if (compressor->bo_ptr)
596 		return;
597 
598 
599 	list_for_each_entry(mode, &connector->modes, head) {
600 		if (max_size < mode->htotal * mode->vtotal)
601 			max_size = mode->htotal * mode->vtotal;
602 	}
603 
604 	if (max_size) {
605 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
606 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
607 			    &compressor->gpu_addr, &compressor->cpu_addr);
608 
609 		if (r)
610 			DRM_ERROR("DM: Failed to initialize FBC\n");
611 		else {
612 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
613 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
614 		}
615 
616 	}
617 
618 }
619 
620 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
621 					  int pipe, bool *enabled,
622 					  unsigned char *buf, int max_bytes)
623 {
624 	struct drm_device *dev = dev_get_drvdata(kdev);
625 	struct amdgpu_device *adev = dev->dev_private;
626 	struct drm_connector *connector;
627 	struct drm_connector_list_iter conn_iter;
628 	struct amdgpu_dm_connector *aconnector;
629 	int ret = 0;
630 
631 	*enabled = false;
632 
633 	mutex_lock(&adev->dm.audio_lock);
634 
635 	drm_connector_list_iter_begin(dev, &conn_iter);
636 	drm_for_each_connector_iter(connector, &conn_iter) {
637 		aconnector = to_amdgpu_dm_connector(connector);
638 		if (aconnector->audio_inst != port)
639 			continue;
640 
641 		*enabled = true;
642 		ret = drm_eld_size(connector->eld);
643 		memcpy(buf, connector->eld, min(max_bytes, ret));
644 
645 		break;
646 	}
647 	drm_connector_list_iter_end(&conn_iter);
648 
649 	mutex_unlock(&adev->dm.audio_lock);
650 
651 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
652 
653 	return ret;
654 }
655 
656 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
657 	.get_eld = amdgpu_dm_audio_component_get_eld,
658 };
659 
660 static int amdgpu_dm_audio_component_bind(struct device *kdev,
661 				       struct device *hda_kdev, void *data)
662 {
663 	struct drm_device *dev = dev_get_drvdata(kdev);
664 	struct amdgpu_device *adev = dev->dev_private;
665 	struct drm_audio_component *acomp = data;
666 
667 	acomp->ops = &amdgpu_dm_audio_component_ops;
668 	acomp->dev = kdev;
669 	adev->dm.audio_component = acomp;
670 
671 	return 0;
672 }
673 
674 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
675 					  struct device *hda_kdev, void *data)
676 {
677 	struct drm_device *dev = dev_get_drvdata(kdev);
678 	struct amdgpu_device *adev = dev->dev_private;
679 	struct drm_audio_component *acomp = data;
680 
681 	acomp->ops = NULL;
682 	acomp->dev = NULL;
683 	adev->dm.audio_component = NULL;
684 }
685 
686 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
687 	.bind	= amdgpu_dm_audio_component_bind,
688 	.unbind	= amdgpu_dm_audio_component_unbind,
689 };
690 
691 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
692 {
693 	int i, ret;
694 
695 	if (!amdgpu_audio)
696 		return 0;
697 
698 	adev->mode_info.audio.enabled = true;
699 
700 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
701 
702 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
703 		adev->mode_info.audio.pin[i].channels = -1;
704 		adev->mode_info.audio.pin[i].rate = -1;
705 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
706 		adev->mode_info.audio.pin[i].status_bits = 0;
707 		adev->mode_info.audio.pin[i].category_code = 0;
708 		adev->mode_info.audio.pin[i].connected = false;
709 		adev->mode_info.audio.pin[i].id =
710 			adev->dm.dc->res_pool->audios[i]->inst;
711 		adev->mode_info.audio.pin[i].offset = 0;
712 	}
713 
714 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
715 	if (ret < 0)
716 		return ret;
717 
718 	adev->dm.audio_registered = true;
719 
720 	return 0;
721 }
722 
723 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
724 {
725 	if (!amdgpu_audio)
726 		return;
727 
728 	if (!adev->mode_info.audio.enabled)
729 		return;
730 
731 	if (adev->dm.audio_registered) {
732 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
733 		adev->dm.audio_registered = false;
734 	}
735 
736 	/* TODO: Disable audio? */
737 
738 	adev->mode_info.audio.enabled = false;
739 }
740 
741 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
742 {
743 	struct drm_audio_component *acomp = adev->dm.audio_component;
744 
745 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
746 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
747 
748 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
749 						 pin, -1);
750 	}
751 }
752 
753 static int dm_dmub_hw_init(struct amdgpu_device *adev)
754 {
755 	const struct dmcub_firmware_header_v1_0 *hdr;
756 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
757 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
758 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
759 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
760 	struct abm *abm = adev->dm.dc->res_pool->abm;
761 	struct dmub_srv_hw_params hw_params;
762 	enum dmub_status status;
763 	const unsigned char *fw_inst_const, *fw_bss_data;
764 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
765 	bool has_hw_support;
766 
767 	if (!dmub_srv)
768 		/* DMUB isn't supported on the ASIC. */
769 		return 0;
770 
771 	if (!fb_info) {
772 		DRM_ERROR("No framebuffer info for DMUB service.\n");
773 		return -EINVAL;
774 	}
775 
776 	if (!dmub_fw) {
777 		/* Firmware required for DMUB support. */
778 		DRM_ERROR("No firmware provided for DMUB.\n");
779 		return -EINVAL;
780 	}
781 
782 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
783 	if (status != DMUB_STATUS_OK) {
784 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
785 		return -EINVAL;
786 	}
787 
788 	if (!has_hw_support) {
789 		DRM_INFO("DMUB unsupported on ASIC\n");
790 		return 0;
791 	}
792 
793 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
794 
795 	fw_inst_const = dmub_fw->data +
796 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
797 			PSP_HEADER_BYTES;
798 
799 	fw_bss_data = dmub_fw->data +
800 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
801 		      le32_to_cpu(hdr->inst_const_bytes);
802 
803 	/* Copy firmware and bios info into FB memory. */
804 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
805 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
806 
807 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
808 
809 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
810 	 * amdgpu_ucode_init_single_fw will load dmub firmware
811 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
812 	 * will be done by dm_dmub_hw_init
813 	 */
814 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
815 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
816 				fw_inst_const_size);
817 	}
818 
819 	if (fw_bss_data_size)
820 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
821 		       fw_bss_data, fw_bss_data_size);
822 
823 	/* Copy firmware bios info into FB memory. */
824 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
825 	       adev->bios_size);
826 
827 	/* Reset regions that need to be reset. */
828 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
829 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
830 
831 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
832 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
833 
834 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
835 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
836 
837 	/* Initialize hardware. */
838 	memset(&hw_params, 0, sizeof(hw_params));
839 	hw_params.fb_base = adev->gmc.fb_start;
840 	hw_params.fb_offset = adev->gmc.aper_base;
841 
842 	/* backdoor load firmware and trigger dmub running */
843 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
844 		hw_params.load_inst_const = true;
845 
846 	if (dmcu)
847 		hw_params.psp_version = dmcu->psp_version;
848 
849 	for (i = 0; i < fb_info->num_fb; ++i)
850 		hw_params.fb[i] = &fb_info->fb[i];
851 
852 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
853 	if (status != DMUB_STATUS_OK) {
854 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
855 		return -EINVAL;
856 	}
857 
858 	/* Wait for firmware load to finish. */
859 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
860 	if (status != DMUB_STATUS_OK)
861 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
862 
863 	/* Init DMCU and ABM if available. */
864 	if (dmcu && abm) {
865 		dmcu->funcs->dmcu_init(dmcu);
866 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
867 	}
868 
869 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
870 	if (!adev->dm.dc->ctx->dmub_srv) {
871 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
872 		return -ENOMEM;
873 	}
874 
875 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
876 		 adev->dm.dmcub_fw_version);
877 
878 	return 0;
879 }
880 
881 static int amdgpu_dm_init(struct amdgpu_device *adev)
882 {
883 	struct dc_init_data init_data;
884 #ifdef CONFIG_DRM_AMD_DC_HDCP
885 	struct dc_callback_init init_params;
886 #endif
887 	int r;
888 
889 	adev->dm.ddev = adev->ddev;
890 	adev->dm.adev = adev;
891 
892 	/* Zero all the fields */
893 	memset(&init_data, 0, sizeof(init_data));
894 #ifdef CONFIG_DRM_AMD_DC_HDCP
895 	memset(&init_params, 0, sizeof(init_params));
896 #endif
897 
898 	mutex_init(&adev->dm.dc_lock);
899 	mutex_init(&adev->dm.audio_lock);
900 
901 	if(amdgpu_dm_irq_init(adev)) {
902 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
903 		goto error;
904 	}
905 
906 	init_data.asic_id.chip_family = adev->family;
907 
908 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
909 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
910 
911 	init_data.asic_id.vram_width = adev->gmc.vram_width;
912 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
913 	init_data.asic_id.atombios_base_address =
914 		adev->mode_info.atom_context->bios;
915 
916 	init_data.driver = adev;
917 
918 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
919 
920 	if (!adev->dm.cgs_device) {
921 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
922 		goto error;
923 	}
924 
925 	init_data.cgs_device = adev->dm.cgs_device;
926 
927 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
928 
929 	switch (adev->asic_type) {
930 	case CHIP_CARRIZO:
931 	case CHIP_STONEY:
932 	case CHIP_RAVEN:
933 	case CHIP_RENOIR:
934 		init_data.flags.gpu_vm_support = true;
935 		break;
936 	default:
937 		break;
938 	}
939 
940 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
941 		init_data.flags.fbc_support = true;
942 
943 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
944 		init_data.flags.multi_mon_pp_mclk_switch = true;
945 
946 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
947 		init_data.flags.disable_fractional_pwm = true;
948 
949 	init_data.flags.power_down_display_on_boot = true;
950 
951 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
952 
953 	/* Display Core create. */
954 	adev->dm.dc = dc_create(&init_data);
955 
956 	if (adev->dm.dc) {
957 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
958 	} else {
959 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
960 		goto error;
961 	}
962 
963 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
964 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
965 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
966 	}
967 
968 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
969 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
970 
971 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
972 		adev->dm.dc->debug.disable_stutter = true;
973 
974 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
975 		adev->dm.dc->debug.disable_dsc = true;
976 
977 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
978 		adev->dm.dc->debug.disable_clock_gate = true;
979 
980 	r = dm_dmub_hw_init(adev);
981 	if (r) {
982 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
983 		goto error;
984 	}
985 
986 	dc_hardware_init(adev->dm.dc);
987 
988 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
989 	if (!adev->dm.freesync_module) {
990 		DRM_ERROR(
991 		"amdgpu: failed to initialize freesync_module.\n");
992 	} else
993 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
994 				adev->dm.freesync_module);
995 
996 	amdgpu_dm_init_color_mod();
997 
998 #ifdef CONFIG_DRM_AMD_DC_HDCP
999 	if (adev->asic_type >= CHIP_RAVEN) {
1000 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1001 
1002 		if (!adev->dm.hdcp_workqueue)
1003 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1004 		else
1005 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1006 
1007 		dc_init_callbacks(adev->dm.dc, &init_params);
1008 	}
1009 #endif
1010 	if (amdgpu_dm_initialize_drm_device(adev)) {
1011 		DRM_ERROR(
1012 		"amdgpu: failed to initialize sw for display support.\n");
1013 		goto error;
1014 	}
1015 
1016 	/* Update the actual used number of crtc */
1017 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1018 
1019 	/* create fake encoders for MST */
1020 	dm_dp_create_fake_mst_encoders(adev);
1021 
1022 	/* TODO: Add_display_info? */
1023 
1024 	/* TODO use dynamic cursor width */
1025 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1026 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1027 
1028 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1029 		DRM_ERROR(
1030 		"amdgpu: failed to initialize sw for display support.\n");
1031 		goto error;
1032 	}
1033 
1034 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1035 
1036 	return 0;
1037 error:
1038 	amdgpu_dm_fini(adev);
1039 
1040 	return -EINVAL;
1041 }
1042 
1043 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1044 {
1045 	int i;
1046 
1047 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1048 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1049 	}
1050 
1051 	amdgpu_dm_audio_fini(adev);
1052 
1053 	amdgpu_dm_destroy_drm_device(&adev->dm);
1054 
1055 #ifdef CONFIG_DRM_AMD_DC_HDCP
1056 	if (adev->dm.hdcp_workqueue) {
1057 		hdcp_destroy(adev->dm.hdcp_workqueue);
1058 		adev->dm.hdcp_workqueue = NULL;
1059 	}
1060 
1061 	if (adev->dm.dc)
1062 		dc_deinit_callbacks(adev->dm.dc);
1063 #endif
1064 	if (adev->dm.dc->ctx->dmub_srv) {
1065 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1066 		adev->dm.dc->ctx->dmub_srv = NULL;
1067 	}
1068 
1069 	if (adev->dm.dmub_bo)
1070 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1071 				      &adev->dm.dmub_bo_gpu_addr,
1072 				      &adev->dm.dmub_bo_cpu_addr);
1073 
1074 	/* DC Destroy TODO: Replace destroy DAL */
1075 	if (adev->dm.dc)
1076 		dc_destroy(&adev->dm.dc);
1077 	/*
1078 	 * TODO: pageflip, vlank interrupt
1079 	 *
1080 	 * amdgpu_dm_irq_fini(adev);
1081 	 */
1082 
1083 	if (adev->dm.cgs_device) {
1084 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1085 		adev->dm.cgs_device = NULL;
1086 	}
1087 	if (adev->dm.freesync_module) {
1088 		mod_freesync_destroy(adev->dm.freesync_module);
1089 		adev->dm.freesync_module = NULL;
1090 	}
1091 
1092 	mutex_destroy(&adev->dm.audio_lock);
1093 	mutex_destroy(&adev->dm.dc_lock);
1094 
1095 	return;
1096 }
1097 
1098 static int load_dmcu_fw(struct amdgpu_device *adev)
1099 {
1100 	const char *fw_name_dmcu = NULL;
1101 	int r;
1102 	const struct dmcu_firmware_header_v1_0 *hdr;
1103 
1104 	switch(adev->asic_type) {
1105 	case CHIP_BONAIRE:
1106 	case CHIP_HAWAII:
1107 	case CHIP_KAVERI:
1108 	case CHIP_KABINI:
1109 	case CHIP_MULLINS:
1110 	case CHIP_TONGA:
1111 	case CHIP_FIJI:
1112 	case CHIP_CARRIZO:
1113 	case CHIP_STONEY:
1114 	case CHIP_POLARIS11:
1115 	case CHIP_POLARIS10:
1116 	case CHIP_POLARIS12:
1117 	case CHIP_VEGAM:
1118 	case CHIP_VEGA10:
1119 	case CHIP_VEGA12:
1120 	case CHIP_VEGA20:
1121 	case CHIP_NAVI10:
1122 	case CHIP_NAVI14:
1123 	case CHIP_RENOIR:
1124 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1125 	case CHIP_SIENNA_CICHLID:
1126 	case CHIP_NAVY_FLOUNDER:
1127 #endif
1128 		return 0;
1129 	case CHIP_NAVI12:
1130 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1131 		break;
1132 	case CHIP_RAVEN:
1133 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1134 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1135 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1136 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1137 		else
1138 			return 0;
1139 		break;
1140 	default:
1141 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1142 		return -EINVAL;
1143 	}
1144 
1145 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1146 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1147 		return 0;
1148 	}
1149 
1150 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1151 	if (r == -ENOENT) {
1152 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1153 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1154 		adev->dm.fw_dmcu = NULL;
1155 		return 0;
1156 	}
1157 	if (r) {
1158 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1159 			fw_name_dmcu);
1160 		return r;
1161 	}
1162 
1163 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1164 	if (r) {
1165 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1166 			fw_name_dmcu);
1167 		release_firmware(adev->dm.fw_dmcu);
1168 		adev->dm.fw_dmcu = NULL;
1169 		return r;
1170 	}
1171 
1172 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1173 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1174 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1175 	adev->firmware.fw_size +=
1176 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1177 
1178 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1179 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1180 	adev->firmware.fw_size +=
1181 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1182 
1183 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1184 
1185 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1186 
1187 	return 0;
1188 }
1189 
1190 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1191 {
1192 	struct amdgpu_device *adev = ctx;
1193 
1194 	return dm_read_reg(adev->dm.dc->ctx, address);
1195 }
1196 
1197 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1198 				     uint32_t value)
1199 {
1200 	struct amdgpu_device *adev = ctx;
1201 
1202 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1203 }
1204 
1205 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1206 {
1207 	struct dmub_srv_create_params create_params;
1208 	struct dmub_srv_region_params region_params;
1209 	struct dmub_srv_region_info region_info;
1210 	struct dmub_srv_fb_params fb_params;
1211 	struct dmub_srv_fb_info *fb_info;
1212 	struct dmub_srv *dmub_srv;
1213 	const struct dmcub_firmware_header_v1_0 *hdr;
1214 	const char *fw_name_dmub;
1215 	enum dmub_asic dmub_asic;
1216 	enum dmub_status status;
1217 	int r;
1218 
1219 	switch (adev->asic_type) {
1220 	case CHIP_RENOIR:
1221 		dmub_asic = DMUB_ASIC_DCN21;
1222 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1223 		break;
1224 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1225 	case CHIP_SIENNA_CICHLID:
1226 		dmub_asic = DMUB_ASIC_DCN30;
1227 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1228 		break;
1229 	case CHIP_NAVY_FLOUNDER:
1230 		dmub_asic = DMUB_ASIC_DCN30;
1231 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1232 		break;
1233 #endif
1234 
1235 	default:
1236 		/* ASIC doesn't support DMUB. */
1237 		return 0;
1238 	}
1239 
1240 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1241 	if (r) {
1242 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1243 		return 0;
1244 	}
1245 
1246 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1247 	if (r) {
1248 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1249 		return 0;
1250 	}
1251 
1252 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1253 
1254 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1255 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1256 			AMDGPU_UCODE_ID_DMCUB;
1257 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1258 			adev->dm.dmub_fw;
1259 		adev->firmware.fw_size +=
1260 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1261 
1262 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1263 			 adev->dm.dmcub_fw_version);
1264 	}
1265 
1266 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1267 
1268 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1269 	dmub_srv = adev->dm.dmub_srv;
1270 
1271 	if (!dmub_srv) {
1272 		DRM_ERROR("Failed to allocate DMUB service!\n");
1273 		return -ENOMEM;
1274 	}
1275 
1276 	memset(&create_params, 0, sizeof(create_params));
1277 	create_params.user_ctx = adev;
1278 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1279 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1280 	create_params.asic = dmub_asic;
1281 
1282 	/* Create the DMUB service. */
1283 	status = dmub_srv_create(dmub_srv, &create_params);
1284 	if (status != DMUB_STATUS_OK) {
1285 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1286 		return -EINVAL;
1287 	}
1288 
1289 	/* Calculate the size of all the regions for the DMUB service. */
1290 	memset(&region_params, 0, sizeof(region_params));
1291 
1292 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1293 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1294 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1295 	region_params.vbios_size = adev->bios_size;
1296 	region_params.fw_bss_data = region_params.bss_data_size ?
1297 		adev->dm.dmub_fw->data +
1298 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1299 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1300 	region_params.fw_inst_const =
1301 		adev->dm.dmub_fw->data +
1302 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1303 		PSP_HEADER_BYTES;
1304 
1305 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1306 					   &region_info);
1307 
1308 	if (status != DMUB_STATUS_OK) {
1309 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1310 		return -EINVAL;
1311 	}
1312 
1313 	/*
1314 	 * Allocate a framebuffer based on the total size of all the regions.
1315 	 * TODO: Move this into GART.
1316 	 */
1317 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1318 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1319 				    &adev->dm.dmub_bo_gpu_addr,
1320 				    &adev->dm.dmub_bo_cpu_addr);
1321 	if (r)
1322 		return r;
1323 
1324 	/* Rebase the regions on the framebuffer address. */
1325 	memset(&fb_params, 0, sizeof(fb_params));
1326 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1327 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1328 	fb_params.region_info = &region_info;
1329 
1330 	adev->dm.dmub_fb_info =
1331 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1332 	fb_info = adev->dm.dmub_fb_info;
1333 
1334 	if (!fb_info) {
1335 		DRM_ERROR(
1336 			"Failed to allocate framebuffer info for DMUB service!\n");
1337 		return -ENOMEM;
1338 	}
1339 
1340 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1341 	if (status != DMUB_STATUS_OK) {
1342 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1343 		return -EINVAL;
1344 	}
1345 
1346 	return 0;
1347 }
1348 
1349 static int dm_sw_init(void *handle)
1350 {
1351 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1352 	int r;
1353 
1354 	r = dm_dmub_sw_init(adev);
1355 	if (r)
1356 		return r;
1357 
1358 	return load_dmcu_fw(adev);
1359 }
1360 
1361 static int dm_sw_fini(void *handle)
1362 {
1363 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1364 
1365 	kfree(adev->dm.dmub_fb_info);
1366 	adev->dm.dmub_fb_info = NULL;
1367 
1368 	if (adev->dm.dmub_srv) {
1369 		dmub_srv_destroy(adev->dm.dmub_srv);
1370 		adev->dm.dmub_srv = NULL;
1371 	}
1372 
1373 	release_firmware(adev->dm.dmub_fw);
1374 	adev->dm.dmub_fw = NULL;
1375 
1376 	release_firmware(adev->dm.fw_dmcu);
1377 	adev->dm.fw_dmcu = NULL;
1378 
1379 	return 0;
1380 }
1381 
1382 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1383 {
1384 	struct amdgpu_dm_connector *aconnector;
1385 	struct drm_connector *connector;
1386 	struct drm_connector_list_iter iter;
1387 	int ret = 0;
1388 
1389 	drm_connector_list_iter_begin(dev, &iter);
1390 	drm_for_each_connector_iter(connector, &iter) {
1391 		aconnector = to_amdgpu_dm_connector(connector);
1392 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1393 		    aconnector->mst_mgr.aux) {
1394 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1395 					 aconnector,
1396 					 aconnector->base.base.id);
1397 
1398 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1399 			if (ret < 0) {
1400 				DRM_ERROR("DM_MST: Failed to start MST\n");
1401 				aconnector->dc_link->type =
1402 					dc_connection_single;
1403 				break;
1404 			}
1405 		}
1406 	}
1407 	drm_connector_list_iter_end(&iter);
1408 
1409 	return ret;
1410 }
1411 
1412 static int dm_late_init(void *handle)
1413 {
1414 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1415 
1416 	struct dmcu_iram_parameters params;
1417 	unsigned int linear_lut[16];
1418 	int i;
1419 	struct dmcu *dmcu = NULL;
1420 	bool ret = true;
1421 
1422 	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
1423 		return detect_mst_link_for_all_connectors(adev->ddev);
1424 
1425 	dmcu = adev->dm.dc->res_pool->dmcu;
1426 
1427 	for (i = 0; i < 16; i++)
1428 		linear_lut[i] = 0xFFFF * i / 15;
1429 
1430 	params.set = 0;
1431 	params.backlight_ramping_start = 0xCCCC;
1432 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1433 	params.backlight_lut_array_size = 16;
1434 	params.backlight_lut_array = linear_lut;
1435 
1436 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1437 	 * 0xFFFF x 0.01 = 0x28F
1438 	 */
1439 	params.min_abm_backlight = 0x28F;
1440 
1441 	/* In the case where abm is implemented on dmcub,
1442 	 * dmcu object will be null.
1443 	 * ABM 2.4 and up are implemented on dmcub.
1444 	 */
1445 	if (dmcu)
1446 		ret = dmcu_load_iram(dmcu, params);
1447 	else if (adev->dm.dc->ctx->dmub_srv)
1448 		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1449 
1450 	if (!ret)
1451 		return -EINVAL;
1452 
1453 	return detect_mst_link_for_all_connectors(adev->ddev);
1454 }
1455 
1456 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1457 {
1458 	struct amdgpu_dm_connector *aconnector;
1459 	struct drm_connector *connector;
1460 	struct drm_connector_list_iter iter;
1461 	struct drm_dp_mst_topology_mgr *mgr;
1462 	int ret;
1463 	bool need_hotplug = false;
1464 
1465 	drm_connector_list_iter_begin(dev, &iter);
1466 	drm_for_each_connector_iter(connector, &iter) {
1467 		aconnector = to_amdgpu_dm_connector(connector);
1468 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1469 		    aconnector->mst_port)
1470 			continue;
1471 
1472 		mgr = &aconnector->mst_mgr;
1473 
1474 		if (suspend) {
1475 			drm_dp_mst_topology_mgr_suspend(mgr);
1476 		} else {
1477 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1478 			if (ret < 0) {
1479 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1480 				need_hotplug = true;
1481 			}
1482 		}
1483 	}
1484 	drm_connector_list_iter_end(&iter);
1485 
1486 	if (need_hotplug)
1487 		drm_kms_helper_hotplug_event(dev);
1488 }
1489 
1490 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1491 {
1492 	struct smu_context *smu = &adev->smu;
1493 	int ret = 0;
1494 
1495 	if (!is_support_sw_smu(adev))
1496 		return 0;
1497 
1498 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1499 	 * on window driver dc implementation.
1500 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1501 	 * should be passed to smu during boot up and resume from s3.
1502 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1503 	 * dcn20_resource_construct
1504 	 * then call pplib functions below to pass the settings to smu:
1505 	 * smu_set_watermarks_for_clock_ranges
1506 	 * smu_set_watermarks_table
1507 	 * navi10_set_watermarks_table
1508 	 * smu_write_watermarks_table
1509 	 *
1510 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1511 	 * dc has implemented different flow for window driver:
1512 	 * dc_hardware_init / dc_set_power_state
1513 	 * dcn10_init_hw
1514 	 * notify_wm_ranges
1515 	 * set_wm_ranges
1516 	 * -- Linux
1517 	 * smu_set_watermarks_for_clock_ranges
1518 	 * renoir_set_watermarks_table
1519 	 * smu_write_watermarks_table
1520 	 *
1521 	 * For Linux,
1522 	 * dc_hardware_init -> amdgpu_dm_init
1523 	 * dc_set_power_state --> dm_resume
1524 	 *
1525 	 * therefore, this function apply to navi10/12/14 but not Renoir
1526 	 * *
1527 	 */
1528 	switch(adev->asic_type) {
1529 	case CHIP_NAVI10:
1530 	case CHIP_NAVI14:
1531 	case CHIP_NAVI12:
1532 		break;
1533 	default:
1534 		return 0;
1535 	}
1536 
1537 	ret = smu_write_watermarks_table(smu);
1538 	if (ret) {
1539 		DRM_ERROR("Failed to update WMTABLE!\n");
1540 		return ret;
1541 	}
1542 
1543 	return 0;
1544 }
1545 
1546 /**
1547  * dm_hw_init() - Initialize DC device
1548  * @handle: The base driver device containing the amdgpu_dm device.
1549  *
1550  * Initialize the &struct amdgpu_display_manager device. This involves calling
1551  * the initializers of each DM component, then populating the struct with them.
1552  *
1553  * Although the function implies hardware initialization, both hardware and
1554  * software are initialized here. Splitting them out to their relevant init
1555  * hooks is a future TODO item.
1556  *
1557  * Some notable things that are initialized here:
1558  *
1559  * - Display Core, both software and hardware
1560  * - DC modules that we need (freesync and color management)
1561  * - DRM software states
1562  * - Interrupt sources and handlers
1563  * - Vblank support
1564  * - Debug FS entries, if enabled
1565  */
1566 static int dm_hw_init(void *handle)
1567 {
1568 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1569 	/* Create DAL display manager */
1570 	amdgpu_dm_init(adev);
1571 	amdgpu_dm_hpd_init(adev);
1572 
1573 	return 0;
1574 }
1575 
1576 /**
1577  * dm_hw_fini() - Teardown DC device
1578  * @handle: The base driver device containing the amdgpu_dm device.
1579  *
1580  * Teardown components within &struct amdgpu_display_manager that require
1581  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1582  * were loaded. Also flush IRQ workqueues and disable them.
1583  */
1584 static int dm_hw_fini(void *handle)
1585 {
1586 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1587 
1588 	amdgpu_dm_hpd_fini(adev);
1589 
1590 	amdgpu_dm_irq_fini(adev);
1591 	amdgpu_dm_fini(adev);
1592 	return 0;
1593 }
1594 
1595 
1596 static int dm_enable_vblank(struct drm_crtc *crtc);
1597 static void dm_disable_vblank(struct drm_crtc *crtc);
1598 
1599 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1600 				 struct dc_state *state, bool enable)
1601 {
1602 	enum dc_irq_source irq_source;
1603 	struct amdgpu_crtc *acrtc;
1604 	int rc = -EBUSY;
1605 	int i = 0;
1606 
1607 	for (i = 0; i < state->stream_count; i++) {
1608 		acrtc = get_crtc_by_otg_inst(
1609 				adev, state->stream_status[i].primary_otg_inst);
1610 
1611 		if (acrtc && state->stream_status[i].plane_count != 0) {
1612 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1613 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1614 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1615 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1616 			if (rc)
1617 				DRM_WARN("Failed to %s pflip interrupts\n",
1618 					 enable ? "enable" : "disable");
1619 
1620 			if (enable) {
1621 				rc = dm_enable_vblank(&acrtc->base);
1622 				if (rc)
1623 					DRM_WARN("Failed to enable vblank interrupts\n");
1624 			} else {
1625 				dm_disable_vblank(&acrtc->base);
1626 			}
1627 
1628 		}
1629 	}
1630 
1631 }
1632 
1633 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1634 {
1635 	struct dc_state *context = NULL;
1636 	enum dc_status res = DC_ERROR_UNEXPECTED;
1637 	int i;
1638 	struct dc_stream_state *del_streams[MAX_PIPES];
1639 	int del_streams_count = 0;
1640 
1641 	memset(del_streams, 0, sizeof(del_streams));
1642 
1643 	context = dc_create_state(dc);
1644 	if (context == NULL)
1645 		goto context_alloc_fail;
1646 
1647 	dc_resource_state_copy_construct_current(dc, context);
1648 
1649 	/* First remove from context all streams */
1650 	for (i = 0; i < context->stream_count; i++) {
1651 		struct dc_stream_state *stream = context->streams[i];
1652 
1653 		del_streams[del_streams_count++] = stream;
1654 	}
1655 
1656 	/* Remove all planes for removed streams and then remove the streams */
1657 	for (i = 0; i < del_streams_count; i++) {
1658 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1659 			res = DC_FAIL_DETACH_SURFACES;
1660 			goto fail;
1661 		}
1662 
1663 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1664 		if (res != DC_OK)
1665 			goto fail;
1666 	}
1667 
1668 
1669 	res = dc_validate_global_state(dc, context, false);
1670 
1671 	if (res != DC_OK) {
1672 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1673 		goto fail;
1674 	}
1675 
1676 	res = dc_commit_state(dc, context);
1677 
1678 fail:
1679 	dc_release_state(context);
1680 
1681 context_alloc_fail:
1682 	return res;
1683 }
1684 
1685 static int dm_suspend(void *handle)
1686 {
1687 	struct amdgpu_device *adev = handle;
1688 	struct amdgpu_display_manager *dm = &adev->dm;
1689 	int ret = 0;
1690 
1691 	if (adev->in_gpu_reset) {
1692 		mutex_lock(&dm->dc_lock);
1693 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1694 
1695 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1696 
1697 		amdgpu_dm_commit_zero_streams(dm->dc);
1698 
1699 		amdgpu_dm_irq_suspend(adev);
1700 
1701 		return ret;
1702 	}
1703 
1704 	WARN_ON(adev->dm.cached_state);
1705 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1706 
1707 	s3_handle_mst(adev->ddev, true);
1708 
1709 	amdgpu_dm_irq_suspend(adev);
1710 
1711 
1712 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1713 
1714 	return 0;
1715 }
1716 
1717 static struct amdgpu_dm_connector *
1718 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1719 					     struct drm_crtc *crtc)
1720 {
1721 	uint32_t i;
1722 	struct drm_connector_state *new_con_state;
1723 	struct drm_connector *connector;
1724 	struct drm_crtc *crtc_from_state;
1725 
1726 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1727 		crtc_from_state = new_con_state->crtc;
1728 
1729 		if (crtc_from_state == crtc)
1730 			return to_amdgpu_dm_connector(connector);
1731 	}
1732 
1733 	return NULL;
1734 }
1735 
1736 static void emulated_link_detect(struct dc_link *link)
1737 {
1738 	struct dc_sink_init_data sink_init_data = { 0 };
1739 	struct display_sink_capability sink_caps = { 0 };
1740 	enum dc_edid_status edid_status;
1741 	struct dc_context *dc_ctx = link->ctx;
1742 	struct dc_sink *sink = NULL;
1743 	struct dc_sink *prev_sink = NULL;
1744 
1745 	link->type = dc_connection_none;
1746 	prev_sink = link->local_sink;
1747 
1748 	if (prev_sink != NULL)
1749 		dc_sink_retain(prev_sink);
1750 
1751 	switch (link->connector_signal) {
1752 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1753 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1754 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1755 		break;
1756 	}
1757 
1758 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1759 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1760 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1761 		break;
1762 	}
1763 
1764 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1765 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1766 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1767 		break;
1768 	}
1769 
1770 	case SIGNAL_TYPE_LVDS: {
1771 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1772 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1773 		break;
1774 	}
1775 
1776 	case SIGNAL_TYPE_EDP: {
1777 		sink_caps.transaction_type =
1778 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1779 		sink_caps.signal = SIGNAL_TYPE_EDP;
1780 		break;
1781 	}
1782 
1783 	case SIGNAL_TYPE_DISPLAY_PORT: {
1784 		sink_caps.transaction_type =
1785 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1786 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1787 		break;
1788 	}
1789 
1790 	default:
1791 		DC_ERROR("Invalid connector type! signal:%d\n",
1792 			link->connector_signal);
1793 		return;
1794 	}
1795 
1796 	sink_init_data.link = link;
1797 	sink_init_data.sink_signal = sink_caps.signal;
1798 
1799 	sink = dc_sink_create(&sink_init_data);
1800 	if (!sink) {
1801 		DC_ERROR("Failed to create sink!\n");
1802 		return;
1803 	}
1804 
1805 	/* dc_sink_create returns a new reference */
1806 	link->local_sink = sink;
1807 
1808 	edid_status = dm_helpers_read_local_edid(
1809 			link->ctx,
1810 			link,
1811 			sink);
1812 
1813 	if (edid_status != EDID_OK)
1814 		DC_ERROR("Failed to read EDID");
1815 
1816 }
1817 
1818 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1819 				     struct amdgpu_display_manager *dm)
1820 {
1821 	struct {
1822 		struct dc_surface_update surface_updates[MAX_SURFACES];
1823 		struct dc_plane_info plane_infos[MAX_SURFACES];
1824 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1825 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1826 		struct dc_stream_update stream_update;
1827 	} * bundle;
1828 	int k, m;
1829 
1830 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1831 
1832 	if (!bundle) {
1833 		dm_error("Failed to allocate update bundle\n");
1834 		goto cleanup;
1835 	}
1836 
1837 	for (k = 0; k < dc_state->stream_count; k++) {
1838 		bundle->stream_update.stream = dc_state->streams[k];
1839 
1840 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1841 			bundle->surface_updates[m].surface =
1842 				dc_state->stream_status->plane_states[m];
1843 			bundle->surface_updates[m].surface->force_full_update =
1844 				true;
1845 		}
1846 		dc_commit_updates_for_stream(
1847 			dm->dc, bundle->surface_updates,
1848 			dc_state->stream_status->plane_count,
1849 			dc_state->streams[k], &bundle->stream_update, dc_state);
1850 	}
1851 
1852 cleanup:
1853 	kfree(bundle);
1854 
1855 	return;
1856 }
1857 
1858 static int dm_resume(void *handle)
1859 {
1860 	struct amdgpu_device *adev = handle;
1861 	struct drm_device *ddev = adev->ddev;
1862 	struct amdgpu_display_manager *dm = &adev->dm;
1863 	struct amdgpu_dm_connector *aconnector;
1864 	struct drm_connector *connector;
1865 	struct drm_connector_list_iter iter;
1866 	struct drm_crtc *crtc;
1867 	struct drm_crtc_state *new_crtc_state;
1868 	struct dm_crtc_state *dm_new_crtc_state;
1869 	struct drm_plane *plane;
1870 	struct drm_plane_state *new_plane_state;
1871 	struct dm_plane_state *dm_new_plane_state;
1872 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1873 	enum dc_connection_type new_connection_type = dc_connection_none;
1874 	struct dc_state *dc_state;
1875 	int i, r, j;
1876 
1877 	if (adev->in_gpu_reset) {
1878 		dc_state = dm->cached_dc_state;
1879 
1880 		r = dm_dmub_hw_init(adev);
1881 		if (r)
1882 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1883 
1884 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1885 		dc_resume(dm->dc);
1886 
1887 		amdgpu_dm_irq_resume_early(adev);
1888 
1889 		for (i = 0; i < dc_state->stream_count; i++) {
1890 			dc_state->streams[i]->mode_changed = true;
1891 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1892 				dc_state->stream_status->plane_states[j]->update_flags.raw
1893 					= 0xffffffff;
1894 			}
1895 		}
1896 
1897 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1898 
1899 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1900 
1901 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1902 
1903 		dc_release_state(dm->cached_dc_state);
1904 		dm->cached_dc_state = NULL;
1905 
1906 		amdgpu_dm_irq_resume_late(adev);
1907 
1908 		mutex_unlock(&dm->dc_lock);
1909 
1910 		return 0;
1911 	}
1912 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1913 	dc_release_state(dm_state->context);
1914 	dm_state->context = dc_create_state(dm->dc);
1915 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1916 	dc_resource_state_construct(dm->dc, dm_state->context);
1917 
1918 	/* Before powering on DC we need to re-initialize DMUB. */
1919 	r = dm_dmub_hw_init(adev);
1920 	if (r)
1921 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1922 
1923 	/* power on hardware */
1924 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1925 
1926 	/* program HPD filter */
1927 	dc_resume(dm->dc);
1928 
1929 	/*
1930 	 * early enable HPD Rx IRQ, should be done before set mode as short
1931 	 * pulse interrupts are used for MST
1932 	 */
1933 	amdgpu_dm_irq_resume_early(adev);
1934 
1935 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1936 	s3_handle_mst(ddev, false);
1937 
1938 	/* Do detection*/
1939 	drm_connector_list_iter_begin(ddev, &iter);
1940 	drm_for_each_connector_iter(connector, &iter) {
1941 		aconnector = to_amdgpu_dm_connector(connector);
1942 
1943 		/*
1944 		 * this is the case when traversing through already created
1945 		 * MST connectors, should be skipped
1946 		 */
1947 		if (aconnector->mst_port)
1948 			continue;
1949 
1950 		mutex_lock(&aconnector->hpd_lock);
1951 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1952 			DRM_ERROR("KMS: Failed to detect connector\n");
1953 
1954 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1955 			emulated_link_detect(aconnector->dc_link);
1956 		else
1957 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1958 
1959 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1960 			aconnector->fake_enable = false;
1961 
1962 		if (aconnector->dc_sink)
1963 			dc_sink_release(aconnector->dc_sink);
1964 		aconnector->dc_sink = NULL;
1965 		amdgpu_dm_update_connector_after_detect(aconnector);
1966 		mutex_unlock(&aconnector->hpd_lock);
1967 	}
1968 	drm_connector_list_iter_end(&iter);
1969 
1970 	/* Force mode set in atomic commit */
1971 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1972 		new_crtc_state->active_changed = true;
1973 
1974 	/*
1975 	 * atomic_check is expected to create the dc states. We need to release
1976 	 * them here, since they were duplicated as part of the suspend
1977 	 * procedure.
1978 	 */
1979 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1980 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1981 		if (dm_new_crtc_state->stream) {
1982 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1983 			dc_stream_release(dm_new_crtc_state->stream);
1984 			dm_new_crtc_state->stream = NULL;
1985 		}
1986 	}
1987 
1988 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1989 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1990 		if (dm_new_plane_state->dc_state) {
1991 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1992 			dc_plane_state_release(dm_new_plane_state->dc_state);
1993 			dm_new_plane_state->dc_state = NULL;
1994 		}
1995 	}
1996 
1997 	drm_atomic_helper_resume(ddev, dm->cached_state);
1998 
1999 	dm->cached_state = NULL;
2000 
2001 	amdgpu_dm_irq_resume_late(adev);
2002 
2003 	amdgpu_dm_smu_write_watermarks_table(adev);
2004 
2005 	return 0;
2006 }
2007 
2008 /**
2009  * DOC: DM Lifecycle
2010  *
2011  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2012  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2013  * the base driver's device list to be initialized and torn down accordingly.
2014  *
2015  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2016  */
2017 
2018 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2019 	.name = "dm",
2020 	.early_init = dm_early_init,
2021 	.late_init = dm_late_init,
2022 	.sw_init = dm_sw_init,
2023 	.sw_fini = dm_sw_fini,
2024 	.hw_init = dm_hw_init,
2025 	.hw_fini = dm_hw_fini,
2026 	.suspend = dm_suspend,
2027 	.resume = dm_resume,
2028 	.is_idle = dm_is_idle,
2029 	.wait_for_idle = dm_wait_for_idle,
2030 	.check_soft_reset = dm_check_soft_reset,
2031 	.soft_reset = dm_soft_reset,
2032 	.set_clockgating_state = dm_set_clockgating_state,
2033 	.set_powergating_state = dm_set_powergating_state,
2034 };
2035 
2036 const struct amdgpu_ip_block_version dm_ip_block =
2037 {
2038 	.type = AMD_IP_BLOCK_TYPE_DCE,
2039 	.major = 1,
2040 	.minor = 0,
2041 	.rev = 0,
2042 	.funcs = &amdgpu_dm_funcs,
2043 };
2044 
2045 
2046 /**
2047  * DOC: atomic
2048  *
2049  * *WIP*
2050  */
2051 
2052 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2053 	.fb_create = amdgpu_display_user_framebuffer_create,
2054 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2055 	.atomic_check = amdgpu_dm_atomic_check,
2056 	.atomic_commit = amdgpu_dm_atomic_commit,
2057 };
2058 
2059 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2060 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2061 };
2062 
2063 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2064 {
2065 	u32 max_cll, min_cll, max, min, q, r;
2066 	struct amdgpu_dm_backlight_caps *caps;
2067 	struct amdgpu_display_manager *dm;
2068 	struct drm_connector *conn_base;
2069 	struct amdgpu_device *adev;
2070 	struct dc_link *link = NULL;
2071 	static const u8 pre_computed_values[] = {
2072 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2073 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2074 
2075 	if (!aconnector || !aconnector->dc_link)
2076 		return;
2077 
2078 	link = aconnector->dc_link;
2079 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2080 		return;
2081 
2082 	conn_base = &aconnector->base;
2083 	adev = conn_base->dev->dev_private;
2084 	dm = &adev->dm;
2085 	caps = &dm->backlight_caps;
2086 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2087 	caps->aux_support = false;
2088 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2089 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2090 
2091 	if (caps->ext_caps->bits.oled == 1 ||
2092 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2093 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2094 		caps->aux_support = true;
2095 
2096 	/* From the specification (CTA-861-G), for calculating the maximum
2097 	 * luminance we need to use:
2098 	 *	Luminance = 50*2**(CV/32)
2099 	 * Where CV is a one-byte value.
2100 	 * For calculating this expression we may need float point precision;
2101 	 * to avoid this complexity level, we take advantage that CV is divided
2102 	 * by a constant. From the Euclids division algorithm, we know that CV
2103 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2104 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2105 	 * need to pre-compute the value of r/32. For pre-computing the values
2106 	 * We just used the following Ruby line:
2107 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2108 	 * The results of the above expressions can be verified at
2109 	 * pre_computed_values.
2110 	 */
2111 	q = max_cll >> 5;
2112 	r = max_cll % 32;
2113 	max = (1 << q) * pre_computed_values[r];
2114 
2115 	// min luminance: maxLum * (CV/255)^2 / 100
2116 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2117 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2118 
2119 	caps->aux_max_input_signal = max;
2120 	caps->aux_min_input_signal = min;
2121 }
2122 
2123 void amdgpu_dm_update_connector_after_detect(
2124 		struct amdgpu_dm_connector *aconnector)
2125 {
2126 	struct drm_connector *connector = &aconnector->base;
2127 	struct drm_device *dev = connector->dev;
2128 	struct dc_sink *sink;
2129 
2130 	/* MST handled by drm_mst framework */
2131 	if (aconnector->mst_mgr.mst_state == true)
2132 		return;
2133 
2134 	sink = aconnector->dc_link->local_sink;
2135 	if (sink)
2136 		dc_sink_retain(sink);
2137 
2138 	/*
2139 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2140 	 * the connector sink is set to either fake or physical sink depends on link status.
2141 	 * Skip if already done during boot.
2142 	 */
2143 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2144 			&& aconnector->dc_em_sink) {
2145 
2146 		/*
2147 		 * For S3 resume with headless use eml_sink to fake stream
2148 		 * because on resume connector->sink is set to NULL
2149 		 */
2150 		mutex_lock(&dev->mode_config.mutex);
2151 
2152 		if (sink) {
2153 			if (aconnector->dc_sink) {
2154 				amdgpu_dm_update_freesync_caps(connector, NULL);
2155 				/*
2156 				 * retain and release below are used to
2157 				 * bump up refcount for sink because the link doesn't point
2158 				 * to it anymore after disconnect, so on next crtc to connector
2159 				 * reshuffle by UMD we will get into unwanted dc_sink release
2160 				 */
2161 				dc_sink_release(aconnector->dc_sink);
2162 			}
2163 			aconnector->dc_sink = sink;
2164 			dc_sink_retain(aconnector->dc_sink);
2165 			amdgpu_dm_update_freesync_caps(connector,
2166 					aconnector->edid);
2167 		} else {
2168 			amdgpu_dm_update_freesync_caps(connector, NULL);
2169 			if (!aconnector->dc_sink) {
2170 				aconnector->dc_sink = aconnector->dc_em_sink;
2171 				dc_sink_retain(aconnector->dc_sink);
2172 			}
2173 		}
2174 
2175 		mutex_unlock(&dev->mode_config.mutex);
2176 
2177 		if (sink)
2178 			dc_sink_release(sink);
2179 		return;
2180 	}
2181 
2182 	/*
2183 	 * TODO: temporary guard to look for proper fix
2184 	 * if this sink is MST sink, we should not do anything
2185 	 */
2186 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2187 		dc_sink_release(sink);
2188 		return;
2189 	}
2190 
2191 	if (aconnector->dc_sink == sink) {
2192 		/*
2193 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2194 		 * Do nothing!!
2195 		 */
2196 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2197 				aconnector->connector_id);
2198 		if (sink)
2199 			dc_sink_release(sink);
2200 		return;
2201 	}
2202 
2203 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2204 		aconnector->connector_id, aconnector->dc_sink, sink);
2205 
2206 	mutex_lock(&dev->mode_config.mutex);
2207 
2208 	/*
2209 	 * 1. Update status of the drm connector
2210 	 * 2. Send an event and let userspace tell us what to do
2211 	 */
2212 	if (sink) {
2213 		/*
2214 		 * TODO: check if we still need the S3 mode update workaround.
2215 		 * If yes, put it here.
2216 		 */
2217 		if (aconnector->dc_sink)
2218 			amdgpu_dm_update_freesync_caps(connector, NULL);
2219 
2220 		aconnector->dc_sink = sink;
2221 		dc_sink_retain(aconnector->dc_sink);
2222 		if (sink->dc_edid.length == 0) {
2223 			aconnector->edid = NULL;
2224 			if (aconnector->dc_link->aux_mode) {
2225 				drm_dp_cec_unset_edid(
2226 					&aconnector->dm_dp_aux.aux);
2227 			}
2228 		} else {
2229 			aconnector->edid =
2230 				(struct edid *)sink->dc_edid.raw_edid;
2231 
2232 			drm_connector_update_edid_property(connector,
2233 							   aconnector->edid);
2234 
2235 			if (aconnector->dc_link->aux_mode)
2236 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2237 						    aconnector->edid);
2238 		}
2239 
2240 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2241 		update_connector_ext_caps(aconnector);
2242 	} else {
2243 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2244 		amdgpu_dm_update_freesync_caps(connector, NULL);
2245 		drm_connector_update_edid_property(connector, NULL);
2246 		aconnector->num_modes = 0;
2247 		dc_sink_release(aconnector->dc_sink);
2248 		aconnector->dc_sink = NULL;
2249 		aconnector->edid = NULL;
2250 #ifdef CONFIG_DRM_AMD_DC_HDCP
2251 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2252 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2253 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2254 #endif
2255 	}
2256 
2257 	mutex_unlock(&dev->mode_config.mutex);
2258 
2259 	update_subconnector_property(aconnector);
2260 
2261 	if (sink)
2262 		dc_sink_release(sink);
2263 }
2264 
2265 static void handle_hpd_irq(void *param)
2266 {
2267 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2268 	struct drm_connector *connector = &aconnector->base;
2269 	struct drm_device *dev = connector->dev;
2270 	enum dc_connection_type new_connection_type = dc_connection_none;
2271 #ifdef CONFIG_DRM_AMD_DC_HDCP
2272 	struct amdgpu_device *adev = dev->dev_private;
2273 #endif
2274 
2275 	/*
2276 	 * In case of failure or MST no need to update connector status or notify the OS
2277 	 * since (for MST case) MST does this in its own context.
2278 	 */
2279 	mutex_lock(&aconnector->hpd_lock);
2280 
2281 #ifdef CONFIG_DRM_AMD_DC_HDCP
2282 	if (adev->dm.hdcp_workqueue)
2283 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2284 #endif
2285 	if (aconnector->fake_enable)
2286 		aconnector->fake_enable = false;
2287 
2288 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2289 		DRM_ERROR("KMS: Failed to detect connector\n");
2290 
2291 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2292 		emulated_link_detect(aconnector->dc_link);
2293 
2294 
2295 		drm_modeset_lock_all(dev);
2296 		dm_restore_drm_connector_state(dev, connector);
2297 		drm_modeset_unlock_all(dev);
2298 
2299 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2300 			drm_kms_helper_hotplug_event(dev);
2301 
2302 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2303 		amdgpu_dm_update_connector_after_detect(aconnector);
2304 
2305 
2306 		drm_modeset_lock_all(dev);
2307 		dm_restore_drm_connector_state(dev, connector);
2308 		drm_modeset_unlock_all(dev);
2309 
2310 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2311 			drm_kms_helper_hotplug_event(dev);
2312 	}
2313 	mutex_unlock(&aconnector->hpd_lock);
2314 
2315 }
2316 
2317 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2318 {
2319 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2320 	uint8_t dret;
2321 	bool new_irq_handled = false;
2322 	int dpcd_addr;
2323 	int dpcd_bytes_to_read;
2324 
2325 	const int max_process_count = 30;
2326 	int process_count = 0;
2327 
2328 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2329 
2330 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2331 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2332 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2333 		dpcd_addr = DP_SINK_COUNT;
2334 	} else {
2335 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2336 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2337 		dpcd_addr = DP_SINK_COUNT_ESI;
2338 	}
2339 
2340 	dret = drm_dp_dpcd_read(
2341 		&aconnector->dm_dp_aux.aux,
2342 		dpcd_addr,
2343 		esi,
2344 		dpcd_bytes_to_read);
2345 
2346 	while (dret == dpcd_bytes_to_read &&
2347 		process_count < max_process_count) {
2348 		uint8_t retry;
2349 		dret = 0;
2350 
2351 		process_count++;
2352 
2353 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2354 		/* handle HPD short pulse irq */
2355 		if (aconnector->mst_mgr.mst_state)
2356 			drm_dp_mst_hpd_irq(
2357 				&aconnector->mst_mgr,
2358 				esi,
2359 				&new_irq_handled);
2360 
2361 		if (new_irq_handled) {
2362 			/* ACK at DPCD to notify down stream */
2363 			const int ack_dpcd_bytes_to_write =
2364 				dpcd_bytes_to_read - 1;
2365 
2366 			for (retry = 0; retry < 3; retry++) {
2367 				uint8_t wret;
2368 
2369 				wret = drm_dp_dpcd_write(
2370 					&aconnector->dm_dp_aux.aux,
2371 					dpcd_addr + 1,
2372 					&esi[1],
2373 					ack_dpcd_bytes_to_write);
2374 				if (wret == ack_dpcd_bytes_to_write)
2375 					break;
2376 			}
2377 
2378 			/* check if there is new irq to be handled */
2379 			dret = drm_dp_dpcd_read(
2380 				&aconnector->dm_dp_aux.aux,
2381 				dpcd_addr,
2382 				esi,
2383 				dpcd_bytes_to_read);
2384 
2385 			new_irq_handled = false;
2386 		} else {
2387 			break;
2388 		}
2389 	}
2390 
2391 	if (process_count == max_process_count)
2392 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2393 }
2394 
2395 static void handle_hpd_rx_irq(void *param)
2396 {
2397 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2398 	struct drm_connector *connector = &aconnector->base;
2399 	struct drm_device *dev = connector->dev;
2400 	struct dc_link *dc_link = aconnector->dc_link;
2401 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2402 	enum dc_connection_type new_connection_type = dc_connection_none;
2403 #ifdef CONFIG_DRM_AMD_DC_HDCP
2404 	union hpd_irq_data hpd_irq_data;
2405 	struct amdgpu_device *adev = dev->dev_private;
2406 
2407 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2408 #endif
2409 
2410 	/*
2411 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2412 	 * conflict, after implement i2c helper, this mutex should be
2413 	 * retired.
2414 	 */
2415 	if (dc_link->type != dc_connection_mst_branch)
2416 		mutex_lock(&aconnector->hpd_lock);
2417 
2418 
2419 #ifdef CONFIG_DRM_AMD_DC_HDCP
2420 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2421 #else
2422 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2423 #endif
2424 			!is_mst_root_connector) {
2425 		/* Downstream Port status changed. */
2426 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2427 			DRM_ERROR("KMS: Failed to detect connector\n");
2428 
2429 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2430 			emulated_link_detect(dc_link);
2431 
2432 			if (aconnector->fake_enable)
2433 				aconnector->fake_enable = false;
2434 
2435 			amdgpu_dm_update_connector_after_detect(aconnector);
2436 
2437 
2438 			drm_modeset_lock_all(dev);
2439 			dm_restore_drm_connector_state(dev, connector);
2440 			drm_modeset_unlock_all(dev);
2441 
2442 			drm_kms_helper_hotplug_event(dev);
2443 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2444 
2445 			if (aconnector->fake_enable)
2446 				aconnector->fake_enable = false;
2447 
2448 			amdgpu_dm_update_connector_after_detect(aconnector);
2449 
2450 
2451 			drm_modeset_lock_all(dev);
2452 			dm_restore_drm_connector_state(dev, connector);
2453 			drm_modeset_unlock_all(dev);
2454 
2455 			drm_kms_helper_hotplug_event(dev);
2456 		}
2457 	}
2458 #ifdef CONFIG_DRM_AMD_DC_HDCP
2459 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2460 		if (adev->dm.hdcp_workqueue)
2461 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2462 	}
2463 #endif
2464 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2465 	    (dc_link->type == dc_connection_mst_branch))
2466 		dm_handle_hpd_rx_irq(aconnector);
2467 
2468 	if (dc_link->type != dc_connection_mst_branch) {
2469 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2470 		mutex_unlock(&aconnector->hpd_lock);
2471 	}
2472 }
2473 
2474 static void register_hpd_handlers(struct amdgpu_device *adev)
2475 {
2476 	struct drm_device *dev = adev->ddev;
2477 	struct drm_connector *connector;
2478 	struct amdgpu_dm_connector *aconnector;
2479 	const struct dc_link *dc_link;
2480 	struct dc_interrupt_params int_params = {0};
2481 
2482 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2483 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2484 
2485 	list_for_each_entry(connector,
2486 			&dev->mode_config.connector_list, head)	{
2487 
2488 		aconnector = to_amdgpu_dm_connector(connector);
2489 		dc_link = aconnector->dc_link;
2490 
2491 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2492 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2493 			int_params.irq_source = dc_link->irq_source_hpd;
2494 
2495 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2496 					handle_hpd_irq,
2497 					(void *) aconnector);
2498 		}
2499 
2500 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2501 
2502 			/* Also register for DP short pulse (hpd_rx). */
2503 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2504 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2505 
2506 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2507 					handle_hpd_rx_irq,
2508 					(void *) aconnector);
2509 		}
2510 	}
2511 }
2512 
2513 /* Register IRQ sources and initialize IRQ callbacks */
2514 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2515 {
2516 	struct dc *dc = adev->dm.dc;
2517 	struct common_irq_params *c_irq_params;
2518 	struct dc_interrupt_params int_params = {0};
2519 	int r;
2520 	int i;
2521 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2522 
2523 	if (adev->asic_type >= CHIP_VEGA10)
2524 		client_id = SOC15_IH_CLIENTID_DCE;
2525 
2526 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2527 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2528 
2529 	/*
2530 	 * Actions of amdgpu_irq_add_id():
2531 	 * 1. Register a set() function with base driver.
2532 	 *    Base driver will call set() function to enable/disable an
2533 	 *    interrupt in DC hardware.
2534 	 * 2. Register amdgpu_dm_irq_handler().
2535 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2536 	 *    coming from DC hardware.
2537 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2538 	 *    for acknowledging and handling. */
2539 
2540 	/* Use VBLANK interrupt */
2541 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2542 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2543 		if (r) {
2544 			DRM_ERROR("Failed to add crtc irq id!\n");
2545 			return r;
2546 		}
2547 
2548 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2549 		int_params.irq_source =
2550 			dc_interrupt_to_irq_source(dc, i, 0);
2551 
2552 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2553 
2554 		c_irq_params->adev = adev;
2555 		c_irq_params->irq_src = int_params.irq_source;
2556 
2557 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2558 				dm_crtc_high_irq, c_irq_params);
2559 	}
2560 
2561 	/* Use VUPDATE interrupt */
2562 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2563 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2564 		if (r) {
2565 			DRM_ERROR("Failed to add vupdate irq id!\n");
2566 			return r;
2567 		}
2568 
2569 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2570 		int_params.irq_source =
2571 			dc_interrupt_to_irq_source(dc, i, 0);
2572 
2573 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2574 
2575 		c_irq_params->adev = adev;
2576 		c_irq_params->irq_src = int_params.irq_source;
2577 
2578 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2579 				dm_vupdate_high_irq, c_irq_params);
2580 	}
2581 
2582 	/* Use GRPH_PFLIP interrupt */
2583 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2584 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2585 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2586 		if (r) {
2587 			DRM_ERROR("Failed to add page flip irq id!\n");
2588 			return r;
2589 		}
2590 
2591 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2592 		int_params.irq_source =
2593 			dc_interrupt_to_irq_source(dc, i, 0);
2594 
2595 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2596 
2597 		c_irq_params->adev = adev;
2598 		c_irq_params->irq_src = int_params.irq_source;
2599 
2600 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2601 				dm_pflip_high_irq, c_irq_params);
2602 
2603 	}
2604 
2605 	/* HPD */
2606 	r = amdgpu_irq_add_id(adev, client_id,
2607 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2608 	if (r) {
2609 		DRM_ERROR("Failed to add hpd irq id!\n");
2610 		return r;
2611 	}
2612 
2613 	register_hpd_handlers(adev);
2614 
2615 	return 0;
2616 }
2617 
2618 #if defined(CONFIG_DRM_AMD_DC_DCN)
2619 /* Register IRQ sources and initialize IRQ callbacks */
2620 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2621 {
2622 	struct dc *dc = adev->dm.dc;
2623 	struct common_irq_params *c_irq_params;
2624 	struct dc_interrupt_params int_params = {0};
2625 	int r;
2626 	int i;
2627 
2628 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2629 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2630 
2631 	/*
2632 	 * Actions of amdgpu_irq_add_id():
2633 	 * 1. Register a set() function with base driver.
2634 	 *    Base driver will call set() function to enable/disable an
2635 	 *    interrupt in DC hardware.
2636 	 * 2. Register amdgpu_dm_irq_handler().
2637 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2638 	 *    coming from DC hardware.
2639 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2640 	 *    for acknowledging and handling.
2641 	 */
2642 
2643 	/* Use VSTARTUP interrupt */
2644 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2645 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2646 			i++) {
2647 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2648 
2649 		if (r) {
2650 			DRM_ERROR("Failed to add crtc irq id!\n");
2651 			return r;
2652 		}
2653 
2654 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2655 		int_params.irq_source =
2656 			dc_interrupt_to_irq_source(dc, i, 0);
2657 
2658 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2659 
2660 		c_irq_params->adev = adev;
2661 		c_irq_params->irq_src = int_params.irq_source;
2662 
2663 		amdgpu_dm_irq_register_interrupt(
2664 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2665 	}
2666 
2667 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2668 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2669 	 * to trigger at end of each vblank, regardless of state of the lock,
2670 	 * matching DCE behaviour.
2671 	 */
2672 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2673 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2674 	     i++) {
2675 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2676 
2677 		if (r) {
2678 			DRM_ERROR("Failed to add vupdate irq id!\n");
2679 			return r;
2680 		}
2681 
2682 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2683 		int_params.irq_source =
2684 			dc_interrupt_to_irq_source(dc, i, 0);
2685 
2686 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2687 
2688 		c_irq_params->adev = adev;
2689 		c_irq_params->irq_src = int_params.irq_source;
2690 
2691 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2692 				dm_vupdate_high_irq, c_irq_params);
2693 	}
2694 
2695 	/* Use GRPH_PFLIP interrupt */
2696 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2697 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2698 			i++) {
2699 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2700 		if (r) {
2701 			DRM_ERROR("Failed to add page flip irq id!\n");
2702 			return r;
2703 		}
2704 
2705 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2706 		int_params.irq_source =
2707 			dc_interrupt_to_irq_source(dc, i, 0);
2708 
2709 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2710 
2711 		c_irq_params->adev = adev;
2712 		c_irq_params->irq_src = int_params.irq_source;
2713 
2714 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2715 				dm_pflip_high_irq, c_irq_params);
2716 
2717 	}
2718 
2719 	/* HPD */
2720 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2721 			&adev->hpd_irq);
2722 	if (r) {
2723 		DRM_ERROR("Failed to add hpd irq id!\n");
2724 		return r;
2725 	}
2726 
2727 	register_hpd_handlers(adev);
2728 
2729 	return 0;
2730 }
2731 #endif
2732 
2733 /*
2734  * Acquires the lock for the atomic state object and returns
2735  * the new atomic state.
2736  *
2737  * This should only be called during atomic check.
2738  */
2739 static int dm_atomic_get_state(struct drm_atomic_state *state,
2740 			       struct dm_atomic_state **dm_state)
2741 {
2742 	struct drm_device *dev = state->dev;
2743 	struct amdgpu_device *adev = dev->dev_private;
2744 	struct amdgpu_display_manager *dm = &adev->dm;
2745 	struct drm_private_state *priv_state;
2746 
2747 	if (*dm_state)
2748 		return 0;
2749 
2750 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2751 	if (IS_ERR(priv_state))
2752 		return PTR_ERR(priv_state);
2753 
2754 	*dm_state = to_dm_atomic_state(priv_state);
2755 
2756 	return 0;
2757 }
2758 
2759 static struct dm_atomic_state *
2760 dm_atomic_get_new_state(struct drm_atomic_state *state)
2761 {
2762 	struct drm_device *dev = state->dev;
2763 	struct amdgpu_device *adev = dev->dev_private;
2764 	struct amdgpu_display_manager *dm = &adev->dm;
2765 	struct drm_private_obj *obj;
2766 	struct drm_private_state *new_obj_state;
2767 	int i;
2768 
2769 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2770 		if (obj->funcs == dm->atomic_obj.funcs)
2771 			return to_dm_atomic_state(new_obj_state);
2772 	}
2773 
2774 	return NULL;
2775 }
2776 
2777 static struct dm_atomic_state *
2778 dm_atomic_get_old_state(struct drm_atomic_state *state)
2779 {
2780 	struct drm_device *dev = state->dev;
2781 	struct amdgpu_device *adev = dev->dev_private;
2782 	struct amdgpu_display_manager *dm = &adev->dm;
2783 	struct drm_private_obj *obj;
2784 	struct drm_private_state *old_obj_state;
2785 	int i;
2786 
2787 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2788 		if (obj->funcs == dm->atomic_obj.funcs)
2789 			return to_dm_atomic_state(old_obj_state);
2790 	}
2791 
2792 	return NULL;
2793 }
2794 
2795 static struct drm_private_state *
2796 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2797 {
2798 	struct dm_atomic_state *old_state, *new_state;
2799 
2800 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2801 	if (!new_state)
2802 		return NULL;
2803 
2804 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2805 
2806 	old_state = to_dm_atomic_state(obj->state);
2807 
2808 	if (old_state && old_state->context)
2809 		new_state->context = dc_copy_state(old_state->context);
2810 
2811 	if (!new_state->context) {
2812 		kfree(new_state);
2813 		return NULL;
2814 	}
2815 
2816 	return &new_state->base;
2817 }
2818 
2819 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2820 				    struct drm_private_state *state)
2821 {
2822 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2823 
2824 	if (dm_state && dm_state->context)
2825 		dc_release_state(dm_state->context);
2826 
2827 	kfree(dm_state);
2828 }
2829 
2830 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2831 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2832 	.atomic_destroy_state = dm_atomic_destroy_state,
2833 };
2834 
2835 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2836 {
2837 	struct dm_atomic_state *state;
2838 	int r;
2839 
2840 	adev->mode_info.mode_config_initialized = true;
2841 
2842 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2843 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2844 
2845 	adev->ddev->mode_config.max_width = 16384;
2846 	adev->ddev->mode_config.max_height = 16384;
2847 
2848 	adev->ddev->mode_config.preferred_depth = 24;
2849 	adev->ddev->mode_config.prefer_shadow = 1;
2850 	/* indicates support for immediate flip */
2851 	adev->ddev->mode_config.async_page_flip = true;
2852 
2853 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2854 
2855 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2856 	if (!state)
2857 		return -ENOMEM;
2858 
2859 	state->context = dc_create_state(adev->dm.dc);
2860 	if (!state->context) {
2861 		kfree(state);
2862 		return -ENOMEM;
2863 	}
2864 
2865 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2866 
2867 	drm_atomic_private_obj_init(adev->ddev,
2868 				    &adev->dm.atomic_obj,
2869 				    &state->base,
2870 				    &dm_atomic_state_funcs);
2871 
2872 	r = amdgpu_display_modeset_create_props(adev);
2873 	if (r)
2874 		return r;
2875 
2876 	r = amdgpu_dm_audio_init(adev);
2877 	if (r)
2878 		return r;
2879 
2880 	return 0;
2881 }
2882 
2883 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2884 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2885 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2886 
2887 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2888 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2889 
2890 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2891 {
2892 #if defined(CONFIG_ACPI)
2893 	struct amdgpu_dm_backlight_caps caps;
2894 
2895 	if (dm->backlight_caps.caps_valid)
2896 		return;
2897 
2898 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2899 	if (caps.caps_valid) {
2900 		dm->backlight_caps.caps_valid = true;
2901 		if (caps.aux_support)
2902 			return;
2903 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2904 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2905 	} else {
2906 		dm->backlight_caps.min_input_signal =
2907 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2908 		dm->backlight_caps.max_input_signal =
2909 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2910 	}
2911 #else
2912 	if (dm->backlight_caps.aux_support)
2913 		return;
2914 
2915 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2916 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2917 #endif
2918 }
2919 
2920 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2921 {
2922 	bool rc;
2923 
2924 	if (!link)
2925 		return 1;
2926 
2927 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2928 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2929 
2930 	return rc ? 0 : 1;
2931 }
2932 
2933 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2934 			      const uint32_t user_brightness)
2935 {
2936 	u32 min, max, conversion_pace;
2937 	u32 brightness = user_brightness;
2938 
2939 	if (!caps)
2940 		goto out;
2941 
2942 	if (!caps->aux_support) {
2943 		max = caps->max_input_signal;
2944 		min = caps->min_input_signal;
2945 		/*
2946 		 * The brightness input is in the range 0-255
2947 		 * It needs to be rescaled to be between the
2948 		 * requested min and max input signal
2949 		 * It also needs to be scaled up by 0x101 to
2950 		 * match the DC interface which has a range of
2951 		 * 0 to 0xffff
2952 		 */
2953 		conversion_pace = 0x101;
2954 		brightness =
2955 			user_brightness
2956 			* conversion_pace
2957 			* (max - min)
2958 			/ AMDGPU_MAX_BL_LEVEL
2959 			+ min * conversion_pace;
2960 	} else {
2961 		/* TODO
2962 		 * We are doing a linear interpolation here, which is OK but
2963 		 * does not provide the optimal result. We probably want
2964 		 * something close to the Perceptual Quantizer (PQ) curve.
2965 		 */
2966 		max = caps->aux_max_input_signal;
2967 		min = caps->aux_min_input_signal;
2968 
2969 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2970 			       + user_brightness * max;
2971 		// Multiple the value by 1000 since we use millinits
2972 		brightness *= 1000;
2973 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2974 	}
2975 
2976 out:
2977 	return brightness;
2978 }
2979 
2980 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2981 {
2982 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2983 	struct amdgpu_dm_backlight_caps caps;
2984 	struct dc_link *link = NULL;
2985 	u32 brightness;
2986 	bool rc;
2987 
2988 	amdgpu_dm_update_backlight_caps(dm);
2989 	caps = dm->backlight_caps;
2990 
2991 	link = (struct dc_link *)dm->backlight_link;
2992 
2993 	brightness = convert_brightness(&caps, bd->props.brightness);
2994 	// Change brightness based on AUX property
2995 	if (caps.aux_support)
2996 		return set_backlight_via_aux(link, brightness);
2997 
2998 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2999 
3000 	return rc ? 0 : 1;
3001 }
3002 
3003 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3004 {
3005 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3006 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3007 
3008 	if (ret == DC_ERROR_UNEXPECTED)
3009 		return bd->props.brightness;
3010 	return ret;
3011 }
3012 
3013 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3014 	.options = BL_CORE_SUSPENDRESUME,
3015 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3016 	.update_status	= amdgpu_dm_backlight_update_status,
3017 };
3018 
3019 static void
3020 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3021 {
3022 	char bl_name[16];
3023 	struct backlight_properties props = { 0 };
3024 
3025 	amdgpu_dm_update_backlight_caps(dm);
3026 
3027 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3028 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3029 	props.type = BACKLIGHT_RAW;
3030 
3031 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3032 			dm->adev->ddev->primary->index);
3033 
3034 	dm->backlight_dev = backlight_device_register(bl_name,
3035 			dm->adev->ddev->dev,
3036 			dm,
3037 			&amdgpu_dm_backlight_ops,
3038 			&props);
3039 
3040 	if (IS_ERR(dm->backlight_dev))
3041 		DRM_ERROR("DM: Backlight registration failed!\n");
3042 	else
3043 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3044 }
3045 
3046 #endif
3047 
3048 static int initialize_plane(struct amdgpu_display_manager *dm,
3049 			    struct amdgpu_mode_info *mode_info, int plane_id,
3050 			    enum drm_plane_type plane_type,
3051 			    const struct dc_plane_cap *plane_cap)
3052 {
3053 	struct drm_plane *plane;
3054 	unsigned long possible_crtcs;
3055 	int ret = 0;
3056 
3057 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3058 	if (!plane) {
3059 		DRM_ERROR("KMS: Failed to allocate plane\n");
3060 		return -ENOMEM;
3061 	}
3062 	plane->type = plane_type;
3063 
3064 	/*
3065 	 * HACK: IGT tests expect that the primary plane for a CRTC
3066 	 * can only have one possible CRTC. Only expose support for
3067 	 * any CRTC if they're not going to be used as a primary plane
3068 	 * for a CRTC - like overlay or underlay planes.
3069 	 */
3070 	possible_crtcs = 1 << plane_id;
3071 	if (plane_id >= dm->dc->caps.max_streams)
3072 		possible_crtcs = 0xff;
3073 
3074 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3075 
3076 	if (ret) {
3077 		DRM_ERROR("KMS: Failed to initialize plane\n");
3078 		kfree(plane);
3079 		return ret;
3080 	}
3081 
3082 	if (mode_info)
3083 		mode_info->planes[plane_id] = plane;
3084 
3085 	return ret;
3086 }
3087 
3088 
3089 static void register_backlight_device(struct amdgpu_display_manager *dm,
3090 				      struct dc_link *link)
3091 {
3092 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3093 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3094 
3095 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3096 	    link->type != dc_connection_none) {
3097 		/*
3098 		 * Event if registration failed, we should continue with
3099 		 * DM initialization because not having a backlight control
3100 		 * is better then a black screen.
3101 		 */
3102 		amdgpu_dm_register_backlight_device(dm);
3103 
3104 		if (dm->backlight_dev)
3105 			dm->backlight_link = link;
3106 	}
3107 #endif
3108 }
3109 
3110 
3111 /*
3112  * In this architecture, the association
3113  * connector -> encoder -> crtc
3114  * id not really requried. The crtc and connector will hold the
3115  * display_index as an abstraction to use with DAL component
3116  *
3117  * Returns 0 on success
3118  */
3119 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3120 {
3121 	struct amdgpu_display_manager *dm = &adev->dm;
3122 	int32_t i;
3123 	struct amdgpu_dm_connector *aconnector = NULL;
3124 	struct amdgpu_encoder *aencoder = NULL;
3125 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3126 	uint32_t link_cnt;
3127 	int32_t primary_planes;
3128 	enum dc_connection_type new_connection_type = dc_connection_none;
3129 	const struct dc_plane_cap *plane;
3130 
3131 	link_cnt = dm->dc->caps.max_links;
3132 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3133 		DRM_ERROR("DM: Failed to initialize mode config\n");
3134 		return -EINVAL;
3135 	}
3136 
3137 	/* There is one primary plane per CRTC */
3138 	primary_planes = dm->dc->caps.max_streams;
3139 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3140 
3141 	/*
3142 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3143 	 * Order is reversed to match iteration order in atomic check.
3144 	 */
3145 	for (i = (primary_planes - 1); i >= 0; i--) {
3146 		plane = &dm->dc->caps.planes[i];
3147 
3148 		if (initialize_plane(dm, mode_info, i,
3149 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3150 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3151 			goto fail;
3152 		}
3153 	}
3154 
3155 	/*
3156 	 * Initialize overlay planes, index starting after primary planes.
3157 	 * These planes have a higher DRM index than the primary planes since
3158 	 * they should be considered as having a higher z-order.
3159 	 * Order is reversed to match iteration order in atomic check.
3160 	 *
3161 	 * Only support DCN for now, and only expose one so we don't encourage
3162 	 * userspace to use up all the pipes.
3163 	 */
3164 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3165 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3166 
3167 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3168 			continue;
3169 
3170 		if (!plane->blends_with_above || !plane->blends_with_below)
3171 			continue;
3172 
3173 		if (!plane->pixel_format_support.argb8888)
3174 			continue;
3175 
3176 		if (initialize_plane(dm, NULL, primary_planes + i,
3177 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3178 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3179 			goto fail;
3180 		}
3181 
3182 		/* Only create one overlay plane. */
3183 		break;
3184 	}
3185 
3186 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3187 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3188 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3189 			goto fail;
3190 		}
3191 
3192 	dm->display_indexes_num = dm->dc->caps.max_streams;
3193 
3194 	/* loops over all connectors on the board */
3195 	for (i = 0; i < link_cnt; i++) {
3196 		struct dc_link *link = NULL;
3197 
3198 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3199 			DRM_ERROR(
3200 				"KMS: Cannot support more than %d display indexes\n",
3201 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3202 			continue;
3203 		}
3204 
3205 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3206 		if (!aconnector)
3207 			goto fail;
3208 
3209 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3210 		if (!aencoder)
3211 			goto fail;
3212 
3213 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3214 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3215 			goto fail;
3216 		}
3217 
3218 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3219 			DRM_ERROR("KMS: Failed to initialize connector\n");
3220 			goto fail;
3221 		}
3222 
3223 		link = dc_get_link_at_index(dm->dc, i);
3224 
3225 		if (!dc_link_detect_sink(link, &new_connection_type))
3226 			DRM_ERROR("KMS: Failed to detect connector\n");
3227 
3228 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3229 			emulated_link_detect(link);
3230 			amdgpu_dm_update_connector_after_detect(aconnector);
3231 
3232 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3233 			amdgpu_dm_update_connector_after_detect(aconnector);
3234 			register_backlight_device(dm, link);
3235 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3236 				amdgpu_dm_set_psr_caps(link);
3237 		}
3238 
3239 
3240 	}
3241 
3242 	/* Software is initialized. Now we can register interrupt handlers. */
3243 	switch (adev->asic_type) {
3244 	case CHIP_BONAIRE:
3245 	case CHIP_HAWAII:
3246 	case CHIP_KAVERI:
3247 	case CHIP_KABINI:
3248 	case CHIP_MULLINS:
3249 	case CHIP_TONGA:
3250 	case CHIP_FIJI:
3251 	case CHIP_CARRIZO:
3252 	case CHIP_STONEY:
3253 	case CHIP_POLARIS11:
3254 	case CHIP_POLARIS10:
3255 	case CHIP_POLARIS12:
3256 	case CHIP_VEGAM:
3257 	case CHIP_VEGA10:
3258 	case CHIP_VEGA12:
3259 	case CHIP_VEGA20:
3260 		if (dce110_register_irq_handlers(dm->adev)) {
3261 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3262 			goto fail;
3263 		}
3264 		break;
3265 #if defined(CONFIG_DRM_AMD_DC_DCN)
3266 	case CHIP_RAVEN:
3267 	case CHIP_NAVI12:
3268 	case CHIP_NAVI10:
3269 	case CHIP_NAVI14:
3270 	case CHIP_RENOIR:
3271 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3272 	case CHIP_SIENNA_CICHLID:
3273 	case CHIP_NAVY_FLOUNDER:
3274 #endif
3275 		if (dcn10_register_irq_handlers(dm->adev)) {
3276 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3277 			goto fail;
3278 		}
3279 		break;
3280 #endif
3281 	default:
3282 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3283 		goto fail;
3284 	}
3285 
3286 	/* No userspace support. */
3287 	dm->dc->debug.disable_tri_buf = true;
3288 
3289 	return 0;
3290 fail:
3291 	kfree(aencoder);
3292 	kfree(aconnector);
3293 
3294 	return -EINVAL;
3295 }
3296 
3297 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3298 {
3299 	drm_mode_config_cleanup(dm->ddev);
3300 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3301 	return;
3302 }
3303 
3304 /******************************************************************************
3305  * amdgpu_display_funcs functions
3306  *****************************************************************************/
3307 
3308 /*
3309  * dm_bandwidth_update - program display watermarks
3310  *
3311  * @adev: amdgpu_device pointer
3312  *
3313  * Calculate and program the display watermarks and line buffer allocation.
3314  */
3315 static void dm_bandwidth_update(struct amdgpu_device *adev)
3316 {
3317 	/* TODO: implement later */
3318 }
3319 
3320 static const struct amdgpu_display_funcs dm_display_funcs = {
3321 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3322 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3323 	.backlight_set_level = NULL, /* never called for DC */
3324 	.backlight_get_level = NULL, /* never called for DC */
3325 	.hpd_sense = NULL,/* called unconditionally */
3326 	.hpd_set_polarity = NULL, /* called unconditionally */
3327 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3328 	.page_flip_get_scanoutpos =
3329 		dm_crtc_get_scanoutpos,/* called unconditionally */
3330 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3331 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3332 };
3333 
3334 #if defined(CONFIG_DEBUG_KERNEL_DC)
3335 
3336 static ssize_t s3_debug_store(struct device *device,
3337 			      struct device_attribute *attr,
3338 			      const char *buf,
3339 			      size_t count)
3340 {
3341 	int ret;
3342 	int s3_state;
3343 	struct drm_device *drm_dev = dev_get_drvdata(device);
3344 	struct amdgpu_device *adev = drm_dev->dev_private;
3345 
3346 	ret = kstrtoint(buf, 0, &s3_state);
3347 
3348 	if (ret == 0) {
3349 		if (s3_state) {
3350 			dm_resume(adev);
3351 			drm_kms_helper_hotplug_event(adev->ddev);
3352 		} else
3353 			dm_suspend(adev);
3354 	}
3355 
3356 	return ret == 0 ? count : 0;
3357 }
3358 
3359 DEVICE_ATTR_WO(s3_debug);
3360 
3361 #endif
3362 
3363 static int dm_early_init(void *handle)
3364 {
3365 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3366 
3367 	switch (adev->asic_type) {
3368 	case CHIP_BONAIRE:
3369 	case CHIP_HAWAII:
3370 		adev->mode_info.num_crtc = 6;
3371 		adev->mode_info.num_hpd = 6;
3372 		adev->mode_info.num_dig = 6;
3373 		break;
3374 	case CHIP_KAVERI:
3375 		adev->mode_info.num_crtc = 4;
3376 		adev->mode_info.num_hpd = 6;
3377 		adev->mode_info.num_dig = 7;
3378 		break;
3379 	case CHIP_KABINI:
3380 	case CHIP_MULLINS:
3381 		adev->mode_info.num_crtc = 2;
3382 		adev->mode_info.num_hpd = 6;
3383 		adev->mode_info.num_dig = 6;
3384 		break;
3385 	case CHIP_FIJI:
3386 	case CHIP_TONGA:
3387 		adev->mode_info.num_crtc = 6;
3388 		adev->mode_info.num_hpd = 6;
3389 		adev->mode_info.num_dig = 7;
3390 		break;
3391 	case CHIP_CARRIZO:
3392 		adev->mode_info.num_crtc = 3;
3393 		adev->mode_info.num_hpd = 6;
3394 		adev->mode_info.num_dig = 9;
3395 		break;
3396 	case CHIP_STONEY:
3397 		adev->mode_info.num_crtc = 2;
3398 		adev->mode_info.num_hpd = 6;
3399 		adev->mode_info.num_dig = 9;
3400 		break;
3401 	case CHIP_POLARIS11:
3402 	case CHIP_POLARIS12:
3403 		adev->mode_info.num_crtc = 5;
3404 		adev->mode_info.num_hpd = 5;
3405 		adev->mode_info.num_dig = 5;
3406 		break;
3407 	case CHIP_POLARIS10:
3408 	case CHIP_VEGAM:
3409 		adev->mode_info.num_crtc = 6;
3410 		adev->mode_info.num_hpd = 6;
3411 		adev->mode_info.num_dig = 6;
3412 		break;
3413 	case CHIP_VEGA10:
3414 	case CHIP_VEGA12:
3415 	case CHIP_VEGA20:
3416 		adev->mode_info.num_crtc = 6;
3417 		adev->mode_info.num_hpd = 6;
3418 		adev->mode_info.num_dig = 6;
3419 		break;
3420 #if defined(CONFIG_DRM_AMD_DC_DCN)
3421 	case CHIP_RAVEN:
3422 		adev->mode_info.num_crtc = 4;
3423 		adev->mode_info.num_hpd = 4;
3424 		adev->mode_info.num_dig = 4;
3425 		break;
3426 #endif
3427 	case CHIP_NAVI10:
3428 	case CHIP_NAVI12:
3429 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3430 	case CHIP_SIENNA_CICHLID:
3431 	case CHIP_NAVY_FLOUNDER:
3432 #endif
3433 		adev->mode_info.num_crtc = 6;
3434 		adev->mode_info.num_hpd = 6;
3435 		adev->mode_info.num_dig = 6;
3436 		break;
3437 	case CHIP_NAVI14:
3438 		adev->mode_info.num_crtc = 5;
3439 		adev->mode_info.num_hpd = 5;
3440 		adev->mode_info.num_dig = 5;
3441 		break;
3442 	case CHIP_RENOIR:
3443 		adev->mode_info.num_crtc = 4;
3444 		adev->mode_info.num_hpd = 4;
3445 		adev->mode_info.num_dig = 4;
3446 		break;
3447 	default:
3448 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3449 		return -EINVAL;
3450 	}
3451 
3452 	amdgpu_dm_set_irq_funcs(adev);
3453 
3454 	if (adev->mode_info.funcs == NULL)
3455 		adev->mode_info.funcs = &dm_display_funcs;
3456 
3457 	/*
3458 	 * Note: Do NOT change adev->audio_endpt_rreg and
3459 	 * adev->audio_endpt_wreg because they are initialised in
3460 	 * amdgpu_device_init()
3461 	 */
3462 #if defined(CONFIG_DEBUG_KERNEL_DC)
3463 	device_create_file(
3464 		adev->ddev->dev,
3465 		&dev_attr_s3_debug);
3466 #endif
3467 
3468 	return 0;
3469 }
3470 
3471 static bool modeset_required(struct drm_crtc_state *crtc_state,
3472 			     struct dc_stream_state *new_stream,
3473 			     struct dc_stream_state *old_stream)
3474 {
3475 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3476 		return false;
3477 
3478 	if (!crtc_state->enable)
3479 		return false;
3480 
3481 	return crtc_state->active;
3482 }
3483 
3484 static bool modereset_required(struct drm_crtc_state *crtc_state)
3485 {
3486 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3487 		return false;
3488 
3489 	return !crtc_state->enable || !crtc_state->active;
3490 }
3491 
3492 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3493 {
3494 	drm_encoder_cleanup(encoder);
3495 	kfree(encoder);
3496 }
3497 
3498 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3499 	.destroy = amdgpu_dm_encoder_destroy,
3500 };
3501 
3502 
3503 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3504 				struct dc_scaling_info *scaling_info)
3505 {
3506 	int scale_w, scale_h;
3507 
3508 	memset(scaling_info, 0, sizeof(*scaling_info));
3509 
3510 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3511 	scaling_info->src_rect.x = state->src_x >> 16;
3512 	scaling_info->src_rect.y = state->src_y >> 16;
3513 
3514 	scaling_info->src_rect.width = state->src_w >> 16;
3515 	if (scaling_info->src_rect.width == 0)
3516 		return -EINVAL;
3517 
3518 	scaling_info->src_rect.height = state->src_h >> 16;
3519 	if (scaling_info->src_rect.height == 0)
3520 		return -EINVAL;
3521 
3522 	scaling_info->dst_rect.x = state->crtc_x;
3523 	scaling_info->dst_rect.y = state->crtc_y;
3524 
3525 	if (state->crtc_w == 0)
3526 		return -EINVAL;
3527 
3528 	scaling_info->dst_rect.width = state->crtc_w;
3529 
3530 	if (state->crtc_h == 0)
3531 		return -EINVAL;
3532 
3533 	scaling_info->dst_rect.height = state->crtc_h;
3534 
3535 	/* DRM doesn't specify clipping on destination output. */
3536 	scaling_info->clip_rect = scaling_info->dst_rect;
3537 
3538 	/* TODO: Validate scaling per-format with DC plane caps */
3539 	scale_w = scaling_info->dst_rect.width * 1000 /
3540 		  scaling_info->src_rect.width;
3541 
3542 	if (scale_w < 250 || scale_w > 16000)
3543 		return -EINVAL;
3544 
3545 	scale_h = scaling_info->dst_rect.height * 1000 /
3546 		  scaling_info->src_rect.height;
3547 
3548 	if (scale_h < 250 || scale_h > 16000)
3549 		return -EINVAL;
3550 
3551 	/*
3552 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3553 	 * assume reasonable defaults based on the format.
3554 	 */
3555 
3556 	return 0;
3557 }
3558 
3559 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3560 		       uint64_t *tiling_flags, bool *tmz_surface)
3561 {
3562 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3563 	int r = amdgpu_bo_reserve(rbo, false);
3564 
3565 	if (unlikely(r)) {
3566 		/* Don't show error message when returning -ERESTARTSYS */
3567 		if (r != -ERESTARTSYS)
3568 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3569 		return r;
3570 	}
3571 
3572 	if (tiling_flags)
3573 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3574 
3575 	if (tmz_surface)
3576 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3577 
3578 	amdgpu_bo_unreserve(rbo);
3579 
3580 	return r;
3581 }
3582 
3583 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3584 {
3585 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3586 
3587 	return offset ? (address + offset * 256) : 0;
3588 }
3589 
3590 static int
3591 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3592 			  const struct amdgpu_framebuffer *afb,
3593 			  const enum surface_pixel_format format,
3594 			  const enum dc_rotation_angle rotation,
3595 			  const struct plane_size *plane_size,
3596 			  const union dc_tiling_info *tiling_info,
3597 			  const uint64_t info,
3598 			  struct dc_plane_dcc_param *dcc,
3599 			  struct dc_plane_address *address,
3600 			  bool force_disable_dcc)
3601 {
3602 	struct dc *dc = adev->dm.dc;
3603 	struct dc_dcc_surface_param input;
3604 	struct dc_surface_dcc_cap output;
3605 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3606 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3607 	uint64_t dcc_address;
3608 
3609 	memset(&input, 0, sizeof(input));
3610 	memset(&output, 0, sizeof(output));
3611 
3612 	if (force_disable_dcc)
3613 		return 0;
3614 
3615 	if (!offset)
3616 		return 0;
3617 
3618 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3619 		return 0;
3620 
3621 	if (!dc->cap_funcs.get_dcc_compression_cap)
3622 		return -EINVAL;
3623 
3624 	input.format = format;
3625 	input.surface_size.width = plane_size->surface_size.width;
3626 	input.surface_size.height = plane_size->surface_size.height;
3627 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3628 
3629 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3630 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3631 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3632 		input.scan = SCAN_DIRECTION_VERTICAL;
3633 
3634 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3635 		return -EINVAL;
3636 
3637 	if (!output.capable)
3638 		return -EINVAL;
3639 
3640 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3641 		return -EINVAL;
3642 
3643 	dcc->enable = 1;
3644 	dcc->meta_pitch =
3645 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3646 	dcc->independent_64b_blks = i64b;
3647 
3648 	dcc_address = get_dcc_address(afb->address, info);
3649 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3650 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3651 
3652 	return 0;
3653 }
3654 
3655 static int
3656 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3657 			     const struct amdgpu_framebuffer *afb,
3658 			     const enum surface_pixel_format format,
3659 			     const enum dc_rotation_angle rotation,
3660 			     const uint64_t tiling_flags,
3661 			     union dc_tiling_info *tiling_info,
3662 			     struct plane_size *plane_size,
3663 			     struct dc_plane_dcc_param *dcc,
3664 			     struct dc_plane_address *address,
3665 			     bool tmz_surface,
3666 			     bool force_disable_dcc)
3667 {
3668 	const struct drm_framebuffer *fb = &afb->base;
3669 	int ret;
3670 
3671 	memset(tiling_info, 0, sizeof(*tiling_info));
3672 	memset(plane_size, 0, sizeof(*plane_size));
3673 	memset(dcc, 0, sizeof(*dcc));
3674 	memset(address, 0, sizeof(*address));
3675 
3676 	address->tmz_surface = tmz_surface;
3677 
3678 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3679 		plane_size->surface_size.x = 0;
3680 		plane_size->surface_size.y = 0;
3681 		plane_size->surface_size.width = fb->width;
3682 		plane_size->surface_size.height = fb->height;
3683 		plane_size->surface_pitch =
3684 			fb->pitches[0] / fb->format->cpp[0];
3685 
3686 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3687 		address->grph.addr.low_part = lower_32_bits(afb->address);
3688 		address->grph.addr.high_part = upper_32_bits(afb->address);
3689 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3690 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3691 
3692 		plane_size->surface_size.x = 0;
3693 		plane_size->surface_size.y = 0;
3694 		plane_size->surface_size.width = fb->width;
3695 		plane_size->surface_size.height = fb->height;
3696 		plane_size->surface_pitch =
3697 			fb->pitches[0] / fb->format->cpp[0];
3698 
3699 		plane_size->chroma_size.x = 0;
3700 		plane_size->chroma_size.y = 0;
3701 		/* TODO: set these based on surface format */
3702 		plane_size->chroma_size.width = fb->width / 2;
3703 		plane_size->chroma_size.height = fb->height / 2;
3704 
3705 		plane_size->chroma_pitch =
3706 			fb->pitches[1] / fb->format->cpp[1];
3707 
3708 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3709 		address->video_progressive.luma_addr.low_part =
3710 			lower_32_bits(afb->address);
3711 		address->video_progressive.luma_addr.high_part =
3712 			upper_32_bits(afb->address);
3713 		address->video_progressive.chroma_addr.low_part =
3714 			lower_32_bits(chroma_addr);
3715 		address->video_progressive.chroma_addr.high_part =
3716 			upper_32_bits(chroma_addr);
3717 	}
3718 
3719 	/* Fill GFX8 params */
3720 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3721 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3722 
3723 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3724 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3725 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3726 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3727 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3728 
3729 		/* XXX fix me for VI */
3730 		tiling_info->gfx8.num_banks = num_banks;
3731 		tiling_info->gfx8.array_mode =
3732 				DC_ARRAY_2D_TILED_THIN1;
3733 		tiling_info->gfx8.tile_split = tile_split;
3734 		tiling_info->gfx8.bank_width = bankw;
3735 		tiling_info->gfx8.bank_height = bankh;
3736 		tiling_info->gfx8.tile_aspect = mtaspect;
3737 		tiling_info->gfx8.tile_mode =
3738 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3739 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3740 			== DC_ARRAY_1D_TILED_THIN1) {
3741 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3742 	}
3743 
3744 	tiling_info->gfx8.pipe_config =
3745 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3746 
3747 	if (adev->asic_type == CHIP_VEGA10 ||
3748 	    adev->asic_type == CHIP_VEGA12 ||
3749 	    adev->asic_type == CHIP_VEGA20 ||
3750 	    adev->asic_type == CHIP_NAVI10 ||
3751 	    adev->asic_type == CHIP_NAVI14 ||
3752 	    adev->asic_type == CHIP_NAVI12 ||
3753 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3754 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3755 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3756 #endif
3757 	    adev->asic_type == CHIP_RENOIR ||
3758 	    adev->asic_type == CHIP_RAVEN) {
3759 		/* Fill GFX9 params */
3760 		tiling_info->gfx9.num_pipes =
3761 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3762 		tiling_info->gfx9.num_banks =
3763 			adev->gfx.config.gb_addr_config_fields.num_banks;
3764 		tiling_info->gfx9.pipe_interleave =
3765 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3766 		tiling_info->gfx9.num_shader_engines =
3767 			adev->gfx.config.gb_addr_config_fields.num_se;
3768 		tiling_info->gfx9.max_compressed_frags =
3769 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3770 		tiling_info->gfx9.num_rb_per_se =
3771 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3772 		tiling_info->gfx9.swizzle =
3773 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3774 		tiling_info->gfx9.shaderEnable = 1;
3775 
3776 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3777 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3778 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3779 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3780 #endif
3781 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3782 						plane_size, tiling_info,
3783 						tiling_flags, dcc, address,
3784 						force_disable_dcc);
3785 		if (ret)
3786 			return ret;
3787 	}
3788 
3789 	return 0;
3790 }
3791 
3792 static void
3793 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3794 			       bool *per_pixel_alpha, bool *global_alpha,
3795 			       int *global_alpha_value)
3796 {
3797 	*per_pixel_alpha = false;
3798 	*global_alpha = false;
3799 	*global_alpha_value = 0xff;
3800 
3801 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3802 		return;
3803 
3804 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3805 		static const uint32_t alpha_formats[] = {
3806 			DRM_FORMAT_ARGB8888,
3807 			DRM_FORMAT_RGBA8888,
3808 			DRM_FORMAT_ABGR8888,
3809 		};
3810 		uint32_t format = plane_state->fb->format->format;
3811 		unsigned int i;
3812 
3813 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3814 			if (format == alpha_formats[i]) {
3815 				*per_pixel_alpha = true;
3816 				break;
3817 			}
3818 		}
3819 	}
3820 
3821 	if (plane_state->alpha < 0xffff) {
3822 		*global_alpha = true;
3823 		*global_alpha_value = plane_state->alpha >> 8;
3824 	}
3825 }
3826 
3827 static int
3828 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3829 			    const enum surface_pixel_format format,
3830 			    enum dc_color_space *color_space)
3831 {
3832 	bool full_range;
3833 
3834 	*color_space = COLOR_SPACE_SRGB;
3835 
3836 	/* DRM color properties only affect non-RGB formats. */
3837 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3838 		return 0;
3839 
3840 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3841 
3842 	switch (plane_state->color_encoding) {
3843 	case DRM_COLOR_YCBCR_BT601:
3844 		if (full_range)
3845 			*color_space = COLOR_SPACE_YCBCR601;
3846 		else
3847 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3848 		break;
3849 
3850 	case DRM_COLOR_YCBCR_BT709:
3851 		if (full_range)
3852 			*color_space = COLOR_SPACE_YCBCR709;
3853 		else
3854 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3855 		break;
3856 
3857 	case DRM_COLOR_YCBCR_BT2020:
3858 		if (full_range)
3859 			*color_space = COLOR_SPACE_2020_YCBCR;
3860 		else
3861 			return -EINVAL;
3862 		break;
3863 
3864 	default:
3865 		return -EINVAL;
3866 	}
3867 
3868 	return 0;
3869 }
3870 
3871 static int
3872 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3873 			    const struct drm_plane_state *plane_state,
3874 			    const uint64_t tiling_flags,
3875 			    struct dc_plane_info *plane_info,
3876 			    struct dc_plane_address *address,
3877 			    bool tmz_surface,
3878 			    bool force_disable_dcc)
3879 {
3880 	const struct drm_framebuffer *fb = plane_state->fb;
3881 	const struct amdgpu_framebuffer *afb =
3882 		to_amdgpu_framebuffer(plane_state->fb);
3883 	struct drm_format_name_buf format_name;
3884 	int ret;
3885 
3886 	memset(plane_info, 0, sizeof(*plane_info));
3887 
3888 	switch (fb->format->format) {
3889 	case DRM_FORMAT_C8:
3890 		plane_info->format =
3891 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3892 		break;
3893 	case DRM_FORMAT_RGB565:
3894 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3895 		break;
3896 	case DRM_FORMAT_XRGB8888:
3897 	case DRM_FORMAT_ARGB8888:
3898 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3899 		break;
3900 	case DRM_FORMAT_XRGB2101010:
3901 	case DRM_FORMAT_ARGB2101010:
3902 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3903 		break;
3904 	case DRM_FORMAT_XBGR2101010:
3905 	case DRM_FORMAT_ABGR2101010:
3906 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3907 		break;
3908 	case DRM_FORMAT_XBGR8888:
3909 	case DRM_FORMAT_ABGR8888:
3910 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3911 		break;
3912 	case DRM_FORMAT_NV21:
3913 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3914 		break;
3915 	case DRM_FORMAT_NV12:
3916 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3917 		break;
3918 	case DRM_FORMAT_P010:
3919 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3920 		break;
3921 	case DRM_FORMAT_XRGB16161616F:
3922 	case DRM_FORMAT_ARGB16161616F:
3923 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3924 		break;
3925 	case DRM_FORMAT_XBGR16161616F:
3926 	case DRM_FORMAT_ABGR16161616F:
3927 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3928 		break;
3929 	default:
3930 		DRM_ERROR(
3931 			"Unsupported screen format %s\n",
3932 			drm_get_format_name(fb->format->format, &format_name));
3933 		return -EINVAL;
3934 	}
3935 
3936 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3937 	case DRM_MODE_ROTATE_0:
3938 		plane_info->rotation = ROTATION_ANGLE_0;
3939 		break;
3940 	case DRM_MODE_ROTATE_90:
3941 		plane_info->rotation = ROTATION_ANGLE_90;
3942 		break;
3943 	case DRM_MODE_ROTATE_180:
3944 		plane_info->rotation = ROTATION_ANGLE_180;
3945 		break;
3946 	case DRM_MODE_ROTATE_270:
3947 		plane_info->rotation = ROTATION_ANGLE_270;
3948 		break;
3949 	default:
3950 		plane_info->rotation = ROTATION_ANGLE_0;
3951 		break;
3952 	}
3953 
3954 	plane_info->visible = true;
3955 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3956 
3957 	plane_info->layer_index = 0;
3958 
3959 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3960 					  &plane_info->color_space);
3961 	if (ret)
3962 		return ret;
3963 
3964 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3965 					   plane_info->rotation, tiling_flags,
3966 					   &plane_info->tiling_info,
3967 					   &plane_info->plane_size,
3968 					   &plane_info->dcc, address, tmz_surface,
3969 					   force_disable_dcc);
3970 	if (ret)
3971 		return ret;
3972 
3973 	fill_blending_from_plane_state(
3974 		plane_state, &plane_info->per_pixel_alpha,
3975 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3976 
3977 	return 0;
3978 }
3979 
3980 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3981 				    struct dc_plane_state *dc_plane_state,
3982 				    struct drm_plane_state *plane_state,
3983 				    struct drm_crtc_state *crtc_state)
3984 {
3985 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3986 	const struct amdgpu_framebuffer *amdgpu_fb =
3987 		to_amdgpu_framebuffer(plane_state->fb);
3988 	struct dc_scaling_info scaling_info;
3989 	struct dc_plane_info plane_info;
3990 	uint64_t tiling_flags;
3991 	int ret;
3992 	bool tmz_surface = false;
3993 	bool force_disable_dcc = false;
3994 
3995 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3996 	if (ret)
3997 		return ret;
3998 
3999 	dc_plane_state->src_rect = scaling_info.src_rect;
4000 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4001 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4002 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4003 
4004 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
4005 	if (ret)
4006 		return ret;
4007 
4008 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4009 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
4010 					  &plane_info,
4011 					  &dc_plane_state->address,
4012 					  tmz_surface,
4013 					  force_disable_dcc);
4014 	if (ret)
4015 		return ret;
4016 
4017 	dc_plane_state->format = plane_info.format;
4018 	dc_plane_state->color_space = plane_info.color_space;
4019 	dc_plane_state->format = plane_info.format;
4020 	dc_plane_state->plane_size = plane_info.plane_size;
4021 	dc_plane_state->rotation = plane_info.rotation;
4022 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4023 	dc_plane_state->stereo_format = plane_info.stereo_format;
4024 	dc_plane_state->tiling_info = plane_info.tiling_info;
4025 	dc_plane_state->visible = plane_info.visible;
4026 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4027 	dc_plane_state->global_alpha = plane_info.global_alpha;
4028 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4029 	dc_plane_state->dcc = plane_info.dcc;
4030 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4031 
4032 	/*
4033 	 * Always set input transfer function, since plane state is refreshed
4034 	 * every time.
4035 	 */
4036 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4037 	if (ret)
4038 		return ret;
4039 
4040 	return 0;
4041 }
4042 
4043 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4044 					   const struct dm_connector_state *dm_state,
4045 					   struct dc_stream_state *stream)
4046 {
4047 	enum amdgpu_rmx_type rmx_type;
4048 
4049 	struct rect src = { 0 }; /* viewport in composition space*/
4050 	struct rect dst = { 0 }; /* stream addressable area */
4051 
4052 	/* no mode. nothing to be done */
4053 	if (!mode)
4054 		return;
4055 
4056 	/* Full screen scaling by default */
4057 	src.width = mode->hdisplay;
4058 	src.height = mode->vdisplay;
4059 	dst.width = stream->timing.h_addressable;
4060 	dst.height = stream->timing.v_addressable;
4061 
4062 	if (dm_state) {
4063 		rmx_type = dm_state->scaling;
4064 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4065 			if (src.width * dst.height <
4066 					src.height * dst.width) {
4067 				/* height needs less upscaling/more downscaling */
4068 				dst.width = src.width *
4069 						dst.height / src.height;
4070 			} else {
4071 				/* width needs less upscaling/more downscaling */
4072 				dst.height = src.height *
4073 						dst.width / src.width;
4074 			}
4075 		} else if (rmx_type == RMX_CENTER) {
4076 			dst = src;
4077 		}
4078 
4079 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4080 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4081 
4082 		if (dm_state->underscan_enable) {
4083 			dst.x += dm_state->underscan_hborder / 2;
4084 			dst.y += dm_state->underscan_vborder / 2;
4085 			dst.width -= dm_state->underscan_hborder;
4086 			dst.height -= dm_state->underscan_vborder;
4087 		}
4088 	}
4089 
4090 	stream->src = src;
4091 	stream->dst = dst;
4092 
4093 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4094 			dst.x, dst.y, dst.width, dst.height);
4095 
4096 }
4097 
4098 static enum dc_color_depth
4099 convert_color_depth_from_display_info(const struct drm_connector *connector,
4100 				      bool is_y420, int requested_bpc)
4101 {
4102 	uint8_t bpc;
4103 
4104 	if (is_y420) {
4105 		bpc = 8;
4106 
4107 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4108 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4109 			bpc = 16;
4110 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4111 			bpc = 12;
4112 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4113 			bpc = 10;
4114 	} else {
4115 		bpc = (uint8_t)connector->display_info.bpc;
4116 		/* Assume 8 bpc by default if no bpc is specified. */
4117 		bpc = bpc ? bpc : 8;
4118 	}
4119 
4120 	if (requested_bpc > 0) {
4121 		/*
4122 		 * Cap display bpc based on the user requested value.
4123 		 *
4124 		 * The value for state->max_bpc may not correctly updated
4125 		 * depending on when the connector gets added to the state
4126 		 * or if this was called outside of atomic check, so it
4127 		 * can't be used directly.
4128 		 */
4129 		bpc = min_t(u8, bpc, requested_bpc);
4130 
4131 		/* Round down to the nearest even number. */
4132 		bpc = bpc - (bpc & 1);
4133 	}
4134 
4135 	switch (bpc) {
4136 	case 0:
4137 		/*
4138 		 * Temporary Work around, DRM doesn't parse color depth for
4139 		 * EDID revision before 1.4
4140 		 * TODO: Fix edid parsing
4141 		 */
4142 		return COLOR_DEPTH_888;
4143 	case 6:
4144 		return COLOR_DEPTH_666;
4145 	case 8:
4146 		return COLOR_DEPTH_888;
4147 	case 10:
4148 		return COLOR_DEPTH_101010;
4149 	case 12:
4150 		return COLOR_DEPTH_121212;
4151 	case 14:
4152 		return COLOR_DEPTH_141414;
4153 	case 16:
4154 		return COLOR_DEPTH_161616;
4155 	default:
4156 		return COLOR_DEPTH_UNDEFINED;
4157 	}
4158 }
4159 
4160 static enum dc_aspect_ratio
4161 get_aspect_ratio(const struct drm_display_mode *mode_in)
4162 {
4163 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4164 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4165 }
4166 
4167 static enum dc_color_space
4168 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4169 {
4170 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4171 
4172 	switch (dc_crtc_timing->pixel_encoding)	{
4173 	case PIXEL_ENCODING_YCBCR422:
4174 	case PIXEL_ENCODING_YCBCR444:
4175 	case PIXEL_ENCODING_YCBCR420:
4176 	{
4177 		/*
4178 		 * 27030khz is the separation point between HDTV and SDTV
4179 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4180 		 * respectively
4181 		 */
4182 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4183 			if (dc_crtc_timing->flags.Y_ONLY)
4184 				color_space =
4185 					COLOR_SPACE_YCBCR709_LIMITED;
4186 			else
4187 				color_space = COLOR_SPACE_YCBCR709;
4188 		} else {
4189 			if (dc_crtc_timing->flags.Y_ONLY)
4190 				color_space =
4191 					COLOR_SPACE_YCBCR601_LIMITED;
4192 			else
4193 				color_space = COLOR_SPACE_YCBCR601;
4194 		}
4195 
4196 	}
4197 	break;
4198 	case PIXEL_ENCODING_RGB:
4199 		color_space = COLOR_SPACE_SRGB;
4200 		break;
4201 
4202 	default:
4203 		WARN_ON(1);
4204 		break;
4205 	}
4206 
4207 	return color_space;
4208 }
4209 
4210 static bool adjust_colour_depth_from_display_info(
4211 	struct dc_crtc_timing *timing_out,
4212 	const struct drm_display_info *info)
4213 {
4214 	enum dc_color_depth depth = timing_out->display_color_depth;
4215 	int normalized_clk;
4216 	do {
4217 		normalized_clk = timing_out->pix_clk_100hz / 10;
4218 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4219 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4220 			normalized_clk /= 2;
4221 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4222 		switch (depth) {
4223 		case COLOR_DEPTH_888:
4224 			break;
4225 		case COLOR_DEPTH_101010:
4226 			normalized_clk = (normalized_clk * 30) / 24;
4227 			break;
4228 		case COLOR_DEPTH_121212:
4229 			normalized_clk = (normalized_clk * 36) / 24;
4230 			break;
4231 		case COLOR_DEPTH_161616:
4232 			normalized_clk = (normalized_clk * 48) / 24;
4233 			break;
4234 		default:
4235 			/* The above depths are the only ones valid for HDMI. */
4236 			return false;
4237 		}
4238 		if (normalized_clk <= info->max_tmds_clock) {
4239 			timing_out->display_color_depth = depth;
4240 			return true;
4241 		}
4242 	} while (--depth > COLOR_DEPTH_666);
4243 	return false;
4244 }
4245 
4246 static void fill_stream_properties_from_drm_display_mode(
4247 	struct dc_stream_state *stream,
4248 	const struct drm_display_mode *mode_in,
4249 	const struct drm_connector *connector,
4250 	const struct drm_connector_state *connector_state,
4251 	const struct dc_stream_state *old_stream,
4252 	int requested_bpc)
4253 {
4254 	struct dc_crtc_timing *timing_out = &stream->timing;
4255 	const struct drm_display_info *info = &connector->display_info;
4256 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4257 	struct hdmi_vendor_infoframe hv_frame;
4258 	struct hdmi_avi_infoframe avi_frame;
4259 
4260 	memset(&hv_frame, 0, sizeof(hv_frame));
4261 	memset(&avi_frame, 0, sizeof(avi_frame));
4262 
4263 	timing_out->h_border_left = 0;
4264 	timing_out->h_border_right = 0;
4265 	timing_out->v_border_top = 0;
4266 	timing_out->v_border_bottom = 0;
4267 	/* TODO: un-hardcode */
4268 	if (drm_mode_is_420_only(info, mode_in)
4269 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4270 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4271 	else if (drm_mode_is_420_also(info, mode_in)
4272 			&& aconnector->force_yuv420_output)
4273 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4274 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4275 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4276 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4277 	else
4278 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4279 
4280 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4281 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4282 		connector,
4283 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4284 		requested_bpc);
4285 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4286 	timing_out->hdmi_vic = 0;
4287 
4288 	if(old_stream) {
4289 		timing_out->vic = old_stream->timing.vic;
4290 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4291 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4292 	} else {
4293 		timing_out->vic = drm_match_cea_mode(mode_in);
4294 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4295 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4296 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4297 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4298 	}
4299 
4300 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4301 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4302 		timing_out->vic = avi_frame.video_code;
4303 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4304 		timing_out->hdmi_vic = hv_frame.vic;
4305 	}
4306 
4307 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4308 	timing_out->h_total = mode_in->crtc_htotal;
4309 	timing_out->h_sync_width =
4310 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4311 	timing_out->h_front_porch =
4312 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4313 	timing_out->v_total = mode_in->crtc_vtotal;
4314 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4315 	timing_out->v_front_porch =
4316 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4317 	timing_out->v_sync_width =
4318 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4319 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4320 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4321 
4322 	stream->output_color_space = get_output_color_space(timing_out);
4323 
4324 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4325 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4326 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4327 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4328 		    drm_mode_is_420_also(info, mode_in) &&
4329 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4330 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4331 			adjust_colour_depth_from_display_info(timing_out, info);
4332 		}
4333 	}
4334 }
4335 
4336 static void fill_audio_info(struct audio_info *audio_info,
4337 			    const struct drm_connector *drm_connector,
4338 			    const struct dc_sink *dc_sink)
4339 {
4340 	int i = 0;
4341 	int cea_revision = 0;
4342 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4343 
4344 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4345 	audio_info->product_id = edid_caps->product_id;
4346 
4347 	cea_revision = drm_connector->display_info.cea_rev;
4348 
4349 	strscpy(audio_info->display_name,
4350 		edid_caps->display_name,
4351 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4352 
4353 	if (cea_revision >= 3) {
4354 		audio_info->mode_count = edid_caps->audio_mode_count;
4355 
4356 		for (i = 0; i < audio_info->mode_count; ++i) {
4357 			audio_info->modes[i].format_code =
4358 					(enum audio_format_code)
4359 					(edid_caps->audio_modes[i].format_code);
4360 			audio_info->modes[i].channel_count =
4361 					edid_caps->audio_modes[i].channel_count;
4362 			audio_info->modes[i].sample_rates.all =
4363 					edid_caps->audio_modes[i].sample_rate;
4364 			audio_info->modes[i].sample_size =
4365 					edid_caps->audio_modes[i].sample_size;
4366 		}
4367 	}
4368 
4369 	audio_info->flags.all = edid_caps->speaker_flags;
4370 
4371 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4372 	if (drm_connector->latency_present[0]) {
4373 		audio_info->video_latency = drm_connector->video_latency[0];
4374 		audio_info->audio_latency = drm_connector->audio_latency[0];
4375 	}
4376 
4377 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4378 
4379 }
4380 
4381 static void
4382 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4383 				      struct drm_display_mode *dst_mode)
4384 {
4385 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4386 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4387 	dst_mode->crtc_clock = src_mode->crtc_clock;
4388 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4389 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4390 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4391 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4392 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4393 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4394 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4395 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4396 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4397 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4398 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4399 }
4400 
4401 static void
4402 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4403 					const struct drm_display_mode *native_mode,
4404 					bool scale_enabled)
4405 {
4406 	if (scale_enabled) {
4407 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4408 	} else if (native_mode->clock == drm_mode->clock &&
4409 			native_mode->htotal == drm_mode->htotal &&
4410 			native_mode->vtotal == drm_mode->vtotal) {
4411 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4412 	} else {
4413 		/* no scaling nor amdgpu inserted, no need to patch */
4414 	}
4415 }
4416 
4417 static struct dc_sink *
4418 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4419 {
4420 	struct dc_sink_init_data sink_init_data = { 0 };
4421 	struct dc_sink *sink = NULL;
4422 	sink_init_data.link = aconnector->dc_link;
4423 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4424 
4425 	sink = dc_sink_create(&sink_init_data);
4426 	if (!sink) {
4427 		DRM_ERROR("Failed to create sink!\n");
4428 		return NULL;
4429 	}
4430 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4431 
4432 	return sink;
4433 }
4434 
4435 static void set_multisync_trigger_params(
4436 		struct dc_stream_state *stream)
4437 {
4438 	if (stream->triggered_crtc_reset.enabled) {
4439 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4440 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4441 	}
4442 }
4443 
4444 static void set_master_stream(struct dc_stream_state *stream_set[],
4445 			      int stream_count)
4446 {
4447 	int j, highest_rfr = 0, master_stream = 0;
4448 
4449 	for (j = 0;  j < stream_count; j++) {
4450 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4451 			int refresh_rate = 0;
4452 
4453 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4454 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4455 			if (refresh_rate > highest_rfr) {
4456 				highest_rfr = refresh_rate;
4457 				master_stream = j;
4458 			}
4459 		}
4460 	}
4461 	for (j = 0;  j < stream_count; j++) {
4462 		if (stream_set[j])
4463 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4464 	}
4465 }
4466 
4467 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4468 {
4469 	int i = 0;
4470 
4471 	if (context->stream_count < 2)
4472 		return;
4473 	for (i = 0; i < context->stream_count ; i++) {
4474 		if (!context->streams[i])
4475 			continue;
4476 		/*
4477 		 * TODO: add a function to read AMD VSDB bits and set
4478 		 * crtc_sync_master.multi_sync_enabled flag
4479 		 * For now it's set to false
4480 		 */
4481 		set_multisync_trigger_params(context->streams[i]);
4482 	}
4483 	set_master_stream(context->streams, context->stream_count);
4484 }
4485 
4486 static struct dc_stream_state *
4487 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4488 		       const struct drm_display_mode *drm_mode,
4489 		       const struct dm_connector_state *dm_state,
4490 		       const struct dc_stream_state *old_stream,
4491 		       int requested_bpc)
4492 {
4493 	struct drm_display_mode *preferred_mode = NULL;
4494 	struct drm_connector *drm_connector;
4495 	const struct drm_connector_state *con_state =
4496 		dm_state ? &dm_state->base : NULL;
4497 	struct dc_stream_state *stream = NULL;
4498 	struct drm_display_mode mode = *drm_mode;
4499 	bool native_mode_found = false;
4500 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4501 	int mode_refresh;
4502 	int preferred_refresh = 0;
4503 #if defined(CONFIG_DRM_AMD_DC_DCN)
4504 	struct dsc_dec_dpcd_caps dsc_caps;
4505 #endif
4506 	uint32_t link_bandwidth_kbps;
4507 
4508 	struct dc_sink *sink = NULL;
4509 	if (aconnector == NULL) {
4510 		DRM_ERROR("aconnector is NULL!\n");
4511 		return stream;
4512 	}
4513 
4514 	drm_connector = &aconnector->base;
4515 
4516 	if (!aconnector->dc_sink) {
4517 		sink = create_fake_sink(aconnector);
4518 		if (!sink)
4519 			return stream;
4520 	} else {
4521 		sink = aconnector->dc_sink;
4522 		dc_sink_retain(sink);
4523 	}
4524 
4525 	stream = dc_create_stream_for_sink(sink);
4526 
4527 	if (stream == NULL) {
4528 		DRM_ERROR("Failed to create stream for sink!\n");
4529 		goto finish;
4530 	}
4531 
4532 	stream->dm_stream_context = aconnector;
4533 
4534 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4535 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4536 
4537 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4538 		/* Search for preferred mode */
4539 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4540 			native_mode_found = true;
4541 			break;
4542 		}
4543 	}
4544 	if (!native_mode_found)
4545 		preferred_mode = list_first_entry_or_null(
4546 				&aconnector->base.modes,
4547 				struct drm_display_mode,
4548 				head);
4549 
4550 	mode_refresh = drm_mode_vrefresh(&mode);
4551 
4552 	if (preferred_mode == NULL) {
4553 		/*
4554 		 * This may not be an error, the use case is when we have no
4555 		 * usermode calls to reset and set mode upon hotplug. In this
4556 		 * case, we call set mode ourselves to restore the previous mode
4557 		 * and the modelist may not be filled in in time.
4558 		 */
4559 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4560 	} else {
4561 		decide_crtc_timing_for_drm_display_mode(
4562 				&mode, preferred_mode,
4563 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4564 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4565 	}
4566 
4567 	if (!dm_state)
4568 		drm_mode_set_crtcinfo(&mode, 0);
4569 
4570 	/*
4571 	* If scaling is enabled and refresh rate didn't change
4572 	* we copy the vic and polarities of the old timings
4573 	*/
4574 	if (!scale || mode_refresh != preferred_refresh)
4575 		fill_stream_properties_from_drm_display_mode(stream,
4576 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4577 	else
4578 		fill_stream_properties_from_drm_display_mode(stream,
4579 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4580 
4581 	stream->timing.flags.DSC = 0;
4582 
4583 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4584 #if defined(CONFIG_DRM_AMD_DC_DCN)
4585 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4586 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4587 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4588 				      &dsc_caps);
4589 #endif
4590 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4591 							     dc_link_get_link_cap(aconnector->dc_link));
4592 
4593 #if defined(CONFIG_DRM_AMD_DC_DCN)
4594 		if (dsc_caps.is_dsc_supported)
4595 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4596 						  &dsc_caps,
4597 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4598 						  link_bandwidth_kbps,
4599 						  &stream->timing,
4600 						  &stream->timing.dsc_cfg))
4601 				stream->timing.flags.DSC = 1;
4602 #endif
4603 	}
4604 
4605 	update_stream_scaling_settings(&mode, dm_state, stream);
4606 
4607 	fill_audio_info(
4608 		&stream->audio_info,
4609 		drm_connector,
4610 		sink);
4611 
4612 	update_stream_signal(stream, sink);
4613 
4614 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4615 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4616 	if (stream->link->psr_settings.psr_feature_enabled) {
4617 		//
4618 		// should decide stream support vsc sdp colorimetry capability
4619 		// before building vsc info packet
4620 		//
4621 		stream->use_vsc_sdp_for_colorimetry = false;
4622 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4623 			stream->use_vsc_sdp_for_colorimetry =
4624 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4625 		} else {
4626 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4627 				stream->use_vsc_sdp_for_colorimetry = true;
4628 		}
4629 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4630 	}
4631 finish:
4632 	dc_sink_release(sink);
4633 
4634 	return stream;
4635 }
4636 
4637 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4638 {
4639 	drm_crtc_cleanup(crtc);
4640 	kfree(crtc);
4641 }
4642 
4643 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4644 				  struct drm_crtc_state *state)
4645 {
4646 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4647 
4648 	/* TODO Destroy dc_stream objects are stream object is flattened */
4649 	if (cur->stream)
4650 		dc_stream_release(cur->stream);
4651 
4652 
4653 	__drm_atomic_helper_crtc_destroy_state(state);
4654 
4655 
4656 	kfree(state);
4657 }
4658 
4659 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4660 {
4661 	struct dm_crtc_state *state;
4662 
4663 	if (crtc->state)
4664 		dm_crtc_destroy_state(crtc, crtc->state);
4665 
4666 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4667 	if (WARN_ON(!state))
4668 		return;
4669 
4670 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4671 }
4672 
4673 static struct drm_crtc_state *
4674 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4675 {
4676 	struct dm_crtc_state *state, *cur;
4677 
4678 	cur = to_dm_crtc_state(crtc->state);
4679 
4680 	if (WARN_ON(!crtc->state))
4681 		return NULL;
4682 
4683 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4684 	if (!state)
4685 		return NULL;
4686 
4687 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4688 
4689 	if (cur->stream) {
4690 		state->stream = cur->stream;
4691 		dc_stream_retain(state->stream);
4692 	}
4693 
4694 	state->active_planes = cur->active_planes;
4695 	state->vrr_params = cur->vrr_params;
4696 	state->vrr_infopacket = cur->vrr_infopacket;
4697 	state->abm_level = cur->abm_level;
4698 	state->vrr_supported = cur->vrr_supported;
4699 	state->freesync_config = cur->freesync_config;
4700 	state->crc_src = cur->crc_src;
4701 	state->cm_has_degamma = cur->cm_has_degamma;
4702 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4703 
4704 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4705 
4706 	return &state->base;
4707 }
4708 
4709 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4710 {
4711 	enum dc_irq_source irq_source;
4712 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4713 	struct amdgpu_device *adev = crtc->dev->dev_private;
4714 	int rc;
4715 
4716 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4717 
4718 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4719 
4720 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4721 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4722 	return rc;
4723 }
4724 
4725 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4726 {
4727 	enum dc_irq_source irq_source;
4728 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4729 	struct amdgpu_device *adev = crtc->dev->dev_private;
4730 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4731 	int rc = 0;
4732 
4733 	if (enable) {
4734 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4735 		if (amdgpu_dm_vrr_active(acrtc_state))
4736 			rc = dm_set_vupdate_irq(crtc, true);
4737 	} else {
4738 		/* vblank irq off -> vupdate irq off */
4739 		rc = dm_set_vupdate_irq(crtc, false);
4740 	}
4741 
4742 	if (rc)
4743 		return rc;
4744 
4745 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4746 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4747 }
4748 
4749 static int dm_enable_vblank(struct drm_crtc *crtc)
4750 {
4751 	return dm_set_vblank(crtc, true);
4752 }
4753 
4754 static void dm_disable_vblank(struct drm_crtc *crtc)
4755 {
4756 	dm_set_vblank(crtc, false);
4757 }
4758 
4759 /* Implemented only the options currently availible for the driver */
4760 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4761 	.reset = dm_crtc_reset_state,
4762 	.destroy = amdgpu_dm_crtc_destroy,
4763 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4764 	.set_config = drm_atomic_helper_set_config,
4765 	.page_flip = drm_atomic_helper_page_flip,
4766 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4767 	.atomic_destroy_state = dm_crtc_destroy_state,
4768 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4769 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4770 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4771 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4772 	.enable_vblank = dm_enable_vblank,
4773 	.disable_vblank = dm_disable_vblank,
4774 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4775 };
4776 
4777 static enum drm_connector_status
4778 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4779 {
4780 	bool connected;
4781 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4782 
4783 	/*
4784 	 * Notes:
4785 	 * 1. This interface is NOT called in context of HPD irq.
4786 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4787 	 * makes it a bad place for *any* MST-related activity.
4788 	 */
4789 
4790 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4791 	    !aconnector->fake_enable)
4792 		connected = (aconnector->dc_sink != NULL);
4793 	else
4794 		connected = (aconnector->base.force == DRM_FORCE_ON);
4795 
4796 	update_subconnector_property(aconnector);
4797 
4798 	return (connected ? connector_status_connected :
4799 			connector_status_disconnected);
4800 }
4801 
4802 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4803 					    struct drm_connector_state *connector_state,
4804 					    struct drm_property *property,
4805 					    uint64_t val)
4806 {
4807 	struct drm_device *dev = connector->dev;
4808 	struct amdgpu_device *adev = dev->dev_private;
4809 	struct dm_connector_state *dm_old_state =
4810 		to_dm_connector_state(connector->state);
4811 	struct dm_connector_state *dm_new_state =
4812 		to_dm_connector_state(connector_state);
4813 
4814 	int ret = -EINVAL;
4815 
4816 	if (property == dev->mode_config.scaling_mode_property) {
4817 		enum amdgpu_rmx_type rmx_type;
4818 
4819 		switch (val) {
4820 		case DRM_MODE_SCALE_CENTER:
4821 			rmx_type = RMX_CENTER;
4822 			break;
4823 		case DRM_MODE_SCALE_ASPECT:
4824 			rmx_type = RMX_ASPECT;
4825 			break;
4826 		case DRM_MODE_SCALE_FULLSCREEN:
4827 			rmx_type = RMX_FULL;
4828 			break;
4829 		case DRM_MODE_SCALE_NONE:
4830 		default:
4831 			rmx_type = RMX_OFF;
4832 			break;
4833 		}
4834 
4835 		if (dm_old_state->scaling == rmx_type)
4836 			return 0;
4837 
4838 		dm_new_state->scaling = rmx_type;
4839 		ret = 0;
4840 	} else if (property == adev->mode_info.underscan_hborder_property) {
4841 		dm_new_state->underscan_hborder = val;
4842 		ret = 0;
4843 	} else if (property == adev->mode_info.underscan_vborder_property) {
4844 		dm_new_state->underscan_vborder = val;
4845 		ret = 0;
4846 	} else if (property == adev->mode_info.underscan_property) {
4847 		dm_new_state->underscan_enable = val;
4848 		ret = 0;
4849 	} else if (property == adev->mode_info.abm_level_property) {
4850 		dm_new_state->abm_level = val;
4851 		ret = 0;
4852 	}
4853 
4854 	return ret;
4855 }
4856 
4857 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4858 					    const struct drm_connector_state *state,
4859 					    struct drm_property *property,
4860 					    uint64_t *val)
4861 {
4862 	struct drm_device *dev = connector->dev;
4863 	struct amdgpu_device *adev = dev->dev_private;
4864 	struct dm_connector_state *dm_state =
4865 		to_dm_connector_state(state);
4866 	int ret = -EINVAL;
4867 
4868 	if (property == dev->mode_config.scaling_mode_property) {
4869 		switch (dm_state->scaling) {
4870 		case RMX_CENTER:
4871 			*val = DRM_MODE_SCALE_CENTER;
4872 			break;
4873 		case RMX_ASPECT:
4874 			*val = DRM_MODE_SCALE_ASPECT;
4875 			break;
4876 		case RMX_FULL:
4877 			*val = DRM_MODE_SCALE_FULLSCREEN;
4878 			break;
4879 		case RMX_OFF:
4880 		default:
4881 			*val = DRM_MODE_SCALE_NONE;
4882 			break;
4883 		}
4884 		ret = 0;
4885 	} else if (property == adev->mode_info.underscan_hborder_property) {
4886 		*val = dm_state->underscan_hborder;
4887 		ret = 0;
4888 	} else if (property == adev->mode_info.underscan_vborder_property) {
4889 		*val = dm_state->underscan_vborder;
4890 		ret = 0;
4891 	} else if (property == adev->mode_info.underscan_property) {
4892 		*val = dm_state->underscan_enable;
4893 		ret = 0;
4894 	} else if (property == adev->mode_info.abm_level_property) {
4895 		*val = dm_state->abm_level;
4896 		ret = 0;
4897 	}
4898 
4899 	return ret;
4900 }
4901 
4902 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4903 {
4904 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4905 
4906 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4907 }
4908 
4909 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4910 {
4911 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4912 	const struct dc_link *link = aconnector->dc_link;
4913 	struct amdgpu_device *adev = connector->dev->dev_private;
4914 	struct amdgpu_display_manager *dm = &adev->dm;
4915 
4916 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4917 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4918 
4919 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4920 	    link->type != dc_connection_none &&
4921 	    dm->backlight_dev) {
4922 		backlight_device_unregister(dm->backlight_dev);
4923 		dm->backlight_dev = NULL;
4924 	}
4925 #endif
4926 
4927 	if (aconnector->dc_em_sink)
4928 		dc_sink_release(aconnector->dc_em_sink);
4929 	aconnector->dc_em_sink = NULL;
4930 	if (aconnector->dc_sink)
4931 		dc_sink_release(aconnector->dc_sink);
4932 	aconnector->dc_sink = NULL;
4933 
4934 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4935 	drm_connector_unregister(connector);
4936 	drm_connector_cleanup(connector);
4937 	if (aconnector->i2c) {
4938 		i2c_del_adapter(&aconnector->i2c->base);
4939 		kfree(aconnector->i2c);
4940 	}
4941 	kfree(aconnector->dm_dp_aux.aux.name);
4942 
4943 	kfree(connector);
4944 }
4945 
4946 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4947 {
4948 	struct dm_connector_state *state =
4949 		to_dm_connector_state(connector->state);
4950 
4951 	if (connector->state)
4952 		__drm_atomic_helper_connector_destroy_state(connector->state);
4953 
4954 	kfree(state);
4955 
4956 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4957 
4958 	if (state) {
4959 		state->scaling = RMX_OFF;
4960 		state->underscan_enable = false;
4961 		state->underscan_hborder = 0;
4962 		state->underscan_vborder = 0;
4963 		state->base.max_requested_bpc = 8;
4964 		state->vcpi_slots = 0;
4965 		state->pbn = 0;
4966 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4967 			state->abm_level = amdgpu_dm_abm_level;
4968 
4969 		__drm_atomic_helper_connector_reset(connector, &state->base);
4970 	}
4971 }
4972 
4973 struct drm_connector_state *
4974 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4975 {
4976 	struct dm_connector_state *state =
4977 		to_dm_connector_state(connector->state);
4978 
4979 	struct dm_connector_state *new_state =
4980 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4981 
4982 	if (!new_state)
4983 		return NULL;
4984 
4985 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4986 
4987 	new_state->freesync_capable = state->freesync_capable;
4988 	new_state->abm_level = state->abm_level;
4989 	new_state->scaling = state->scaling;
4990 	new_state->underscan_enable = state->underscan_enable;
4991 	new_state->underscan_hborder = state->underscan_hborder;
4992 	new_state->underscan_vborder = state->underscan_vborder;
4993 	new_state->vcpi_slots = state->vcpi_slots;
4994 	new_state->pbn = state->pbn;
4995 	return &new_state->base;
4996 }
4997 
4998 static int
4999 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5000 {
5001 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5002 		to_amdgpu_dm_connector(connector);
5003 	int r;
5004 
5005 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5006 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5007 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5008 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5009 		if (r)
5010 			return r;
5011 	}
5012 
5013 #if defined(CONFIG_DEBUG_FS)
5014 	connector_debugfs_init(amdgpu_dm_connector);
5015 #endif
5016 
5017 	return 0;
5018 }
5019 
5020 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5021 	.reset = amdgpu_dm_connector_funcs_reset,
5022 	.detect = amdgpu_dm_connector_detect,
5023 	.fill_modes = drm_helper_probe_single_connector_modes,
5024 	.destroy = amdgpu_dm_connector_destroy,
5025 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5026 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5027 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5028 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5029 	.late_register = amdgpu_dm_connector_late_register,
5030 	.early_unregister = amdgpu_dm_connector_unregister
5031 };
5032 
5033 static int get_modes(struct drm_connector *connector)
5034 {
5035 	return amdgpu_dm_connector_get_modes(connector);
5036 }
5037 
5038 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5039 {
5040 	struct dc_sink_init_data init_params = {
5041 			.link = aconnector->dc_link,
5042 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5043 	};
5044 	struct edid *edid;
5045 
5046 	if (!aconnector->base.edid_blob_ptr) {
5047 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5048 				aconnector->base.name);
5049 
5050 		aconnector->base.force = DRM_FORCE_OFF;
5051 		aconnector->base.override_edid = false;
5052 		return;
5053 	}
5054 
5055 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5056 
5057 	aconnector->edid = edid;
5058 
5059 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5060 		aconnector->dc_link,
5061 		(uint8_t *)edid,
5062 		(edid->extensions + 1) * EDID_LENGTH,
5063 		&init_params);
5064 
5065 	if (aconnector->base.force == DRM_FORCE_ON) {
5066 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5067 		aconnector->dc_link->local_sink :
5068 		aconnector->dc_em_sink;
5069 		dc_sink_retain(aconnector->dc_sink);
5070 	}
5071 }
5072 
5073 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5074 {
5075 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5076 
5077 	/*
5078 	 * In case of headless boot with force on for DP managed connector
5079 	 * Those settings have to be != 0 to get initial modeset
5080 	 */
5081 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5082 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5083 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5084 	}
5085 
5086 
5087 	aconnector->base.override_edid = true;
5088 	create_eml_sink(aconnector);
5089 }
5090 
5091 static struct dc_stream_state *
5092 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5093 				const struct drm_display_mode *drm_mode,
5094 				const struct dm_connector_state *dm_state,
5095 				const struct dc_stream_state *old_stream)
5096 {
5097 	struct drm_connector *connector = &aconnector->base;
5098 	struct amdgpu_device *adev = connector->dev->dev_private;
5099 	struct dc_stream_state *stream;
5100 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5101 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5102 	enum dc_status dc_result = DC_OK;
5103 
5104 	do {
5105 		stream = create_stream_for_sink(aconnector, drm_mode,
5106 						dm_state, old_stream,
5107 						requested_bpc);
5108 		if (stream == NULL) {
5109 			DRM_ERROR("Failed to create stream for sink!\n");
5110 			break;
5111 		}
5112 
5113 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5114 
5115 		if (dc_result != DC_OK) {
5116 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5117 				      drm_mode->hdisplay,
5118 				      drm_mode->vdisplay,
5119 				      drm_mode->clock,
5120 				      dc_result,
5121 				      dc_status_to_str(dc_result));
5122 
5123 			dc_stream_release(stream);
5124 			stream = NULL;
5125 			requested_bpc -= 2; /* lower bpc to retry validation */
5126 		}
5127 
5128 	} while (stream == NULL && requested_bpc >= 6);
5129 
5130 	return stream;
5131 }
5132 
5133 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5134 				   struct drm_display_mode *mode)
5135 {
5136 	int result = MODE_ERROR;
5137 	struct dc_sink *dc_sink;
5138 	/* TODO: Unhardcode stream count */
5139 	struct dc_stream_state *stream;
5140 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5141 
5142 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5143 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5144 		return result;
5145 
5146 	/*
5147 	 * Only run this the first time mode_valid is called to initilialize
5148 	 * EDID mgmt
5149 	 */
5150 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5151 		!aconnector->dc_em_sink)
5152 		handle_edid_mgmt(aconnector);
5153 
5154 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5155 
5156 	if (dc_sink == NULL) {
5157 		DRM_ERROR("dc_sink is NULL!\n");
5158 		goto fail;
5159 	}
5160 
5161 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5162 	if (stream) {
5163 		dc_stream_release(stream);
5164 		result = MODE_OK;
5165 	}
5166 
5167 fail:
5168 	/* TODO: error handling*/
5169 	return result;
5170 }
5171 
5172 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5173 				struct dc_info_packet *out)
5174 {
5175 	struct hdmi_drm_infoframe frame;
5176 	unsigned char buf[30]; /* 26 + 4 */
5177 	ssize_t len;
5178 	int ret, i;
5179 
5180 	memset(out, 0, sizeof(*out));
5181 
5182 	if (!state->hdr_output_metadata)
5183 		return 0;
5184 
5185 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5186 	if (ret)
5187 		return ret;
5188 
5189 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5190 	if (len < 0)
5191 		return (int)len;
5192 
5193 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5194 	if (len != 30)
5195 		return -EINVAL;
5196 
5197 	/* Prepare the infopacket for DC. */
5198 	switch (state->connector->connector_type) {
5199 	case DRM_MODE_CONNECTOR_HDMIA:
5200 		out->hb0 = 0x87; /* type */
5201 		out->hb1 = 0x01; /* version */
5202 		out->hb2 = 0x1A; /* length */
5203 		out->sb[0] = buf[3]; /* checksum */
5204 		i = 1;
5205 		break;
5206 
5207 	case DRM_MODE_CONNECTOR_DisplayPort:
5208 	case DRM_MODE_CONNECTOR_eDP:
5209 		out->hb0 = 0x00; /* sdp id, zero */
5210 		out->hb1 = 0x87; /* type */
5211 		out->hb2 = 0x1D; /* payload len - 1 */
5212 		out->hb3 = (0x13 << 2); /* sdp version */
5213 		out->sb[0] = 0x01; /* version */
5214 		out->sb[1] = 0x1A; /* length */
5215 		i = 2;
5216 		break;
5217 
5218 	default:
5219 		return -EINVAL;
5220 	}
5221 
5222 	memcpy(&out->sb[i], &buf[4], 26);
5223 	out->valid = true;
5224 
5225 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5226 		       sizeof(out->sb), false);
5227 
5228 	return 0;
5229 }
5230 
5231 static bool
5232 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5233 			  const struct drm_connector_state *new_state)
5234 {
5235 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5236 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5237 
5238 	if (old_blob != new_blob) {
5239 		if (old_blob && new_blob &&
5240 		    old_blob->length == new_blob->length)
5241 			return memcmp(old_blob->data, new_blob->data,
5242 				      old_blob->length);
5243 
5244 		return true;
5245 	}
5246 
5247 	return false;
5248 }
5249 
5250 static int
5251 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5252 				 struct drm_atomic_state *state)
5253 {
5254 	struct drm_connector_state *new_con_state =
5255 		drm_atomic_get_new_connector_state(state, conn);
5256 	struct drm_connector_state *old_con_state =
5257 		drm_atomic_get_old_connector_state(state, conn);
5258 	struct drm_crtc *crtc = new_con_state->crtc;
5259 	struct drm_crtc_state *new_crtc_state;
5260 	int ret;
5261 
5262 	if (!crtc)
5263 		return 0;
5264 
5265 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5266 		struct dc_info_packet hdr_infopacket;
5267 
5268 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5269 		if (ret)
5270 			return ret;
5271 
5272 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5273 		if (IS_ERR(new_crtc_state))
5274 			return PTR_ERR(new_crtc_state);
5275 
5276 		/*
5277 		 * DC considers the stream backends changed if the
5278 		 * static metadata changes. Forcing the modeset also
5279 		 * gives a simple way for userspace to switch from
5280 		 * 8bpc to 10bpc when setting the metadata to enter
5281 		 * or exit HDR.
5282 		 *
5283 		 * Changing the static metadata after it's been
5284 		 * set is permissible, however. So only force a
5285 		 * modeset if we're entering or exiting HDR.
5286 		 */
5287 		new_crtc_state->mode_changed =
5288 			!old_con_state->hdr_output_metadata ||
5289 			!new_con_state->hdr_output_metadata;
5290 	}
5291 
5292 	return 0;
5293 }
5294 
5295 static const struct drm_connector_helper_funcs
5296 amdgpu_dm_connector_helper_funcs = {
5297 	/*
5298 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5299 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5300 	 * are missing after user start lightdm. So we need to renew modes list.
5301 	 * in get_modes call back, not just return the modes count
5302 	 */
5303 	.get_modes = get_modes,
5304 	.mode_valid = amdgpu_dm_connector_mode_valid,
5305 	.atomic_check = amdgpu_dm_connector_atomic_check,
5306 };
5307 
5308 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5309 {
5310 }
5311 
5312 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5313 {
5314 	struct drm_device *dev = new_crtc_state->crtc->dev;
5315 	struct drm_plane *plane;
5316 
5317 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5318 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5319 			return true;
5320 	}
5321 
5322 	return false;
5323 }
5324 
5325 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5326 {
5327 	struct drm_atomic_state *state = new_crtc_state->state;
5328 	struct drm_plane *plane;
5329 	int num_active = 0;
5330 
5331 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5332 		struct drm_plane_state *new_plane_state;
5333 
5334 		/* Cursor planes are "fake". */
5335 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5336 			continue;
5337 
5338 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5339 
5340 		if (!new_plane_state) {
5341 			/*
5342 			 * The plane is enable on the CRTC and hasn't changed
5343 			 * state. This means that it previously passed
5344 			 * validation and is therefore enabled.
5345 			 */
5346 			num_active += 1;
5347 			continue;
5348 		}
5349 
5350 		/* We need a framebuffer to be considered enabled. */
5351 		num_active += (new_plane_state->fb != NULL);
5352 	}
5353 
5354 	return num_active;
5355 }
5356 
5357 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5358 					 struct drm_crtc_state *new_crtc_state)
5359 {
5360 	struct dm_crtc_state *dm_new_crtc_state =
5361 		to_dm_crtc_state(new_crtc_state);
5362 
5363 	dm_new_crtc_state->active_planes = 0;
5364 
5365 	if (!dm_new_crtc_state->stream)
5366 		return;
5367 
5368 	dm_new_crtc_state->active_planes =
5369 		count_crtc_active_planes(new_crtc_state);
5370 }
5371 
5372 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5373 				       struct drm_crtc_state *state)
5374 {
5375 	struct amdgpu_device *adev = crtc->dev->dev_private;
5376 	struct dc *dc = adev->dm.dc;
5377 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5378 	int ret = -EINVAL;
5379 
5380 	dm_update_crtc_active_planes(crtc, state);
5381 
5382 	if (unlikely(!dm_crtc_state->stream &&
5383 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5384 		WARN_ON(1);
5385 		return ret;
5386 	}
5387 
5388 	/* In some use cases, like reset, no stream is attached */
5389 	if (!dm_crtc_state->stream)
5390 		return 0;
5391 
5392 	/*
5393 	 * We want at least one hardware plane enabled to use
5394 	 * the stream with a cursor enabled.
5395 	 */
5396 	if (state->enable && state->active &&
5397 	    does_crtc_have_active_cursor(state) &&
5398 	    dm_crtc_state->active_planes == 0)
5399 		return -EINVAL;
5400 
5401 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5402 		return 0;
5403 
5404 	return ret;
5405 }
5406 
5407 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5408 				      const struct drm_display_mode *mode,
5409 				      struct drm_display_mode *adjusted_mode)
5410 {
5411 	return true;
5412 }
5413 
5414 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5415 	.disable = dm_crtc_helper_disable,
5416 	.atomic_check = dm_crtc_helper_atomic_check,
5417 	.mode_fixup = dm_crtc_helper_mode_fixup,
5418 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5419 };
5420 
5421 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5422 {
5423 
5424 }
5425 
5426 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5427 {
5428 	switch (display_color_depth) {
5429 		case COLOR_DEPTH_666:
5430 			return 6;
5431 		case COLOR_DEPTH_888:
5432 			return 8;
5433 		case COLOR_DEPTH_101010:
5434 			return 10;
5435 		case COLOR_DEPTH_121212:
5436 			return 12;
5437 		case COLOR_DEPTH_141414:
5438 			return 14;
5439 		case COLOR_DEPTH_161616:
5440 			return 16;
5441 		default:
5442 			break;
5443 		}
5444 	return 0;
5445 }
5446 
5447 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5448 					  struct drm_crtc_state *crtc_state,
5449 					  struct drm_connector_state *conn_state)
5450 {
5451 	struct drm_atomic_state *state = crtc_state->state;
5452 	struct drm_connector *connector = conn_state->connector;
5453 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5454 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5455 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5456 	struct drm_dp_mst_topology_mgr *mst_mgr;
5457 	struct drm_dp_mst_port *mst_port;
5458 	enum dc_color_depth color_depth;
5459 	int clock, bpp = 0;
5460 	bool is_y420 = false;
5461 
5462 	if (!aconnector->port || !aconnector->dc_sink)
5463 		return 0;
5464 
5465 	mst_port = aconnector->port;
5466 	mst_mgr = &aconnector->mst_port->mst_mgr;
5467 
5468 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5469 		return 0;
5470 
5471 	if (!state->duplicated) {
5472 		int max_bpc = conn_state->max_requested_bpc;
5473 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5474 				aconnector->force_yuv420_output;
5475 		color_depth = convert_color_depth_from_display_info(connector,
5476 								    is_y420,
5477 								    max_bpc);
5478 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5479 		clock = adjusted_mode->clock;
5480 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5481 	}
5482 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5483 									   mst_mgr,
5484 									   mst_port,
5485 									   dm_new_connector_state->pbn,
5486 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5487 	if (dm_new_connector_state->vcpi_slots < 0) {
5488 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5489 		return dm_new_connector_state->vcpi_slots;
5490 	}
5491 	return 0;
5492 }
5493 
5494 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5495 	.disable = dm_encoder_helper_disable,
5496 	.atomic_check = dm_encoder_helper_atomic_check
5497 };
5498 
5499 #if defined(CONFIG_DRM_AMD_DC_DCN)
5500 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5501 					    struct dc_state *dc_state)
5502 {
5503 	struct dc_stream_state *stream = NULL;
5504 	struct drm_connector *connector;
5505 	struct drm_connector_state *new_con_state, *old_con_state;
5506 	struct amdgpu_dm_connector *aconnector;
5507 	struct dm_connector_state *dm_conn_state;
5508 	int i, j, clock, bpp;
5509 	int vcpi, pbn_div, pbn = 0;
5510 
5511 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5512 
5513 		aconnector = to_amdgpu_dm_connector(connector);
5514 
5515 		if (!aconnector->port)
5516 			continue;
5517 
5518 		if (!new_con_state || !new_con_state->crtc)
5519 			continue;
5520 
5521 		dm_conn_state = to_dm_connector_state(new_con_state);
5522 
5523 		for (j = 0; j < dc_state->stream_count; j++) {
5524 			stream = dc_state->streams[j];
5525 			if (!stream)
5526 				continue;
5527 
5528 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5529 				break;
5530 
5531 			stream = NULL;
5532 		}
5533 
5534 		if (!stream)
5535 			continue;
5536 
5537 		if (stream->timing.flags.DSC != 1) {
5538 			drm_dp_mst_atomic_enable_dsc(state,
5539 						     aconnector->port,
5540 						     dm_conn_state->pbn,
5541 						     0,
5542 						     false);
5543 			continue;
5544 		}
5545 
5546 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5547 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5548 		clock = stream->timing.pix_clk_100hz / 10;
5549 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5550 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5551 						    aconnector->port,
5552 						    pbn, pbn_div,
5553 						    true);
5554 		if (vcpi < 0)
5555 			return vcpi;
5556 
5557 		dm_conn_state->pbn = pbn;
5558 		dm_conn_state->vcpi_slots = vcpi;
5559 	}
5560 	return 0;
5561 }
5562 #endif
5563 
5564 static void dm_drm_plane_reset(struct drm_plane *plane)
5565 {
5566 	struct dm_plane_state *amdgpu_state = NULL;
5567 
5568 	if (plane->state)
5569 		plane->funcs->atomic_destroy_state(plane, plane->state);
5570 
5571 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5572 	WARN_ON(amdgpu_state == NULL);
5573 
5574 	if (amdgpu_state)
5575 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5576 }
5577 
5578 static struct drm_plane_state *
5579 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5580 {
5581 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5582 
5583 	old_dm_plane_state = to_dm_plane_state(plane->state);
5584 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5585 	if (!dm_plane_state)
5586 		return NULL;
5587 
5588 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5589 
5590 	if (old_dm_plane_state->dc_state) {
5591 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5592 		dc_plane_state_retain(dm_plane_state->dc_state);
5593 	}
5594 
5595 	return &dm_plane_state->base;
5596 }
5597 
5598 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5599 				struct drm_plane_state *state)
5600 {
5601 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5602 
5603 	if (dm_plane_state->dc_state)
5604 		dc_plane_state_release(dm_plane_state->dc_state);
5605 
5606 	drm_atomic_helper_plane_destroy_state(plane, state);
5607 }
5608 
5609 static const struct drm_plane_funcs dm_plane_funcs = {
5610 	.update_plane	= drm_atomic_helper_update_plane,
5611 	.disable_plane	= drm_atomic_helper_disable_plane,
5612 	.destroy	= drm_primary_helper_destroy,
5613 	.reset = dm_drm_plane_reset,
5614 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5615 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5616 };
5617 
5618 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5619 				      struct drm_plane_state *new_state)
5620 {
5621 	struct amdgpu_framebuffer *afb;
5622 	struct drm_gem_object *obj;
5623 	struct amdgpu_device *adev;
5624 	struct amdgpu_bo *rbo;
5625 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5626 	struct list_head list;
5627 	struct ttm_validate_buffer tv;
5628 	struct ww_acquire_ctx ticket;
5629 	uint64_t tiling_flags;
5630 	uint32_t domain;
5631 	int r;
5632 	bool tmz_surface = false;
5633 	bool force_disable_dcc = false;
5634 
5635 	dm_plane_state_old = to_dm_plane_state(plane->state);
5636 	dm_plane_state_new = to_dm_plane_state(new_state);
5637 
5638 	if (!new_state->fb) {
5639 		DRM_DEBUG_DRIVER("No FB bound\n");
5640 		return 0;
5641 	}
5642 
5643 	afb = to_amdgpu_framebuffer(new_state->fb);
5644 	obj = new_state->fb->obj[0];
5645 	rbo = gem_to_amdgpu_bo(obj);
5646 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5647 	INIT_LIST_HEAD(&list);
5648 
5649 	tv.bo = &rbo->tbo;
5650 	tv.num_shared = 1;
5651 	list_add(&tv.head, &list);
5652 
5653 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5654 	if (r) {
5655 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5656 		return r;
5657 	}
5658 
5659 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5660 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5661 	else
5662 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5663 
5664 	r = amdgpu_bo_pin(rbo, domain);
5665 	if (unlikely(r != 0)) {
5666 		if (r != -ERESTARTSYS)
5667 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5668 		ttm_eu_backoff_reservation(&ticket, &list);
5669 		return r;
5670 	}
5671 
5672 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5673 	if (unlikely(r != 0)) {
5674 		amdgpu_bo_unpin(rbo);
5675 		ttm_eu_backoff_reservation(&ticket, &list);
5676 		DRM_ERROR("%p bind failed\n", rbo);
5677 		return r;
5678 	}
5679 
5680 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5681 
5682 	tmz_surface = amdgpu_bo_encrypted(rbo);
5683 
5684 	ttm_eu_backoff_reservation(&ticket, &list);
5685 
5686 	afb->address = amdgpu_bo_gpu_offset(rbo);
5687 
5688 	amdgpu_bo_ref(rbo);
5689 
5690 	if (dm_plane_state_new->dc_state &&
5691 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5692 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5693 
5694 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5695 		fill_plane_buffer_attributes(
5696 			adev, afb, plane_state->format, plane_state->rotation,
5697 			tiling_flags, &plane_state->tiling_info,
5698 			&plane_state->plane_size, &plane_state->dcc,
5699 			&plane_state->address, tmz_surface,
5700 			force_disable_dcc);
5701 	}
5702 
5703 	return 0;
5704 }
5705 
5706 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5707 				       struct drm_plane_state *old_state)
5708 {
5709 	struct amdgpu_bo *rbo;
5710 	int r;
5711 
5712 	if (!old_state->fb)
5713 		return;
5714 
5715 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5716 	r = amdgpu_bo_reserve(rbo, false);
5717 	if (unlikely(r)) {
5718 		DRM_ERROR("failed to reserve rbo before unpin\n");
5719 		return;
5720 	}
5721 
5722 	amdgpu_bo_unpin(rbo);
5723 	amdgpu_bo_unreserve(rbo);
5724 	amdgpu_bo_unref(&rbo);
5725 }
5726 
5727 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5728 				       struct drm_crtc_state *new_crtc_state)
5729 {
5730 	int max_downscale = 0;
5731 	int max_upscale = INT_MAX;
5732 
5733 	/* TODO: These should be checked against DC plane caps */
5734 	return drm_atomic_helper_check_plane_state(
5735 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5736 }
5737 
5738 static int dm_plane_atomic_check(struct drm_plane *plane,
5739 				 struct drm_plane_state *state)
5740 {
5741 	struct amdgpu_device *adev = plane->dev->dev_private;
5742 	struct dc *dc = adev->dm.dc;
5743 	struct dm_plane_state *dm_plane_state;
5744 	struct dc_scaling_info scaling_info;
5745 	struct drm_crtc_state *new_crtc_state;
5746 	int ret;
5747 
5748 	dm_plane_state = to_dm_plane_state(state);
5749 
5750 	if (!dm_plane_state->dc_state)
5751 		return 0;
5752 
5753 	new_crtc_state =
5754 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5755 	if (!new_crtc_state)
5756 		return -EINVAL;
5757 
5758 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5759 	if (ret)
5760 		return ret;
5761 
5762 	ret = fill_dc_scaling_info(state, &scaling_info);
5763 	if (ret)
5764 		return ret;
5765 
5766 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5767 		return 0;
5768 
5769 	return -EINVAL;
5770 }
5771 
5772 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5773 				       struct drm_plane_state *new_plane_state)
5774 {
5775 	/* Only support async updates on cursor planes. */
5776 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5777 		return -EINVAL;
5778 
5779 	return 0;
5780 }
5781 
5782 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5783 					 struct drm_plane_state *new_state)
5784 {
5785 	struct drm_plane_state *old_state =
5786 		drm_atomic_get_old_plane_state(new_state->state, plane);
5787 
5788 	swap(plane->state->fb, new_state->fb);
5789 
5790 	plane->state->src_x = new_state->src_x;
5791 	plane->state->src_y = new_state->src_y;
5792 	plane->state->src_w = new_state->src_w;
5793 	plane->state->src_h = new_state->src_h;
5794 	plane->state->crtc_x = new_state->crtc_x;
5795 	plane->state->crtc_y = new_state->crtc_y;
5796 	plane->state->crtc_w = new_state->crtc_w;
5797 	plane->state->crtc_h = new_state->crtc_h;
5798 
5799 	handle_cursor_update(plane, old_state);
5800 }
5801 
5802 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5803 	.prepare_fb = dm_plane_helper_prepare_fb,
5804 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5805 	.atomic_check = dm_plane_atomic_check,
5806 	.atomic_async_check = dm_plane_atomic_async_check,
5807 	.atomic_async_update = dm_plane_atomic_async_update
5808 };
5809 
5810 /*
5811  * TODO: these are currently initialized to rgb formats only.
5812  * For future use cases we should either initialize them dynamically based on
5813  * plane capabilities, or initialize this array to all formats, so internal drm
5814  * check will succeed, and let DC implement proper check
5815  */
5816 static const uint32_t rgb_formats[] = {
5817 	DRM_FORMAT_XRGB8888,
5818 	DRM_FORMAT_ARGB8888,
5819 	DRM_FORMAT_RGBA8888,
5820 	DRM_FORMAT_XRGB2101010,
5821 	DRM_FORMAT_XBGR2101010,
5822 	DRM_FORMAT_ARGB2101010,
5823 	DRM_FORMAT_ABGR2101010,
5824 	DRM_FORMAT_XBGR8888,
5825 	DRM_FORMAT_ABGR8888,
5826 	DRM_FORMAT_RGB565,
5827 };
5828 
5829 static const uint32_t overlay_formats[] = {
5830 	DRM_FORMAT_XRGB8888,
5831 	DRM_FORMAT_ARGB8888,
5832 	DRM_FORMAT_RGBA8888,
5833 	DRM_FORMAT_XBGR8888,
5834 	DRM_FORMAT_ABGR8888,
5835 	DRM_FORMAT_RGB565
5836 };
5837 
5838 static const u32 cursor_formats[] = {
5839 	DRM_FORMAT_ARGB8888
5840 };
5841 
5842 static int get_plane_formats(const struct drm_plane *plane,
5843 			     const struct dc_plane_cap *plane_cap,
5844 			     uint32_t *formats, int max_formats)
5845 {
5846 	int i, num_formats = 0;
5847 
5848 	/*
5849 	 * TODO: Query support for each group of formats directly from
5850 	 * DC plane caps. This will require adding more formats to the
5851 	 * caps list.
5852 	 */
5853 
5854 	switch (plane->type) {
5855 	case DRM_PLANE_TYPE_PRIMARY:
5856 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5857 			if (num_formats >= max_formats)
5858 				break;
5859 
5860 			formats[num_formats++] = rgb_formats[i];
5861 		}
5862 
5863 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5864 			formats[num_formats++] = DRM_FORMAT_NV12;
5865 		if (plane_cap && plane_cap->pixel_format_support.p010)
5866 			formats[num_formats++] = DRM_FORMAT_P010;
5867 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5868 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5869 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5870 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5871 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5872 		}
5873 		break;
5874 
5875 	case DRM_PLANE_TYPE_OVERLAY:
5876 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5877 			if (num_formats >= max_formats)
5878 				break;
5879 
5880 			formats[num_formats++] = overlay_formats[i];
5881 		}
5882 		break;
5883 
5884 	case DRM_PLANE_TYPE_CURSOR:
5885 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5886 			if (num_formats >= max_formats)
5887 				break;
5888 
5889 			formats[num_formats++] = cursor_formats[i];
5890 		}
5891 		break;
5892 	}
5893 
5894 	return num_formats;
5895 }
5896 
5897 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5898 				struct drm_plane *plane,
5899 				unsigned long possible_crtcs,
5900 				const struct dc_plane_cap *plane_cap)
5901 {
5902 	uint32_t formats[32];
5903 	int num_formats;
5904 	int res = -EPERM;
5905 	unsigned int supported_rotations;
5906 
5907 	num_formats = get_plane_formats(plane, plane_cap, formats,
5908 					ARRAY_SIZE(formats));
5909 
5910 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5911 				       &dm_plane_funcs, formats, num_formats,
5912 				       NULL, plane->type, NULL);
5913 	if (res)
5914 		return res;
5915 
5916 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5917 	    plane_cap && plane_cap->per_pixel_alpha) {
5918 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5919 					  BIT(DRM_MODE_BLEND_PREMULTI);
5920 
5921 		drm_plane_create_alpha_property(plane);
5922 		drm_plane_create_blend_mode_property(plane, blend_caps);
5923 	}
5924 
5925 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5926 	    plane_cap &&
5927 	    (plane_cap->pixel_format_support.nv12 ||
5928 	     plane_cap->pixel_format_support.p010)) {
5929 		/* This only affects YUV formats. */
5930 		drm_plane_create_color_properties(
5931 			plane,
5932 			BIT(DRM_COLOR_YCBCR_BT601) |
5933 			BIT(DRM_COLOR_YCBCR_BT709) |
5934 			BIT(DRM_COLOR_YCBCR_BT2020),
5935 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5936 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5937 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5938 	}
5939 
5940 	supported_rotations =
5941 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5942 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5943 
5944 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5945 					   supported_rotations);
5946 
5947 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5948 
5949 	/* Create (reset) the plane state */
5950 	if (plane->funcs->reset)
5951 		plane->funcs->reset(plane);
5952 
5953 	return 0;
5954 }
5955 
5956 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5957 			       struct drm_plane *plane,
5958 			       uint32_t crtc_index)
5959 {
5960 	struct amdgpu_crtc *acrtc = NULL;
5961 	struct drm_plane *cursor_plane;
5962 
5963 	int res = -ENOMEM;
5964 
5965 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5966 	if (!cursor_plane)
5967 		goto fail;
5968 
5969 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5970 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5971 
5972 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5973 	if (!acrtc)
5974 		goto fail;
5975 
5976 	res = drm_crtc_init_with_planes(
5977 			dm->ddev,
5978 			&acrtc->base,
5979 			plane,
5980 			cursor_plane,
5981 			&amdgpu_dm_crtc_funcs, NULL);
5982 
5983 	if (res)
5984 		goto fail;
5985 
5986 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5987 
5988 	/* Create (reset) the plane state */
5989 	if (acrtc->base.funcs->reset)
5990 		acrtc->base.funcs->reset(&acrtc->base);
5991 
5992 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5993 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5994 
5995 	acrtc->crtc_id = crtc_index;
5996 	acrtc->base.enabled = false;
5997 	acrtc->otg_inst = -1;
5998 
5999 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6000 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6001 				   true, MAX_COLOR_LUT_ENTRIES);
6002 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6003 
6004 	return 0;
6005 
6006 fail:
6007 	kfree(acrtc);
6008 	kfree(cursor_plane);
6009 	return res;
6010 }
6011 
6012 
6013 static int to_drm_connector_type(enum signal_type st)
6014 {
6015 	switch (st) {
6016 	case SIGNAL_TYPE_HDMI_TYPE_A:
6017 		return DRM_MODE_CONNECTOR_HDMIA;
6018 	case SIGNAL_TYPE_EDP:
6019 		return DRM_MODE_CONNECTOR_eDP;
6020 	case SIGNAL_TYPE_LVDS:
6021 		return DRM_MODE_CONNECTOR_LVDS;
6022 	case SIGNAL_TYPE_RGB:
6023 		return DRM_MODE_CONNECTOR_VGA;
6024 	case SIGNAL_TYPE_DISPLAY_PORT:
6025 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6026 		return DRM_MODE_CONNECTOR_DisplayPort;
6027 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6028 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6029 		return DRM_MODE_CONNECTOR_DVID;
6030 	case SIGNAL_TYPE_VIRTUAL:
6031 		return DRM_MODE_CONNECTOR_VIRTUAL;
6032 
6033 	default:
6034 		return DRM_MODE_CONNECTOR_Unknown;
6035 	}
6036 }
6037 
6038 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6039 {
6040 	struct drm_encoder *encoder;
6041 
6042 	/* There is only one encoder per connector */
6043 	drm_connector_for_each_possible_encoder(connector, encoder)
6044 		return encoder;
6045 
6046 	return NULL;
6047 }
6048 
6049 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6050 {
6051 	struct drm_encoder *encoder;
6052 	struct amdgpu_encoder *amdgpu_encoder;
6053 
6054 	encoder = amdgpu_dm_connector_to_encoder(connector);
6055 
6056 	if (encoder == NULL)
6057 		return;
6058 
6059 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6060 
6061 	amdgpu_encoder->native_mode.clock = 0;
6062 
6063 	if (!list_empty(&connector->probed_modes)) {
6064 		struct drm_display_mode *preferred_mode = NULL;
6065 
6066 		list_for_each_entry(preferred_mode,
6067 				    &connector->probed_modes,
6068 				    head) {
6069 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6070 				amdgpu_encoder->native_mode = *preferred_mode;
6071 
6072 			break;
6073 		}
6074 
6075 	}
6076 }
6077 
6078 static struct drm_display_mode *
6079 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6080 			     char *name,
6081 			     int hdisplay, int vdisplay)
6082 {
6083 	struct drm_device *dev = encoder->dev;
6084 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6085 	struct drm_display_mode *mode = NULL;
6086 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6087 
6088 	mode = drm_mode_duplicate(dev, native_mode);
6089 
6090 	if (mode == NULL)
6091 		return NULL;
6092 
6093 	mode->hdisplay = hdisplay;
6094 	mode->vdisplay = vdisplay;
6095 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6096 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6097 
6098 	return mode;
6099 
6100 }
6101 
6102 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6103 						 struct drm_connector *connector)
6104 {
6105 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6106 	struct drm_display_mode *mode = NULL;
6107 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6108 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6109 				to_amdgpu_dm_connector(connector);
6110 	int i;
6111 	int n;
6112 	struct mode_size {
6113 		char name[DRM_DISPLAY_MODE_LEN];
6114 		int w;
6115 		int h;
6116 	} common_modes[] = {
6117 		{  "640x480",  640,  480},
6118 		{  "800x600",  800,  600},
6119 		{ "1024x768", 1024,  768},
6120 		{ "1280x720", 1280,  720},
6121 		{ "1280x800", 1280,  800},
6122 		{"1280x1024", 1280, 1024},
6123 		{ "1440x900", 1440,  900},
6124 		{"1680x1050", 1680, 1050},
6125 		{"1600x1200", 1600, 1200},
6126 		{"1920x1080", 1920, 1080},
6127 		{"1920x1200", 1920, 1200}
6128 	};
6129 
6130 	n = ARRAY_SIZE(common_modes);
6131 
6132 	for (i = 0; i < n; i++) {
6133 		struct drm_display_mode *curmode = NULL;
6134 		bool mode_existed = false;
6135 
6136 		if (common_modes[i].w > native_mode->hdisplay ||
6137 		    common_modes[i].h > native_mode->vdisplay ||
6138 		   (common_modes[i].w == native_mode->hdisplay &&
6139 		    common_modes[i].h == native_mode->vdisplay))
6140 			continue;
6141 
6142 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6143 			if (common_modes[i].w == curmode->hdisplay &&
6144 			    common_modes[i].h == curmode->vdisplay) {
6145 				mode_existed = true;
6146 				break;
6147 			}
6148 		}
6149 
6150 		if (mode_existed)
6151 			continue;
6152 
6153 		mode = amdgpu_dm_create_common_mode(encoder,
6154 				common_modes[i].name, common_modes[i].w,
6155 				common_modes[i].h);
6156 		drm_mode_probed_add(connector, mode);
6157 		amdgpu_dm_connector->num_modes++;
6158 	}
6159 }
6160 
6161 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6162 					      struct edid *edid)
6163 {
6164 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6165 			to_amdgpu_dm_connector(connector);
6166 
6167 	if (edid) {
6168 		/* empty probed_modes */
6169 		INIT_LIST_HEAD(&connector->probed_modes);
6170 		amdgpu_dm_connector->num_modes =
6171 				drm_add_edid_modes(connector, edid);
6172 
6173 		/* sorting the probed modes before calling function
6174 		 * amdgpu_dm_get_native_mode() since EDID can have
6175 		 * more than one preferred mode. The modes that are
6176 		 * later in the probed mode list could be of higher
6177 		 * and preferred resolution. For example, 3840x2160
6178 		 * resolution in base EDID preferred timing and 4096x2160
6179 		 * preferred resolution in DID extension block later.
6180 		 */
6181 		drm_mode_sort(&connector->probed_modes);
6182 		amdgpu_dm_get_native_mode(connector);
6183 	} else {
6184 		amdgpu_dm_connector->num_modes = 0;
6185 	}
6186 }
6187 
6188 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6189 {
6190 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6191 			to_amdgpu_dm_connector(connector);
6192 	struct drm_encoder *encoder;
6193 	struct edid *edid = amdgpu_dm_connector->edid;
6194 
6195 	encoder = amdgpu_dm_connector_to_encoder(connector);
6196 
6197 	if (!edid || !drm_edid_is_valid(edid)) {
6198 		amdgpu_dm_connector->num_modes =
6199 				drm_add_modes_noedid(connector, 640, 480);
6200 	} else {
6201 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6202 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6203 	}
6204 	amdgpu_dm_fbc_init(connector);
6205 
6206 	return amdgpu_dm_connector->num_modes;
6207 }
6208 
6209 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6210 				     struct amdgpu_dm_connector *aconnector,
6211 				     int connector_type,
6212 				     struct dc_link *link,
6213 				     int link_index)
6214 {
6215 	struct amdgpu_device *adev = dm->ddev->dev_private;
6216 
6217 	/*
6218 	 * Some of the properties below require access to state, like bpc.
6219 	 * Allocate some default initial connector state with our reset helper.
6220 	 */
6221 	if (aconnector->base.funcs->reset)
6222 		aconnector->base.funcs->reset(&aconnector->base);
6223 
6224 	aconnector->connector_id = link_index;
6225 	aconnector->dc_link = link;
6226 	aconnector->base.interlace_allowed = false;
6227 	aconnector->base.doublescan_allowed = false;
6228 	aconnector->base.stereo_allowed = false;
6229 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6230 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6231 	aconnector->audio_inst = -1;
6232 	mutex_init(&aconnector->hpd_lock);
6233 
6234 	/*
6235 	 * configure support HPD hot plug connector_>polled default value is 0
6236 	 * which means HPD hot plug not supported
6237 	 */
6238 	switch (connector_type) {
6239 	case DRM_MODE_CONNECTOR_HDMIA:
6240 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6241 		aconnector->base.ycbcr_420_allowed =
6242 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6243 		break;
6244 	case DRM_MODE_CONNECTOR_DisplayPort:
6245 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6246 		aconnector->base.ycbcr_420_allowed =
6247 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6248 		break;
6249 	case DRM_MODE_CONNECTOR_DVID:
6250 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6251 		break;
6252 	default:
6253 		break;
6254 	}
6255 
6256 	drm_object_attach_property(&aconnector->base.base,
6257 				dm->ddev->mode_config.scaling_mode_property,
6258 				DRM_MODE_SCALE_NONE);
6259 
6260 	drm_object_attach_property(&aconnector->base.base,
6261 				adev->mode_info.underscan_property,
6262 				UNDERSCAN_OFF);
6263 	drm_object_attach_property(&aconnector->base.base,
6264 				adev->mode_info.underscan_hborder_property,
6265 				0);
6266 	drm_object_attach_property(&aconnector->base.base,
6267 				adev->mode_info.underscan_vborder_property,
6268 				0);
6269 
6270 	if (!aconnector->mst_port)
6271 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6272 
6273 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6274 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6275 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6276 
6277 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6278 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6279 		drm_object_attach_property(&aconnector->base.base,
6280 				adev->mode_info.abm_level_property, 0);
6281 	}
6282 
6283 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6284 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6285 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6286 		drm_object_attach_property(
6287 			&aconnector->base.base,
6288 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6289 
6290 		if (!aconnector->mst_port)
6291 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6292 
6293 #ifdef CONFIG_DRM_AMD_DC_HDCP
6294 		if (adev->dm.hdcp_workqueue)
6295 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6296 #endif
6297 	}
6298 }
6299 
6300 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6301 			      struct i2c_msg *msgs, int num)
6302 {
6303 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6304 	struct ddc_service *ddc_service = i2c->ddc_service;
6305 	struct i2c_command cmd;
6306 	int i;
6307 	int result = -EIO;
6308 
6309 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6310 
6311 	if (!cmd.payloads)
6312 		return result;
6313 
6314 	cmd.number_of_payloads = num;
6315 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6316 	cmd.speed = 100;
6317 
6318 	for (i = 0; i < num; i++) {
6319 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6320 		cmd.payloads[i].address = msgs[i].addr;
6321 		cmd.payloads[i].length = msgs[i].len;
6322 		cmd.payloads[i].data = msgs[i].buf;
6323 	}
6324 
6325 	if (dc_submit_i2c(
6326 			ddc_service->ctx->dc,
6327 			ddc_service->ddc_pin->hw_info.ddc_channel,
6328 			&cmd))
6329 		result = num;
6330 
6331 	kfree(cmd.payloads);
6332 	return result;
6333 }
6334 
6335 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6336 {
6337 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6338 }
6339 
6340 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6341 	.master_xfer = amdgpu_dm_i2c_xfer,
6342 	.functionality = amdgpu_dm_i2c_func,
6343 };
6344 
6345 static struct amdgpu_i2c_adapter *
6346 create_i2c(struct ddc_service *ddc_service,
6347 	   int link_index,
6348 	   int *res)
6349 {
6350 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6351 	struct amdgpu_i2c_adapter *i2c;
6352 
6353 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6354 	if (!i2c)
6355 		return NULL;
6356 	i2c->base.owner = THIS_MODULE;
6357 	i2c->base.class = I2C_CLASS_DDC;
6358 	i2c->base.dev.parent = &adev->pdev->dev;
6359 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6360 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6361 	i2c_set_adapdata(&i2c->base, i2c);
6362 	i2c->ddc_service = ddc_service;
6363 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6364 
6365 	return i2c;
6366 }
6367 
6368 
6369 /*
6370  * Note: this function assumes that dc_link_detect() was called for the
6371  * dc_link which will be represented by this aconnector.
6372  */
6373 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6374 				    struct amdgpu_dm_connector *aconnector,
6375 				    uint32_t link_index,
6376 				    struct amdgpu_encoder *aencoder)
6377 {
6378 	int res = 0;
6379 	int connector_type;
6380 	struct dc *dc = dm->dc;
6381 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6382 	struct amdgpu_i2c_adapter *i2c;
6383 
6384 	link->priv = aconnector;
6385 
6386 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6387 
6388 	i2c = create_i2c(link->ddc, link->link_index, &res);
6389 	if (!i2c) {
6390 		DRM_ERROR("Failed to create i2c adapter data\n");
6391 		return -ENOMEM;
6392 	}
6393 
6394 	aconnector->i2c = i2c;
6395 	res = i2c_add_adapter(&i2c->base);
6396 
6397 	if (res) {
6398 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6399 		goto out_free;
6400 	}
6401 
6402 	connector_type = to_drm_connector_type(link->connector_signal);
6403 
6404 	res = drm_connector_init_with_ddc(
6405 			dm->ddev,
6406 			&aconnector->base,
6407 			&amdgpu_dm_connector_funcs,
6408 			connector_type,
6409 			&i2c->base);
6410 
6411 	if (res) {
6412 		DRM_ERROR("connector_init failed\n");
6413 		aconnector->connector_id = -1;
6414 		goto out_free;
6415 	}
6416 
6417 	drm_connector_helper_add(
6418 			&aconnector->base,
6419 			&amdgpu_dm_connector_helper_funcs);
6420 
6421 	amdgpu_dm_connector_init_helper(
6422 		dm,
6423 		aconnector,
6424 		connector_type,
6425 		link,
6426 		link_index);
6427 
6428 	drm_connector_attach_encoder(
6429 		&aconnector->base, &aencoder->base);
6430 
6431 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6432 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6433 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6434 
6435 out_free:
6436 	if (res) {
6437 		kfree(i2c);
6438 		aconnector->i2c = NULL;
6439 	}
6440 	return res;
6441 }
6442 
6443 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6444 {
6445 	switch (adev->mode_info.num_crtc) {
6446 	case 1:
6447 		return 0x1;
6448 	case 2:
6449 		return 0x3;
6450 	case 3:
6451 		return 0x7;
6452 	case 4:
6453 		return 0xf;
6454 	case 5:
6455 		return 0x1f;
6456 	case 6:
6457 	default:
6458 		return 0x3f;
6459 	}
6460 }
6461 
6462 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6463 				  struct amdgpu_encoder *aencoder,
6464 				  uint32_t link_index)
6465 {
6466 	struct amdgpu_device *adev = dev->dev_private;
6467 
6468 	int res = drm_encoder_init(dev,
6469 				   &aencoder->base,
6470 				   &amdgpu_dm_encoder_funcs,
6471 				   DRM_MODE_ENCODER_TMDS,
6472 				   NULL);
6473 
6474 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6475 
6476 	if (!res)
6477 		aencoder->encoder_id = link_index;
6478 	else
6479 		aencoder->encoder_id = -1;
6480 
6481 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6482 
6483 	return res;
6484 }
6485 
6486 static void manage_dm_interrupts(struct amdgpu_device *adev,
6487 				 struct amdgpu_crtc *acrtc,
6488 				 bool enable)
6489 {
6490 	/*
6491 	 * We have no guarantee that the frontend index maps to the same
6492 	 * backend index - some even map to more than one.
6493 	 *
6494 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6495 	 */
6496 	int irq_type =
6497 		amdgpu_display_crtc_idx_to_irq_type(
6498 			adev,
6499 			acrtc->crtc_id);
6500 
6501 	if (enable) {
6502 		drm_crtc_vblank_on(&acrtc->base);
6503 		amdgpu_irq_get(
6504 			adev,
6505 			&adev->pageflip_irq,
6506 			irq_type);
6507 	} else {
6508 
6509 		amdgpu_irq_put(
6510 			adev,
6511 			&adev->pageflip_irq,
6512 			irq_type);
6513 		drm_crtc_vblank_off(&acrtc->base);
6514 	}
6515 }
6516 
6517 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6518 				      struct amdgpu_crtc *acrtc)
6519 {
6520 	int irq_type =
6521 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6522 
6523 	/**
6524 	 * This reads the current state for the IRQ and force reapplies
6525 	 * the setting to hardware.
6526 	 */
6527 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6528 }
6529 
6530 static bool
6531 is_scaling_state_different(const struct dm_connector_state *dm_state,
6532 			   const struct dm_connector_state *old_dm_state)
6533 {
6534 	if (dm_state->scaling != old_dm_state->scaling)
6535 		return true;
6536 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6537 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6538 			return true;
6539 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6540 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6541 			return true;
6542 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6543 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6544 		return true;
6545 	return false;
6546 }
6547 
6548 #ifdef CONFIG_DRM_AMD_DC_HDCP
6549 static bool is_content_protection_different(struct drm_connector_state *state,
6550 					    const struct drm_connector_state *old_state,
6551 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6552 {
6553 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6554 
6555 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6556 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6557 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6558 		return true;
6559 	}
6560 
6561 	/* CP is being re enabled, ignore this */
6562 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6563 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6564 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6565 		return false;
6566 	}
6567 
6568 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6569 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6570 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6571 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6572 
6573 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6574 	 * hot-plug, headless s3, dpms
6575 	 */
6576 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6577 	    aconnector->dc_sink != NULL)
6578 		return true;
6579 
6580 	if (old_state->content_protection == state->content_protection)
6581 		return false;
6582 
6583 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6584 		return true;
6585 
6586 	return false;
6587 }
6588 
6589 #endif
6590 static void remove_stream(struct amdgpu_device *adev,
6591 			  struct amdgpu_crtc *acrtc,
6592 			  struct dc_stream_state *stream)
6593 {
6594 	/* this is the update mode case */
6595 
6596 	acrtc->otg_inst = -1;
6597 	acrtc->enabled = false;
6598 }
6599 
6600 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6601 			       struct dc_cursor_position *position)
6602 {
6603 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6604 	int x, y;
6605 	int xorigin = 0, yorigin = 0;
6606 
6607 	position->enable = false;
6608 	position->x = 0;
6609 	position->y = 0;
6610 
6611 	if (!crtc || !plane->state->fb)
6612 		return 0;
6613 
6614 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6615 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6616 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6617 			  __func__,
6618 			  plane->state->crtc_w,
6619 			  plane->state->crtc_h);
6620 		return -EINVAL;
6621 	}
6622 
6623 	x = plane->state->crtc_x;
6624 	y = plane->state->crtc_y;
6625 
6626 	if (x <= -amdgpu_crtc->max_cursor_width ||
6627 	    y <= -amdgpu_crtc->max_cursor_height)
6628 		return 0;
6629 
6630 	if (x < 0) {
6631 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6632 		x = 0;
6633 	}
6634 	if (y < 0) {
6635 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6636 		y = 0;
6637 	}
6638 	position->enable = true;
6639 	position->translate_by_source = true;
6640 	position->x = x;
6641 	position->y = y;
6642 	position->x_hotspot = xorigin;
6643 	position->y_hotspot = yorigin;
6644 
6645 	return 0;
6646 }
6647 
6648 static void handle_cursor_update(struct drm_plane *plane,
6649 				 struct drm_plane_state *old_plane_state)
6650 {
6651 	struct amdgpu_device *adev = plane->dev->dev_private;
6652 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6653 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6654 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6655 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6656 	uint64_t address = afb ? afb->address : 0;
6657 	struct dc_cursor_position position;
6658 	struct dc_cursor_attributes attributes;
6659 	int ret;
6660 
6661 	if (!plane->state->fb && !old_plane_state->fb)
6662 		return;
6663 
6664 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6665 			 __func__,
6666 			 amdgpu_crtc->crtc_id,
6667 			 plane->state->crtc_w,
6668 			 plane->state->crtc_h);
6669 
6670 	ret = get_cursor_position(plane, crtc, &position);
6671 	if (ret)
6672 		return;
6673 
6674 	if (!position.enable) {
6675 		/* turn off cursor */
6676 		if (crtc_state && crtc_state->stream) {
6677 			mutex_lock(&adev->dm.dc_lock);
6678 			dc_stream_set_cursor_position(crtc_state->stream,
6679 						      &position);
6680 			mutex_unlock(&adev->dm.dc_lock);
6681 		}
6682 		return;
6683 	}
6684 
6685 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6686 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6687 
6688 	memset(&attributes, 0, sizeof(attributes));
6689 	attributes.address.high_part = upper_32_bits(address);
6690 	attributes.address.low_part  = lower_32_bits(address);
6691 	attributes.width             = plane->state->crtc_w;
6692 	attributes.height            = plane->state->crtc_h;
6693 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6694 	attributes.rotation_angle    = 0;
6695 	attributes.attribute_flags.value = 0;
6696 
6697 	attributes.pitch = attributes.width;
6698 
6699 	if (crtc_state->stream) {
6700 		mutex_lock(&adev->dm.dc_lock);
6701 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6702 							 &attributes))
6703 			DRM_ERROR("DC failed to set cursor attributes\n");
6704 
6705 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6706 						   &position))
6707 			DRM_ERROR("DC failed to set cursor position\n");
6708 		mutex_unlock(&adev->dm.dc_lock);
6709 	}
6710 }
6711 
6712 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6713 {
6714 
6715 	assert_spin_locked(&acrtc->base.dev->event_lock);
6716 	WARN_ON(acrtc->event);
6717 
6718 	acrtc->event = acrtc->base.state->event;
6719 
6720 	/* Set the flip status */
6721 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6722 
6723 	/* Mark this event as consumed */
6724 	acrtc->base.state->event = NULL;
6725 
6726 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6727 						 acrtc->crtc_id);
6728 }
6729 
6730 static void update_freesync_state_on_stream(
6731 	struct amdgpu_display_manager *dm,
6732 	struct dm_crtc_state *new_crtc_state,
6733 	struct dc_stream_state *new_stream,
6734 	struct dc_plane_state *surface,
6735 	u32 flip_timestamp_in_us)
6736 {
6737 	struct mod_vrr_params vrr_params;
6738 	struct dc_info_packet vrr_infopacket = {0};
6739 	struct amdgpu_device *adev = dm->adev;
6740 	unsigned long flags;
6741 
6742 	if (!new_stream)
6743 		return;
6744 
6745 	/*
6746 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6747 	 * For now it's sufficient to just guard against these conditions.
6748 	 */
6749 
6750 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6751 		return;
6752 
6753 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6754 	vrr_params = new_crtc_state->vrr_params;
6755 
6756 	if (surface) {
6757 		mod_freesync_handle_preflip(
6758 			dm->freesync_module,
6759 			surface,
6760 			new_stream,
6761 			flip_timestamp_in_us,
6762 			&vrr_params);
6763 
6764 		if (adev->family < AMDGPU_FAMILY_AI &&
6765 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6766 			mod_freesync_handle_v_update(dm->freesync_module,
6767 						     new_stream, &vrr_params);
6768 
6769 			/* Need to call this before the frame ends. */
6770 			dc_stream_adjust_vmin_vmax(dm->dc,
6771 						   new_crtc_state->stream,
6772 						   &vrr_params.adjust);
6773 		}
6774 	}
6775 
6776 	mod_freesync_build_vrr_infopacket(
6777 		dm->freesync_module,
6778 		new_stream,
6779 		&vrr_params,
6780 		PACKET_TYPE_VRR,
6781 		TRANSFER_FUNC_UNKNOWN,
6782 		&vrr_infopacket);
6783 
6784 	new_crtc_state->freesync_timing_changed |=
6785 		(memcmp(&new_crtc_state->vrr_params.adjust,
6786 			&vrr_params.adjust,
6787 			sizeof(vrr_params.adjust)) != 0);
6788 
6789 	new_crtc_state->freesync_vrr_info_changed |=
6790 		(memcmp(&new_crtc_state->vrr_infopacket,
6791 			&vrr_infopacket,
6792 			sizeof(vrr_infopacket)) != 0);
6793 
6794 	new_crtc_state->vrr_params = vrr_params;
6795 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6796 
6797 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6798 	new_stream->vrr_infopacket = vrr_infopacket;
6799 
6800 	if (new_crtc_state->freesync_vrr_info_changed)
6801 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6802 			      new_crtc_state->base.crtc->base.id,
6803 			      (int)new_crtc_state->base.vrr_enabled,
6804 			      (int)vrr_params.state);
6805 
6806 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6807 }
6808 
6809 static void pre_update_freesync_state_on_stream(
6810 	struct amdgpu_display_manager *dm,
6811 	struct dm_crtc_state *new_crtc_state)
6812 {
6813 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6814 	struct mod_vrr_params vrr_params;
6815 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6816 	struct amdgpu_device *adev = dm->adev;
6817 	unsigned long flags;
6818 
6819 	if (!new_stream)
6820 		return;
6821 
6822 	/*
6823 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6824 	 * For now it's sufficient to just guard against these conditions.
6825 	 */
6826 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6827 		return;
6828 
6829 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6830 	vrr_params = new_crtc_state->vrr_params;
6831 
6832 	if (new_crtc_state->vrr_supported &&
6833 	    config.min_refresh_in_uhz &&
6834 	    config.max_refresh_in_uhz) {
6835 		config.state = new_crtc_state->base.vrr_enabled ?
6836 			VRR_STATE_ACTIVE_VARIABLE :
6837 			VRR_STATE_INACTIVE;
6838 	} else {
6839 		config.state = VRR_STATE_UNSUPPORTED;
6840 	}
6841 
6842 	mod_freesync_build_vrr_params(dm->freesync_module,
6843 				      new_stream,
6844 				      &config, &vrr_params);
6845 
6846 	new_crtc_state->freesync_timing_changed |=
6847 		(memcmp(&new_crtc_state->vrr_params.adjust,
6848 			&vrr_params.adjust,
6849 			sizeof(vrr_params.adjust)) != 0);
6850 
6851 	new_crtc_state->vrr_params = vrr_params;
6852 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6853 }
6854 
6855 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6856 					    struct dm_crtc_state *new_state)
6857 {
6858 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6859 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6860 
6861 	if (!old_vrr_active && new_vrr_active) {
6862 		/* Transition VRR inactive -> active:
6863 		 * While VRR is active, we must not disable vblank irq, as a
6864 		 * reenable after disable would compute bogus vblank/pflip
6865 		 * timestamps if it likely happened inside display front-porch.
6866 		 *
6867 		 * We also need vupdate irq for the actual core vblank handling
6868 		 * at end of vblank.
6869 		 */
6870 		dm_set_vupdate_irq(new_state->base.crtc, true);
6871 		drm_crtc_vblank_get(new_state->base.crtc);
6872 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6873 				 __func__, new_state->base.crtc->base.id);
6874 	} else if (old_vrr_active && !new_vrr_active) {
6875 		/* Transition VRR active -> inactive:
6876 		 * Allow vblank irq disable again for fixed refresh rate.
6877 		 */
6878 		dm_set_vupdate_irq(new_state->base.crtc, false);
6879 		drm_crtc_vblank_put(new_state->base.crtc);
6880 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6881 				 __func__, new_state->base.crtc->base.id);
6882 	}
6883 }
6884 
6885 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6886 {
6887 	struct drm_plane *plane;
6888 	struct drm_plane_state *old_plane_state, *new_plane_state;
6889 	int i;
6890 
6891 	/*
6892 	 * TODO: Make this per-stream so we don't issue redundant updates for
6893 	 * commits with multiple streams.
6894 	 */
6895 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6896 				       new_plane_state, i)
6897 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6898 			handle_cursor_update(plane, old_plane_state);
6899 }
6900 
6901 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6902 				    struct dc_state *dc_state,
6903 				    struct drm_device *dev,
6904 				    struct amdgpu_display_manager *dm,
6905 				    struct drm_crtc *pcrtc,
6906 				    bool wait_for_vblank)
6907 {
6908 	uint32_t i;
6909 	uint64_t timestamp_ns;
6910 	struct drm_plane *plane;
6911 	struct drm_plane_state *old_plane_state, *new_plane_state;
6912 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6913 	struct drm_crtc_state *new_pcrtc_state =
6914 			drm_atomic_get_new_crtc_state(state, pcrtc);
6915 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6916 	struct dm_crtc_state *dm_old_crtc_state =
6917 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6918 	int planes_count = 0, vpos, hpos;
6919 	long r;
6920 	unsigned long flags;
6921 	struct amdgpu_bo *abo;
6922 	uint64_t tiling_flags;
6923 	bool tmz_surface = false;
6924 	uint32_t target_vblank, last_flip_vblank;
6925 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6926 	bool pflip_present = false;
6927 	struct {
6928 		struct dc_surface_update surface_updates[MAX_SURFACES];
6929 		struct dc_plane_info plane_infos[MAX_SURFACES];
6930 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6931 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6932 		struct dc_stream_update stream_update;
6933 	} *bundle;
6934 
6935 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6936 
6937 	if (!bundle) {
6938 		dm_error("Failed to allocate update bundle\n");
6939 		goto cleanup;
6940 	}
6941 
6942 	/*
6943 	 * Disable the cursor first if we're disabling all the planes.
6944 	 * It'll remain on the screen after the planes are re-enabled
6945 	 * if we don't.
6946 	 */
6947 	if (acrtc_state->active_planes == 0)
6948 		amdgpu_dm_commit_cursors(state);
6949 
6950 	/* update planes when needed */
6951 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6952 		struct drm_crtc *crtc = new_plane_state->crtc;
6953 		struct drm_crtc_state *new_crtc_state;
6954 		struct drm_framebuffer *fb = new_plane_state->fb;
6955 		bool plane_needs_flip;
6956 		struct dc_plane_state *dc_plane;
6957 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6958 
6959 		/* Cursor plane is handled after stream updates */
6960 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6961 			continue;
6962 
6963 		if (!fb || !crtc || pcrtc != crtc)
6964 			continue;
6965 
6966 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6967 		if (!new_crtc_state->active)
6968 			continue;
6969 
6970 		dc_plane = dm_new_plane_state->dc_state;
6971 
6972 		bundle->surface_updates[planes_count].surface = dc_plane;
6973 		if (new_pcrtc_state->color_mgmt_changed) {
6974 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6975 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6976 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6977 		}
6978 
6979 		fill_dc_scaling_info(new_plane_state,
6980 				     &bundle->scaling_infos[planes_count]);
6981 
6982 		bundle->surface_updates[planes_count].scaling_info =
6983 			&bundle->scaling_infos[planes_count];
6984 
6985 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6986 
6987 		pflip_present = pflip_present || plane_needs_flip;
6988 
6989 		if (!plane_needs_flip) {
6990 			planes_count += 1;
6991 			continue;
6992 		}
6993 
6994 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6995 
6996 		/*
6997 		 * Wait for all fences on this FB. Do limited wait to avoid
6998 		 * deadlock during GPU reset when this fence will not signal
6999 		 * but we hold reservation lock for the BO.
7000 		 */
7001 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7002 							false,
7003 							msecs_to_jiffies(5000));
7004 		if (unlikely(r <= 0))
7005 			DRM_ERROR("Waiting for fences timed out!");
7006 
7007 		/*
7008 		 * TODO This might fail and hence better not used, wait
7009 		 * explicitly on fences instead
7010 		 * and in general should be called for
7011 		 * blocking commit to as per framework helpers
7012 		 */
7013 		r = amdgpu_bo_reserve(abo, true);
7014 		if (unlikely(r != 0))
7015 			DRM_ERROR("failed to reserve buffer before flip\n");
7016 
7017 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
7018 
7019 		tmz_surface = amdgpu_bo_encrypted(abo);
7020 
7021 		amdgpu_bo_unreserve(abo);
7022 
7023 		fill_dc_plane_info_and_addr(
7024 			dm->adev, new_plane_state, tiling_flags,
7025 			&bundle->plane_infos[planes_count],
7026 			&bundle->flip_addrs[planes_count].address,
7027 			tmz_surface,
7028 			false);
7029 
7030 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7031 				 new_plane_state->plane->index,
7032 				 bundle->plane_infos[planes_count].dcc.enable);
7033 
7034 		bundle->surface_updates[planes_count].plane_info =
7035 			&bundle->plane_infos[planes_count];
7036 
7037 		/*
7038 		 * Only allow immediate flips for fast updates that don't
7039 		 * change FB pitch, DCC state, rotation or mirroing.
7040 		 */
7041 		bundle->flip_addrs[planes_count].flip_immediate =
7042 			crtc->state->async_flip &&
7043 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7044 
7045 		timestamp_ns = ktime_get_ns();
7046 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7047 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7048 		bundle->surface_updates[planes_count].surface = dc_plane;
7049 
7050 		if (!bundle->surface_updates[planes_count].surface) {
7051 			DRM_ERROR("No surface for CRTC: id=%d\n",
7052 					acrtc_attach->crtc_id);
7053 			continue;
7054 		}
7055 
7056 		if (plane == pcrtc->primary)
7057 			update_freesync_state_on_stream(
7058 				dm,
7059 				acrtc_state,
7060 				acrtc_state->stream,
7061 				dc_plane,
7062 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7063 
7064 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7065 				 __func__,
7066 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7067 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7068 
7069 		planes_count += 1;
7070 
7071 	}
7072 
7073 	if (pflip_present) {
7074 		if (!vrr_active) {
7075 			/* Use old throttling in non-vrr fixed refresh rate mode
7076 			 * to keep flip scheduling based on target vblank counts
7077 			 * working in a backwards compatible way, e.g., for
7078 			 * clients using the GLX_OML_sync_control extension or
7079 			 * DRI3/Present extension with defined target_msc.
7080 			 */
7081 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7082 		}
7083 		else {
7084 			/* For variable refresh rate mode only:
7085 			 * Get vblank of last completed flip to avoid > 1 vrr
7086 			 * flips per video frame by use of throttling, but allow
7087 			 * flip programming anywhere in the possibly large
7088 			 * variable vrr vblank interval for fine-grained flip
7089 			 * timing control and more opportunity to avoid stutter
7090 			 * on late submission of flips.
7091 			 */
7092 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7093 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7094 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7095 		}
7096 
7097 		target_vblank = last_flip_vblank + wait_for_vblank;
7098 
7099 		/*
7100 		 * Wait until we're out of the vertical blank period before the one
7101 		 * targeted by the flip
7102 		 */
7103 		while ((acrtc_attach->enabled &&
7104 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7105 							    0, &vpos, &hpos, NULL,
7106 							    NULL, &pcrtc->hwmode)
7107 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7108 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7109 			(int)(target_vblank -
7110 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7111 			usleep_range(1000, 1100);
7112 		}
7113 
7114 		/**
7115 		 * Prepare the flip event for the pageflip interrupt to handle.
7116 		 *
7117 		 * This only works in the case where we've already turned on the
7118 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7119 		 * from 0 -> n planes we have to skip a hardware generated event
7120 		 * and rely on sending it from software.
7121 		 */
7122 		if (acrtc_attach->base.state->event &&
7123 		    acrtc_state->active_planes > 0) {
7124 			drm_crtc_vblank_get(pcrtc);
7125 
7126 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7127 
7128 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7129 			prepare_flip_isr(acrtc_attach);
7130 
7131 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7132 		}
7133 
7134 		if (acrtc_state->stream) {
7135 			if (acrtc_state->freesync_vrr_info_changed)
7136 				bundle->stream_update.vrr_infopacket =
7137 					&acrtc_state->stream->vrr_infopacket;
7138 		}
7139 	}
7140 
7141 	/* Update the planes if changed or disable if we don't have any. */
7142 	if ((planes_count || acrtc_state->active_planes == 0) &&
7143 		acrtc_state->stream) {
7144 		bundle->stream_update.stream = acrtc_state->stream;
7145 		if (new_pcrtc_state->mode_changed) {
7146 			bundle->stream_update.src = acrtc_state->stream->src;
7147 			bundle->stream_update.dst = acrtc_state->stream->dst;
7148 		}
7149 
7150 		if (new_pcrtc_state->color_mgmt_changed) {
7151 			/*
7152 			 * TODO: This isn't fully correct since we've actually
7153 			 * already modified the stream in place.
7154 			 */
7155 			bundle->stream_update.gamut_remap =
7156 				&acrtc_state->stream->gamut_remap_matrix;
7157 			bundle->stream_update.output_csc_transform =
7158 				&acrtc_state->stream->csc_color_matrix;
7159 			bundle->stream_update.out_transfer_func =
7160 				acrtc_state->stream->out_transfer_func;
7161 		}
7162 
7163 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7164 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7165 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7166 
7167 		/*
7168 		 * If FreeSync state on the stream has changed then we need to
7169 		 * re-adjust the min/max bounds now that DC doesn't handle this
7170 		 * as part of commit.
7171 		 */
7172 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7173 		    amdgpu_dm_vrr_active(acrtc_state)) {
7174 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7175 			dc_stream_adjust_vmin_vmax(
7176 				dm->dc, acrtc_state->stream,
7177 				&acrtc_state->vrr_params.adjust);
7178 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7179 		}
7180 		mutex_lock(&dm->dc_lock);
7181 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7182 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7183 			amdgpu_dm_psr_disable(acrtc_state->stream);
7184 
7185 		dc_commit_updates_for_stream(dm->dc,
7186 						     bundle->surface_updates,
7187 						     planes_count,
7188 						     acrtc_state->stream,
7189 						     &bundle->stream_update,
7190 						     dc_state);
7191 
7192 		/**
7193 		 * Enable or disable the interrupts on the backend.
7194 		 *
7195 		 * Most pipes are put into power gating when unused.
7196 		 *
7197 		 * When power gating is enabled on a pipe we lose the
7198 		 * interrupt enablement state when power gating is disabled.
7199 		 *
7200 		 * So we need to update the IRQ control state in hardware
7201 		 * whenever the pipe turns on (since it could be previously
7202 		 * power gated) or off (since some pipes can't be power gated
7203 		 * on some ASICs).
7204 		 */
7205 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7206 			dm_update_pflip_irq_state(
7207 				(struct amdgpu_device *)dev->dev_private,
7208 				acrtc_attach);
7209 
7210 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7211 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7212 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7213 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7214 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7215 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7216 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7217 			amdgpu_dm_psr_enable(acrtc_state->stream);
7218 		}
7219 
7220 		mutex_unlock(&dm->dc_lock);
7221 	}
7222 
7223 	/*
7224 	 * Update cursor state *after* programming all the planes.
7225 	 * This avoids redundant programming in the case where we're going
7226 	 * to be disabling a single plane - those pipes are being disabled.
7227 	 */
7228 	if (acrtc_state->active_planes)
7229 		amdgpu_dm_commit_cursors(state);
7230 
7231 cleanup:
7232 	kfree(bundle);
7233 }
7234 
7235 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7236 				   struct drm_atomic_state *state)
7237 {
7238 	struct amdgpu_device *adev = dev->dev_private;
7239 	struct amdgpu_dm_connector *aconnector;
7240 	struct drm_connector *connector;
7241 	struct drm_connector_state *old_con_state, *new_con_state;
7242 	struct drm_crtc_state *new_crtc_state;
7243 	struct dm_crtc_state *new_dm_crtc_state;
7244 	const struct dc_stream_status *status;
7245 	int i, inst;
7246 
7247 	/* Notify device removals. */
7248 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7249 		if (old_con_state->crtc != new_con_state->crtc) {
7250 			/* CRTC changes require notification. */
7251 			goto notify;
7252 		}
7253 
7254 		if (!new_con_state->crtc)
7255 			continue;
7256 
7257 		new_crtc_state = drm_atomic_get_new_crtc_state(
7258 			state, new_con_state->crtc);
7259 
7260 		if (!new_crtc_state)
7261 			continue;
7262 
7263 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7264 			continue;
7265 
7266 	notify:
7267 		aconnector = to_amdgpu_dm_connector(connector);
7268 
7269 		mutex_lock(&adev->dm.audio_lock);
7270 		inst = aconnector->audio_inst;
7271 		aconnector->audio_inst = -1;
7272 		mutex_unlock(&adev->dm.audio_lock);
7273 
7274 		amdgpu_dm_audio_eld_notify(adev, inst);
7275 	}
7276 
7277 	/* Notify audio device additions. */
7278 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7279 		if (!new_con_state->crtc)
7280 			continue;
7281 
7282 		new_crtc_state = drm_atomic_get_new_crtc_state(
7283 			state, new_con_state->crtc);
7284 
7285 		if (!new_crtc_state)
7286 			continue;
7287 
7288 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7289 			continue;
7290 
7291 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7292 		if (!new_dm_crtc_state->stream)
7293 			continue;
7294 
7295 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7296 		if (!status)
7297 			continue;
7298 
7299 		aconnector = to_amdgpu_dm_connector(connector);
7300 
7301 		mutex_lock(&adev->dm.audio_lock);
7302 		inst = status->audio_inst;
7303 		aconnector->audio_inst = inst;
7304 		mutex_unlock(&adev->dm.audio_lock);
7305 
7306 		amdgpu_dm_audio_eld_notify(adev, inst);
7307 	}
7308 }
7309 
7310 /*
7311  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7312  * @crtc_state: the DRM CRTC state
7313  * @stream_state: the DC stream state.
7314  *
7315  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7316  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7317  */
7318 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7319 						struct dc_stream_state *stream_state)
7320 {
7321 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7322 }
7323 
7324 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7325 				   struct drm_atomic_state *state,
7326 				   bool nonblock)
7327 {
7328 	struct drm_crtc *crtc;
7329 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7330 	struct amdgpu_device *adev = dev->dev_private;
7331 	int i;
7332 
7333 	/*
7334 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7335 	 * a modeset, being disabled, or have no active planes.
7336 	 *
7337 	 * It's done in atomic commit rather than commit tail for now since
7338 	 * some of these interrupt handlers access the current CRTC state and
7339 	 * potentially the stream pointer itself.
7340 	 *
7341 	 * Since the atomic state is swapped within atomic commit and not within
7342 	 * commit tail this would leave to new state (that hasn't been committed yet)
7343 	 * being accesssed from within the handlers.
7344 	 *
7345 	 * TODO: Fix this so we can do this in commit tail and not have to block
7346 	 * in atomic check.
7347 	 */
7348 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7349 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7350 
7351 		if (old_crtc_state->active &&
7352 		    (!new_crtc_state->active ||
7353 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7354 			manage_dm_interrupts(adev, acrtc, false);
7355 	}
7356 	/*
7357 	 * Add check here for SoC's that support hardware cursor plane, to
7358 	 * unset legacy_cursor_update
7359 	 */
7360 
7361 	return drm_atomic_helper_commit(dev, state, nonblock);
7362 
7363 	/*TODO Handle EINTR, reenable IRQ*/
7364 }
7365 
7366 /**
7367  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7368  * @state: The atomic state to commit
7369  *
7370  * This will tell DC to commit the constructed DC state from atomic_check,
7371  * programming the hardware. Any failures here implies a hardware failure, since
7372  * atomic check should have filtered anything non-kosher.
7373  */
7374 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7375 {
7376 	struct drm_device *dev = state->dev;
7377 	struct amdgpu_device *adev = dev->dev_private;
7378 	struct amdgpu_display_manager *dm = &adev->dm;
7379 	struct dm_atomic_state *dm_state;
7380 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7381 	uint32_t i, j;
7382 	struct drm_crtc *crtc;
7383 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7384 	unsigned long flags;
7385 	bool wait_for_vblank = true;
7386 	struct drm_connector *connector;
7387 	struct drm_connector_state *old_con_state, *new_con_state;
7388 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7389 	int crtc_disable_count = 0;
7390 
7391 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7392 
7393 	dm_state = dm_atomic_get_new_state(state);
7394 	if (dm_state && dm_state->context) {
7395 		dc_state = dm_state->context;
7396 	} else {
7397 		/* No state changes, retain current state. */
7398 		dc_state_temp = dc_create_state(dm->dc);
7399 		ASSERT(dc_state_temp);
7400 		dc_state = dc_state_temp;
7401 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7402 	}
7403 
7404 	/* update changed items */
7405 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7406 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7407 
7408 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7409 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7410 
7411 		DRM_DEBUG_DRIVER(
7412 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7413 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7414 			"connectors_changed:%d\n",
7415 			acrtc->crtc_id,
7416 			new_crtc_state->enable,
7417 			new_crtc_state->active,
7418 			new_crtc_state->planes_changed,
7419 			new_crtc_state->mode_changed,
7420 			new_crtc_state->active_changed,
7421 			new_crtc_state->connectors_changed);
7422 
7423 		/* Copy all transient state flags into dc state */
7424 		if (dm_new_crtc_state->stream) {
7425 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7426 							    dm_new_crtc_state->stream);
7427 		}
7428 
7429 		/* handles headless hotplug case, updating new_state and
7430 		 * aconnector as needed
7431 		 */
7432 
7433 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7434 
7435 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7436 
7437 			if (!dm_new_crtc_state->stream) {
7438 				/*
7439 				 * this could happen because of issues with
7440 				 * userspace notifications delivery.
7441 				 * In this case userspace tries to set mode on
7442 				 * display which is disconnected in fact.
7443 				 * dc_sink is NULL in this case on aconnector.
7444 				 * We expect reset mode will come soon.
7445 				 *
7446 				 * This can also happen when unplug is done
7447 				 * during resume sequence ended
7448 				 *
7449 				 * In this case, we want to pretend we still
7450 				 * have a sink to keep the pipe running so that
7451 				 * hw state is consistent with the sw state
7452 				 */
7453 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7454 						__func__, acrtc->base.base.id);
7455 				continue;
7456 			}
7457 
7458 			if (dm_old_crtc_state->stream)
7459 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7460 
7461 			pm_runtime_get_noresume(dev->dev);
7462 
7463 			acrtc->enabled = true;
7464 			acrtc->hw_mode = new_crtc_state->mode;
7465 			crtc->hwmode = new_crtc_state->mode;
7466 		} else if (modereset_required(new_crtc_state)) {
7467 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7468 			/* i.e. reset mode */
7469 			if (dm_old_crtc_state->stream) {
7470 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7471 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7472 
7473 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7474 			}
7475 		}
7476 	} /* for_each_crtc_in_state() */
7477 
7478 	if (dc_state) {
7479 		dm_enable_per_frame_crtc_master_sync(dc_state);
7480 		mutex_lock(&dm->dc_lock);
7481 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7482 		mutex_unlock(&dm->dc_lock);
7483 	}
7484 
7485 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7486 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7487 
7488 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7489 
7490 		if (dm_new_crtc_state->stream != NULL) {
7491 			const struct dc_stream_status *status =
7492 					dc_stream_get_status(dm_new_crtc_state->stream);
7493 
7494 			if (!status)
7495 				status = dc_stream_get_status_from_state(dc_state,
7496 									 dm_new_crtc_state->stream);
7497 
7498 			if (!status)
7499 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7500 			else
7501 				acrtc->otg_inst = status->primary_otg_inst;
7502 		}
7503 	}
7504 #ifdef CONFIG_DRM_AMD_DC_HDCP
7505 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7506 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7507 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7508 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7509 
7510 		new_crtc_state = NULL;
7511 
7512 		if (acrtc)
7513 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7514 
7515 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7516 
7517 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7518 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7519 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7520 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7521 			continue;
7522 		}
7523 
7524 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7525 			hdcp_update_display(
7526 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7527 				new_con_state->hdcp_content_type,
7528 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7529 													 : false);
7530 	}
7531 #endif
7532 
7533 	/* Handle connector state changes */
7534 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7535 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7536 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7537 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7538 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7539 		struct dc_stream_update stream_update;
7540 		struct dc_info_packet hdr_packet;
7541 		struct dc_stream_status *status = NULL;
7542 		bool abm_changed, hdr_changed, scaling_changed;
7543 
7544 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7545 		memset(&stream_update, 0, sizeof(stream_update));
7546 
7547 		if (acrtc) {
7548 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7549 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7550 		}
7551 
7552 		/* Skip any modesets/resets */
7553 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7554 			continue;
7555 
7556 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7557 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7558 
7559 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7560 							     dm_old_con_state);
7561 
7562 		abm_changed = dm_new_crtc_state->abm_level !=
7563 			      dm_old_crtc_state->abm_level;
7564 
7565 		hdr_changed =
7566 			is_hdr_metadata_different(old_con_state, new_con_state);
7567 
7568 		if (!scaling_changed && !abm_changed && !hdr_changed)
7569 			continue;
7570 
7571 		stream_update.stream = dm_new_crtc_state->stream;
7572 		if (scaling_changed) {
7573 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7574 					dm_new_con_state, dm_new_crtc_state->stream);
7575 
7576 			stream_update.src = dm_new_crtc_state->stream->src;
7577 			stream_update.dst = dm_new_crtc_state->stream->dst;
7578 		}
7579 
7580 		if (abm_changed) {
7581 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7582 
7583 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7584 		}
7585 
7586 		if (hdr_changed) {
7587 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7588 			stream_update.hdr_static_metadata = &hdr_packet;
7589 		}
7590 
7591 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7592 		WARN_ON(!status);
7593 		WARN_ON(!status->plane_count);
7594 
7595 		/*
7596 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7597 		 * Here we create an empty update on each plane.
7598 		 * To fix this, DC should permit updating only stream properties.
7599 		 */
7600 		for (j = 0; j < status->plane_count; j++)
7601 			dummy_updates[j].surface = status->plane_states[0];
7602 
7603 
7604 		mutex_lock(&dm->dc_lock);
7605 		dc_commit_updates_for_stream(dm->dc,
7606 						     dummy_updates,
7607 						     status->plane_count,
7608 						     dm_new_crtc_state->stream,
7609 						     &stream_update,
7610 						     dc_state);
7611 		mutex_unlock(&dm->dc_lock);
7612 	}
7613 
7614 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7615 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7616 				      new_crtc_state, i) {
7617 		if (old_crtc_state->active && !new_crtc_state->active)
7618 			crtc_disable_count++;
7619 
7620 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7621 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7622 
7623 		/* Update freesync active state. */
7624 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7625 
7626 		/* Handle vrr on->off / off->on transitions */
7627 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7628 						dm_new_crtc_state);
7629 	}
7630 
7631 	/**
7632 	 * Enable interrupts for CRTCs that are newly enabled or went through
7633 	 * a modeset. It was intentionally deferred until after the front end
7634 	 * state was modified to wait until the OTG was on and so the IRQ
7635 	 * handlers didn't access stale or invalid state.
7636 	 */
7637 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7638 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7639 
7640 		if (new_crtc_state->active &&
7641 		    (!old_crtc_state->active ||
7642 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7643 			manage_dm_interrupts(adev, acrtc, true);
7644 #ifdef CONFIG_DEBUG_FS
7645 			/**
7646 			 * Frontend may have changed so reapply the CRC capture
7647 			 * settings for the stream.
7648 			 */
7649 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7650 
7651 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7652 				amdgpu_dm_crtc_configure_crc_source(
7653 					crtc, dm_new_crtc_state,
7654 					dm_new_crtc_state->crc_src);
7655 			}
7656 #endif
7657 		}
7658 	}
7659 
7660 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7661 		if (new_crtc_state->async_flip)
7662 			wait_for_vblank = false;
7663 
7664 	/* update planes when needed per crtc*/
7665 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7666 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7667 
7668 		if (dm_new_crtc_state->stream)
7669 			amdgpu_dm_commit_planes(state, dc_state, dev,
7670 						dm, crtc, wait_for_vblank);
7671 	}
7672 
7673 	/* Update audio instances for each connector. */
7674 	amdgpu_dm_commit_audio(dev, state);
7675 
7676 	/*
7677 	 * send vblank event on all events not handled in flip and
7678 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7679 	 */
7680 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7681 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7682 
7683 		if (new_crtc_state->event)
7684 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7685 
7686 		new_crtc_state->event = NULL;
7687 	}
7688 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7689 
7690 	/* Signal HW programming completion */
7691 	drm_atomic_helper_commit_hw_done(state);
7692 
7693 	if (wait_for_vblank)
7694 		drm_atomic_helper_wait_for_flip_done(dev, state);
7695 
7696 	drm_atomic_helper_cleanup_planes(dev, state);
7697 
7698 	/*
7699 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7700 	 * so we can put the GPU into runtime suspend if we're not driving any
7701 	 * displays anymore
7702 	 */
7703 	for (i = 0; i < crtc_disable_count; i++)
7704 		pm_runtime_put_autosuspend(dev->dev);
7705 	pm_runtime_mark_last_busy(dev->dev);
7706 
7707 	if (dc_state_temp)
7708 		dc_release_state(dc_state_temp);
7709 }
7710 
7711 
7712 static int dm_force_atomic_commit(struct drm_connector *connector)
7713 {
7714 	int ret = 0;
7715 	struct drm_device *ddev = connector->dev;
7716 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7717 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7718 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7719 	struct drm_connector_state *conn_state;
7720 	struct drm_crtc_state *crtc_state;
7721 	struct drm_plane_state *plane_state;
7722 
7723 	if (!state)
7724 		return -ENOMEM;
7725 
7726 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7727 
7728 	/* Construct an atomic state to restore previous display setting */
7729 
7730 	/*
7731 	 * Attach connectors to drm_atomic_state
7732 	 */
7733 	conn_state = drm_atomic_get_connector_state(state, connector);
7734 
7735 	ret = PTR_ERR_OR_ZERO(conn_state);
7736 	if (ret)
7737 		goto err;
7738 
7739 	/* Attach crtc to drm_atomic_state*/
7740 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7741 
7742 	ret = PTR_ERR_OR_ZERO(crtc_state);
7743 	if (ret)
7744 		goto err;
7745 
7746 	/* force a restore */
7747 	crtc_state->mode_changed = true;
7748 
7749 	/* Attach plane to drm_atomic_state */
7750 	plane_state = drm_atomic_get_plane_state(state, plane);
7751 
7752 	ret = PTR_ERR_OR_ZERO(plane_state);
7753 	if (ret)
7754 		goto err;
7755 
7756 
7757 	/* Call commit internally with the state we just constructed */
7758 	ret = drm_atomic_commit(state);
7759 	if (!ret)
7760 		return 0;
7761 
7762 err:
7763 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7764 	drm_atomic_state_put(state);
7765 
7766 	return ret;
7767 }
7768 
7769 /*
7770  * This function handles all cases when set mode does not come upon hotplug.
7771  * This includes when a display is unplugged then plugged back into the
7772  * same port and when running without usermode desktop manager supprot
7773  */
7774 void dm_restore_drm_connector_state(struct drm_device *dev,
7775 				    struct drm_connector *connector)
7776 {
7777 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7778 	struct amdgpu_crtc *disconnected_acrtc;
7779 	struct dm_crtc_state *acrtc_state;
7780 
7781 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7782 		return;
7783 
7784 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7785 	if (!disconnected_acrtc)
7786 		return;
7787 
7788 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7789 	if (!acrtc_state->stream)
7790 		return;
7791 
7792 	/*
7793 	 * If the previous sink is not released and different from the current,
7794 	 * we deduce we are in a state where we can not rely on usermode call
7795 	 * to turn on the display, so we do it here
7796 	 */
7797 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7798 		dm_force_atomic_commit(&aconnector->base);
7799 }
7800 
7801 /*
7802  * Grabs all modesetting locks to serialize against any blocking commits,
7803  * Waits for completion of all non blocking commits.
7804  */
7805 static int do_aquire_global_lock(struct drm_device *dev,
7806 				 struct drm_atomic_state *state)
7807 {
7808 	struct drm_crtc *crtc;
7809 	struct drm_crtc_commit *commit;
7810 	long ret;
7811 
7812 	/*
7813 	 * Adding all modeset locks to aquire_ctx will
7814 	 * ensure that when the framework release it the
7815 	 * extra locks we are locking here will get released to
7816 	 */
7817 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7818 	if (ret)
7819 		return ret;
7820 
7821 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7822 		spin_lock(&crtc->commit_lock);
7823 		commit = list_first_entry_or_null(&crtc->commit_list,
7824 				struct drm_crtc_commit, commit_entry);
7825 		if (commit)
7826 			drm_crtc_commit_get(commit);
7827 		spin_unlock(&crtc->commit_lock);
7828 
7829 		if (!commit)
7830 			continue;
7831 
7832 		/*
7833 		 * Make sure all pending HW programming completed and
7834 		 * page flips done
7835 		 */
7836 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7837 
7838 		if (ret > 0)
7839 			ret = wait_for_completion_interruptible_timeout(
7840 					&commit->flip_done, 10*HZ);
7841 
7842 		if (ret == 0)
7843 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7844 				  "timed out\n", crtc->base.id, crtc->name);
7845 
7846 		drm_crtc_commit_put(commit);
7847 	}
7848 
7849 	return ret < 0 ? ret : 0;
7850 }
7851 
7852 static void get_freesync_config_for_crtc(
7853 	struct dm_crtc_state *new_crtc_state,
7854 	struct dm_connector_state *new_con_state)
7855 {
7856 	struct mod_freesync_config config = {0};
7857 	struct amdgpu_dm_connector *aconnector =
7858 			to_amdgpu_dm_connector(new_con_state->base.connector);
7859 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7860 	int vrefresh = drm_mode_vrefresh(mode);
7861 
7862 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7863 					vrefresh >= aconnector->min_vfreq &&
7864 					vrefresh <= aconnector->max_vfreq;
7865 
7866 	if (new_crtc_state->vrr_supported) {
7867 		new_crtc_state->stream->ignore_msa_timing_param = true;
7868 		config.state = new_crtc_state->base.vrr_enabled ?
7869 				VRR_STATE_ACTIVE_VARIABLE :
7870 				VRR_STATE_INACTIVE;
7871 		config.min_refresh_in_uhz =
7872 				aconnector->min_vfreq * 1000000;
7873 		config.max_refresh_in_uhz =
7874 				aconnector->max_vfreq * 1000000;
7875 		config.vsif_supported = true;
7876 		config.btr = true;
7877 	}
7878 
7879 	new_crtc_state->freesync_config = config;
7880 }
7881 
7882 static void reset_freesync_config_for_crtc(
7883 	struct dm_crtc_state *new_crtc_state)
7884 {
7885 	new_crtc_state->vrr_supported = false;
7886 
7887 	memset(&new_crtc_state->vrr_params, 0,
7888 	       sizeof(new_crtc_state->vrr_params));
7889 	memset(&new_crtc_state->vrr_infopacket, 0,
7890 	       sizeof(new_crtc_state->vrr_infopacket));
7891 }
7892 
7893 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7894 				struct drm_atomic_state *state,
7895 				struct drm_crtc *crtc,
7896 				struct drm_crtc_state *old_crtc_state,
7897 				struct drm_crtc_state *new_crtc_state,
7898 				bool enable,
7899 				bool *lock_and_validation_needed)
7900 {
7901 	struct dm_atomic_state *dm_state = NULL;
7902 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7903 	struct dc_stream_state *new_stream;
7904 	int ret = 0;
7905 
7906 	/*
7907 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7908 	 * update changed items
7909 	 */
7910 	struct amdgpu_crtc *acrtc = NULL;
7911 	struct amdgpu_dm_connector *aconnector = NULL;
7912 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7913 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7914 
7915 	new_stream = NULL;
7916 
7917 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7918 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7919 	acrtc = to_amdgpu_crtc(crtc);
7920 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7921 
7922 	/* TODO This hack should go away */
7923 	if (aconnector && enable) {
7924 		/* Make sure fake sink is created in plug-in scenario */
7925 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7926 							    &aconnector->base);
7927 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7928 							    &aconnector->base);
7929 
7930 		if (IS_ERR(drm_new_conn_state)) {
7931 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7932 			goto fail;
7933 		}
7934 
7935 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7936 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7937 
7938 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7939 			goto skip_modeset;
7940 
7941 		new_stream = create_validate_stream_for_sink(aconnector,
7942 							     &new_crtc_state->mode,
7943 							     dm_new_conn_state,
7944 							     dm_old_crtc_state->stream);
7945 
7946 		/*
7947 		 * we can have no stream on ACTION_SET if a display
7948 		 * was disconnected during S3, in this case it is not an
7949 		 * error, the OS will be updated after detection, and
7950 		 * will do the right thing on next atomic commit
7951 		 */
7952 
7953 		if (!new_stream) {
7954 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7955 					__func__, acrtc->base.base.id);
7956 			ret = -ENOMEM;
7957 			goto fail;
7958 		}
7959 
7960 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7961 
7962 		ret = fill_hdr_info_packet(drm_new_conn_state,
7963 					   &new_stream->hdr_static_metadata);
7964 		if (ret)
7965 			goto fail;
7966 
7967 		/*
7968 		 * If we already removed the old stream from the context
7969 		 * (and set the new stream to NULL) then we can't reuse
7970 		 * the old stream even if the stream and scaling are unchanged.
7971 		 * We'll hit the BUG_ON and black screen.
7972 		 *
7973 		 * TODO: Refactor this function to allow this check to work
7974 		 * in all conditions.
7975 		 */
7976 		if (dm_new_crtc_state->stream &&
7977 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7978 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7979 			new_crtc_state->mode_changed = false;
7980 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7981 					 new_crtc_state->mode_changed);
7982 		}
7983 	}
7984 
7985 	/* mode_changed flag may get updated above, need to check again */
7986 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7987 		goto skip_modeset;
7988 
7989 	DRM_DEBUG_DRIVER(
7990 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7991 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7992 		"connectors_changed:%d\n",
7993 		acrtc->crtc_id,
7994 		new_crtc_state->enable,
7995 		new_crtc_state->active,
7996 		new_crtc_state->planes_changed,
7997 		new_crtc_state->mode_changed,
7998 		new_crtc_state->active_changed,
7999 		new_crtc_state->connectors_changed);
8000 
8001 	/* Remove stream for any changed/disabled CRTC */
8002 	if (!enable) {
8003 
8004 		if (!dm_old_crtc_state->stream)
8005 			goto skip_modeset;
8006 
8007 		ret = dm_atomic_get_state(state, &dm_state);
8008 		if (ret)
8009 			goto fail;
8010 
8011 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8012 				crtc->base.id);
8013 
8014 		/* i.e. reset mode */
8015 		if (dc_remove_stream_from_ctx(
8016 				dm->dc,
8017 				dm_state->context,
8018 				dm_old_crtc_state->stream) != DC_OK) {
8019 			ret = -EINVAL;
8020 			goto fail;
8021 		}
8022 
8023 		dc_stream_release(dm_old_crtc_state->stream);
8024 		dm_new_crtc_state->stream = NULL;
8025 
8026 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8027 
8028 		*lock_and_validation_needed = true;
8029 
8030 	} else {/* Add stream for any updated/enabled CRTC */
8031 		/*
8032 		 * Quick fix to prevent NULL pointer on new_stream when
8033 		 * added MST connectors not found in existing crtc_state in the chained mode
8034 		 * TODO: need to dig out the root cause of that
8035 		 */
8036 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8037 			goto skip_modeset;
8038 
8039 		if (modereset_required(new_crtc_state))
8040 			goto skip_modeset;
8041 
8042 		if (modeset_required(new_crtc_state, new_stream,
8043 				     dm_old_crtc_state->stream)) {
8044 
8045 			WARN_ON(dm_new_crtc_state->stream);
8046 
8047 			ret = dm_atomic_get_state(state, &dm_state);
8048 			if (ret)
8049 				goto fail;
8050 
8051 			dm_new_crtc_state->stream = new_stream;
8052 
8053 			dc_stream_retain(new_stream);
8054 
8055 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8056 						crtc->base.id);
8057 
8058 			if (dc_add_stream_to_ctx(
8059 					dm->dc,
8060 					dm_state->context,
8061 					dm_new_crtc_state->stream) != DC_OK) {
8062 				ret = -EINVAL;
8063 				goto fail;
8064 			}
8065 
8066 			*lock_and_validation_needed = true;
8067 		}
8068 	}
8069 
8070 skip_modeset:
8071 	/* Release extra reference */
8072 	if (new_stream)
8073 		 dc_stream_release(new_stream);
8074 
8075 	/*
8076 	 * We want to do dc stream updates that do not require a
8077 	 * full modeset below.
8078 	 */
8079 	if (!(enable && aconnector && new_crtc_state->enable &&
8080 	      new_crtc_state->active))
8081 		return 0;
8082 	/*
8083 	 * Given above conditions, the dc state cannot be NULL because:
8084 	 * 1. We're in the process of enabling CRTCs (just been added
8085 	 *    to the dc context, or already is on the context)
8086 	 * 2. Has a valid connector attached, and
8087 	 * 3. Is currently active and enabled.
8088 	 * => The dc stream state currently exists.
8089 	 */
8090 	BUG_ON(dm_new_crtc_state->stream == NULL);
8091 
8092 	/* Scaling or underscan settings */
8093 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8094 		update_stream_scaling_settings(
8095 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8096 
8097 	/* ABM settings */
8098 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8099 
8100 	/*
8101 	 * Color management settings. We also update color properties
8102 	 * when a modeset is needed, to ensure it gets reprogrammed.
8103 	 */
8104 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8105 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8106 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8107 		if (ret)
8108 			goto fail;
8109 	}
8110 
8111 	/* Update Freesync settings. */
8112 	get_freesync_config_for_crtc(dm_new_crtc_state,
8113 				     dm_new_conn_state);
8114 
8115 	return ret;
8116 
8117 fail:
8118 	if (new_stream)
8119 		dc_stream_release(new_stream);
8120 	return ret;
8121 }
8122 
8123 static bool should_reset_plane(struct drm_atomic_state *state,
8124 			       struct drm_plane *plane,
8125 			       struct drm_plane_state *old_plane_state,
8126 			       struct drm_plane_state *new_plane_state)
8127 {
8128 	struct drm_plane *other;
8129 	struct drm_plane_state *old_other_state, *new_other_state;
8130 	struct drm_crtc_state *new_crtc_state;
8131 	int i;
8132 
8133 	/*
8134 	 * TODO: Remove this hack once the checks below are sufficient
8135 	 * enough to determine when we need to reset all the planes on
8136 	 * the stream.
8137 	 */
8138 	if (state->allow_modeset)
8139 		return true;
8140 
8141 	/* Exit early if we know that we're adding or removing the plane. */
8142 	if (old_plane_state->crtc != new_plane_state->crtc)
8143 		return true;
8144 
8145 	/* old crtc == new_crtc == NULL, plane not in context. */
8146 	if (!new_plane_state->crtc)
8147 		return false;
8148 
8149 	new_crtc_state =
8150 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8151 
8152 	if (!new_crtc_state)
8153 		return true;
8154 
8155 	/* CRTC Degamma changes currently require us to recreate planes. */
8156 	if (new_crtc_state->color_mgmt_changed)
8157 		return true;
8158 
8159 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8160 		return true;
8161 
8162 	/*
8163 	 * If there are any new primary or overlay planes being added or
8164 	 * removed then the z-order can potentially change. To ensure
8165 	 * correct z-order and pipe acquisition the current DC architecture
8166 	 * requires us to remove and recreate all existing planes.
8167 	 *
8168 	 * TODO: Come up with a more elegant solution for this.
8169 	 */
8170 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8171 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8172 			continue;
8173 
8174 		if (old_other_state->crtc != new_plane_state->crtc &&
8175 		    new_other_state->crtc != new_plane_state->crtc)
8176 			continue;
8177 
8178 		if (old_other_state->crtc != new_other_state->crtc)
8179 			return true;
8180 
8181 		/* TODO: Remove this once we can handle fast format changes. */
8182 		if (old_other_state->fb && new_other_state->fb &&
8183 		    old_other_state->fb->format != new_other_state->fb->format)
8184 			return true;
8185 	}
8186 
8187 	return false;
8188 }
8189 
8190 static int dm_update_plane_state(struct dc *dc,
8191 				 struct drm_atomic_state *state,
8192 				 struct drm_plane *plane,
8193 				 struct drm_plane_state *old_plane_state,
8194 				 struct drm_plane_state *new_plane_state,
8195 				 bool enable,
8196 				 bool *lock_and_validation_needed)
8197 {
8198 
8199 	struct dm_atomic_state *dm_state = NULL;
8200 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8201 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8202 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8203 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8204 	struct amdgpu_crtc *new_acrtc;
8205 	bool needs_reset;
8206 	int ret = 0;
8207 
8208 
8209 	new_plane_crtc = new_plane_state->crtc;
8210 	old_plane_crtc = old_plane_state->crtc;
8211 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8212 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8213 
8214 	/*TODO Implement better atomic check for cursor plane */
8215 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8216 		if (!enable || !new_plane_crtc ||
8217 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8218 			return 0;
8219 
8220 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8221 
8222 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8223 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8224 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8225 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8226 			return -EINVAL;
8227 		}
8228 
8229 		return 0;
8230 	}
8231 
8232 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8233 					 new_plane_state);
8234 
8235 	/* Remove any changed/removed planes */
8236 	if (!enable) {
8237 		if (!needs_reset)
8238 			return 0;
8239 
8240 		if (!old_plane_crtc)
8241 			return 0;
8242 
8243 		old_crtc_state = drm_atomic_get_old_crtc_state(
8244 				state, old_plane_crtc);
8245 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8246 
8247 		if (!dm_old_crtc_state->stream)
8248 			return 0;
8249 
8250 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8251 				plane->base.id, old_plane_crtc->base.id);
8252 
8253 		ret = dm_atomic_get_state(state, &dm_state);
8254 		if (ret)
8255 			return ret;
8256 
8257 		if (!dc_remove_plane_from_context(
8258 				dc,
8259 				dm_old_crtc_state->stream,
8260 				dm_old_plane_state->dc_state,
8261 				dm_state->context)) {
8262 
8263 			ret = EINVAL;
8264 			return ret;
8265 		}
8266 
8267 
8268 		dc_plane_state_release(dm_old_plane_state->dc_state);
8269 		dm_new_plane_state->dc_state = NULL;
8270 
8271 		*lock_and_validation_needed = true;
8272 
8273 	} else { /* Add new planes */
8274 		struct dc_plane_state *dc_new_plane_state;
8275 
8276 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8277 			return 0;
8278 
8279 		if (!new_plane_crtc)
8280 			return 0;
8281 
8282 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8283 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8284 
8285 		if (!dm_new_crtc_state->stream)
8286 			return 0;
8287 
8288 		if (!needs_reset)
8289 			return 0;
8290 
8291 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8292 		if (ret)
8293 			return ret;
8294 
8295 		WARN_ON(dm_new_plane_state->dc_state);
8296 
8297 		dc_new_plane_state = dc_create_plane_state(dc);
8298 		if (!dc_new_plane_state)
8299 			return -ENOMEM;
8300 
8301 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8302 				plane->base.id, new_plane_crtc->base.id);
8303 
8304 		ret = fill_dc_plane_attributes(
8305 			new_plane_crtc->dev->dev_private,
8306 			dc_new_plane_state,
8307 			new_plane_state,
8308 			new_crtc_state);
8309 		if (ret) {
8310 			dc_plane_state_release(dc_new_plane_state);
8311 			return ret;
8312 		}
8313 
8314 		ret = dm_atomic_get_state(state, &dm_state);
8315 		if (ret) {
8316 			dc_plane_state_release(dc_new_plane_state);
8317 			return ret;
8318 		}
8319 
8320 		/*
8321 		 * Any atomic check errors that occur after this will
8322 		 * not need a release. The plane state will be attached
8323 		 * to the stream, and therefore part of the atomic
8324 		 * state. It'll be released when the atomic state is
8325 		 * cleaned.
8326 		 */
8327 		if (!dc_add_plane_to_context(
8328 				dc,
8329 				dm_new_crtc_state->stream,
8330 				dc_new_plane_state,
8331 				dm_state->context)) {
8332 
8333 			dc_plane_state_release(dc_new_plane_state);
8334 			return -EINVAL;
8335 		}
8336 
8337 		dm_new_plane_state->dc_state = dc_new_plane_state;
8338 
8339 		/* Tell DC to do a full surface update every time there
8340 		 * is a plane change. Inefficient, but works for now.
8341 		 */
8342 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8343 
8344 		*lock_and_validation_needed = true;
8345 	}
8346 
8347 
8348 	return ret;
8349 }
8350 
8351 static int
8352 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8353 				    struct drm_atomic_state *state,
8354 				    enum surface_update_type *out_type)
8355 {
8356 	struct dc *dc = dm->dc;
8357 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8358 	int i, j, num_plane, ret = 0;
8359 	struct drm_plane_state *old_plane_state, *new_plane_state;
8360 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8361 	struct drm_crtc *new_plane_crtc;
8362 	struct drm_plane *plane;
8363 
8364 	struct drm_crtc *crtc;
8365 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8366 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8367 	struct dc_stream_status *status = NULL;
8368 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8369 	struct surface_info_bundle {
8370 		struct dc_surface_update surface_updates[MAX_SURFACES];
8371 		struct dc_plane_info plane_infos[MAX_SURFACES];
8372 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8373 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8374 		struct dc_stream_update stream_update;
8375 	} *bundle;
8376 
8377 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8378 
8379 	if (!bundle) {
8380 		DRM_ERROR("Failed to allocate update bundle\n");
8381 		/* Set type to FULL to avoid crashing in DC*/
8382 		update_type = UPDATE_TYPE_FULL;
8383 		goto cleanup;
8384 	}
8385 
8386 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8387 
8388 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8389 
8390 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8391 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8392 		num_plane = 0;
8393 
8394 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8395 			update_type = UPDATE_TYPE_FULL;
8396 			goto cleanup;
8397 		}
8398 
8399 		if (!new_dm_crtc_state->stream)
8400 			continue;
8401 
8402 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8403 			const struct amdgpu_framebuffer *amdgpu_fb =
8404 				to_amdgpu_framebuffer(new_plane_state->fb);
8405 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8406 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8407 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8408 			uint64_t tiling_flags;
8409 			bool tmz_surface = false;
8410 
8411 			new_plane_crtc = new_plane_state->crtc;
8412 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8413 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8414 
8415 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8416 				continue;
8417 
8418 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8419 				update_type = UPDATE_TYPE_FULL;
8420 				goto cleanup;
8421 			}
8422 
8423 			if (crtc != new_plane_crtc)
8424 				continue;
8425 
8426 			bundle->surface_updates[num_plane].surface =
8427 					new_dm_plane_state->dc_state;
8428 
8429 			if (new_crtc_state->mode_changed) {
8430 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8431 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8432 			}
8433 
8434 			if (new_crtc_state->color_mgmt_changed) {
8435 				bundle->surface_updates[num_plane].gamma =
8436 						new_dm_plane_state->dc_state->gamma_correction;
8437 				bundle->surface_updates[num_plane].in_transfer_func =
8438 						new_dm_plane_state->dc_state->in_transfer_func;
8439 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8440 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8441 				bundle->stream_update.gamut_remap =
8442 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8443 				bundle->stream_update.output_csc_transform =
8444 						&new_dm_crtc_state->stream->csc_color_matrix;
8445 				bundle->stream_update.out_transfer_func =
8446 						new_dm_crtc_state->stream->out_transfer_func;
8447 			}
8448 
8449 			ret = fill_dc_scaling_info(new_plane_state,
8450 						   scaling_info);
8451 			if (ret)
8452 				goto cleanup;
8453 
8454 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8455 
8456 			if (amdgpu_fb) {
8457 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8458 				if (ret)
8459 					goto cleanup;
8460 
8461 				ret = fill_dc_plane_info_and_addr(
8462 					dm->adev, new_plane_state, tiling_flags,
8463 					plane_info,
8464 					&flip_addr->address, tmz_surface,
8465 					false);
8466 				if (ret)
8467 					goto cleanup;
8468 
8469 				bundle->surface_updates[num_plane].plane_info = plane_info;
8470 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8471 			}
8472 
8473 			num_plane++;
8474 		}
8475 
8476 		if (num_plane == 0)
8477 			continue;
8478 
8479 		ret = dm_atomic_get_state(state, &dm_state);
8480 		if (ret)
8481 			goto cleanup;
8482 
8483 		old_dm_state = dm_atomic_get_old_state(state);
8484 		if (!old_dm_state) {
8485 			ret = -EINVAL;
8486 			goto cleanup;
8487 		}
8488 
8489 		status = dc_stream_get_status_from_state(old_dm_state->context,
8490 							 new_dm_crtc_state->stream);
8491 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8492 		/*
8493 		 * TODO: DC modifies the surface during this call so we need
8494 		 * to lock here - find a way to do this without locking.
8495 		 */
8496 		mutex_lock(&dm->dc_lock);
8497 		update_type = dc_check_update_surfaces_for_stream(
8498 				dc,	bundle->surface_updates, num_plane,
8499 				&bundle->stream_update, status);
8500 		mutex_unlock(&dm->dc_lock);
8501 
8502 		if (update_type > UPDATE_TYPE_MED) {
8503 			update_type = UPDATE_TYPE_FULL;
8504 			goto cleanup;
8505 		}
8506 	}
8507 
8508 cleanup:
8509 	kfree(bundle);
8510 
8511 	*out_type = update_type;
8512 	return ret;
8513 }
8514 #if defined(CONFIG_DRM_AMD_DC_DCN)
8515 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8516 {
8517 	struct drm_connector *connector;
8518 	struct drm_connector_state *conn_state;
8519 	struct amdgpu_dm_connector *aconnector = NULL;
8520 	int i;
8521 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8522 		if (conn_state->crtc != crtc)
8523 			continue;
8524 
8525 		aconnector = to_amdgpu_dm_connector(connector);
8526 		if (!aconnector->port || !aconnector->mst_port)
8527 			aconnector = NULL;
8528 		else
8529 			break;
8530 	}
8531 
8532 	if (!aconnector)
8533 		return 0;
8534 
8535 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8536 }
8537 #endif
8538 
8539 /**
8540  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8541  * @dev: The DRM device
8542  * @state: The atomic state to commit
8543  *
8544  * Validate that the given atomic state is programmable by DC into hardware.
8545  * This involves constructing a &struct dc_state reflecting the new hardware
8546  * state we wish to commit, then querying DC to see if it is programmable. It's
8547  * important not to modify the existing DC state. Otherwise, atomic_check
8548  * may unexpectedly commit hardware changes.
8549  *
8550  * When validating the DC state, it's important that the right locks are
8551  * acquired. For full updates case which removes/adds/updates streams on one
8552  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8553  * that any such full update commit will wait for completion of any outstanding
8554  * flip using DRMs synchronization events. See
8555  * dm_determine_update_type_for_commit()
8556  *
8557  * Note that DM adds the affected connectors for all CRTCs in state, when that
8558  * might not seem necessary. This is because DC stream creation requires the
8559  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8560  * be possible but non-trivial - a possible TODO item.
8561  *
8562  * Return: -Error code if validation failed.
8563  */
8564 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8565 				  struct drm_atomic_state *state)
8566 {
8567 	struct amdgpu_device *adev = dev->dev_private;
8568 	struct dm_atomic_state *dm_state = NULL;
8569 	struct dc *dc = adev->dm.dc;
8570 	struct drm_connector *connector;
8571 	struct drm_connector_state *old_con_state, *new_con_state;
8572 	struct drm_crtc *crtc;
8573 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8574 	struct drm_plane *plane;
8575 	struct drm_plane_state *old_plane_state, *new_plane_state;
8576 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8577 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8578 	enum dc_status status;
8579 	int ret, i;
8580 
8581 	/*
8582 	 * This bool will be set for true for any modeset/reset
8583 	 * or plane update which implies non fast surface update.
8584 	 */
8585 	bool lock_and_validation_needed = false;
8586 
8587 	ret = drm_atomic_helper_check_modeset(dev, state);
8588 	if (ret)
8589 		goto fail;
8590 
8591 	/* Check connector changes */
8592 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8593 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8594 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8595 
8596 		/* Skip connectors that are disabled or part of modeset already. */
8597 		if (!old_con_state->crtc && !new_con_state->crtc)
8598 			continue;
8599 
8600 		if (!new_con_state->crtc)
8601 			continue;
8602 
8603 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8604 		if (IS_ERR(new_crtc_state)) {
8605 			ret = PTR_ERR(new_crtc_state);
8606 			goto fail;
8607 		}
8608 
8609 		if (dm_old_con_state->abm_level !=
8610 		    dm_new_con_state->abm_level)
8611 			new_crtc_state->connectors_changed = true;
8612 	}
8613 
8614 #if defined(CONFIG_DRM_AMD_DC_DCN)
8615 	if (adev->asic_type >= CHIP_NAVI10) {
8616 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8617 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8618 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8619 				if (ret)
8620 					goto fail;
8621 			}
8622 		}
8623 	}
8624 #endif
8625 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8626 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8627 		    !new_crtc_state->color_mgmt_changed &&
8628 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8629 			continue;
8630 
8631 		if (!new_crtc_state->enable)
8632 			continue;
8633 
8634 		ret = drm_atomic_add_affected_connectors(state, crtc);
8635 		if (ret)
8636 			return ret;
8637 
8638 		ret = drm_atomic_add_affected_planes(state, crtc);
8639 		if (ret)
8640 			goto fail;
8641 	}
8642 
8643 	/*
8644 	 * Add all primary and overlay planes on the CRTC to the state
8645 	 * whenever a plane is enabled to maintain correct z-ordering
8646 	 * and to enable fast surface updates.
8647 	 */
8648 	drm_for_each_crtc(crtc, dev) {
8649 		bool modified = false;
8650 
8651 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8652 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8653 				continue;
8654 
8655 			if (new_plane_state->crtc == crtc ||
8656 			    old_plane_state->crtc == crtc) {
8657 				modified = true;
8658 				break;
8659 			}
8660 		}
8661 
8662 		if (!modified)
8663 			continue;
8664 
8665 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8666 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8667 				continue;
8668 
8669 			new_plane_state =
8670 				drm_atomic_get_plane_state(state, plane);
8671 
8672 			if (IS_ERR(new_plane_state)) {
8673 				ret = PTR_ERR(new_plane_state);
8674 				goto fail;
8675 			}
8676 		}
8677 	}
8678 
8679 	/* Remove exiting planes if they are modified */
8680 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8681 		ret = dm_update_plane_state(dc, state, plane,
8682 					    old_plane_state,
8683 					    new_plane_state,
8684 					    false,
8685 					    &lock_and_validation_needed);
8686 		if (ret)
8687 			goto fail;
8688 	}
8689 
8690 	/* Disable all crtcs which require disable */
8691 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8692 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8693 					   old_crtc_state,
8694 					   new_crtc_state,
8695 					   false,
8696 					   &lock_and_validation_needed);
8697 		if (ret)
8698 			goto fail;
8699 	}
8700 
8701 	/* Enable all crtcs which require enable */
8702 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8703 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8704 					   old_crtc_state,
8705 					   new_crtc_state,
8706 					   true,
8707 					   &lock_and_validation_needed);
8708 		if (ret)
8709 			goto fail;
8710 	}
8711 
8712 	/* Add new/modified planes */
8713 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8714 		ret = dm_update_plane_state(dc, state, plane,
8715 					    old_plane_state,
8716 					    new_plane_state,
8717 					    true,
8718 					    &lock_and_validation_needed);
8719 		if (ret)
8720 			goto fail;
8721 	}
8722 
8723 	/* Run this here since we want to validate the streams we created */
8724 	ret = drm_atomic_helper_check_planes(dev, state);
8725 	if (ret)
8726 		goto fail;
8727 
8728 	if (state->legacy_cursor_update) {
8729 		/*
8730 		 * This is a fast cursor update coming from the plane update
8731 		 * helper, check if it can be done asynchronously for better
8732 		 * performance.
8733 		 */
8734 		state->async_update =
8735 			!drm_atomic_helper_async_check(dev, state);
8736 
8737 		/*
8738 		 * Skip the remaining global validation if this is an async
8739 		 * update. Cursor updates can be done without affecting
8740 		 * state or bandwidth calcs and this avoids the performance
8741 		 * penalty of locking the private state object and
8742 		 * allocating a new dc_state.
8743 		 */
8744 		if (state->async_update)
8745 			return 0;
8746 	}
8747 
8748 	/* Check scaling and underscan changes*/
8749 	/* TODO Removed scaling changes validation due to inability to commit
8750 	 * new stream into context w\o causing full reset. Need to
8751 	 * decide how to handle.
8752 	 */
8753 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8754 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8755 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8756 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8757 
8758 		/* Skip any modesets/resets */
8759 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8760 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8761 			continue;
8762 
8763 		/* Skip any thing not scale or underscan changes */
8764 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8765 			continue;
8766 
8767 		overall_update_type = UPDATE_TYPE_FULL;
8768 		lock_and_validation_needed = true;
8769 	}
8770 
8771 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8772 	if (ret)
8773 		goto fail;
8774 
8775 	if (overall_update_type < update_type)
8776 		overall_update_type = update_type;
8777 
8778 	/*
8779 	 * lock_and_validation_needed was an old way to determine if we need to set
8780 	 * the global lock. Leaving it in to check if we broke any corner cases
8781 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8782 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8783 	 */
8784 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8785 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8786 
8787 	if (overall_update_type > UPDATE_TYPE_FAST) {
8788 		ret = dm_atomic_get_state(state, &dm_state);
8789 		if (ret)
8790 			goto fail;
8791 
8792 		ret = do_aquire_global_lock(dev, state);
8793 		if (ret)
8794 			goto fail;
8795 
8796 #if defined(CONFIG_DRM_AMD_DC_DCN)
8797 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8798 			goto fail;
8799 
8800 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8801 		if (ret)
8802 			goto fail;
8803 #endif
8804 
8805 		/*
8806 		 * Perform validation of MST topology in the state:
8807 		 * We need to perform MST atomic check before calling
8808 		 * dc_validate_global_state(), or there is a chance
8809 		 * to get stuck in an infinite loop and hang eventually.
8810 		 */
8811 		ret = drm_dp_mst_atomic_check(state);
8812 		if (ret)
8813 			goto fail;
8814 		status = dc_validate_global_state(dc, dm_state->context, false);
8815 		if (status != DC_OK) {
8816 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8817 				       dc_status_to_str(status), status);
8818 			ret = -EINVAL;
8819 			goto fail;
8820 		}
8821 	} else {
8822 		/*
8823 		 * The commit is a fast update. Fast updates shouldn't change
8824 		 * the DC context, affect global validation, and can have their
8825 		 * commit work done in parallel with other commits not touching
8826 		 * the same resource. If we have a new DC context as part of
8827 		 * the DM atomic state from validation we need to free it and
8828 		 * retain the existing one instead.
8829 		 *
8830 		 * Furthermore, since the DM atomic state only contains the DC
8831 		 * context and can safely be annulled, we can free the state
8832 		 * and clear the associated private object now to free
8833 		 * some memory and avoid a possible use-after-free later.
8834 		 */
8835 
8836 		for (i = 0; i < state->num_private_objs; i++) {
8837 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8838 
8839 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8840 				int j = state->num_private_objs-1;
8841 
8842 				dm_atomic_destroy_state(obj,
8843 						state->private_objs[i].state);
8844 
8845 				/* If i is not at the end of the array then the
8846 				 * last element needs to be moved to where i was
8847 				 * before the array can safely be truncated.
8848 				 */
8849 				if (i != j)
8850 					state->private_objs[i] =
8851 						state->private_objs[j];
8852 
8853 				state->private_objs[j].ptr = NULL;
8854 				state->private_objs[j].state = NULL;
8855 				state->private_objs[j].old_state = NULL;
8856 				state->private_objs[j].new_state = NULL;
8857 
8858 				state->num_private_objs = j;
8859 				break;
8860 			}
8861 		}
8862 	}
8863 
8864 	/* Store the overall update type for use later in atomic check. */
8865 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8866 		struct dm_crtc_state *dm_new_crtc_state =
8867 			to_dm_crtc_state(new_crtc_state);
8868 
8869 		dm_new_crtc_state->update_type = (int)overall_update_type;
8870 	}
8871 
8872 	/* Must be success */
8873 	WARN_ON(ret);
8874 	return ret;
8875 
8876 fail:
8877 	if (ret == -EDEADLK)
8878 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8879 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8880 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8881 	else
8882 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8883 
8884 	return ret;
8885 }
8886 
8887 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8888 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8889 {
8890 	uint8_t dpcd_data;
8891 	bool capable = false;
8892 
8893 	if (amdgpu_dm_connector->dc_link &&
8894 		dm_helpers_dp_read_dpcd(
8895 				NULL,
8896 				amdgpu_dm_connector->dc_link,
8897 				DP_DOWN_STREAM_PORT_COUNT,
8898 				&dpcd_data,
8899 				sizeof(dpcd_data))) {
8900 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8901 	}
8902 
8903 	return capable;
8904 }
8905 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8906 					struct edid *edid)
8907 {
8908 	int i;
8909 	bool edid_check_required;
8910 	struct detailed_timing *timing;
8911 	struct detailed_non_pixel *data;
8912 	struct detailed_data_monitor_range *range;
8913 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8914 			to_amdgpu_dm_connector(connector);
8915 	struct dm_connector_state *dm_con_state = NULL;
8916 
8917 	struct drm_device *dev = connector->dev;
8918 	struct amdgpu_device *adev = dev->dev_private;
8919 	bool freesync_capable = false;
8920 
8921 	if (!connector->state) {
8922 		DRM_ERROR("%s - Connector has no state", __func__);
8923 		goto update;
8924 	}
8925 
8926 	if (!edid) {
8927 		dm_con_state = to_dm_connector_state(connector->state);
8928 
8929 		amdgpu_dm_connector->min_vfreq = 0;
8930 		amdgpu_dm_connector->max_vfreq = 0;
8931 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8932 
8933 		goto update;
8934 	}
8935 
8936 	dm_con_state = to_dm_connector_state(connector->state);
8937 
8938 	edid_check_required = false;
8939 	if (!amdgpu_dm_connector->dc_sink) {
8940 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8941 		goto update;
8942 	}
8943 	if (!adev->dm.freesync_module)
8944 		goto update;
8945 	/*
8946 	 * if edid non zero restrict freesync only for dp and edp
8947 	 */
8948 	if (edid) {
8949 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8950 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8951 			edid_check_required = is_dp_capable_without_timing_msa(
8952 						adev->dm.dc,
8953 						amdgpu_dm_connector);
8954 		}
8955 	}
8956 	if (edid_check_required == true && (edid->version > 1 ||
8957 	   (edid->version == 1 && edid->revision > 1))) {
8958 		for (i = 0; i < 4; i++) {
8959 
8960 			timing	= &edid->detailed_timings[i];
8961 			data	= &timing->data.other_data;
8962 			range	= &data->data.range;
8963 			/*
8964 			 * Check if monitor has continuous frequency mode
8965 			 */
8966 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8967 				continue;
8968 			/*
8969 			 * Check for flag range limits only. If flag == 1 then
8970 			 * no additional timing information provided.
8971 			 * Default GTF, GTF Secondary curve and CVT are not
8972 			 * supported
8973 			 */
8974 			if (range->flags != 1)
8975 				continue;
8976 
8977 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8978 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8979 			amdgpu_dm_connector->pixel_clock_mhz =
8980 				range->pixel_clock_mhz * 10;
8981 			break;
8982 		}
8983 
8984 		if (amdgpu_dm_connector->max_vfreq -
8985 		    amdgpu_dm_connector->min_vfreq > 10) {
8986 
8987 			freesync_capable = true;
8988 		}
8989 	}
8990 
8991 update:
8992 	if (dm_con_state)
8993 		dm_con_state->freesync_capable = freesync_capable;
8994 
8995 	if (connector->vrr_capable_property)
8996 		drm_connector_set_vrr_capable_property(connector,
8997 						       freesync_capable);
8998 }
8999 
9000 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9001 {
9002 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9003 
9004 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9005 		return;
9006 	if (link->type == dc_connection_none)
9007 		return;
9008 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9009 					dpcd_data, sizeof(dpcd_data))) {
9010 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9011 
9012 		if (dpcd_data[0] == 0) {
9013 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9014 			link->psr_settings.psr_feature_enabled = false;
9015 		} else {
9016 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9017 			link->psr_settings.psr_feature_enabled = true;
9018 		}
9019 
9020 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9021 	}
9022 }
9023 
9024 /*
9025  * amdgpu_dm_link_setup_psr() - configure psr link
9026  * @stream: stream state
9027  *
9028  * Return: true if success
9029  */
9030 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9031 {
9032 	struct dc_link *link = NULL;
9033 	struct psr_config psr_config = {0};
9034 	struct psr_context psr_context = {0};
9035 	bool ret = false;
9036 
9037 	if (stream == NULL)
9038 		return false;
9039 
9040 	link = stream->link;
9041 
9042 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9043 
9044 	if (psr_config.psr_version > 0) {
9045 		psr_config.psr_exit_link_training_required = 0x1;
9046 		psr_config.psr_frame_capture_indication_req = 0;
9047 		psr_config.psr_rfb_setup_time = 0x37;
9048 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9049 		psr_config.allow_smu_optimizations = 0x0;
9050 
9051 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9052 
9053 	}
9054 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9055 
9056 	return ret;
9057 }
9058 
9059 /*
9060  * amdgpu_dm_psr_enable() - enable psr f/w
9061  * @stream: stream state
9062  *
9063  * Return: true if success
9064  */
9065 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9066 {
9067 	struct dc_link *link = stream->link;
9068 	unsigned int vsync_rate_hz = 0;
9069 	struct dc_static_screen_params params = {0};
9070 	/* Calculate number of static frames before generating interrupt to
9071 	 * enter PSR.
9072 	 */
9073 	// Init fail safe of 2 frames static
9074 	unsigned int num_frames_static = 2;
9075 
9076 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9077 
9078 	vsync_rate_hz = div64_u64(div64_u64((
9079 			stream->timing.pix_clk_100hz * 100),
9080 			stream->timing.v_total),
9081 			stream->timing.h_total);
9082 
9083 	/* Round up
9084 	 * Calculate number of frames such that at least 30 ms of time has
9085 	 * passed.
9086 	 */
9087 	if (vsync_rate_hz != 0) {
9088 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9089 		num_frames_static = (30000 / frame_time_microsec) + 1;
9090 	}
9091 
9092 	params.triggers.cursor_update = true;
9093 	params.triggers.overlay_update = true;
9094 	params.triggers.surface_update = true;
9095 	params.num_frames = num_frames_static;
9096 
9097 	dc_stream_set_static_screen_params(link->ctx->dc,
9098 					   &stream, 1,
9099 					   &params);
9100 
9101 	return dc_link_set_psr_allow_active(link, true, false);
9102 }
9103 
9104 /*
9105  * amdgpu_dm_psr_disable() - disable psr f/w
9106  * @stream:  stream state
9107  *
9108  * Return: true if success
9109  */
9110 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9111 {
9112 
9113 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9114 
9115 	return dc_link_set_psr_allow_active(stream->link, false, true);
9116 }
9117