1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
131 {
132 	switch (link->dpcd_caps.dongle_type) {
133 	case DISPLAY_DONGLE_NONE:
134 		return DRM_MODE_SUBCONNECTOR_Native;
135 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
136 		return DRM_MODE_SUBCONNECTOR_VGA;
137 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
138 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
139 		return DRM_MODE_SUBCONNECTOR_DVID;
140 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
141 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
142 		return DRM_MODE_SUBCONNECTOR_HDMIA;
143 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
144 	default:
145 		return DRM_MODE_SUBCONNECTOR_Unknown;
146 	}
147 }
148 
149 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
150 {
151 	struct dc_link *link = aconnector->dc_link;
152 	struct drm_connector *connector = &aconnector->base;
153 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
154 
155 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
156 		return;
157 
158 	if (aconnector->dc_sink)
159 		subconnector = get_subconnector_type(link);
160 
161 	drm_object_property_set_value(&connector->base,
162 			connector->dev->mode_config.dp_subconnector_property,
163 			subconnector);
164 }
165 
166 /*
167  * initializes drm_device display related structures, based on the information
168  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
169  * drm_encoder, drm_mode_config
170  *
171  * Returns 0 on success
172  */
173 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
174 /* removes and deallocates the drm structures, created by the above function */
175 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
176 
177 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
178 				struct drm_plane *plane,
179 				unsigned long possible_crtcs,
180 				const struct dc_plane_cap *plane_cap);
181 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
182 			       struct drm_plane *plane,
183 			       uint32_t link_index);
184 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
185 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
186 				    uint32_t link_index,
187 				    struct amdgpu_encoder *amdgpu_encoder);
188 static int amdgpu_dm_encoder_init(struct drm_device *dev,
189 				  struct amdgpu_encoder *aencoder,
190 				  uint32_t link_index);
191 
192 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
193 
194 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
195 				   struct drm_atomic_state *state,
196 				   bool nonblock);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211 
212 /*
213  * dm_vblank_get_counter
214  *
215  * @brief
216  * Get counter for number of vertical blanks
217  *
218  * @param
219  * struct amdgpu_device *adev - [in] desired amdgpu device
220  * int disp_idx - [in] which CRTC to get the counter from
221  *
222  * @return
223  * Counter for vertical blanks
224  */
225 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
226 {
227 	if (crtc >= adev->mode_info.num_crtc)
228 		return 0;
229 	else {
230 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
231 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
232 				acrtc->base.state);
233 
234 
235 		if (acrtc_state->stream == NULL) {
236 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237 				  crtc);
238 			return 0;
239 		}
240 
241 		return dc_stream_get_vblank_counter(acrtc_state->stream);
242 	}
243 }
244 
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 				  u32 *vbl, u32 *position)
247 {
248 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
249 
250 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251 		return -EINVAL;
252 	else {
253 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
255 						acrtc->base.state);
256 
257 		if (acrtc_state->stream ==  NULL) {
258 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
259 				  crtc);
260 			return 0;
261 		}
262 
263 		/*
264 		 * TODO rework base driver to use values directly.
265 		 * for now parse it back into reg-format
266 		 */
267 		dc_stream_get_scanoutpos(acrtc_state->stream,
268 					 &v_blank_start,
269 					 &v_blank_end,
270 					 &h_position,
271 					 &v_position);
272 
273 		*position = v_position | (h_position << 16);
274 		*vbl = v_blank_start | (v_blank_end << 16);
275 	}
276 
277 	return 0;
278 }
279 
280 static bool dm_is_idle(void *handle)
281 {
282 	/* XXX todo */
283 	return true;
284 }
285 
286 static int dm_wait_for_idle(void *handle)
287 {
288 	/* XXX todo */
289 	return 0;
290 }
291 
292 static bool dm_check_soft_reset(void *handle)
293 {
294 	return false;
295 }
296 
297 static int dm_soft_reset(void *handle)
298 {
299 	/* XXX todo */
300 	return 0;
301 }
302 
303 static struct amdgpu_crtc *
304 get_crtc_by_otg_inst(struct amdgpu_device *adev,
305 		     int otg_inst)
306 {
307 	struct drm_device *dev = adev_to_drm(adev);
308 	struct drm_crtc *crtc;
309 	struct amdgpu_crtc *amdgpu_crtc;
310 
311 	if (otg_inst == -1) {
312 		WARN_ON(1);
313 		return adev->mode_info.crtcs[0];
314 	}
315 
316 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
317 		amdgpu_crtc = to_amdgpu_crtc(crtc);
318 
319 		if (amdgpu_crtc->otg_inst == otg_inst)
320 			return amdgpu_crtc;
321 	}
322 
323 	return NULL;
324 }
325 
326 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
327 {
328 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
329 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
330 }
331 
332 /**
333  * dm_pflip_high_irq() - Handle pageflip interrupt
334  * @interrupt_params: ignored
335  *
336  * Handles the pageflip interrupt by notifying all interested parties
337  * that the pageflip has been completed.
338  */
339 static void dm_pflip_high_irq(void *interrupt_params)
340 {
341 	struct amdgpu_crtc *amdgpu_crtc;
342 	struct common_irq_params *irq_params = interrupt_params;
343 	struct amdgpu_device *adev = irq_params->adev;
344 	unsigned long flags;
345 	struct drm_pending_vblank_event *e;
346 	struct dm_crtc_state *acrtc_state;
347 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
348 	bool vrr_active;
349 
350 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
351 
352 	/* IRQ could occur when in initial stage */
353 	/* TODO work and BO cleanup */
354 	if (amdgpu_crtc == NULL) {
355 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
356 		return;
357 	}
358 
359 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
360 
361 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
362 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
363 						 amdgpu_crtc->pflip_status,
364 						 AMDGPU_FLIP_SUBMITTED,
365 						 amdgpu_crtc->crtc_id,
366 						 amdgpu_crtc);
367 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
368 		return;
369 	}
370 
371 	/* page flip completed. */
372 	e = amdgpu_crtc->event;
373 	amdgpu_crtc->event = NULL;
374 
375 	if (!e)
376 		WARN_ON(1);
377 
378 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
379 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
380 
381 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
382 	if (!vrr_active ||
383 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
384 				      &v_blank_end, &hpos, &vpos) ||
385 	    (vpos < v_blank_start)) {
386 		/* Update to correct count and vblank timestamp if racing with
387 		 * vblank irq. This also updates to the correct vblank timestamp
388 		 * even in VRR mode, as scanout is past the front-porch atm.
389 		 */
390 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
391 
392 		/* Wake up userspace by sending the pageflip event with proper
393 		 * count and timestamp of vblank of flip completion.
394 		 */
395 		if (e) {
396 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
397 
398 			/* Event sent, so done with vblank for this flip */
399 			drm_crtc_vblank_put(&amdgpu_crtc->base);
400 		}
401 	} else if (e) {
402 		/* VRR active and inside front-porch: vblank count and
403 		 * timestamp for pageflip event will only be up to date after
404 		 * drm_crtc_handle_vblank() has been executed from late vblank
405 		 * irq handler after start of back-porch (vline 0). We queue the
406 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
407 		 * updated timestamp and count, once it runs after us.
408 		 *
409 		 * We need to open-code this instead of using the helper
410 		 * drm_crtc_arm_vblank_event(), as that helper would
411 		 * call drm_crtc_accurate_vblank_count(), which we must
412 		 * not call in VRR mode while we are in front-porch!
413 		 */
414 
415 		/* sequence will be replaced by real count during send-out. */
416 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
417 		e->pipe = amdgpu_crtc->crtc_id;
418 
419 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
420 		e = NULL;
421 	}
422 
423 	/* Keep track of vblank of this flip for flip throttling. We use the
424 	 * cooked hw counter, as that one incremented at start of this vblank
425 	 * of pageflip completion, so last_flip_vblank is the forbidden count
426 	 * for queueing new pageflips if vsync + VRR is enabled.
427 	 */
428 	amdgpu_crtc->last_flip_vblank =
429 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
430 
431 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
432 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
433 
434 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
435 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
436 			 vrr_active, (int) !e);
437 }
438 
439 static void dm_vupdate_high_irq(void *interrupt_params)
440 {
441 	struct common_irq_params *irq_params = interrupt_params;
442 	struct amdgpu_device *adev = irq_params->adev;
443 	struct amdgpu_crtc *acrtc;
444 	struct dm_crtc_state *acrtc_state;
445 	unsigned long flags;
446 
447 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
448 
449 	if (acrtc) {
450 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
451 
452 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
453 			      acrtc->crtc_id,
454 			      amdgpu_dm_vrr_active(acrtc_state));
455 
456 		/* Core vblank handling is done here after end of front-porch in
457 		 * vrr mode, as vblank timestamping will give valid results
458 		 * while now done after front-porch. This will also deliver
459 		 * page-flip completion events that have been queued to us
460 		 * if a pageflip happened inside front-porch.
461 		 */
462 		if (amdgpu_dm_vrr_active(acrtc_state)) {
463 			drm_crtc_handle_vblank(&acrtc->base);
464 
465 			/* BTR processing for pre-DCE12 ASICs */
466 			if (acrtc_state->stream &&
467 			    adev->family < AMDGPU_FAMILY_AI) {
468 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
469 				mod_freesync_handle_v_update(
470 				    adev->dm.freesync_module,
471 				    acrtc_state->stream,
472 				    &acrtc_state->vrr_params);
473 
474 				dc_stream_adjust_vmin_vmax(
475 				    adev->dm.dc,
476 				    acrtc_state->stream,
477 				    &acrtc_state->vrr_params.adjust);
478 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
479 			}
480 		}
481 	}
482 }
483 
484 /**
485  * dm_crtc_high_irq() - Handles CRTC interrupt
486  * @interrupt_params: used for determining the CRTC instance
487  *
488  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
489  * event handler.
490  */
491 static void dm_crtc_high_irq(void *interrupt_params)
492 {
493 	struct common_irq_params *irq_params = interrupt_params;
494 	struct amdgpu_device *adev = irq_params->adev;
495 	struct amdgpu_crtc *acrtc;
496 	struct dm_crtc_state *acrtc_state;
497 	unsigned long flags;
498 
499 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
500 	if (!acrtc)
501 		return;
502 
503 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
504 
505 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
506 			 amdgpu_dm_vrr_active(acrtc_state),
507 			 acrtc_state->active_planes);
508 
509 	/**
510 	 * Core vblank handling at start of front-porch is only possible
511 	 * in non-vrr mode, as only there vblank timestamping will give
512 	 * valid results while done in front-porch. Otherwise defer it
513 	 * to dm_vupdate_high_irq after end of front-porch.
514 	 */
515 	if (!amdgpu_dm_vrr_active(acrtc_state))
516 		drm_crtc_handle_vblank(&acrtc->base);
517 
518 	/**
519 	 * Following stuff must happen at start of vblank, for crc
520 	 * computation and below-the-range btr support in vrr mode.
521 	 */
522 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
523 
524 	/* BTR updates need to happen before VUPDATE on Vega and above. */
525 	if (adev->family < AMDGPU_FAMILY_AI)
526 		return;
527 
528 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
529 
530 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
531 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
532 		mod_freesync_handle_v_update(adev->dm.freesync_module,
533 					     acrtc_state->stream,
534 					     &acrtc_state->vrr_params);
535 
536 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
537 					   &acrtc_state->vrr_params.adjust);
538 	}
539 
540 	/*
541 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
542 	 * In that case, pageflip completion interrupts won't fire and pageflip
543 	 * completion events won't get delivered. Prevent this by sending
544 	 * pending pageflip events from here if a flip is still pending.
545 	 *
546 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
547 	 * avoid race conditions between flip programming and completion,
548 	 * which could cause too early flip completion events.
549 	 */
550 	if (adev->family >= AMDGPU_FAMILY_RV &&
551 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
552 	    acrtc_state->active_planes == 0) {
553 		if (acrtc->event) {
554 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
555 			acrtc->event = NULL;
556 			drm_crtc_vblank_put(&acrtc->base);
557 		}
558 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
559 	}
560 
561 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
562 }
563 
564 static int dm_set_clockgating_state(void *handle,
565 		  enum amd_clockgating_state state)
566 {
567 	return 0;
568 }
569 
570 static int dm_set_powergating_state(void *handle,
571 		  enum amd_powergating_state state)
572 {
573 	return 0;
574 }
575 
576 /* Prototypes of private functions */
577 static int dm_early_init(void* handle);
578 
579 /* Allocate memory for FBC compressed data  */
580 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
581 {
582 	struct drm_device *dev = connector->dev;
583 	struct amdgpu_device *adev = drm_to_adev(dev);
584 	struct dm_comressor_info *compressor = &adev->dm.compressor;
585 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
586 	struct drm_display_mode *mode;
587 	unsigned long max_size = 0;
588 
589 	if (adev->dm.dc->fbc_compressor == NULL)
590 		return;
591 
592 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
593 		return;
594 
595 	if (compressor->bo_ptr)
596 		return;
597 
598 
599 	list_for_each_entry(mode, &connector->modes, head) {
600 		if (max_size < mode->htotal * mode->vtotal)
601 			max_size = mode->htotal * mode->vtotal;
602 	}
603 
604 	if (max_size) {
605 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
606 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
607 			    &compressor->gpu_addr, &compressor->cpu_addr);
608 
609 		if (r)
610 			DRM_ERROR("DM: Failed to initialize FBC\n");
611 		else {
612 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
613 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
614 		}
615 
616 	}
617 
618 }
619 
620 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
621 					  int pipe, bool *enabled,
622 					  unsigned char *buf, int max_bytes)
623 {
624 	struct drm_device *dev = dev_get_drvdata(kdev);
625 	struct amdgpu_device *adev = drm_to_adev(dev);
626 	struct drm_connector *connector;
627 	struct drm_connector_list_iter conn_iter;
628 	struct amdgpu_dm_connector *aconnector;
629 	int ret = 0;
630 
631 	*enabled = false;
632 
633 	mutex_lock(&adev->dm.audio_lock);
634 
635 	drm_connector_list_iter_begin(dev, &conn_iter);
636 	drm_for_each_connector_iter(connector, &conn_iter) {
637 		aconnector = to_amdgpu_dm_connector(connector);
638 		if (aconnector->audio_inst != port)
639 			continue;
640 
641 		*enabled = true;
642 		ret = drm_eld_size(connector->eld);
643 		memcpy(buf, connector->eld, min(max_bytes, ret));
644 
645 		break;
646 	}
647 	drm_connector_list_iter_end(&conn_iter);
648 
649 	mutex_unlock(&adev->dm.audio_lock);
650 
651 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
652 
653 	return ret;
654 }
655 
656 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
657 	.get_eld = amdgpu_dm_audio_component_get_eld,
658 };
659 
660 static int amdgpu_dm_audio_component_bind(struct device *kdev,
661 				       struct device *hda_kdev, void *data)
662 {
663 	struct drm_device *dev = dev_get_drvdata(kdev);
664 	struct amdgpu_device *adev = drm_to_adev(dev);
665 	struct drm_audio_component *acomp = data;
666 
667 	acomp->ops = &amdgpu_dm_audio_component_ops;
668 	acomp->dev = kdev;
669 	adev->dm.audio_component = acomp;
670 
671 	return 0;
672 }
673 
674 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
675 					  struct device *hda_kdev, void *data)
676 {
677 	struct drm_device *dev = dev_get_drvdata(kdev);
678 	struct amdgpu_device *adev = drm_to_adev(dev);
679 	struct drm_audio_component *acomp = data;
680 
681 	acomp->ops = NULL;
682 	acomp->dev = NULL;
683 	adev->dm.audio_component = NULL;
684 }
685 
686 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
687 	.bind	= amdgpu_dm_audio_component_bind,
688 	.unbind	= amdgpu_dm_audio_component_unbind,
689 };
690 
691 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
692 {
693 	int i, ret;
694 
695 	if (!amdgpu_audio)
696 		return 0;
697 
698 	adev->mode_info.audio.enabled = true;
699 
700 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
701 
702 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
703 		adev->mode_info.audio.pin[i].channels = -1;
704 		adev->mode_info.audio.pin[i].rate = -1;
705 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
706 		adev->mode_info.audio.pin[i].status_bits = 0;
707 		adev->mode_info.audio.pin[i].category_code = 0;
708 		adev->mode_info.audio.pin[i].connected = false;
709 		adev->mode_info.audio.pin[i].id =
710 			adev->dm.dc->res_pool->audios[i]->inst;
711 		adev->mode_info.audio.pin[i].offset = 0;
712 	}
713 
714 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
715 	if (ret < 0)
716 		return ret;
717 
718 	adev->dm.audio_registered = true;
719 
720 	return 0;
721 }
722 
723 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
724 {
725 	if (!amdgpu_audio)
726 		return;
727 
728 	if (!adev->mode_info.audio.enabled)
729 		return;
730 
731 	if (adev->dm.audio_registered) {
732 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
733 		adev->dm.audio_registered = false;
734 	}
735 
736 	/* TODO: Disable audio? */
737 
738 	adev->mode_info.audio.enabled = false;
739 }
740 
741 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
742 {
743 	struct drm_audio_component *acomp = adev->dm.audio_component;
744 
745 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
746 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
747 
748 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
749 						 pin, -1);
750 	}
751 }
752 
753 static int dm_dmub_hw_init(struct amdgpu_device *adev)
754 {
755 	const struct dmcub_firmware_header_v1_0 *hdr;
756 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
757 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
758 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
759 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
760 	struct abm *abm = adev->dm.dc->res_pool->abm;
761 	struct dmub_srv_hw_params hw_params;
762 	enum dmub_status status;
763 	const unsigned char *fw_inst_const, *fw_bss_data;
764 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
765 	bool has_hw_support;
766 
767 	if (!dmub_srv)
768 		/* DMUB isn't supported on the ASIC. */
769 		return 0;
770 
771 	if (!fb_info) {
772 		DRM_ERROR("No framebuffer info for DMUB service.\n");
773 		return -EINVAL;
774 	}
775 
776 	if (!dmub_fw) {
777 		/* Firmware required for DMUB support. */
778 		DRM_ERROR("No firmware provided for DMUB.\n");
779 		return -EINVAL;
780 	}
781 
782 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
783 	if (status != DMUB_STATUS_OK) {
784 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
785 		return -EINVAL;
786 	}
787 
788 	if (!has_hw_support) {
789 		DRM_INFO("DMUB unsupported on ASIC\n");
790 		return 0;
791 	}
792 
793 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
794 
795 	fw_inst_const = dmub_fw->data +
796 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
797 			PSP_HEADER_BYTES;
798 
799 	fw_bss_data = dmub_fw->data +
800 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
801 		      le32_to_cpu(hdr->inst_const_bytes);
802 
803 	/* Copy firmware and bios info into FB memory. */
804 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
805 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
806 
807 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
808 
809 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
810 	 * amdgpu_ucode_init_single_fw will load dmub firmware
811 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
812 	 * will be done by dm_dmub_hw_init
813 	 */
814 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
815 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
816 				fw_inst_const_size);
817 	}
818 
819 	if (fw_bss_data_size)
820 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
821 		       fw_bss_data, fw_bss_data_size);
822 
823 	/* Copy firmware bios info into FB memory. */
824 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
825 	       adev->bios_size);
826 
827 	/* Reset regions that need to be reset. */
828 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
829 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
830 
831 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
832 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
833 
834 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
835 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
836 
837 	/* Initialize hardware. */
838 	memset(&hw_params, 0, sizeof(hw_params));
839 	hw_params.fb_base = adev->gmc.fb_start;
840 	hw_params.fb_offset = adev->gmc.aper_base;
841 
842 	/* backdoor load firmware and trigger dmub running */
843 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
844 		hw_params.load_inst_const = true;
845 
846 	if (dmcu)
847 		hw_params.psp_version = dmcu->psp_version;
848 
849 	for (i = 0; i < fb_info->num_fb; ++i)
850 		hw_params.fb[i] = &fb_info->fb[i];
851 
852 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
853 	if (status != DMUB_STATUS_OK) {
854 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
855 		return -EINVAL;
856 	}
857 
858 	/* Wait for firmware load to finish. */
859 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
860 	if (status != DMUB_STATUS_OK)
861 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
862 
863 	/* Init DMCU and ABM if available. */
864 	if (dmcu && abm) {
865 		dmcu->funcs->dmcu_init(dmcu);
866 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
867 	}
868 
869 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
870 	if (!adev->dm.dc->ctx->dmub_srv) {
871 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
872 		return -ENOMEM;
873 	}
874 
875 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
876 		 adev->dm.dmcub_fw_version);
877 
878 	return 0;
879 }
880 
881 static int amdgpu_dm_init(struct amdgpu_device *adev)
882 {
883 	struct dc_init_data init_data;
884 #ifdef CONFIG_DRM_AMD_DC_HDCP
885 	struct dc_callback_init init_params;
886 #endif
887 	int r;
888 
889 	adev->dm.ddev = adev_to_drm(adev);
890 	adev->dm.adev = adev;
891 
892 	/* Zero all the fields */
893 	memset(&init_data, 0, sizeof(init_data));
894 #ifdef CONFIG_DRM_AMD_DC_HDCP
895 	memset(&init_params, 0, sizeof(init_params));
896 #endif
897 
898 	mutex_init(&adev->dm.dc_lock);
899 	mutex_init(&adev->dm.audio_lock);
900 
901 	if(amdgpu_dm_irq_init(adev)) {
902 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
903 		goto error;
904 	}
905 
906 	init_data.asic_id.chip_family = adev->family;
907 
908 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
909 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
910 
911 	init_data.asic_id.vram_width = adev->gmc.vram_width;
912 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
913 	init_data.asic_id.atombios_base_address =
914 		adev->mode_info.atom_context->bios;
915 
916 	init_data.driver = adev;
917 
918 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
919 
920 	if (!adev->dm.cgs_device) {
921 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
922 		goto error;
923 	}
924 
925 	init_data.cgs_device = adev->dm.cgs_device;
926 
927 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
928 
929 	switch (adev->asic_type) {
930 	case CHIP_CARRIZO:
931 	case CHIP_STONEY:
932 	case CHIP_RAVEN:
933 	case CHIP_RENOIR:
934 		init_data.flags.gpu_vm_support = true;
935 		break;
936 	default:
937 		break;
938 	}
939 
940 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
941 		init_data.flags.fbc_support = true;
942 
943 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
944 		init_data.flags.multi_mon_pp_mclk_switch = true;
945 
946 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
947 		init_data.flags.disable_fractional_pwm = true;
948 
949 	init_data.flags.power_down_display_on_boot = true;
950 
951 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
952 
953 	/* Display Core create. */
954 	adev->dm.dc = dc_create(&init_data);
955 
956 	if (adev->dm.dc) {
957 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
958 	} else {
959 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
960 		goto error;
961 	}
962 
963 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
964 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
965 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
966 	}
967 
968 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
969 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
970 
971 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
972 		adev->dm.dc->debug.disable_stutter = true;
973 
974 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
975 		adev->dm.dc->debug.disable_dsc = true;
976 
977 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
978 		adev->dm.dc->debug.disable_clock_gate = true;
979 
980 	r = dm_dmub_hw_init(adev);
981 	if (r) {
982 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
983 		goto error;
984 	}
985 
986 	dc_hardware_init(adev->dm.dc);
987 
988 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
989 	if (!adev->dm.freesync_module) {
990 		DRM_ERROR(
991 		"amdgpu: failed to initialize freesync_module.\n");
992 	} else
993 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
994 				adev->dm.freesync_module);
995 
996 	amdgpu_dm_init_color_mod();
997 
998 #ifdef CONFIG_DRM_AMD_DC_HDCP
999 	if (adev->asic_type >= CHIP_RAVEN) {
1000 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1001 
1002 		if (!adev->dm.hdcp_workqueue)
1003 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1004 		else
1005 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1006 
1007 		dc_init_callbacks(adev->dm.dc, &init_params);
1008 	}
1009 #endif
1010 	if (amdgpu_dm_initialize_drm_device(adev)) {
1011 		DRM_ERROR(
1012 		"amdgpu: failed to initialize sw for display support.\n");
1013 		goto error;
1014 	}
1015 
1016 	/* Update the actual used number of crtc */
1017 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1018 
1019 	/* create fake encoders for MST */
1020 	dm_dp_create_fake_mst_encoders(adev);
1021 
1022 	/* TODO: Add_display_info? */
1023 
1024 	/* TODO use dynamic cursor width */
1025 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1026 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1027 
1028 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1029 		DRM_ERROR(
1030 		"amdgpu: failed to initialize sw for display support.\n");
1031 		goto error;
1032 	}
1033 
1034 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1035 
1036 	return 0;
1037 error:
1038 	amdgpu_dm_fini(adev);
1039 
1040 	return -EINVAL;
1041 }
1042 
1043 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1044 {
1045 	int i;
1046 
1047 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1048 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1049 	}
1050 
1051 	amdgpu_dm_audio_fini(adev);
1052 
1053 	amdgpu_dm_destroy_drm_device(&adev->dm);
1054 
1055 #ifdef CONFIG_DRM_AMD_DC_HDCP
1056 	if (adev->dm.hdcp_workqueue) {
1057 		hdcp_destroy(adev->dm.hdcp_workqueue);
1058 		adev->dm.hdcp_workqueue = NULL;
1059 	}
1060 
1061 	if (adev->dm.dc)
1062 		dc_deinit_callbacks(adev->dm.dc);
1063 #endif
1064 	if (adev->dm.dc->ctx->dmub_srv) {
1065 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1066 		adev->dm.dc->ctx->dmub_srv = NULL;
1067 	}
1068 
1069 	if (adev->dm.dmub_bo)
1070 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1071 				      &adev->dm.dmub_bo_gpu_addr,
1072 				      &adev->dm.dmub_bo_cpu_addr);
1073 
1074 	/* DC Destroy TODO: Replace destroy DAL */
1075 	if (adev->dm.dc)
1076 		dc_destroy(&adev->dm.dc);
1077 	/*
1078 	 * TODO: pageflip, vlank interrupt
1079 	 *
1080 	 * amdgpu_dm_irq_fini(adev);
1081 	 */
1082 
1083 	if (adev->dm.cgs_device) {
1084 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1085 		adev->dm.cgs_device = NULL;
1086 	}
1087 	if (adev->dm.freesync_module) {
1088 		mod_freesync_destroy(adev->dm.freesync_module);
1089 		adev->dm.freesync_module = NULL;
1090 	}
1091 
1092 	mutex_destroy(&adev->dm.audio_lock);
1093 	mutex_destroy(&adev->dm.dc_lock);
1094 
1095 	return;
1096 }
1097 
1098 static int load_dmcu_fw(struct amdgpu_device *adev)
1099 {
1100 	const char *fw_name_dmcu = NULL;
1101 	int r;
1102 	const struct dmcu_firmware_header_v1_0 *hdr;
1103 
1104 	switch(adev->asic_type) {
1105 #if defined(CONFIG_DRM_AMD_DC_SI)
1106 	case CHIP_TAHITI:
1107 	case CHIP_PITCAIRN:
1108 	case CHIP_VERDE:
1109 	case CHIP_OLAND:
1110 #endif
1111 	case CHIP_BONAIRE:
1112 	case CHIP_HAWAII:
1113 	case CHIP_KAVERI:
1114 	case CHIP_KABINI:
1115 	case CHIP_MULLINS:
1116 	case CHIP_TONGA:
1117 	case CHIP_FIJI:
1118 	case CHIP_CARRIZO:
1119 	case CHIP_STONEY:
1120 	case CHIP_POLARIS11:
1121 	case CHIP_POLARIS10:
1122 	case CHIP_POLARIS12:
1123 	case CHIP_VEGAM:
1124 	case CHIP_VEGA10:
1125 	case CHIP_VEGA12:
1126 	case CHIP_VEGA20:
1127 	case CHIP_NAVI10:
1128 	case CHIP_NAVI14:
1129 	case CHIP_RENOIR:
1130 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1131 	case CHIP_SIENNA_CICHLID:
1132 	case CHIP_NAVY_FLOUNDER:
1133 #endif
1134 		return 0;
1135 	case CHIP_NAVI12:
1136 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1137 		break;
1138 	case CHIP_RAVEN:
1139 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1140 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1141 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1142 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1143 		else
1144 			return 0;
1145 		break;
1146 	default:
1147 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1148 		return -EINVAL;
1149 	}
1150 
1151 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1152 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1153 		return 0;
1154 	}
1155 
1156 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1157 	if (r == -ENOENT) {
1158 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1159 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1160 		adev->dm.fw_dmcu = NULL;
1161 		return 0;
1162 	}
1163 	if (r) {
1164 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1165 			fw_name_dmcu);
1166 		return r;
1167 	}
1168 
1169 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1170 	if (r) {
1171 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1172 			fw_name_dmcu);
1173 		release_firmware(adev->dm.fw_dmcu);
1174 		adev->dm.fw_dmcu = NULL;
1175 		return r;
1176 	}
1177 
1178 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1179 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1180 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1181 	adev->firmware.fw_size +=
1182 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1183 
1184 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1185 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1186 	adev->firmware.fw_size +=
1187 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1188 
1189 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1190 
1191 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1192 
1193 	return 0;
1194 }
1195 
1196 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1197 {
1198 	struct amdgpu_device *adev = ctx;
1199 
1200 	return dm_read_reg(adev->dm.dc->ctx, address);
1201 }
1202 
1203 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1204 				     uint32_t value)
1205 {
1206 	struct amdgpu_device *adev = ctx;
1207 
1208 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1209 }
1210 
1211 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1212 {
1213 	struct dmub_srv_create_params create_params;
1214 	struct dmub_srv_region_params region_params;
1215 	struct dmub_srv_region_info region_info;
1216 	struct dmub_srv_fb_params fb_params;
1217 	struct dmub_srv_fb_info *fb_info;
1218 	struct dmub_srv *dmub_srv;
1219 	const struct dmcub_firmware_header_v1_0 *hdr;
1220 	const char *fw_name_dmub;
1221 	enum dmub_asic dmub_asic;
1222 	enum dmub_status status;
1223 	int r;
1224 
1225 	switch (adev->asic_type) {
1226 	case CHIP_RENOIR:
1227 		dmub_asic = DMUB_ASIC_DCN21;
1228 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1229 		break;
1230 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1231 	case CHIP_SIENNA_CICHLID:
1232 		dmub_asic = DMUB_ASIC_DCN30;
1233 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1234 		break;
1235 	case CHIP_NAVY_FLOUNDER:
1236 		dmub_asic = DMUB_ASIC_DCN30;
1237 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1238 		break;
1239 #endif
1240 
1241 	default:
1242 		/* ASIC doesn't support DMUB. */
1243 		return 0;
1244 	}
1245 
1246 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1247 	if (r) {
1248 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1249 		return 0;
1250 	}
1251 
1252 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1253 	if (r) {
1254 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1255 		return 0;
1256 	}
1257 
1258 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1259 
1260 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1261 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1262 			AMDGPU_UCODE_ID_DMCUB;
1263 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1264 			adev->dm.dmub_fw;
1265 		adev->firmware.fw_size +=
1266 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1267 
1268 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1269 			 adev->dm.dmcub_fw_version);
1270 	}
1271 
1272 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1273 
1274 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1275 	dmub_srv = adev->dm.dmub_srv;
1276 
1277 	if (!dmub_srv) {
1278 		DRM_ERROR("Failed to allocate DMUB service!\n");
1279 		return -ENOMEM;
1280 	}
1281 
1282 	memset(&create_params, 0, sizeof(create_params));
1283 	create_params.user_ctx = adev;
1284 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1285 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1286 	create_params.asic = dmub_asic;
1287 
1288 	/* Create the DMUB service. */
1289 	status = dmub_srv_create(dmub_srv, &create_params);
1290 	if (status != DMUB_STATUS_OK) {
1291 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1292 		return -EINVAL;
1293 	}
1294 
1295 	/* Calculate the size of all the regions for the DMUB service. */
1296 	memset(&region_params, 0, sizeof(region_params));
1297 
1298 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1299 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1300 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1301 	region_params.vbios_size = adev->bios_size;
1302 	region_params.fw_bss_data = region_params.bss_data_size ?
1303 		adev->dm.dmub_fw->data +
1304 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1305 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1306 	region_params.fw_inst_const =
1307 		adev->dm.dmub_fw->data +
1308 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1309 		PSP_HEADER_BYTES;
1310 
1311 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1312 					   &region_info);
1313 
1314 	if (status != DMUB_STATUS_OK) {
1315 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1316 		return -EINVAL;
1317 	}
1318 
1319 	/*
1320 	 * Allocate a framebuffer based on the total size of all the regions.
1321 	 * TODO: Move this into GART.
1322 	 */
1323 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1324 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1325 				    &adev->dm.dmub_bo_gpu_addr,
1326 				    &adev->dm.dmub_bo_cpu_addr);
1327 	if (r)
1328 		return r;
1329 
1330 	/* Rebase the regions on the framebuffer address. */
1331 	memset(&fb_params, 0, sizeof(fb_params));
1332 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1333 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1334 	fb_params.region_info = &region_info;
1335 
1336 	adev->dm.dmub_fb_info =
1337 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1338 	fb_info = adev->dm.dmub_fb_info;
1339 
1340 	if (!fb_info) {
1341 		DRM_ERROR(
1342 			"Failed to allocate framebuffer info for DMUB service!\n");
1343 		return -ENOMEM;
1344 	}
1345 
1346 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1347 	if (status != DMUB_STATUS_OK) {
1348 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1349 		return -EINVAL;
1350 	}
1351 
1352 	return 0;
1353 }
1354 
1355 static int dm_sw_init(void *handle)
1356 {
1357 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1358 	int r;
1359 
1360 	r = dm_dmub_sw_init(adev);
1361 	if (r)
1362 		return r;
1363 
1364 	return load_dmcu_fw(adev);
1365 }
1366 
1367 static int dm_sw_fini(void *handle)
1368 {
1369 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1370 
1371 	kfree(adev->dm.dmub_fb_info);
1372 	adev->dm.dmub_fb_info = NULL;
1373 
1374 	if (adev->dm.dmub_srv) {
1375 		dmub_srv_destroy(adev->dm.dmub_srv);
1376 		adev->dm.dmub_srv = NULL;
1377 	}
1378 
1379 	release_firmware(adev->dm.dmub_fw);
1380 	adev->dm.dmub_fw = NULL;
1381 
1382 	release_firmware(adev->dm.fw_dmcu);
1383 	adev->dm.fw_dmcu = NULL;
1384 
1385 	return 0;
1386 }
1387 
1388 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1389 {
1390 	struct amdgpu_dm_connector *aconnector;
1391 	struct drm_connector *connector;
1392 	struct drm_connector_list_iter iter;
1393 	int ret = 0;
1394 
1395 	drm_connector_list_iter_begin(dev, &iter);
1396 	drm_for_each_connector_iter(connector, &iter) {
1397 		aconnector = to_amdgpu_dm_connector(connector);
1398 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1399 		    aconnector->mst_mgr.aux) {
1400 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1401 					 aconnector,
1402 					 aconnector->base.base.id);
1403 
1404 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1405 			if (ret < 0) {
1406 				DRM_ERROR("DM_MST: Failed to start MST\n");
1407 				aconnector->dc_link->type =
1408 					dc_connection_single;
1409 				break;
1410 			}
1411 		}
1412 	}
1413 	drm_connector_list_iter_end(&iter);
1414 
1415 	return ret;
1416 }
1417 
1418 static int dm_late_init(void *handle)
1419 {
1420 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1421 
1422 	struct dmcu_iram_parameters params;
1423 	unsigned int linear_lut[16];
1424 	int i;
1425 	struct dmcu *dmcu = NULL;
1426 	bool ret = true;
1427 
1428 	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
1429 		return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1430 
1431 	dmcu = adev->dm.dc->res_pool->dmcu;
1432 
1433 	for (i = 0; i < 16; i++)
1434 		linear_lut[i] = 0xFFFF * i / 15;
1435 
1436 	params.set = 0;
1437 	params.backlight_ramping_start = 0xCCCC;
1438 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1439 	params.backlight_lut_array_size = 16;
1440 	params.backlight_lut_array = linear_lut;
1441 
1442 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1443 	 * 0xFFFF x 0.01 = 0x28F
1444 	 */
1445 	params.min_abm_backlight = 0x28F;
1446 
1447 	/* In the case where abm is implemented on dmcub,
1448 	 * dmcu object will be null.
1449 	 * ABM 2.4 and up are implemented on dmcub.
1450 	 */
1451 	if (dmcu)
1452 		ret = dmcu_load_iram(dmcu, params);
1453 	else if (adev->dm.dc->ctx->dmub_srv)
1454 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1455 
1456 	if (!ret)
1457 		return -EINVAL;
1458 
1459 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1460 }
1461 
1462 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1463 {
1464 	struct amdgpu_dm_connector *aconnector;
1465 	struct drm_connector *connector;
1466 	struct drm_connector_list_iter iter;
1467 	struct drm_dp_mst_topology_mgr *mgr;
1468 	int ret;
1469 	bool need_hotplug = false;
1470 
1471 	drm_connector_list_iter_begin(dev, &iter);
1472 	drm_for_each_connector_iter(connector, &iter) {
1473 		aconnector = to_amdgpu_dm_connector(connector);
1474 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1475 		    aconnector->mst_port)
1476 			continue;
1477 
1478 		mgr = &aconnector->mst_mgr;
1479 
1480 		if (suspend) {
1481 			drm_dp_mst_topology_mgr_suspend(mgr);
1482 		} else {
1483 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1484 			if (ret < 0) {
1485 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1486 				need_hotplug = true;
1487 			}
1488 		}
1489 	}
1490 	drm_connector_list_iter_end(&iter);
1491 
1492 	if (need_hotplug)
1493 		drm_kms_helper_hotplug_event(dev);
1494 }
1495 
1496 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1497 {
1498 	struct smu_context *smu = &adev->smu;
1499 	int ret = 0;
1500 
1501 	if (!is_support_sw_smu(adev))
1502 		return 0;
1503 
1504 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1505 	 * on window driver dc implementation.
1506 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1507 	 * should be passed to smu during boot up and resume from s3.
1508 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1509 	 * dcn20_resource_construct
1510 	 * then call pplib functions below to pass the settings to smu:
1511 	 * smu_set_watermarks_for_clock_ranges
1512 	 * smu_set_watermarks_table
1513 	 * navi10_set_watermarks_table
1514 	 * smu_write_watermarks_table
1515 	 *
1516 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1517 	 * dc has implemented different flow for window driver:
1518 	 * dc_hardware_init / dc_set_power_state
1519 	 * dcn10_init_hw
1520 	 * notify_wm_ranges
1521 	 * set_wm_ranges
1522 	 * -- Linux
1523 	 * smu_set_watermarks_for_clock_ranges
1524 	 * renoir_set_watermarks_table
1525 	 * smu_write_watermarks_table
1526 	 *
1527 	 * For Linux,
1528 	 * dc_hardware_init -> amdgpu_dm_init
1529 	 * dc_set_power_state --> dm_resume
1530 	 *
1531 	 * therefore, this function apply to navi10/12/14 but not Renoir
1532 	 * *
1533 	 */
1534 	switch(adev->asic_type) {
1535 	case CHIP_NAVI10:
1536 	case CHIP_NAVI14:
1537 	case CHIP_NAVI12:
1538 		break;
1539 	default:
1540 		return 0;
1541 	}
1542 
1543 	ret = smu_write_watermarks_table(smu);
1544 	if (ret) {
1545 		DRM_ERROR("Failed to update WMTABLE!\n");
1546 		return ret;
1547 	}
1548 
1549 	return 0;
1550 }
1551 
1552 /**
1553  * dm_hw_init() - Initialize DC device
1554  * @handle: The base driver device containing the amdgpu_dm device.
1555  *
1556  * Initialize the &struct amdgpu_display_manager device. This involves calling
1557  * the initializers of each DM component, then populating the struct with them.
1558  *
1559  * Although the function implies hardware initialization, both hardware and
1560  * software are initialized here. Splitting them out to their relevant init
1561  * hooks is a future TODO item.
1562  *
1563  * Some notable things that are initialized here:
1564  *
1565  * - Display Core, both software and hardware
1566  * - DC modules that we need (freesync and color management)
1567  * - DRM software states
1568  * - Interrupt sources and handlers
1569  * - Vblank support
1570  * - Debug FS entries, if enabled
1571  */
1572 static int dm_hw_init(void *handle)
1573 {
1574 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1575 	/* Create DAL display manager */
1576 	amdgpu_dm_init(adev);
1577 	amdgpu_dm_hpd_init(adev);
1578 
1579 	return 0;
1580 }
1581 
1582 /**
1583  * dm_hw_fini() - Teardown DC device
1584  * @handle: The base driver device containing the amdgpu_dm device.
1585  *
1586  * Teardown components within &struct amdgpu_display_manager that require
1587  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1588  * were loaded. Also flush IRQ workqueues and disable them.
1589  */
1590 static int dm_hw_fini(void *handle)
1591 {
1592 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593 
1594 	amdgpu_dm_hpd_fini(adev);
1595 
1596 	amdgpu_dm_irq_fini(adev);
1597 	amdgpu_dm_fini(adev);
1598 	return 0;
1599 }
1600 
1601 
1602 static int dm_enable_vblank(struct drm_crtc *crtc);
1603 static void dm_disable_vblank(struct drm_crtc *crtc);
1604 
1605 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1606 				 struct dc_state *state, bool enable)
1607 {
1608 	enum dc_irq_source irq_source;
1609 	struct amdgpu_crtc *acrtc;
1610 	int rc = -EBUSY;
1611 	int i = 0;
1612 
1613 	for (i = 0; i < state->stream_count; i++) {
1614 		acrtc = get_crtc_by_otg_inst(
1615 				adev, state->stream_status[i].primary_otg_inst);
1616 
1617 		if (acrtc && state->stream_status[i].plane_count != 0) {
1618 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1619 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1620 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1621 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1622 			if (rc)
1623 				DRM_WARN("Failed to %s pflip interrupts\n",
1624 					 enable ? "enable" : "disable");
1625 
1626 			if (enable) {
1627 				rc = dm_enable_vblank(&acrtc->base);
1628 				if (rc)
1629 					DRM_WARN("Failed to enable vblank interrupts\n");
1630 			} else {
1631 				dm_disable_vblank(&acrtc->base);
1632 			}
1633 
1634 		}
1635 	}
1636 
1637 }
1638 
1639 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1640 {
1641 	struct dc_state *context = NULL;
1642 	enum dc_status res = DC_ERROR_UNEXPECTED;
1643 	int i;
1644 	struct dc_stream_state *del_streams[MAX_PIPES];
1645 	int del_streams_count = 0;
1646 
1647 	memset(del_streams, 0, sizeof(del_streams));
1648 
1649 	context = dc_create_state(dc);
1650 	if (context == NULL)
1651 		goto context_alloc_fail;
1652 
1653 	dc_resource_state_copy_construct_current(dc, context);
1654 
1655 	/* First remove from context all streams */
1656 	for (i = 0; i < context->stream_count; i++) {
1657 		struct dc_stream_state *stream = context->streams[i];
1658 
1659 		del_streams[del_streams_count++] = stream;
1660 	}
1661 
1662 	/* Remove all planes for removed streams and then remove the streams */
1663 	for (i = 0; i < del_streams_count; i++) {
1664 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1665 			res = DC_FAIL_DETACH_SURFACES;
1666 			goto fail;
1667 		}
1668 
1669 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1670 		if (res != DC_OK)
1671 			goto fail;
1672 	}
1673 
1674 
1675 	res = dc_validate_global_state(dc, context, false);
1676 
1677 	if (res != DC_OK) {
1678 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1679 		goto fail;
1680 	}
1681 
1682 	res = dc_commit_state(dc, context);
1683 
1684 fail:
1685 	dc_release_state(context);
1686 
1687 context_alloc_fail:
1688 	return res;
1689 }
1690 
1691 static int dm_suspend(void *handle)
1692 {
1693 	struct amdgpu_device *adev = handle;
1694 	struct amdgpu_display_manager *dm = &adev->dm;
1695 	int ret = 0;
1696 
1697 	if (amdgpu_in_reset(adev)) {
1698 		mutex_lock(&dm->dc_lock);
1699 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1700 
1701 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1702 
1703 		amdgpu_dm_commit_zero_streams(dm->dc);
1704 
1705 		amdgpu_dm_irq_suspend(adev);
1706 
1707 		return ret;
1708 	}
1709 
1710 	WARN_ON(adev->dm.cached_state);
1711 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1712 
1713 	s3_handle_mst(adev_to_drm(adev), true);
1714 
1715 	amdgpu_dm_irq_suspend(adev);
1716 
1717 
1718 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1719 
1720 	return 0;
1721 }
1722 
1723 static struct amdgpu_dm_connector *
1724 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1725 					     struct drm_crtc *crtc)
1726 {
1727 	uint32_t i;
1728 	struct drm_connector_state *new_con_state;
1729 	struct drm_connector *connector;
1730 	struct drm_crtc *crtc_from_state;
1731 
1732 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1733 		crtc_from_state = new_con_state->crtc;
1734 
1735 		if (crtc_from_state == crtc)
1736 			return to_amdgpu_dm_connector(connector);
1737 	}
1738 
1739 	return NULL;
1740 }
1741 
1742 static void emulated_link_detect(struct dc_link *link)
1743 {
1744 	struct dc_sink_init_data sink_init_data = { 0 };
1745 	struct display_sink_capability sink_caps = { 0 };
1746 	enum dc_edid_status edid_status;
1747 	struct dc_context *dc_ctx = link->ctx;
1748 	struct dc_sink *sink = NULL;
1749 	struct dc_sink *prev_sink = NULL;
1750 
1751 	link->type = dc_connection_none;
1752 	prev_sink = link->local_sink;
1753 
1754 	if (prev_sink != NULL)
1755 		dc_sink_retain(prev_sink);
1756 
1757 	switch (link->connector_signal) {
1758 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1759 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1760 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1761 		break;
1762 	}
1763 
1764 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1765 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1766 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1767 		break;
1768 	}
1769 
1770 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1771 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1772 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1773 		break;
1774 	}
1775 
1776 	case SIGNAL_TYPE_LVDS: {
1777 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1778 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1779 		break;
1780 	}
1781 
1782 	case SIGNAL_TYPE_EDP: {
1783 		sink_caps.transaction_type =
1784 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1785 		sink_caps.signal = SIGNAL_TYPE_EDP;
1786 		break;
1787 	}
1788 
1789 	case SIGNAL_TYPE_DISPLAY_PORT: {
1790 		sink_caps.transaction_type =
1791 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1792 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1793 		break;
1794 	}
1795 
1796 	default:
1797 		DC_ERROR("Invalid connector type! signal:%d\n",
1798 			link->connector_signal);
1799 		return;
1800 	}
1801 
1802 	sink_init_data.link = link;
1803 	sink_init_data.sink_signal = sink_caps.signal;
1804 
1805 	sink = dc_sink_create(&sink_init_data);
1806 	if (!sink) {
1807 		DC_ERROR("Failed to create sink!\n");
1808 		return;
1809 	}
1810 
1811 	/* dc_sink_create returns a new reference */
1812 	link->local_sink = sink;
1813 
1814 	edid_status = dm_helpers_read_local_edid(
1815 			link->ctx,
1816 			link,
1817 			sink);
1818 
1819 	if (edid_status != EDID_OK)
1820 		DC_ERROR("Failed to read EDID");
1821 
1822 }
1823 
1824 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1825 				     struct amdgpu_display_manager *dm)
1826 {
1827 	struct {
1828 		struct dc_surface_update surface_updates[MAX_SURFACES];
1829 		struct dc_plane_info plane_infos[MAX_SURFACES];
1830 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1831 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1832 		struct dc_stream_update stream_update;
1833 	} * bundle;
1834 	int k, m;
1835 
1836 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1837 
1838 	if (!bundle) {
1839 		dm_error("Failed to allocate update bundle\n");
1840 		goto cleanup;
1841 	}
1842 
1843 	for (k = 0; k < dc_state->stream_count; k++) {
1844 		bundle->stream_update.stream = dc_state->streams[k];
1845 
1846 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1847 			bundle->surface_updates[m].surface =
1848 				dc_state->stream_status->plane_states[m];
1849 			bundle->surface_updates[m].surface->force_full_update =
1850 				true;
1851 		}
1852 		dc_commit_updates_for_stream(
1853 			dm->dc, bundle->surface_updates,
1854 			dc_state->stream_status->plane_count,
1855 			dc_state->streams[k], &bundle->stream_update, dc_state);
1856 	}
1857 
1858 cleanup:
1859 	kfree(bundle);
1860 
1861 	return;
1862 }
1863 
1864 static int dm_resume(void *handle)
1865 {
1866 	struct amdgpu_device *adev = handle;
1867 	struct drm_device *ddev = adev_to_drm(adev);
1868 	struct amdgpu_display_manager *dm = &adev->dm;
1869 	struct amdgpu_dm_connector *aconnector;
1870 	struct drm_connector *connector;
1871 	struct drm_connector_list_iter iter;
1872 	struct drm_crtc *crtc;
1873 	struct drm_crtc_state *new_crtc_state;
1874 	struct dm_crtc_state *dm_new_crtc_state;
1875 	struct drm_plane *plane;
1876 	struct drm_plane_state *new_plane_state;
1877 	struct dm_plane_state *dm_new_plane_state;
1878 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1879 	enum dc_connection_type new_connection_type = dc_connection_none;
1880 	struct dc_state *dc_state;
1881 	int i, r, j;
1882 
1883 	if (amdgpu_in_reset(adev)) {
1884 		dc_state = dm->cached_dc_state;
1885 
1886 		r = dm_dmub_hw_init(adev);
1887 		if (r)
1888 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1889 
1890 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1891 		dc_resume(dm->dc);
1892 
1893 		amdgpu_dm_irq_resume_early(adev);
1894 
1895 		for (i = 0; i < dc_state->stream_count; i++) {
1896 			dc_state->streams[i]->mode_changed = true;
1897 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1898 				dc_state->stream_status->plane_states[j]->update_flags.raw
1899 					= 0xffffffff;
1900 			}
1901 		}
1902 
1903 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1904 
1905 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1906 
1907 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1908 
1909 		dc_release_state(dm->cached_dc_state);
1910 		dm->cached_dc_state = NULL;
1911 
1912 		amdgpu_dm_irq_resume_late(adev);
1913 
1914 		mutex_unlock(&dm->dc_lock);
1915 
1916 		return 0;
1917 	}
1918 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1919 	dc_release_state(dm_state->context);
1920 	dm_state->context = dc_create_state(dm->dc);
1921 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1922 	dc_resource_state_construct(dm->dc, dm_state->context);
1923 
1924 	/* Before powering on DC we need to re-initialize DMUB. */
1925 	r = dm_dmub_hw_init(adev);
1926 	if (r)
1927 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1928 
1929 	/* power on hardware */
1930 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1931 
1932 	/* program HPD filter */
1933 	dc_resume(dm->dc);
1934 
1935 	/*
1936 	 * early enable HPD Rx IRQ, should be done before set mode as short
1937 	 * pulse interrupts are used for MST
1938 	 */
1939 	amdgpu_dm_irq_resume_early(adev);
1940 
1941 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1942 	s3_handle_mst(ddev, false);
1943 
1944 	/* Do detection*/
1945 	drm_connector_list_iter_begin(ddev, &iter);
1946 	drm_for_each_connector_iter(connector, &iter) {
1947 		aconnector = to_amdgpu_dm_connector(connector);
1948 
1949 		/*
1950 		 * this is the case when traversing through already created
1951 		 * MST connectors, should be skipped
1952 		 */
1953 		if (aconnector->mst_port)
1954 			continue;
1955 
1956 		mutex_lock(&aconnector->hpd_lock);
1957 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1958 			DRM_ERROR("KMS: Failed to detect connector\n");
1959 
1960 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1961 			emulated_link_detect(aconnector->dc_link);
1962 		else
1963 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1964 
1965 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1966 			aconnector->fake_enable = false;
1967 
1968 		if (aconnector->dc_sink)
1969 			dc_sink_release(aconnector->dc_sink);
1970 		aconnector->dc_sink = NULL;
1971 		amdgpu_dm_update_connector_after_detect(aconnector);
1972 		mutex_unlock(&aconnector->hpd_lock);
1973 	}
1974 	drm_connector_list_iter_end(&iter);
1975 
1976 	/* Force mode set in atomic commit */
1977 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1978 		new_crtc_state->active_changed = true;
1979 
1980 	/*
1981 	 * atomic_check is expected to create the dc states. We need to release
1982 	 * them here, since they were duplicated as part of the suspend
1983 	 * procedure.
1984 	 */
1985 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1986 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1987 		if (dm_new_crtc_state->stream) {
1988 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1989 			dc_stream_release(dm_new_crtc_state->stream);
1990 			dm_new_crtc_state->stream = NULL;
1991 		}
1992 	}
1993 
1994 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1995 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1996 		if (dm_new_plane_state->dc_state) {
1997 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1998 			dc_plane_state_release(dm_new_plane_state->dc_state);
1999 			dm_new_plane_state->dc_state = NULL;
2000 		}
2001 	}
2002 
2003 	drm_atomic_helper_resume(ddev, dm->cached_state);
2004 
2005 	dm->cached_state = NULL;
2006 
2007 	amdgpu_dm_irq_resume_late(adev);
2008 
2009 	amdgpu_dm_smu_write_watermarks_table(adev);
2010 
2011 	return 0;
2012 }
2013 
2014 /**
2015  * DOC: DM Lifecycle
2016  *
2017  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2018  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2019  * the base driver's device list to be initialized and torn down accordingly.
2020  *
2021  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2022  */
2023 
2024 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2025 	.name = "dm",
2026 	.early_init = dm_early_init,
2027 	.late_init = dm_late_init,
2028 	.sw_init = dm_sw_init,
2029 	.sw_fini = dm_sw_fini,
2030 	.hw_init = dm_hw_init,
2031 	.hw_fini = dm_hw_fini,
2032 	.suspend = dm_suspend,
2033 	.resume = dm_resume,
2034 	.is_idle = dm_is_idle,
2035 	.wait_for_idle = dm_wait_for_idle,
2036 	.check_soft_reset = dm_check_soft_reset,
2037 	.soft_reset = dm_soft_reset,
2038 	.set_clockgating_state = dm_set_clockgating_state,
2039 	.set_powergating_state = dm_set_powergating_state,
2040 };
2041 
2042 const struct amdgpu_ip_block_version dm_ip_block =
2043 {
2044 	.type = AMD_IP_BLOCK_TYPE_DCE,
2045 	.major = 1,
2046 	.minor = 0,
2047 	.rev = 0,
2048 	.funcs = &amdgpu_dm_funcs,
2049 };
2050 
2051 
2052 /**
2053  * DOC: atomic
2054  *
2055  * *WIP*
2056  */
2057 
2058 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2059 	.fb_create = amdgpu_display_user_framebuffer_create,
2060 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2061 	.atomic_check = amdgpu_dm_atomic_check,
2062 	.atomic_commit = amdgpu_dm_atomic_commit,
2063 };
2064 
2065 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2066 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2067 };
2068 
2069 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2070 {
2071 	u32 max_cll, min_cll, max, min, q, r;
2072 	struct amdgpu_dm_backlight_caps *caps;
2073 	struct amdgpu_display_manager *dm;
2074 	struct drm_connector *conn_base;
2075 	struct amdgpu_device *adev;
2076 	struct dc_link *link = NULL;
2077 	static const u8 pre_computed_values[] = {
2078 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2079 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2080 
2081 	if (!aconnector || !aconnector->dc_link)
2082 		return;
2083 
2084 	link = aconnector->dc_link;
2085 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2086 		return;
2087 
2088 	conn_base = &aconnector->base;
2089 	adev = drm_to_adev(conn_base->dev);
2090 	dm = &adev->dm;
2091 	caps = &dm->backlight_caps;
2092 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2093 	caps->aux_support = false;
2094 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2095 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2096 
2097 	if (caps->ext_caps->bits.oled == 1 ||
2098 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2099 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2100 		caps->aux_support = true;
2101 
2102 	/* From the specification (CTA-861-G), for calculating the maximum
2103 	 * luminance we need to use:
2104 	 *	Luminance = 50*2**(CV/32)
2105 	 * Where CV is a one-byte value.
2106 	 * For calculating this expression we may need float point precision;
2107 	 * to avoid this complexity level, we take advantage that CV is divided
2108 	 * by a constant. From the Euclids division algorithm, we know that CV
2109 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2110 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2111 	 * need to pre-compute the value of r/32. For pre-computing the values
2112 	 * We just used the following Ruby line:
2113 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2114 	 * The results of the above expressions can be verified at
2115 	 * pre_computed_values.
2116 	 */
2117 	q = max_cll >> 5;
2118 	r = max_cll % 32;
2119 	max = (1 << q) * pre_computed_values[r];
2120 
2121 	// min luminance: maxLum * (CV/255)^2 / 100
2122 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2123 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2124 
2125 	caps->aux_max_input_signal = max;
2126 	caps->aux_min_input_signal = min;
2127 }
2128 
2129 void amdgpu_dm_update_connector_after_detect(
2130 		struct amdgpu_dm_connector *aconnector)
2131 {
2132 	struct drm_connector *connector = &aconnector->base;
2133 	struct drm_device *dev = connector->dev;
2134 	struct dc_sink *sink;
2135 
2136 	/* MST handled by drm_mst framework */
2137 	if (aconnector->mst_mgr.mst_state == true)
2138 		return;
2139 
2140 	sink = aconnector->dc_link->local_sink;
2141 	if (sink)
2142 		dc_sink_retain(sink);
2143 
2144 	/*
2145 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2146 	 * the connector sink is set to either fake or physical sink depends on link status.
2147 	 * Skip if already done during boot.
2148 	 */
2149 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2150 			&& aconnector->dc_em_sink) {
2151 
2152 		/*
2153 		 * For S3 resume with headless use eml_sink to fake stream
2154 		 * because on resume connector->sink is set to NULL
2155 		 */
2156 		mutex_lock(&dev->mode_config.mutex);
2157 
2158 		if (sink) {
2159 			if (aconnector->dc_sink) {
2160 				amdgpu_dm_update_freesync_caps(connector, NULL);
2161 				/*
2162 				 * retain and release below are used to
2163 				 * bump up refcount for sink because the link doesn't point
2164 				 * to it anymore after disconnect, so on next crtc to connector
2165 				 * reshuffle by UMD we will get into unwanted dc_sink release
2166 				 */
2167 				dc_sink_release(aconnector->dc_sink);
2168 			}
2169 			aconnector->dc_sink = sink;
2170 			dc_sink_retain(aconnector->dc_sink);
2171 			amdgpu_dm_update_freesync_caps(connector,
2172 					aconnector->edid);
2173 		} else {
2174 			amdgpu_dm_update_freesync_caps(connector, NULL);
2175 			if (!aconnector->dc_sink) {
2176 				aconnector->dc_sink = aconnector->dc_em_sink;
2177 				dc_sink_retain(aconnector->dc_sink);
2178 			}
2179 		}
2180 
2181 		mutex_unlock(&dev->mode_config.mutex);
2182 
2183 		if (sink)
2184 			dc_sink_release(sink);
2185 		return;
2186 	}
2187 
2188 	/*
2189 	 * TODO: temporary guard to look for proper fix
2190 	 * if this sink is MST sink, we should not do anything
2191 	 */
2192 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2193 		dc_sink_release(sink);
2194 		return;
2195 	}
2196 
2197 	if (aconnector->dc_sink == sink) {
2198 		/*
2199 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2200 		 * Do nothing!!
2201 		 */
2202 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2203 				aconnector->connector_id);
2204 		if (sink)
2205 			dc_sink_release(sink);
2206 		return;
2207 	}
2208 
2209 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2210 		aconnector->connector_id, aconnector->dc_sink, sink);
2211 
2212 	mutex_lock(&dev->mode_config.mutex);
2213 
2214 	/*
2215 	 * 1. Update status of the drm connector
2216 	 * 2. Send an event and let userspace tell us what to do
2217 	 */
2218 	if (sink) {
2219 		/*
2220 		 * TODO: check if we still need the S3 mode update workaround.
2221 		 * If yes, put it here.
2222 		 */
2223 		if (aconnector->dc_sink)
2224 			amdgpu_dm_update_freesync_caps(connector, NULL);
2225 
2226 		aconnector->dc_sink = sink;
2227 		dc_sink_retain(aconnector->dc_sink);
2228 		if (sink->dc_edid.length == 0) {
2229 			aconnector->edid = NULL;
2230 			if (aconnector->dc_link->aux_mode) {
2231 				drm_dp_cec_unset_edid(
2232 					&aconnector->dm_dp_aux.aux);
2233 			}
2234 		} else {
2235 			aconnector->edid =
2236 				(struct edid *)sink->dc_edid.raw_edid;
2237 
2238 			drm_connector_update_edid_property(connector,
2239 							   aconnector->edid);
2240 			drm_add_edid_modes(connector, aconnector->edid);
2241 
2242 			if (aconnector->dc_link->aux_mode)
2243 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2244 						    aconnector->edid);
2245 		}
2246 
2247 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2248 		update_connector_ext_caps(aconnector);
2249 	} else {
2250 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2251 		amdgpu_dm_update_freesync_caps(connector, NULL);
2252 		drm_connector_update_edid_property(connector, NULL);
2253 		aconnector->num_modes = 0;
2254 		dc_sink_release(aconnector->dc_sink);
2255 		aconnector->dc_sink = NULL;
2256 		aconnector->edid = NULL;
2257 #ifdef CONFIG_DRM_AMD_DC_HDCP
2258 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2259 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2260 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2261 #endif
2262 	}
2263 
2264 	mutex_unlock(&dev->mode_config.mutex);
2265 
2266 	update_subconnector_property(aconnector);
2267 
2268 	if (sink)
2269 		dc_sink_release(sink);
2270 }
2271 
2272 static void handle_hpd_irq(void *param)
2273 {
2274 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2275 	struct drm_connector *connector = &aconnector->base;
2276 	struct drm_device *dev = connector->dev;
2277 	enum dc_connection_type new_connection_type = dc_connection_none;
2278 #ifdef CONFIG_DRM_AMD_DC_HDCP
2279 	struct amdgpu_device *adev = drm_to_adev(dev);
2280 #endif
2281 
2282 	/*
2283 	 * In case of failure or MST no need to update connector status or notify the OS
2284 	 * since (for MST case) MST does this in its own context.
2285 	 */
2286 	mutex_lock(&aconnector->hpd_lock);
2287 
2288 #ifdef CONFIG_DRM_AMD_DC_HDCP
2289 	if (adev->dm.hdcp_workqueue)
2290 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2291 #endif
2292 	if (aconnector->fake_enable)
2293 		aconnector->fake_enable = false;
2294 
2295 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2296 		DRM_ERROR("KMS: Failed to detect connector\n");
2297 
2298 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2299 		emulated_link_detect(aconnector->dc_link);
2300 
2301 
2302 		drm_modeset_lock_all(dev);
2303 		dm_restore_drm_connector_state(dev, connector);
2304 		drm_modeset_unlock_all(dev);
2305 
2306 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2307 			drm_kms_helper_hotplug_event(dev);
2308 
2309 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2310 		amdgpu_dm_update_connector_after_detect(aconnector);
2311 
2312 
2313 		drm_modeset_lock_all(dev);
2314 		dm_restore_drm_connector_state(dev, connector);
2315 		drm_modeset_unlock_all(dev);
2316 
2317 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2318 			drm_kms_helper_hotplug_event(dev);
2319 	}
2320 	mutex_unlock(&aconnector->hpd_lock);
2321 
2322 }
2323 
2324 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2325 {
2326 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2327 	uint8_t dret;
2328 	bool new_irq_handled = false;
2329 	int dpcd_addr;
2330 	int dpcd_bytes_to_read;
2331 
2332 	const int max_process_count = 30;
2333 	int process_count = 0;
2334 
2335 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2336 
2337 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2338 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2339 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2340 		dpcd_addr = DP_SINK_COUNT;
2341 	} else {
2342 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2343 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2344 		dpcd_addr = DP_SINK_COUNT_ESI;
2345 	}
2346 
2347 	dret = drm_dp_dpcd_read(
2348 		&aconnector->dm_dp_aux.aux,
2349 		dpcd_addr,
2350 		esi,
2351 		dpcd_bytes_to_read);
2352 
2353 	while (dret == dpcd_bytes_to_read &&
2354 		process_count < max_process_count) {
2355 		uint8_t retry;
2356 		dret = 0;
2357 
2358 		process_count++;
2359 
2360 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2361 		/* handle HPD short pulse irq */
2362 		if (aconnector->mst_mgr.mst_state)
2363 			drm_dp_mst_hpd_irq(
2364 				&aconnector->mst_mgr,
2365 				esi,
2366 				&new_irq_handled);
2367 
2368 		if (new_irq_handled) {
2369 			/* ACK at DPCD to notify down stream */
2370 			const int ack_dpcd_bytes_to_write =
2371 				dpcd_bytes_to_read - 1;
2372 
2373 			for (retry = 0; retry < 3; retry++) {
2374 				uint8_t wret;
2375 
2376 				wret = drm_dp_dpcd_write(
2377 					&aconnector->dm_dp_aux.aux,
2378 					dpcd_addr + 1,
2379 					&esi[1],
2380 					ack_dpcd_bytes_to_write);
2381 				if (wret == ack_dpcd_bytes_to_write)
2382 					break;
2383 			}
2384 
2385 			/* check if there is new irq to be handled */
2386 			dret = drm_dp_dpcd_read(
2387 				&aconnector->dm_dp_aux.aux,
2388 				dpcd_addr,
2389 				esi,
2390 				dpcd_bytes_to_read);
2391 
2392 			new_irq_handled = false;
2393 		} else {
2394 			break;
2395 		}
2396 	}
2397 
2398 	if (process_count == max_process_count)
2399 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2400 }
2401 
2402 static void handle_hpd_rx_irq(void *param)
2403 {
2404 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2405 	struct drm_connector *connector = &aconnector->base;
2406 	struct drm_device *dev = connector->dev;
2407 	struct dc_link *dc_link = aconnector->dc_link;
2408 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2409 	enum dc_connection_type new_connection_type = dc_connection_none;
2410 #ifdef CONFIG_DRM_AMD_DC_HDCP
2411 	union hpd_irq_data hpd_irq_data;
2412 	struct amdgpu_device *adev = drm_to_adev(dev);
2413 
2414 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2415 #endif
2416 
2417 	/*
2418 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2419 	 * conflict, after implement i2c helper, this mutex should be
2420 	 * retired.
2421 	 */
2422 	if (dc_link->type != dc_connection_mst_branch)
2423 		mutex_lock(&aconnector->hpd_lock);
2424 
2425 
2426 #ifdef CONFIG_DRM_AMD_DC_HDCP
2427 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2428 #else
2429 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2430 #endif
2431 			!is_mst_root_connector) {
2432 		/* Downstream Port status changed. */
2433 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2434 			DRM_ERROR("KMS: Failed to detect connector\n");
2435 
2436 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2437 			emulated_link_detect(dc_link);
2438 
2439 			if (aconnector->fake_enable)
2440 				aconnector->fake_enable = false;
2441 
2442 			amdgpu_dm_update_connector_after_detect(aconnector);
2443 
2444 
2445 			drm_modeset_lock_all(dev);
2446 			dm_restore_drm_connector_state(dev, connector);
2447 			drm_modeset_unlock_all(dev);
2448 
2449 			drm_kms_helper_hotplug_event(dev);
2450 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2451 
2452 			if (aconnector->fake_enable)
2453 				aconnector->fake_enable = false;
2454 
2455 			amdgpu_dm_update_connector_after_detect(aconnector);
2456 
2457 
2458 			drm_modeset_lock_all(dev);
2459 			dm_restore_drm_connector_state(dev, connector);
2460 			drm_modeset_unlock_all(dev);
2461 
2462 			drm_kms_helper_hotplug_event(dev);
2463 		}
2464 	}
2465 #ifdef CONFIG_DRM_AMD_DC_HDCP
2466 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2467 		if (adev->dm.hdcp_workqueue)
2468 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2469 	}
2470 #endif
2471 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2472 	    (dc_link->type == dc_connection_mst_branch))
2473 		dm_handle_hpd_rx_irq(aconnector);
2474 
2475 	if (dc_link->type != dc_connection_mst_branch) {
2476 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2477 		mutex_unlock(&aconnector->hpd_lock);
2478 	}
2479 }
2480 
2481 static void register_hpd_handlers(struct amdgpu_device *adev)
2482 {
2483 	struct drm_device *dev = adev_to_drm(adev);
2484 	struct drm_connector *connector;
2485 	struct amdgpu_dm_connector *aconnector;
2486 	const struct dc_link *dc_link;
2487 	struct dc_interrupt_params int_params = {0};
2488 
2489 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2490 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2491 
2492 	list_for_each_entry(connector,
2493 			&dev->mode_config.connector_list, head)	{
2494 
2495 		aconnector = to_amdgpu_dm_connector(connector);
2496 		dc_link = aconnector->dc_link;
2497 
2498 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2499 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2500 			int_params.irq_source = dc_link->irq_source_hpd;
2501 
2502 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2503 					handle_hpd_irq,
2504 					(void *) aconnector);
2505 		}
2506 
2507 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2508 
2509 			/* Also register for DP short pulse (hpd_rx). */
2510 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2511 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2512 
2513 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2514 					handle_hpd_rx_irq,
2515 					(void *) aconnector);
2516 		}
2517 	}
2518 }
2519 
2520 #if defined(CONFIG_DRM_AMD_DC_SI)
2521 /* Register IRQ sources and initialize IRQ callbacks */
2522 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2523 {
2524 	struct dc *dc = adev->dm.dc;
2525 	struct common_irq_params *c_irq_params;
2526 	struct dc_interrupt_params int_params = {0};
2527 	int r;
2528 	int i;
2529 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2530 
2531 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2532 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2533 
2534 	/*
2535 	 * Actions of amdgpu_irq_add_id():
2536 	 * 1. Register a set() function with base driver.
2537 	 *    Base driver will call set() function to enable/disable an
2538 	 *    interrupt in DC hardware.
2539 	 * 2. Register amdgpu_dm_irq_handler().
2540 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2541 	 *    coming from DC hardware.
2542 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2543 	 *    for acknowledging and handling. */
2544 
2545 	/* Use VBLANK interrupt */
2546 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2547 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2548 		if (r) {
2549 			DRM_ERROR("Failed to add crtc irq id!\n");
2550 			return r;
2551 		}
2552 
2553 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2554 		int_params.irq_source =
2555 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2556 
2557 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2558 
2559 		c_irq_params->adev = adev;
2560 		c_irq_params->irq_src = int_params.irq_source;
2561 
2562 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2563 				dm_crtc_high_irq, c_irq_params);
2564 	}
2565 
2566 	/* Use GRPH_PFLIP interrupt */
2567 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2568 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2569 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2570 		if (r) {
2571 			DRM_ERROR("Failed to add page flip irq id!\n");
2572 			return r;
2573 		}
2574 
2575 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2576 		int_params.irq_source =
2577 			dc_interrupt_to_irq_source(dc, i, 0);
2578 
2579 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2580 
2581 		c_irq_params->adev = adev;
2582 		c_irq_params->irq_src = int_params.irq_source;
2583 
2584 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2585 				dm_pflip_high_irq, c_irq_params);
2586 
2587 	}
2588 
2589 	/* HPD */
2590 	r = amdgpu_irq_add_id(adev, client_id,
2591 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2592 	if (r) {
2593 		DRM_ERROR("Failed to add hpd irq id!\n");
2594 		return r;
2595 	}
2596 
2597 	register_hpd_handlers(adev);
2598 
2599 	return 0;
2600 }
2601 #endif
2602 
2603 /* Register IRQ sources and initialize IRQ callbacks */
2604 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2605 {
2606 	struct dc *dc = adev->dm.dc;
2607 	struct common_irq_params *c_irq_params;
2608 	struct dc_interrupt_params int_params = {0};
2609 	int r;
2610 	int i;
2611 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2612 
2613 	if (adev->asic_type >= CHIP_VEGA10)
2614 		client_id = SOC15_IH_CLIENTID_DCE;
2615 
2616 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2617 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2618 
2619 	/*
2620 	 * Actions of amdgpu_irq_add_id():
2621 	 * 1. Register a set() function with base driver.
2622 	 *    Base driver will call set() function to enable/disable an
2623 	 *    interrupt in DC hardware.
2624 	 * 2. Register amdgpu_dm_irq_handler().
2625 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2626 	 *    coming from DC hardware.
2627 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2628 	 *    for acknowledging and handling. */
2629 
2630 	/* Use VBLANK interrupt */
2631 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2632 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2633 		if (r) {
2634 			DRM_ERROR("Failed to add crtc irq id!\n");
2635 			return r;
2636 		}
2637 
2638 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2639 		int_params.irq_source =
2640 			dc_interrupt_to_irq_source(dc, i, 0);
2641 
2642 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2643 
2644 		c_irq_params->adev = adev;
2645 		c_irq_params->irq_src = int_params.irq_source;
2646 
2647 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2648 				dm_crtc_high_irq, c_irq_params);
2649 	}
2650 
2651 	/* Use VUPDATE interrupt */
2652 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2653 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2654 		if (r) {
2655 			DRM_ERROR("Failed to add vupdate irq id!\n");
2656 			return r;
2657 		}
2658 
2659 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2660 		int_params.irq_source =
2661 			dc_interrupt_to_irq_source(dc, i, 0);
2662 
2663 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2664 
2665 		c_irq_params->adev = adev;
2666 		c_irq_params->irq_src = int_params.irq_source;
2667 
2668 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2669 				dm_vupdate_high_irq, c_irq_params);
2670 	}
2671 
2672 	/* Use GRPH_PFLIP interrupt */
2673 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2674 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2675 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2676 		if (r) {
2677 			DRM_ERROR("Failed to add page flip irq id!\n");
2678 			return r;
2679 		}
2680 
2681 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2682 		int_params.irq_source =
2683 			dc_interrupt_to_irq_source(dc, i, 0);
2684 
2685 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2686 
2687 		c_irq_params->adev = adev;
2688 		c_irq_params->irq_src = int_params.irq_source;
2689 
2690 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2691 				dm_pflip_high_irq, c_irq_params);
2692 
2693 	}
2694 
2695 	/* HPD */
2696 	r = amdgpu_irq_add_id(adev, client_id,
2697 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2698 	if (r) {
2699 		DRM_ERROR("Failed to add hpd irq id!\n");
2700 		return r;
2701 	}
2702 
2703 	register_hpd_handlers(adev);
2704 
2705 	return 0;
2706 }
2707 
2708 #if defined(CONFIG_DRM_AMD_DC_DCN)
2709 /* Register IRQ sources and initialize IRQ callbacks */
2710 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2711 {
2712 	struct dc *dc = adev->dm.dc;
2713 	struct common_irq_params *c_irq_params;
2714 	struct dc_interrupt_params int_params = {0};
2715 	int r;
2716 	int i;
2717 
2718 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2719 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2720 
2721 	/*
2722 	 * Actions of amdgpu_irq_add_id():
2723 	 * 1. Register a set() function with base driver.
2724 	 *    Base driver will call set() function to enable/disable an
2725 	 *    interrupt in DC hardware.
2726 	 * 2. Register amdgpu_dm_irq_handler().
2727 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2728 	 *    coming from DC hardware.
2729 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2730 	 *    for acknowledging and handling.
2731 	 */
2732 
2733 	/* Use VSTARTUP interrupt */
2734 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2735 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2736 			i++) {
2737 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2738 
2739 		if (r) {
2740 			DRM_ERROR("Failed to add crtc irq id!\n");
2741 			return r;
2742 		}
2743 
2744 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2745 		int_params.irq_source =
2746 			dc_interrupt_to_irq_source(dc, i, 0);
2747 
2748 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2749 
2750 		c_irq_params->adev = adev;
2751 		c_irq_params->irq_src = int_params.irq_source;
2752 
2753 		amdgpu_dm_irq_register_interrupt(
2754 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2755 	}
2756 
2757 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2758 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2759 	 * to trigger at end of each vblank, regardless of state of the lock,
2760 	 * matching DCE behaviour.
2761 	 */
2762 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2763 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2764 	     i++) {
2765 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2766 
2767 		if (r) {
2768 			DRM_ERROR("Failed to add vupdate irq id!\n");
2769 			return r;
2770 		}
2771 
2772 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2773 		int_params.irq_source =
2774 			dc_interrupt_to_irq_source(dc, i, 0);
2775 
2776 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2777 
2778 		c_irq_params->adev = adev;
2779 		c_irq_params->irq_src = int_params.irq_source;
2780 
2781 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2782 				dm_vupdate_high_irq, c_irq_params);
2783 	}
2784 
2785 	/* Use GRPH_PFLIP interrupt */
2786 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2787 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2788 			i++) {
2789 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2790 		if (r) {
2791 			DRM_ERROR("Failed to add page flip irq id!\n");
2792 			return r;
2793 		}
2794 
2795 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2796 		int_params.irq_source =
2797 			dc_interrupt_to_irq_source(dc, i, 0);
2798 
2799 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2800 
2801 		c_irq_params->adev = adev;
2802 		c_irq_params->irq_src = int_params.irq_source;
2803 
2804 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2805 				dm_pflip_high_irq, c_irq_params);
2806 
2807 	}
2808 
2809 	/* HPD */
2810 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2811 			&adev->hpd_irq);
2812 	if (r) {
2813 		DRM_ERROR("Failed to add hpd irq id!\n");
2814 		return r;
2815 	}
2816 
2817 	register_hpd_handlers(adev);
2818 
2819 	return 0;
2820 }
2821 #endif
2822 
2823 /*
2824  * Acquires the lock for the atomic state object and returns
2825  * the new atomic state.
2826  *
2827  * This should only be called during atomic check.
2828  */
2829 static int dm_atomic_get_state(struct drm_atomic_state *state,
2830 			       struct dm_atomic_state **dm_state)
2831 {
2832 	struct drm_device *dev = state->dev;
2833 	struct amdgpu_device *adev = drm_to_adev(dev);
2834 	struct amdgpu_display_manager *dm = &adev->dm;
2835 	struct drm_private_state *priv_state;
2836 
2837 	if (*dm_state)
2838 		return 0;
2839 
2840 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2841 	if (IS_ERR(priv_state))
2842 		return PTR_ERR(priv_state);
2843 
2844 	*dm_state = to_dm_atomic_state(priv_state);
2845 
2846 	return 0;
2847 }
2848 
2849 static struct dm_atomic_state *
2850 dm_atomic_get_new_state(struct drm_atomic_state *state)
2851 {
2852 	struct drm_device *dev = state->dev;
2853 	struct amdgpu_device *adev = drm_to_adev(dev);
2854 	struct amdgpu_display_manager *dm = &adev->dm;
2855 	struct drm_private_obj *obj;
2856 	struct drm_private_state *new_obj_state;
2857 	int i;
2858 
2859 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2860 		if (obj->funcs == dm->atomic_obj.funcs)
2861 			return to_dm_atomic_state(new_obj_state);
2862 	}
2863 
2864 	return NULL;
2865 }
2866 
2867 static struct drm_private_state *
2868 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2869 {
2870 	struct dm_atomic_state *old_state, *new_state;
2871 
2872 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2873 	if (!new_state)
2874 		return NULL;
2875 
2876 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2877 
2878 	old_state = to_dm_atomic_state(obj->state);
2879 
2880 	if (old_state && old_state->context)
2881 		new_state->context = dc_copy_state(old_state->context);
2882 
2883 	if (!new_state->context) {
2884 		kfree(new_state);
2885 		return NULL;
2886 	}
2887 
2888 	return &new_state->base;
2889 }
2890 
2891 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2892 				    struct drm_private_state *state)
2893 {
2894 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2895 
2896 	if (dm_state && dm_state->context)
2897 		dc_release_state(dm_state->context);
2898 
2899 	kfree(dm_state);
2900 }
2901 
2902 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2903 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2904 	.atomic_destroy_state = dm_atomic_destroy_state,
2905 };
2906 
2907 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2908 {
2909 	struct dm_atomic_state *state;
2910 	int r;
2911 
2912 	adev->mode_info.mode_config_initialized = true;
2913 
2914 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2915 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2916 
2917 	adev_to_drm(adev)->mode_config.max_width = 16384;
2918 	adev_to_drm(adev)->mode_config.max_height = 16384;
2919 
2920 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2921 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2922 	/* indicates support for immediate flip */
2923 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2924 
2925 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2926 
2927 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2928 	if (!state)
2929 		return -ENOMEM;
2930 
2931 	state->context = dc_create_state(adev->dm.dc);
2932 	if (!state->context) {
2933 		kfree(state);
2934 		return -ENOMEM;
2935 	}
2936 
2937 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2938 
2939 	drm_atomic_private_obj_init(adev_to_drm(adev),
2940 				    &adev->dm.atomic_obj,
2941 				    &state->base,
2942 				    &dm_atomic_state_funcs);
2943 
2944 	r = amdgpu_display_modeset_create_props(adev);
2945 	if (r) {
2946 		dc_release_state(state->context);
2947 		kfree(state);
2948 		return r;
2949 	}
2950 
2951 	r = amdgpu_dm_audio_init(adev);
2952 	if (r) {
2953 		dc_release_state(state->context);
2954 		kfree(state);
2955 		return r;
2956 	}
2957 
2958 	return 0;
2959 }
2960 
2961 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2962 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2963 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2964 
2965 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2966 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2967 
2968 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2969 {
2970 #if defined(CONFIG_ACPI)
2971 	struct amdgpu_dm_backlight_caps caps;
2972 
2973 	memset(&caps, 0, sizeof(caps));
2974 
2975 	if (dm->backlight_caps.caps_valid)
2976 		return;
2977 
2978 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2979 	if (caps.caps_valid) {
2980 		dm->backlight_caps.caps_valid = true;
2981 		if (caps.aux_support)
2982 			return;
2983 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2984 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2985 	} else {
2986 		dm->backlight_caps.min_input_signal =
2987 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2988 		dm->backlight_caps.max_input_signal =
2989 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2990 	}
2991 #else
2992 	if (dm->backlight_caps.aux_support)
2993 		return;
2994 
2995 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2996 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2997 #endif
2998 }
2999 
3000 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3001 {
3002 	bool rc;
3003 
3004 	if (!link)
3005 		return 1;
3006 
3007 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3008 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3009 
3010 	return rc ? 0 : 1;
3011 }
3012 
3013 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3014 				unsigned *min, unsigned *max)
3015 {
3016 	if (!caps)
3017 		return 0;
3018 
3019 	if (caps->aux_support) {
3020 		// Firmware limits are in nits, DC API wants millinits.
3021 		*max = 1000 * caps->aux_max_input_signal;
3022 		*min = 1000 * caps->aux_min_input_signal;
3023 	} else {
3024 		// Firmware limits are 8-bit, PWM control is 16-bit.
3025 		*max = 0x101 * caps->max_input_signal;
3026 		*min = 0x101 * caps->min_input_signal;
3027 	}
3028 	return 1;
3029 }
3030 
3031 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3032 					uint32_t brightness)
3033 {
3034 	unsigned min, max;
3035 
3036 	if (!get_brightness_range(caps, &min, &max))
3037 		return brightness;
3038 
3039 	// Rescale 0..255 to min..max
3040 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3041 				       AMDGPU_MAX_BL_LEVEL);
3042 }
3043 
3044 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3045 				      uint32_t brightness)
3046 {
3047 	unsigned min, max;
3048 
3049 	if (!get_brightness_range(caps, &min, &max))
3050 		return brightness;
3051 
3052 	if (brightness < min)
3053 		return 0;
3054 	// Rescale min..max to 0..255
3055 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3056 				 max - min);
3057 }
3058 
3059 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3060 {
3061 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3062 	struct amdgpu_dm_backlight_caps caps;
3063 	struct dc_link *link = NULL;
3064 	u32 brightness;
3065 	bool rc;
3066 
3067 	amdgpu_dm_update_backlight_caps(dm);
3068 	caps = dm->backlight_caps;
3069 
3070 	link = (struct dc_link *)dm->backlight_link;
3071 
3072 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3073 	// Change brightness based on AUX property
3074 	if (caps.aux_support)
3075 		return set_backlight_via_aux(link, brightness);
3076 
3077 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3078 
3079 	return rc ? 0 : 1;
3080 }
3081 
3082 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3083 {
3084 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3085 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3086 
3087 	if (ret == DC_ERROR_UNEXPECTED)
3088 		return bd->props.brightness;
3089 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3090 }
3091 
3092 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3093 	.options = BL_CORE_SUSPENDRESUME,
3094 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3095 	.update_status	= amdgpu_dm_backlight_update_status,
3096 };
3097 
3098 static void
3099 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3100 {
3101 	char bl_name[16];
3102 	struct backlight_properties props = { 0 };
3103 
3104 	amdgpu_dm_update_backlight_caps(dm);
3105 
3106 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3107 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3108 	props.type = BACKLIGHT_RAW;
3109 
3110 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3111 		 adev_to_drm(dm->adev)->primary->index);
3112 
3113 	dm->backlight_dev = backlight_device_register(bl_name,
3114 						      adev_to_drm(dm->adev)->dev,
3115 						      dm,
3116 						      &amdgpu_dm_backlight_ops,
3117 						      &props);
3118 
3119 	if (IS_ERR(dm->backlight_dev))
3120 		DRM_ERROR("DM: Backlight registration failed!\n");
3121 	else
3122 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3123 }
3124 
3125 #endif
3126 
3127 static int initialize_plane(struct amdgpu_display_manager *dm,
3128 			    struct amdgpu_mode_info *mode_info, int plane_id,
3129 			    enum drm_plane_type plane_type,
3130 			    const struct dc_plane_cap *plane_cap)
3131 {
3132 	struct drm_plane *plane;
3133 	unsigned long possible_crtcs;
3134 	int ret = 0;
3135 
3136 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3137 	if (!plane) {
3138 		DRM_ERROR("KMS: Failed to allocate plane\n");
3139 		return -ENOMEM;
3140 	}
3141 	plane->type = plane_type;
3142 
3143 	/*
3144 	 * HACK: IGT tests expect that the primary plane for a CRTC
3145 	 * can only have one possible CRTC. Only expose support for
3146 	 * any CRTC if they're not going to be used as a primary plane
3147 	 * for a CRTC - like overlay or underlay planes.
3148 	 */
3149 	possible_crtcs = 1 << plane_id;
3150 	if (plane_id >= dm->dc->caps.max_streams)
3151 		possible_crtcs = 0xff;
3152 
3153 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3154 
3155 	if (ret) {
3156 		DRM_ERROR("KMS: Failed to initialize plane\n");
3157 		kfree(plane);
3158 		return ret;
3159 	}
3160 
3161 	if (mode_info)
3162 		mode_info->planes[plane_id] = plane;
3163 
3164 	return ret;
3165 }
3166 
3167 
3168 static void register_backlight_device(struct amdgpu_display_manager *dm,
3169 				      struct dc_link *link)
3170 {
3171 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3172 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3173 
3174 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3175 	    link->type != dc_connection_none) {
3176 		/*
3177 		 * Event if registration failed, we should continue with
3178 		 * DM initialization because not having a backlight control
3179 		 * is better then a black screen.
3180 		 */
3181 		amdgpu_dm_register_backlight_device(dm);
3182 
3183 		if (dm->backlight_dev)
3184 			dm->backlight_link = link;
3185 	}
3186 #endif
3187 }
3188 
3189 
3190 /*
3191  * In this architecture, the association
3192  * connector -> encoder -> crtc
3193  * id not really requried. The crtc and connector will hold the
3194  * display_index as an abstraction to use with DAL component
3195  *
3196  * Returns 0 on success
3197  */
3198 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3199 {
3200 	struct amdgpu_display_manager *dm = &adev->dm;
3201 	int32_t i;
3202 	struct amdgpu_dm_connector *aconnector = NULL;
3203 	struct amdgpu_encoder *aencoder = NULL;
3204 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3205 	uint32_t link_cnt;
3206 	int32_t primary_planes;
3207 	enum dc_connection_type new_connection_type = dc_connection_none;
3208 	const struct dc_plane_cap *plane;
3209 
3210 	link_cnt = dm->dc->caps.max_links;
3211 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3212 		DRM_ERROR("DM: Failed to initialize mode config\n");
3213 		return -EINVAL;
3214 	}
3215 
3216 	/* There is one primary plane per CRTC */
3217 	primary_planes = dm->dc->caps.max_streams;
3218 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3219 
3220 	/*
3221 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3222 	 * Order is reversed to match iteration order in atomic check.
3223 	 */
3224 	for (i = (primary_planes - 1); i >= 0; i--) {
3225 		plane = &dm->dc->caps.planes[i];
3226 
3227 		if (initialize_plane(dm, mode_info, i,
3228 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3229 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3230 			goto fail;
3231 		}
3232 	}
3233 
3234 	/*
3235 	 * Initialize overlay planes, index starting after primary planes.
3236 	 * These planes have a higher DRM index than the primary planes since
3237 	 * they should be considered as having a higher z-order.
3238 	 * Order is reversed to match iteration order in atomic check.
3239 	 *
3240 	 * Only support DCN for now, and only expose one so we don't encourage
3241 	 * userspace to use up all the pipes.
3242 	 */
3243 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3244 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3245 
3246 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3247 			continue;
3248 
3249 		if (!plane->blends_with_above || !plane->blends_with_below)
3250 			continue;
3251 
3252 		if (!plane->pixel_format_support.argb8888)
3253 			continue;
3254 
3255 		if (initialize_plane(dm, NULL, primary_planes + i,
3256 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3257 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3258 			goto fail;
3259 		}
3260 
3261 		/* Only create one overlay plane. */
3262 		break;
3263 	}
3264 
3265 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3266 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3267 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3268 			goto fail;
3269 		}
3270 
3271 	dm->display_indexes_num = dm->dc->caps.max_streams;
3272 
3273 	/* loops over all connectors on the board */
3274 	for (i = 0; i < link_cnt; i++) {
3275 		struct dc_link *link = NULL;
3276 
3277 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3278 			DRM_ERROR(
3279 				"KMS: Cannot support more than %d display indexes\n",
3280 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3281 			continue;
3282 		}
3283 
3284 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3285 		if (!aconnector)
3286 			goto fail;
3287 
3288 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3289 		if (!aencoder)
3290 			goto fail;
3291 
3292 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3293 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3294 			goto fail;
3295 		}
3296 
3297 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3298 			DRM_ERROR("KMS: Failed to initialize connector\n");
3299 			goto fail;
3300 		}
3301 
3302 		link = dc_get_link_at_index(dm->dc, i);
3303 
3304 		if (!dc_link_detect_sink(link, &new_connection_type))
3305 			DRM_ERROR("KMS: Failed to detect connector\n");
3306 
3307 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3308 			emulated_link_detect(link);
3309 			amdgpu_dm_update_connector_after_detect(aconnector);
3310 
3311 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3312 			amdgpu_dm_update_connector_after_detect(aconnector);
3313 			register_backlight_device(dm, link);
3314 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3315 				amdgpu_dm_set_psr_caps(link);
3316 		}
3317 
3318 
3319 	}
3320 
3321 	/* Software is initialized. Now we can register interrupt handlers. */
3322 	switch (adev->asic_type) {
3323 #if defined(CONFIG_DRM_AMD_DC_SI)
3324 	case CHIP_TAHITI:
3325 	case CHIP_PITCAIRN:
3326 	case CHIP_VERDE:
3327 	case CHIP_OLAND:
3328 		if (dce60_register_irq_handlers(dm->adev)) {
3329 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3330 			goto fail;
3331 		}
3332 		break;
3333 #endif
3334 	case CHIP_BONAIRE:
3335 	case CHIP_HAWAII:
3336 	case CHIP_KAVERI:
3337 	case CHIP_KABINI:
3338 	case CHIP_MULLINS:
3339 	case CHIP_TONGA:
3340 	case CHIP_FIJI:
3341 	case CHIP_CARRIZO:
3342 	case CHIP_STONEY:
3343 	case CHIP_POLARIS11:
3344 	case CHIP_POLARIS10:
3345 	case CHIP_POLARIS12:
3346 	case CHIP_VEGAM:
3347 	case CHIP_VEGA10:
3348 	case CHIP_VEGA12:
3349 	case CHIP_VEGA20:
3350 		if (dce110_register_irq_handlers(dm->adev)) {
3351 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3352 			goto fail;
3353 		}
3354 		break;
3355 #if defined(CONFIG_DRM_AMD_DC_DCN)
3356 	case CHIP_RAVEN:
3357 	case CHIP_NAVI12:
3358 	case CHIP_NAVI10:
3359 	case CHIP_NAVI14:
3360 	case CHIP_RENOIR:
3361 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3362 	case CHIP_SIENNA_CICHLID:
3363 	case CHIP_NAVY_FLOUNDER:
3364 #endif
3365 		if (dcn10_register_irq_handlers(dm->adev)) {
3366 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3367 			goto fail;
3368 		}
3369 		break;
3370 #endif
3371 	default:
3372 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3373 		goto fail;
3374 	}
3375 
3376 	/* No userspace support. */
3377 	dm->dc->debug.disable_tri_buf = true;
3378 
3379 	return 0;
3380 fail:
3381 	kfree(aencoder);
3382 	kfree(aconnector);
3383 
3384 	return -EINVAL;
3385 }
3386 
3387 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3388 {
3389 	drm_mode_config_cleanup(dm->ddev);
3390 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3391 	return;
3392 }
3393 
3394 /******************************************************************************
3395  * amdgpu_display_funcs functions
3396  *****************************************************************************/
3397 
3398 /*
3399  * dm_bandwidth_update - program display watermarks
3400  *
3401  * @adev: amdgpu_device pointer
3402  *
3403  * Calculate and program the display watermarks and line buffer allocation.
3404  */
3405 static void dm_bandwidth_update(struct amdgpu_device *adev)
3406 {
3407 	/* TODO: implement later */
3408 }
3409 
3410 static const struct amdgpu_display_funcs dm_display_funcs = {
3411 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3412 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3413 	.backlight_set_level = NULL, /* never called for DC */
3414 	.backlight_get_level = NULL, /* never called for DC */
3415 	.hpd_sense = NULL,/* called unconditionally */
3416 	.hpd_set_polarity = NULL, /* called unconditionally */
3417 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3418 	.page_flip_get_scanoutpos =
3419 		dm_crtc_get_scanoutpos,/* called unconditionally */
3420 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3421 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3422 };
3423 
3424 #if defined(CONFIG_DEBUG_KERNEL_DC)
3425 
3426 static ssize_t s3_debug_store(struct device *device,
3427 			      struct device_attribute *attr,
3428 			      const char *buf,
3429 			      size_t count)
3430 {
3431 	int ret;
3432 	int s3_state;
3433 	struct drm_device *drm_dev = dev_get_drvdata(device);
3434 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3435 
3436 	ret = kstrtoint(buf, 0, &s3_state);
3437 
3438 	if (ret == 0) {
3439 		if (s3_state) {
3440 			dm_resume(adev);
3441 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3442 		} else
3443 			dm_suspend(adev);
3444 	}
3445 
3446 	return ret == 0 ? count : 0;
3447 }
3448 
3449 DEVICE_ATTR_WO(s3_debug);
3450 
3451 #endif
3452 
3453 static int dm_early_init(void *handle)
3454 {
3455 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3456 
3457 	switch (adev->asic_type) {
3458 #if defined(CONFIG_DRM_AMD_DC_SI)
3459 	case CHIP_TAHITI:
3460 	case CHIP_PITCAIRN:
3461 	case CHIP_VERDE:
3462 		adev->mode_info.num_crtc = 6;
3463 		adev->mode_info.num_hpd = 6;
3464 		adev->mode_info.num_dig = 6;
3465 		break;
3466 	case CHIP_OLAND:
3467 		adev->mode_info.num_crtc = 2;
3468 		adev->mode_info.num_hpd = 2;
3469 		adev->mode_info.num_dig = 2;
3470 		break;
3471 #endif
3472 	case CHIP_BONAIRE:
3473 	case CHIP_HAWAII:
3474 		adev->mode_info.num_crtc = 6;
3475 		adev->mode_info.num_hpd = 6;
3476 		adev->mode_info.num_dig = 6;
3477 		break;
3478 	case CHIP_KAVERI:
3479 		adev->mode_info.num_crtc = 4;
3480 		adev->mode_info.num_hpd = 6;
3481 		adev->mode_info.num_dig = 7;
3482 		break;
3483 	case CHIP_KABINI:
3484 	case CHIP_MULLINS:
3485 		adev->mode_info.num_crtc = 2;
3486 		adev->mode_info.num_hpd = 6;
3487 		adev->mode_info.num_dig = 6;
3488 		break;
3489 	case CHIP_FIJI:
3490 	case CHIP_TONGA:
3491 		adev->mode_info.num_crtc = 6;
3492 		adev->mode_info.num_hpd = 6;
3493 		adev->mode_info.num_dig = 7;
3494 		break;
3495 	case CHIP_CARRIZO:
3496 		adev->mode_info.num_crtc = 3;
3497 		adev->mode_info.num_hpd = 6;
3498 		adev->mode_info.num_dig = 9;
3499 		break;
3500 	case CHIP_STONEY:
3501 		adev->mode_info.num_crtc = 2;
3502 		adev->mode_info.num_hpd = 6;
3503 		adev->mode_info.num_dig = 9;
3504 		break;
3505 	case CHIP_POLARIS11:
3506 	case CHIP_POLARIS12:
3507 		adev->mode_info.num_crtc = 5;
3508 		adev->mode_info.num_hpd = 5;
3509 		adev->mode_info.num_dig = 5;
3510 		break;
3511 	case CHIP_POLARIS10:
3512 	case CHIP_VEGAM:
3513 		adev->mode_info.num_crtc = 6;
3514 		adev->mode_info.num_hpd = 6;
3515 		adev->mode_info.num_dig = 6;
3516 		break;
3517 	case CHIP_VEGA10:
3518 	case CHIP_VEGA12:
3519 	case CHIP_VEGA20:
3520 		adev->mode_info.num_crtc = 6;
3521 		adev->mode_info.num_hpd = 6;
3522 		adev->mode_info.num_dig = 6;
3523 		break;
3524 #if defined(CONFIG_DRM_AMD_DC_DCN)
3525 	case CHIP_RAVEN:
3526 		adev->mode_info.num_crtc = 4;
3527 		adev->mode_info.num_hpd = 4;
3528 		adev->mode_info.num_dig = 4;
3529 		break;
3530 #endif
3531 	case CHIP_NAVI10:
3532 	case CHIP_NAVI12:
3533 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3534 	case CHIP_SIENNA_CICHLID:
3535 	case CHIP_NAVY_FLOUNDER:
3536 #endif
3537 		adev->mode_info.num_crtc = 6;
3538 		adev->mode_info.num_hpd = 6;
3539 		adev->mode_info.num_dig = 6;
3540 		break;
3541 	case CHIP_NAVI14:
3542 		adev->mode_info.num_crtc = 5;
3543 		adev->mode_info.num_hpd = 5;
3544 		adev->mode_info.num_dig = 5;
3545 		break;
3546 	case CHIP_RENOIR:
3547 		adev->mode_info.num_crtc = 4;
3548 		adev->mode_info.num_hpd = 4;
3549 		adev->mode_info.num_dig = 4;
3550 		break;
3551 	default:
3552 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3553 		return -EINVAL;
3554 	}
3555 
3556 	amdgpu_dm_set_irq_funcs(adev);
3557 
3558 	if (adev->mode_info.funcs == NULL)
3559 		adev->mode_info.funcs = &dm_display_funcs;
3560 
3561 	/*
3562 	 * Note: Do NOT change adev->audio_endpt_rreg and
3563 	 * adev->audio_endpt_wreg because they are initialised in
3564 	 * amdgpu_device_init()
3565 	 */
3566 #if defined(CONFIG_DEBUG_KERNEL_DC)
3567 	device_create_file(
3568 		adev_to_drm(adev)->dev,
3569 		&dev_attr_s3_debug);
3570 #endif
3571 
3572 	return 0;
3573 }
3574 
3575 static bool modeset_required(struct drm_crtc_state *crtc_state,
3576 			     struct dc_stream_state *new_stream,
3577 			     struct dc_stream_state *old_stream)
3578 {
3579 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3580 }
3581 
3582 static bool modereset_required(struct drm_crtc_state *crtc_state)
3583 {
3584 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3585 }
3586 
3587 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3588 {
3589 	drm_encoder_cleanup(encoder);
3590 	kfree(encoder);
3591 }
3592 
3593 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3594 	.destroy = amdgpu_dm_encoder_destroy,
3595 };
3596 
3597 
3598 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3599 				struct dc_scaling_info *scaling_info)
3600 {
3601 	int scale_w, scale_h;
3602 
3603 	memset(scaling_info, 0, sizeof(*scaling_info));
3604 
3605 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3606 	scaling_info->src_rect.x = state->src_x >> 16;
3607 	scaling_info->src_rect.y = state->src_y >> 16;
3608 
3609 	scaling_info->src_rect.width = state->src_w >> 16;
3610 	if (scaling_info->src_rect.width == 0)
3611 		return -EINVAL;
3612 
3613 	scaling_info->src_rect.height = state->src_h >> 16;
3614 	if (scaling_info->src_rect.height == 0)
3615 		return -EINVAL;
3616 
3617 	scaling_info->dst_rect.x = state->crtc_x;
3618 	scaling_info->dst_rect.y = state->crtc_y;
3619 
3620 	if (state->crtc_w == 0)
3621 		return -EINVAL;
3622 
3623 	scaling_info->dst_rect.width = state->crtc_w;
3624 
3625 	if (state->crtc_h == 0)
3626 		return -EINVAL;
3627 
3628 	scaling_info->dst_rect.height = state->crtc_h;
3629 
3630 	/* DRM doesn't specify clipping on destination output. */
3631 	scaling_info->clip_rect = scaling_info->dst_rect;
3632 
3633 	/* TODO: Validate scaling per-format with DC plane caps */
3634 	scale_w = scaling_info->dst_rect.width * 1000 /
3635 		  scaling_info->src_rect.width;
3636 
3637 	if (scale_w < 250 || scale_w > 16000)
3638 		return -EINVAL;
3639 
3640 	scale_h = scaling_info->dst_rect.height * 1000 /
3641 		  scaling_info->src_rect.height;
3642 
3643 	if (scale_h < 250 || scale_h > 16000)
3644 		return -EINVAL;
3645 
3646 	/*
3647 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3648 	 * assume reasonable defaults based on the format.
3649 	 */
3650 
3651 	return 0;
3652 }
3653 
3654 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3655 		       uint64_t *tiling_flags, bool *tmz_surface)
3656 {
3657 	struct amdgpu_bo *rbo;
3658 	int r;
3659 
3660 	if (!amdgpu_fb) {
3661 		*tiling_flags = 0;
3662 		*tmz_surface = false;
3663 		return 0;
3664 	}
3665 
3666 	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3667 	r = amdgpu_bo_reserve(rbo, false);
3668 
3669 	if (unlikely(r)) {
3670 		/* Don't show error message when returning -ERESTARTSYS */
3671 		if (r != -ERESTARTSYS)
3672 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3673 		return r;
3674 	}
3675 
3676 	if (tiling_flags)
3677 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3678 
3679 	if (tmz_surface)
3680 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3681 
3682 	amdgpu_bo_unreserve(rbo);
3683 
3684 	return r;
3685 }
3686 
3687 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3688 {
3689 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3690 
3691 	return offset ? (address + offset * 256) : 0;
3692 }
3693 
3694 static int
3695 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3696 			  const struct amdgpu_framebuffer *afb,
3697 			  const enum surface_pixel_format format,
3698 			  const enum dc_rotation_angle rotation,
3699 			  const struct plane_size *plane_size,
3700 			  const union dc_tiling_info *tiling_info,
3701 			  const uint64_t info,
3702 			  struct dc_plane_dcc_param *dcc,
3703 			  struct dc_plane_address *address,
3704 			  bool force_disable_dcc)
3705 {
3706 	struct dc *dc = adev->dm.dc;
3707 	struct dc_dcc_surface_param input;
3708 	struct dc_surface_dcc_cap output;
3709 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3710 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3711 	uint64_t dcc_address;
3712 
3713 	memset(&input, 0, sizeof(input));
3714 	memset(&output, 0, sizeof(output));
3715 
3716 	if (force_disable_dcc)
3717 		return 0;
3718 
3719 	if (!offset)
3720 		return 0;
3721 
3722 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3723 		return 0;
3724 
3725 	if (!dc->cap_funcs.get_dcc_compression_cap)
3726 		return -EINVAL;
3727 
3728 	input.format = format;
3729 	input.surface_size.width = plane_size->surface_size.width;
3730 	input.surface_size.height = plane_size->surface_size.height;
3731 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3732 
3733 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3734 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3735 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3736 		input.scan = SCAN_DIRECTION_VERTICAL;
3737 
3738 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3739 		return -EINVAL;
3740 
3741 	if (!output.capable)
3742 		return -EINVAL;
3743 
3744 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3745 		return -EINVAL;
3746 
3747 	dcc->enable = 1;
3748 	dcc->meta_pitch =
3749 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3750 	dcc->independent_64b_blks = i64b;
3751 
3752 	dcc_address = get_dcc_address(afb->address, info);
3753 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3754 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3755 
3756 	return 0;
3757 }
3758 
3759 static int
3760 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3761 			     const struct amdgpu_framebuffer *afb,
3762 			     const enum surface_pixel_format format,
3763 			     const enum dc_rotation_angle rotation,
3764 			     const uint64_t tiling_flags,
3765 			     union dc_tiling_info *tiling_info,
3766 			     struct plane_size *plane_size,
3767 			     struct dc_plane_dcc_param *dcc,
3768 			     struct dc_plane_address *address,
3769 			     bool tmz_surface,
3770 			     bool force_disable_dcc)
3771 {
3772 	const struct drm_framebuffer *fb = &afb->base;
3773 	int ret;
3774 
3775 	memset(tiling_info, 0, sizeof(*tiling_info));
3776 	memset(plane_size, 0, sizeof(*plane_size));
3777 	memset(dcc, 0, sizeof(*dcc));
3778 	memset(address, 0, sizeof(*address));
3779 
3780 	address->tmz_surface = tmz_surface;
3781 
3782 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3783 		plane_size->surface_size.x = 0;
3784 		plane_size->surface_size.y = 0;
3785 		plane_size->surface_size.width = fb->width;
3786 		plane_size->surface_size.height = fb->height;
3787 		plane_size->surface_pitch =
3788 			fb->pitches[0] / fb->format->cpp[0];
3789 
3790 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3791 		address->grph.addr.low_part = lower_32_bits(afb->address);
3792 		address->grph.addr.high_part = upper_32_bits(afb->address);
3793 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3794 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3795 
3796 		plane_size->surface_size.x = 0;
3797 		plane_size->surface_size.y = 0;
3798 		plane_size->surface_size.width = fb->width;
3799 		plane_size->surface_size.height = fb->height;
3800 		plane_size->surface_pitch =
3801 			fb->pitches[0] / fb->format->cpp[0];
3802 
3803 		plane_size->chroma_size.x = 0;
3804 		plane_size->chroma_size.y = 0;
3805 		/* TODO: set these based on surface format */
3806 		plane_size->chroma_size.width = fb->width / 2;
3807 		plane_size->chroma_size.height = fb->height / 2;
3808 
3809 		plane_size->chroma_pitch =
3810 			fb->pitches[1] / fb->format->cpp[1];
3811 
3812 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3813 		address->video_progressive.luma_addr.low_part =
3814 			lower_32_bits(afb->address);
3815 		address->video_progressive.luma_addr.high_part =
3816 			upper_32_bits(afb->address);
3817 		address->video_progressive.chroma_addr.low_part =
3818 			lower_32_bits(chroma_addr);
3819 		address->video_progressive.chroma_addr.high_part =
3820 			upper_32_bits(chroma_addr);
3821 	}
3822 
3823 	/* Fill GFX8 params */
3824 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3825 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3826 
3827 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3828 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3829 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3830 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3831 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3832 
3833 		/* XXX fix me for VI */
3834 		tiling_info->gfx8.num_banks = num_banks;
3835 		tiling_info->gfx8.array_mode =
3836 				DC_ARRAY_2D_TILED_THIN1;
3837 		tiling_info->gfx8.tile_split = tile_split;
3838 		tiling_info->gfx8.bank_width = bankw;
3839 		tiling_info->gfx8.bank_height = bankh;
3840 		tiling_info->gfx8.tile_aspect = mtaspect;
3841 		tiling_info->gfx8.tile_mode =
3842 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3843 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3844 			== DC_ARRAY_1D_TILED_THIN1) {
3845 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3846 	}
3847 
3848 	tiling_info->gfx8.pipe_config =
3849 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3850 
3851 	if (adev->asic_type == CHIP_VEGA10 ||
3852 	    adev->asic_type == CHIP_VEGA12 ||
3853 	    adev->asic_type == CHIP_VEGA20 ||
3854 	    adev->asic_type == CHIP_NAVI10 ||
3855 	    adev->asic_type == CHIP_NAVI14 ||
3856 	    adev->asic_type == CHIP_NAVI12 ||
3857 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3858 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3859 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3860 #endif
3861 	    adev->asic_type == CHIP_RENOIR ||
3862 	    adev->asic_type == CHIP_RAVEN) {
3863 		/* Fill GFX9 params */
3864 		tiling_info->gfx9.num_pipes =
3865 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3866 		tiling_info->gfx9.num_banks =
3867 			adev->gfx.config.gb_addr_config_fields.num_banks;
3868 		tiling_info->gfx9.pipe_interleave =
3869 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3870 		tiling_info->gfx9.num_shader_engines =
3871 			adev->gfx.config.gb_addr_config_fields.num_se;
3872 		tiling_info->gfx9.max_compressed_frags =
3873 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3874 		tiling_info->gfx9.num_rb_per_se =
3875 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3876 		tiling_info->gfx9.swizzle =
3877 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3878 		tiling_info->gfx9.shaderEnable = 1;
3879 
3880 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3881 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3882 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3883 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3884 #endif
3885 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3886 						plane_size, tiling_info,
3887 						tiling_flags, dcc, address,
3888 						force_disable_dcc);
3889 		if (ret)
3890 			return ret;
3891 	}
3892 
3893 	return 0;
3894 }
3895 
3896 static void
3897 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3898 			       bool *per_pixel_alpha, bool *global_alpha,
3899 			       int *global_alpha_value)
3900 {
3901 	*per_pixel_alpha = false;
3902 	*global_alpha = false;
3903 	*global_alpha_value = 0xff;
3904 
3905 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3906 		return;
3907 
3908 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3909 		static const uint32_t alpha_formats[] = {
3910 			DRM_FORMAT_ARGB8888,
3911 			DRM_FORMAT_RGBA8888,
3912 			DRM_FORMAT_ABGR8888,
3913 		};
3914 		uint32_t format = plane_state->fb->format->format;
3915 		unsigned int i;
3916 
3917 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3918 			if (format == alpha_formats[i]) {
3919 				*per_pixel_alpha = true;
3920 				break;
3921 			}
3922 		}
3923 	}
3924 
3925 	if (plane_state->alpha < 0xffff) {
3926 		*global_alpha = true;
3927 		*global_alpha_value = plane_state->alpha >> 8;
3928 	}
3929 }
3930 
3931 static int
3932 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3933 			    const enum surface_pixel_format format,
3934 			    enum dc_color_space *color_space)
3935 {
3936 	bool full_range;
3937 
3938 	*color_space = COLOR_SPACE_SRGB;
3939 
3940 	/* DRM color properties only affect non-RGB formats. */
3941 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3942 		return 0;
3943 
3944 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3945 
3946 	switch (plane_state->color_encoding) {
3947 	case DRM_COLOR_YCBCR_BT601:
3948 		if (full_range)
3949 			*color_space = COLOR_SPACE_YCBCR601;
3950 		else
3951 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3952 		break;
3953 
3954 	case DRM_COLOR_YCBCR_BT709:
3955 		if (full_range)
3956 			*color_space = COLOR_SPACE_YCBCR709;
3957 		else
3958 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3959 		break;
3960 
3961 	case DRM_COLOR_YCBCR_BT2020:
3962 		if (full_range)
3963 			*color_space = COLOR_SPACE_2020_YCBCR;
3964 		else
3965 			return -EINVAL;
3966 		break;
3967 
3968 	default:
3969 		return -EINVAL;
3970 	}
3971 
3972 	return 0;
3973 }
3974 
3975 static int
3976 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3977 			    const struct drm_plane_state *plane_state,
3978 			    const uint64_t tiling_flags,
3979 			    struct dc_plane_info *plane_info,
3980 			    struct dc_plane_address *address,
3981 			    bool tmz_surface,
3982 			    bool force_disable_dcc)
3983 {
3984 	const struct drm_framebuffer *fb = plane_state->fb;
3985 	const struct amdgpu_framebuffer *afb =
3986 		to_amdgpu_framebuffer(plane_state->fb);
3987 	struct drm_format_name_buf format_name;
3988 	int ret;
3989 
3990 	memset(plane_info, 0, sizeof(*plane_info));
3991 
3992 	switch (fb->format->format) {
3993 	case DRM_FORMAT_C8:
3994 		plane_info->format =
3995 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3996 		break;
3997 	case DRM_FORMAT_RGB565:
3998 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3999 		break;
4000 	case DRM_FORMAT_XRGB8888:
4001 	case DRM_FORMAT_ARGB8888:
4002 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4003 		break;
4004 	case DRM_FORMAT_XRGB2101010:
4005 	case DRM_FORMAT_ARGB2101010:
4006 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4007 		break;
4008 	case DRM_FORMAT_XBGR2101010:
4009 	case DRM_FORMAT_ABGR2101010:
4010 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4011 		break;
4012 	case DRM_FORMAT_XBGR8888:
4013 	case DRM_FORMAT_ABGR8888:
4014 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4015 		break;
4016 	case DRM_FORMAT_NV21:
4017 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4018 		break;
4019 	case DRM_FORMAT_NV12:
4020 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4021 		break;
4022 	case DRM_FORMAT_P010:
4023 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4024 		break;
4025 	case DRM_FORMAT_XRGB16161616F:
4026 	case DRM_FORMAT_ARGB16161616F:
4027 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4028 		break;
4029 	case DRM_FORMAT_XBGR16161616F:
4030 	case DRM_FORMAT_ABGR16161616F:
4031 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4032 		break;
4033 	default:
4034 		DRM_ERROR(
4035 			"Unsupported screen format %s\n",
4036 			drm_get_format_name(fb->format->format, &format_name));
4037 		return -EINVAL;
4038 	}
4039 
4040 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4041 	case DRM_MODE_ROTATE_0:
4042 		plane_info->rotation = ROTATION_ANGLE_0;
4043 		break;
4044 	case DRM_MODE_ROTATE_90:
4045 		plane_info->rotation = ROTATION_ANGLE_90;
4046 		break;
4047 	case DRM_MODE_ROTATE_180:
4048 		plane_info->rotation = ROTATION_ANGLE_180;
4049 		break;
4050 	case DRM_MODE_ROTATE_270:
4051 		plane_info->rotation = ROTATION_ANGLE_270;
4052 		break;
4053 	default:
4054 		plane_info->rotation = ROTATION_ANGLE_0;
4055 		break;
4056 	}
4057 
4058 	plane_info->visible = true;
4059 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4060 
4061 	plane_info->layer_index = 0;
4062 
4063 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4064 					  &plane_info->color_space);
4065 	if (ret)
4066 		return ret;
4067 
4068 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4069 					   plane_info->rotation, tiling_flags,
4070 					   &plane_info->tiling_info,
4071 					   &plane_info->plane_size,
4072 					   &plane_info->dcc, address, tmz_surface,
4073 					   force_disable_dcc);
4074 	if (ret)
4075 		return ret;
4076 
4077 	fill_blending_from_plane_state(
4078 		plane_state, &plane_info->per_pixel_alpha,
4079 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4080 
4081 	return 0;
4082 }
4083 
4084 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4085 				    struct dc_plane_state *dc_plane_state,
4086 				    struct drm_plane_state *plane_state,
4087 				    struct drm_crtc_state *crtc_state)
4088 {
4089 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4090 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
4091 	struct dc_scaling_info scaling_info;
4092 	struct dc_plane_info plane_info;
4093 	int ret;
4094 	bool force_disable_dcc = false;
4095 
4096 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4097 	if (ret)
4098 		return ret;
4099 
4100 	dc_plane_state->src_rect = scaling_info.src_rect;
4101 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4102 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4103 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4104 
4105 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4106 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4107 					  dm_plane_state->tiling_flags,
4108 					  &plane_info,
4109 					  &dc_plane_state->address,
4110 					  dm_plane_state->tmz_surface,
4111 					  force_disable_dcc);
4112 	if (ret)
4113 		return ret;
4114 
4115 	dc_plane_state->format = plane_info.format;
4116 	dc_plane_state->color_space = plane_info.color_space;
4117 	dc_plane_state->format = plane_info.format;
4118 	dc_plane_state->plane_size = plane_info.plane_size;
4119 	dc_plane_state->rotation = plane_info.rotation;
4120 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4121 	dc_plane_state->stereo_format = plane_info.stereo_format;
4122 	dc_plane_state->tiling_info = plane_info.tiling_info;
4123 	dc_plane_state->visible = plane_info.visible;
4124 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4125 	dc_plane_state->global_alpha = plane_info.global_alpha;
4126 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4127 	dc_plane_state->dcc = plane_info.dcc;
4128 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4129 
4130 	/*
4131 	 * Always set input transfer function, since plane state is refreshed
4132 	 * every time.
4133 	 */
4134 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4135 	if (ret)
4136 		return ret;
4137 
4138 	return 0;
4139 }
4140 
4141 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4142 					   const struct dm_connector_state *dm_state,
4143 					   struct dc_stream_state *stream)
4144 {
4145 	enum amdgpu_rmx_type rmx_type;
4146 
4147 	struct rect src = { 0 }; /* viewport in composition space*/
4148 	struct rect dst = { 0 }; /* stream addressable area */
4149 
4150 	/* no mode. nothing to be done */
4151 	if (!mode)
4152 		return;
4153 
4154 	/* Full screen scaling by default */
4155 	src.width = mode->hdisplay;
4156 	src.height = mode->vdisplay;
4157 	dst.width = stream->timing.h_addressable;
4158 	dst.height = stream->timing.v_addressable;
4159 
4160 	if (dm_state) {
4161 		rmx_type = dm_state->scaling;
4162 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4163 			if (src.width * dst.height <
4164 					src.height * dst.width) {
4165 				/* height needs less upscaling/more downscaling */
4166 				dst.width = src.width *
4167 						dst.height / src.height;
4168 			} else {
4169 				/* width needs less upscaling/more downscaling */
4170 				dst.height = src.height *
4171 						dst.width / src.width;
4172 			}
4173 		} else if (rmx_type == RMX_CENTER) {
4174 			dst = src;
4175 		}
4176 
4177 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4178 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4179 
4180 		if (dm_state->underscan_enable) {
4181 			dst.x += dm_state->underscan_hborder / 2;
4182 			dst.y += dm_state->underscan_vborder / 2;
4183 			dst.width -= dm_state->underscan_hborder;
4184 			dst.height -= dm_state->underscan_vborder;
4185 		}
4186 	}
4187 
4188 	stream->src = src;
4189 	stream->dst = dst;
4190 
4191 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4192 			dst.x, dst.y, dst.width, dst.height);
4193 
4194 }
4195 
4196 static enum dc_color_depth
4197 convert_color_depth_from_display_info(const struct drm_connector *connector,
4198 				      bool is_y420, int requested_bpc)
4199 {
4200 	uint8_t bpc;
4201 
4202 	if (is_y420) {
4203 		bpc = 8;
4204 
4205 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4206 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4207 			bpc = 16;
4208 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4209 			bpc = 12;
4210 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4211 			bpc = 10;
4212 	} else {
4213 		bpc = (uint8_t)connector->display_info.bpc;
4214 		/* Assume 8 bpc by default if no bpc is specified. */
4215 		bpc = bpc ? bpc : 8;
4216 	}
4217 
4218 	if (requested_bpc > 0) {
4219 		/*
4220 		 * Cap display bpc based on the user requested value.
4221 		 *
4222 		 * The value for state->max_bpc may not correctly updated
4223 		 * depending on when the connector gets added to the state
4224 		 * or if this was called outside of atomic check, so it
4225 		 * can't be used directly.
4226 		 */
4227 		bpc = min_t(u8, bpc, requested_bpc);
4228 
4229 		/* Round down to the nearest even number. */
4230 		bpc = bpc - (bpc & 1);
4231 	}
4232 
4233 	switch (bpc) {
4234 	case 0:
4235 		/*
4236 		 * Temporary Work around, DRM doesn't parse color depth for
4237 		 * EDID revision before 1.4
4238 		 * TODO: Fix edid parsing
4239 		 */
4240 		return COLOR_DEPTH_888;
4241 	case 6:
4242 		return COLOR_DEPTH_666;
4243 	case 8:
4244 		return COLOR_DEPTH_888;
4245 	case 10:
4246 		return COLOR_DEPTH_101010;
4247 	case 12:
4248 		return COLOR_DEPTH_121212;
4249 	case 14:
4250 		return COLOR_DEPTH_141414;
4251 	case 16:
4252 		return COLOR_DEPTH_161616;
4253 	default:
4254 		return COLOR_DEPTH_UNDEFINED;
4255 	}
4256 }
4257 
4258 static enum dc_aspect_ratio
4259 get_aspect_ratio(const struct drm_display_mode *mode_in)
4260 {
4261 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4262 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4263 }
4264 
4265 static enum dc_color_space
4266 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4267 {
4268 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4269 
4270 	switch (dc_crtc_timing->pixel_encoding)	{
4271 	case PIXEL_ENCODING_YCBCR422:
4272 	case PIXEL_ENCODING_YCBCR444:
4273 	case PIXEL_ENCODING_YCBCR420:
4274 	{
4275 		/*
4276 		 * 27030khz is the separation point between HDTV and SDTV
4277 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4278 		 * respectively
4279 		 */
4280 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4281 			if (dc_crtc_timing->flags.Y_ONLY)
4282 				color_space =
4283 					COLOR_SPACE_YCBCR709_LIMITED;
4284 			else
4285 				color_space = COLOR_SPACE_YCBCR709;
4286 		} else {
4287 			if (dc_crtc_timing->flags.Y_ONLY)
4288 				color_space =
4289 					COLOR_SPACE_YCBCR601_LIMITED;
4290 			else
4291 				color_space = COLOR_SPACE_YCBCR601;
4292 		}
4293 
4294 	}
4295 	break;
4296 	case PIXEL_ENCODING_RGB:
4297 		color_space = COLOR_SPACE_SRGB;
4298 		break;
4299 
4300 	default:
4301 		WARN_ON(1);
4302 		break;
4303 	}
4304 
4305 	return color_space;
4306 }
4307 
4308 static bool adjust_colour_depth_from_display_info(
4309 	struct dc_crtc_timing *timing_out,
4310 	const struct drm_display_info *info)
4311 {
4312 	enum dc_color_depth depth = timing_out->display_color_depth;
4313 	int normalized_clk;
4314 	do {
4315 		normalized_clk = timing_out->pix_clk_100hz / 10;
4316 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4317 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4318 			normalized_clk /= 2;
4319 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4320 		switch (depth) {
4321 		case COLOR_DEPTH_888:
4322 			break;
4323 		case COLOR_DEPTH_101010:
4324 			normalized_clk = (normalized_clk * 30) / 24;
4325 			break;
4326 		case COLOR_DEPTH_121212:
4327 			normalized_clk = (normalized_clk * 36) / 24;
4328 			break;
4329 		case COLOR_DEPTH_161616:
4330 			normalized_clk = (normalized_clk * 48) / 24;
4331 			break;
4332 		default:
4333 			/* The above depths are the only ones valid for HDMI. */
4334 			return false;
4335 		}
4336 		if (normalized_clk <= info->max_tmds_clock) {
4337 			timing_out->display_color_depth = depth;
4338 			return true;
4339 		}
4340 	} while (--depth > COLOR_DEPTH_666);
4341 	return false;
4342 }
4343 
4344 static void fill_stream_properties_from_drm_display_mode(
4345 	struct dc_stream_state *stream,
4346 	const struct drm_display_mode *mode_in,
4347 	const struct drm_connector *connector,
4348 	const struct drm_connector_state *connector_state,
4349 	const struct dc_stream_state *old_stream,
4350 	int requested_bpc)
4351 {
4352 	struct dc_crtc_timing *timing_out = &stream->timing;
4353 	const struct drm_display_info *info = &connector->display_info;
4354 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4355 	struct hdmi_vendor_infoframe hv_frame;
4356 	struct hdmi_avi_infoframe avi_frame;
4357 
4358 	memset(&hv_frame, 0, sizeof(hv_frame));
4359 	memset(&avi_frame, 0, sizeof(avi_frame));
4360 
4361 	timing_out->h_border_left = 0;
4362 	timing_out->h_border_right = 0;
4363 	timing_out->v_border_top = 0;
4364 	timing_out->v_border_bottom = 0;
4365 	/* TODO: un-hardcode */
4366 	if (drm_mode_is_420_only(info, mode_in)
4367 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4368 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4369 	else if (drm_mode_is_420_also(info, mode_in)
4370 			&& aconnector->force_yuv420_output)
4371 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4372 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4373 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4374 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4375 	else
4376 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4377 
4378 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4379 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4380 		connector,
4381 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4382 		requested_bpc);
4383 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4384 	timing_out->hdmi_vic = 0;
4385 
4386 	if(old_stream) {
4387 		timing_out->vic = old_stream->timing.vic;
4388 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4389 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4390 	} else {
4391 		timing_out->vic = drm_match_cea_mode(mode_in);
4392 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4393 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4394 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4395 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4396 	}
4397 
4398 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4399 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4400 		timing_out->vic = avi_frame.video_code;
4401 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4402 		timing_out->hdmi_vic = hv_frame.vic;
4403 	}
4404 
4405 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4406 	timing_out->h_total = mode_in->crtc_htotal;
4407 	timing_out->h_sync_width =
4408 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4409 	timing_out->h_front_porch =
4410 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4411 	timing_out->v_total = mode_in->crtc_vtotal;
4412 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4413 	timing_out->v_front_porch =
4414 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4415 	timing_out->v_sync_width =
4416 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4417 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4418 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4419 
4420 	stream->output_color_space = get_output_color_space(timing_out);
4421 
4422 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4423 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4424 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4425 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4426 		    drm_mode_is_420_also(info, mode_in) &&
4427 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4428 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4429 			adjust_colour_depth_from_display_info(timing_out, info);
4430 		}
4431 	}
4432 }
4433 
4434 static void fill_audio_info(struct audio_info *audio_info,
4435 			    const struct drm_connector *drm_connector,
4436 			    const struct dc_sink *dc_sink)
4437 {
4438 	int i = 0;
4439 	int cea_revision = 0;
4440 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4441 
4442 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4443 	audio_info->product_id = edid_caps->product_id;
4444 
4445 	cea_revision = drm_connector->display_info.cea_rev;
4446 
4447 	strscpy(audio_info->display_name,
4448 		edid_caps->display_name,
4449 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4450 
4451 	if (cea_revision >= 3) {
4452 		audio_info->mode_count = edid_caps->audio_mode_count;
4453 
4454 		for (i = 0; i < audio_info->mode_count; ++i) {
4455 			audio_info->modes[i].format_code =
4456 					(enum audio_format_code)
4457 					(edid_caps->audio_modes[i].format_code);
4458 			audio_info->modes[i].channel_count =
4459 					edid_caps->audio_modes[i].channel_count;
4460 			audio_info->modes[i].sample_rates.all =
4461 					edid_caps->audio_modes[i].sample_rate;
4462 			audio_info->modes[i].sample_size =
4463 					edid_caps->audio_modes[i].sample_size;
4464 		}
4465 	}
4466 
4467 	audio_info->flags.all = edid_caps->speaker_flags;
4468 
4469 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4470 	if (drm_connector->latency_present[0]) {
4471 		audio_info->video_latency = drm_connector->video_latency[0];
4472 		audio_info->audio_latency = drm_connector->audio_latency[0];
4473 	}
4474 
4475 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4476 
4477 }
4478 
4479 static void
4480 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4481 				      struct drm_display_mode *dst_mode)
4482 {
4483 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4484 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4485 	dst_mode->crtc_clock = src_mode->crtc_clock;
4486 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4487 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4488 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4489 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4490 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4491 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4492 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4493 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4494 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4495 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4496 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4497 }
4498 
4499 static void
4500 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4501 					const struct drm_display_mode *native_mode,
4502 					bool scale_enabled)
4503 {
4504 	if (scale_enabled) {
4505 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4506 	} else if (native_mode->clock == drm_mode->clock &&
4507 			native_mode->htotal == drm_mode->htotal &&
4508 			native_mode->vtotal == drm_mode->vtotal) {
4509 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4510 	} else {
4511 		/* no scaling nor amdgpu inserted, no need to patch */
4512 	}
4513 }
4514 
4515 static struct dc_sink *
4516 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4517 {
4518 	struct dc_sink_init_data sink_init_data = { 0 };
4519 	struct dc_sink *sink = NULL;
4520 	sink_init_data.link = aconnector->dc_link;
4521 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4522 
4523 	sink = dc_sink_create(&sink_init_data);
4524 	if (!sink) {
4525 		DRM_ERROR("Failed to create sink!\n");
4526 		return NULL;
4527 	}
4528 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4529 
4530 	return sink;
4531 }
4532 
4533 static void set_multisync_trigger_params(
4534 		struct dc_stream_state *stream)
4535 {
4536 	if (stream->triggered_crtc_reset.enabled) {
4537 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4538 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4539 	}
4540 }
4541 
4542 static void set_master_stream(struct dc_stream_state *stream_set[],
4543 			      int stream_count)
4544 {
4545 	int j, highest_rfr = 0, master_stream = 0;
4546 
4547 	for (j = 0;  j < stream_count; j++) {
4548 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4549 			int refresh_rate = 0;
4550 
4551 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4552 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4553 			if (refresh_rate > highest_rfr) {
4554 				highest_rfr = refresh_rate;
4555 				master_stream = j;
4556 			}
4557 		}
4558 	}
4559 	for (j = 0;  j < stream_count; j++) {
4560 		if (stream_set[j])
4561 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4562 	}
4563 }
4564 
4565 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4566 {
4567 	int i = 0;
4568 
4569 	if (context->stream_count < 2)
4570 		return;
4571 	for (i = 0; i < context->stream_count ; i++) {
4572 		if (!context->streams[i])
4573 			continue;
4574 		/*
4575 		 * TODO: add a function to read AMD VSDB bits and set
4576 		 * crtc_sync_master.multi_sync_enabled flag
4577 		 * For now it's set to false
4578 		 */
4579 		set_multisync_trigger_params(context->streams[i]);
4580 	}
4581 	set_master_stream(context->streams, context->stream_count);
4582 }
4583 
4584 static struct dc_stream_state *
4585 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4586 		       const struct drm_display_mode *drm_mode,
4587 		       const struct dm_connector_state *dm_state,
4588 		       const struct dc_stream_state *old_stream,
4589 		       int requested_bpc)
4590 {
4591 	struct drm_display_mode *preferred_mode = NULL;
4592 	struct drm_connector *drm_connector;
4593 	const struct drm_connector_state *con_state =
4594 		dm_state ? &dm_state->base : NULL;
4595 	struct dc_stream_state *stream = NULL;
4596 	struct drm_display_mode mode = *drm_mode;
4597 	bool native_mode_found = false;
4598 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4599 	int mode_refresh;
4600 	int preferred_refresh = 0;
4601 #if defined(CONFIG_DRM_AMD_DC_DCN)
4602 	struct dsc_dec_dpcd_caps dsc_caps;
4603 #endif
4604 	uint32_t link_bandwidth_kbps;
4605 
4606 	struct dc_sink *sink = NULL;
4607 	if (aconnector == NULL) {
4608 		DRM_ERROR("aconnector is NULL!\n");
4609 		return stream;
4610 	}
4611 
4612 	drm_connector = &aconnector->base;
4613 
4614 	if (!aconnector->dc_sink) {
4615 		sink = create_fake_sink(aconnector);
4616 		if (!sink)
4617 			return stream;
4618 	} else {
4619 		sink = aconnector->dc_sink;
4620 		dc_sink_retain(sink);
4621 	}
4622 
4623 	stream = dc_create_stream_for_sink(sink);
4624 
4625 	if (stream == NULL) {
4626 		DRM_ERROR("Failed to create stream for sink!\n");
4627 		goto finish;
4628 	}
4629 
4630 	stream->dm_stream_context = aconnector;
4631 
4632 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4633 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4634 
4635 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4636 		/* Search for preferred mode */
4637 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4638 			native_mode_found = true;
4639 			break;
4640 		}
4641 	}
4642 	if (!native_mode_found)
4643 		preferred_mode = list_first_entry_or_null(
4644 				&aconnector->base.modes,
4645 				struct drm_display_mode,
4646 				head);
4647 
4648 	mode_refresh = drm_mode_vrefresh(&mode);
4649 
4650 	if (preferred_mode == NULL) {
4651 		/*
4652 		 * This may not be an error, the use case is when we have no
4653 		 * usermode calls to reset and set mode upon hotplug. In this
4654 		 * case, we call set mode ourselves to restore the previous mode
4655 		 * and the modelist may not be filled in in time.
4656 		 */
4657 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4658 	} else {
4659 		decide_crtc_timing_for_drm_display_mode(
4660 				&mode, preferred_mode,
4661 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4662 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4663 	}
4664 
4665 	if (!dm_state)
4666 		drm_mode_set_crtcinfo(&mode, 0);
4667 
4668 	/*
4669 	* If scaling is enabled and refresh rate didn't change
4670 	* we copy the vic and polarities of the old timings
4671 	*/
4672 	if (!scale || mode_refresh != preferred_refresh)
4673 		fill_stream_properties_from_drm_display_mode(stream,
4674 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4675 	else
4676 		fill_stream_properties_from_drm_display_mode(stream,
4677 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4678 
4679 	stream->timing.flags.DSC = 0;
4680 
4681 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4682 #if defined(CONFIG_DRM_AMD_DC_DCN)
4683 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4684 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4685 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4686 				      &dsc_caps);
4687 #endif
4688 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4689 							     dc_link_get_link_cap(aconnector->dc_link));
4690 
4691 #if defined(CONFIG_DRM_AMD_DC_DCN)
4692 		if (dsc_caps.is_dsc_supported) {
4693 			/* Set DSC policy according to dsc_clock_en */
4694 			dc_dsc_policy_set_enable_dsc_when_not_needed(aconnector->dsc_settings.dsc_clock_en);
4695 
4696 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4697 						  &dsc_caps,
4698 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4699 						  link_bandwidth_kbps,
4700 						  &stream->timing,
4701 						  &stream->timing.dsc_cfg))
4702 				stream->timing.flags.DSC = 1;
4703 			/* Overwrite the stream flag if DSC is enabled through debugfs */
4704 			if (aconnector->dsc_settings.dsc_clock_en)
4705 				stream->timing.flags.DSC = 1;
4706 
4707 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_width)
4708 				stream->timing.dsc_cfg.num_slices_h = DIV_ROUND_UP(stream->timing.h_addressable,
4709 									aconnector->dsc_settings.dsc_slice_width);
4710 
4711 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_slice_height)
4712 				stream->timing.dsc_cfg.num_slices_v = DIV_ROUND_UP(stream->timing.v_addressable,
4713 									aconnector->dsc_settings.dsc_slice_height);
4714 
4715 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
4716 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
4717 		}
4718 #endif
4719 	}
4720 
4721 	update_stream_scaling_settings(&mode, dm_state, stream);
4722 
4723 	fill_audio_info(
4724 		&stream->audio_info,
4725 		drm_connector,
4726 		sink);
4727 
4728 	update_stream_signal(stream, sink);
4729 
4730 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4731 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
4732 
4733 	if (stream->link->psr_settings.psr_feature_enabled) {
4734 		//
4735 		// should decide stream support vsc sdp colorimetry capability
4736 		// before building vsc info packet
4737 		//
4738 		stream->use_vsc_sdp_for_colorimetry = false;
4739 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4740 			stream->use_vsc_sdp_for_colorimetry =
4741 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4742 		} else {
4743 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4744 				stream->use_vsc_sdp_for_colorimetry = true;
4745 		}
4746 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4747 	}
4748 finish:
4749 	dc_sink_release(sink);
4750 
4751 	return stream;
4752 }
4753 
4754 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4755 {
4756 	drm_crtc_cleanup(crtc);
4757 	kfree(crtc);
4758 }
4759 
4760 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4761 				  struct drm_crtc_state *state)
4762 {
4763 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4764 
4765 	/* TODO Destroy dc_stream objects are stream object is flattened */
4766 	if (cur->stream)
4767 		dc_stream_release(cur->stream);
4768 
4769 
4770 	__drm_atomic_helper_crtc_destroy_state(state);
4771 
4772 
4773 	kfree(state);
4774 }
4775 
4776 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4777 {
4778 	struct dm_crtc_state *state;
4779 
4780 	if (crtc->state)
4781 		dm_crtc_destroy_state(crtc, crtc->state);
4782 
4783 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4784 	if (WARN_ON(!state))
4785 		return;
4786 
4787 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4788 }
4789 
4790 static struct drm_crtc_state *
4791 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4792 {
4793 	struct dm_crtc_state *state, *cur;
4794 
4795 	cur = to_dm_crtc_state(crtc->state);
4796 
4797 	if (WARN_ON(!crtc->state))
4798 		return NULL;
4799 
4800 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4801 	if (!state)
4802 		return NULL;
4803 
4804 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4805 
4806 	if (cur->stream) {
4807 		state->stream = cur->stream;
4808 		dc_stream_retain(state->stream);
4809 	}
4810 
4811 	state->active_planes = cur->active_planes;
4812 	state->vrr_params = cur->vrr_params;
4813 	state->vrr_infopacket = cur->vrr_infopacket;
4814 	state->abm_level = cur->abm_level;
4815 	state->vrr_supported = cur->vrr_supported;
4816 	state->freesync_config = cur->freesync_config;
4817 	state->crc_src = cur->crc_src;
4818 	state->cm_has_degamma = cur->cm_has_degamma;
4819 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4820 
4821 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4822 
4823 	return &state->base;
4824 }
4825 
4826 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4827 {
4828 	enum dc_irq_source irq_source;
4829 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4830 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4831 	int rc;
4832 
4833 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4834 
4835 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4836 
4837 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4838 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4839 	return rc;
4840 }
4841 
4842 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4843 {
4844 	enum dc_irq_source irq_source;
4845 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4846 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4847 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4848 	int rc = 0;
4849 
4850 	if (enable) {
4851 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4852 		if (amdgpu_dm_vrr_active(acrtc_state))
4853 			rc = dm_set_vupdate_irq(crtc, true);
4854 	} else {
4855 		/* vblank irq off -> vupdate irq off */
4856 		rc = dm_set_vupdate_irq(crtc, false);
4857 	}
4858 
4859 	if (rc)
4860 		return rc;
4861 
4862 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4863 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4864 }
4865 
4866 static int dm_enable_vblank(struct drm_crtc *crtc)
4867 {
4868 	return dm_set_vblank(crtc, true);
4869 }
4870 
4871 static void dm_disable_vblank(struct drm_crtc *crtc)
4872 {
4873 	dm_set_vblank(crtc, false);
4874 }
4875 
4876 /* Implemented only the options currently availible for the driver */
4877 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4878 	.reset = dm_crtc_reset_state,
4879 	.destroy = amdgpu_dm_crtc_destroy,
4880 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4881 	.set_config = drm_atomic_helper_set_config,
4882 	.page_flip = drm_atomic_helper_page_flip,
4883 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4884 	.atomic_destroy_state = dm_crtc_destroy_state,
4885 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4886 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4887 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4888 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4889 	.enable_vblank = dm_enable_vblank,
4890 	.disable_vblank = dm_disable_vblank,
4891 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4892 };
4893 
4894 static enum drm_connector_status
4895 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4896 {
4897 	bool connected;
4898 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4899 
4900 	/*
4901 	 * Notes:
4902 	 * 1. This interface is NOT called in context of HPD irq.
4903 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4904 	 * makes it a bad place for *any* MST-related activity.
4905 	 */
4906 
4907 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4908 	    !aconnector->fake_enable)
4909 		connected = (aconnector->dc_sink != NULL);
4910 	else
4911 		connected = (aconnector->base.force == DRM_FORCE_ON);
4912 
4913 	update_subconnector_property(aconnector);
4914 
4915 	return (connected ? connector_status_connected :
4916 			connector_status_disconnected);
4917 }
4918 
4919 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4920 					    struct drm_connector_state *connector_state,
4921 					    struct drm_property *property,
4922 					    uint64_t val)
4923 {
4924 	struct drm_device *dev = connector->dev;
4925 	struct amdgpu_device *adev = drm_to_adev(dev);
4926 	struct dm_connector_state *dm_old_state =
4927 		to_dm_connector_state(connector->state);
4928 	struct dm_connector_state *dm_new_state =
4929 		to_dm_connector_state(connector_state);
4930 
4931 	int ret = -EINVAL;
4932 
4933 	if (property == dev->mode_config.scaling_mode_property) {
4934 		enum amdgpu_rmx_type rmx_type;
4935 
4936 		switch (val) {
4937 		case DRM_MODE_SCALE_CENTER:
4938 			rmx_type = RMX_CENTER;
4939 			break;
4940 		case DRM_MODE_SCALE_ASPECT:
4941 			rmx_type = RMX_ASPECT;
4942 			break;
4943 		case DRM_MODE_SCALE_FULLSCREEN:
4944 			rmx_type = RMX_FULL;
4945 			break;
4946 		case DRM_MODE_SCALE_NONE:
4947 		default:
4948 			rmx_type = RMX_OFF;
4949 			break;
4950 		}
4951 
4952 		if (dm_old_state->scaling == rmx_type)
4953 			return 0;
4954 
4955 		dm_new_state->scaling = rmx_type;
4956 		ret = 0;
4957 	} else if (property == adev->mode_info.underscan_hborder_property) {
4958 		dm_new_state->underscan_hborder = val;
4959 		ret = 0;
4960 	} else if (property == adev->mode_info.underscan_vborder_property) {
4961 		dm_new_state->underscan_vborder = val;
4962 		ret = 0;
4963 	} else if (property == adev->mode_info.underscan_property) {
4964 		dm_new_state->underscan_enable = val;
4965 		ret = 0;
4966 	} else if (property == adev->mode_info.abm_level_property) {
4967 		dm_new_state->abm_level = val;
4968 		ret = 0;
4969 	}
4970 
4971 	return ret;
4972 }
4973 
4974 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4975 					    const struct drm_connector_state *state,
4976 					    struct drm_property *property,
4977 					    uint64_t *val)
4978 {
4979 	struct drm_device *dev = connector->dev;
4980 	struct amdgpu_device *adev = drm_to_adev(dev);
4981 	struct dm_connector_state *dm_state =
4982 		to_dm_connector_state(state);
4983 	int ret = -EINVAL;
4984 
4985 	if (property == dev->mode_config.scaling_mode_property) {
4986 		switch (dm_state->scaling) {
4987 		case RMX_CENTER:
4988 			*val = DRM_MODE_SCALE_CENTER;
4989 			break;
4990 		case RMX_ASPECT:
4991 			*val = DRM_MODE_SCALE_ASPECT;
4992 			break;
4993 		case RMX_FULL:
4994 			*val = DRM_MODE_SCALE_FULLSCREEN;
4995 			break;
4996 		case RMX_OFF:
4997 		default:
4998 			*val = DRM_MODE_SCALE_NONE;
4999 			break;
5000 		}
5001 		ret = 0;
5002 	} else if (property == adev->mode_info.underscan_hborder_property) {
5003 		*val = dm_state->underscan_hborder;
5004 		ret = 0;
5005 	} else if (property == adev->mode_info.underscan_vborder_property) {
5006 		*val = dm_state->underscan_vborder;
5007 		ret = 0;
5008 	} else if (property == adev->mode_info.underscan_property) {
5009 		*val = dm_state->underscan_enable;
5010 		ret = 0;
5011 	} else if (property == adev->mode_info.abm_level_property) {
5012 		*val = dm_state->abm_level;
5013 		ret = 0;
5014 	}
5015 
5016 	return ret;
5017 }
5018 
5019 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5020 {
5021 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5022 
5023 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5024 }
5025 
5026 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5027 {
5028 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5029 	const struct dc_link *link = aconnector->dc_link;
5030 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5031 	struct amdgpu_display_manager *dm = &adev->dm;
5032 
5033 	drm_atomic_private_obj_fini(&aconnector->mst_mgr.base);
5034 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5035 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5036 
5037 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5038 	    link->type != dc_connection_none &&
5039 	    dm->backlight_dev) {
5040 		backlight_device_unregister(dm->backlight_dev);
5041 		dm->backlight_dev = NULL;
5042 	}
5043 #endif
5044 
5045 	if (aconnector->dc_em_sink)
5046 		dc_sink_release(aconnector->dc_em_sink);
5047 	aconnector->dc_em_sink = NULL;
5048 	if (aconnector->dc_sink)
5049 		dc_sink_release(aconnector->dc_sink);
5050 	aconnector->dc_sink = NULL;
5051 
5052 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5053 	drm_connector_unregister(connector);
5054 	drm_connector_cleanup(connector);
5055 	if (aconnector->i2c) {
5056 		i2c_del_adapter(&aconnector->i2c->base);
5057 		kfree(aconnector->i2c);
5058 	}
5059 	kfree(aconnector->dm_dp_aux.aux.name);
5060 
5061 	kfree(connector);
5062 }
5063 
5064 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5065 {
5066 	struct dm_connector_state *state =
5067 		to_dm_connector_state(connector->state);
5068 
5069 	if (connector->state)
5070 		__drm_atomic_helper_connector_destroy_state(connector->state);
5071 
5072 	kfree(state);
5073 
5074 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5075 
5076 	if (state) {
5077 		state->scaling = RMX_OFF;
5078 		state->underscan_enable = false;
5079 		state->underscan_hborder = 0;
5080 		state->underscan_vborder = 0;
5081 		state->base.max_requested_bpc = 8;
5082 		state->vcpi_slots = 0;
5083 		state->pbn = 0;
5084 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5085 			state->abm_level = amdgpu_dm_abm_level;
5086 
5087 		__drm_atomic_helper_connector_reset(connector, &state->base);
5088 	}
5089 }
5090 
5091 struct drm_connector_state *
5092 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5093 {
5094 	struct dm_connector_state *state =
5095 		to_dm_connector_state(connector->state);
5096 
5097 	struct dm_connector_state *new_state =
5098 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5099 
5100 	if (!new_state)
5101 		return NULL;
5102 
5103 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5104 
5105 	new_state->freesync_capable = state->freesync_capable;
5106 	new_state->abm_level = state->abm_level;
5107 	new_state->scaling = state->scaling;
5108 	new_state->underscan_enable = state->underscan_enable;
5109 	new_state->underscan_hborder = state->underscan_hborder;
5110 	new_state->underscan_vborder = state->underscan_vborder;
5111 	new_state->vcpi_slots = state->vcpi_slots;
5112 	new_state->pbn = state->pbn;
5113 	return &new_state->base;
5114 }
5115 
5116 static int
5117 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5118 {
5119 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5120 		to_amdgpu_dm_connector(connector);
5121 	int r;
5122 
5123 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5124 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5125 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5126 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5127 		if (r)
5128 			return r;
5129 	}
5130 
5131 #if defined(CONFIG_DEBUG_FS)
5132 	connector_debugfs_init(amdgpu_dm_connector);
5133 #endif
5134 
5135 	return 0;
5136 }
5137 
5138 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5139 	.reset = amdgpu_dm_connector_funcs_reset,
5140 	.detect = amdgpu_dm_connector_detect,
5141 	.fill_modes = drm_helper_probe_single_connector_modes,
5142 	.destroy = amdgpu_dm_connector_destroy,
5143 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5144 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5145 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5146 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5147 	.late_register = amdgpu_dm_connector_late_register,
5148 	.early_unregister = amdgpu_dm_connector_unregister
5149 };
5150 
5151 static int get_modes(struct drm_connector *connector)
5152 {
5153 	return amdgpu_dm_connector_get_modes(connector);
5154 }
5155 
5156 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5157 {
5158 	struct dc_sink_init_data init_params = {
5159 			.link = aconnector->dc_link,
5160 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5161 	};
5162 	struct edid *edid;
5163 
5164 	if (!aconnector->base.edid_blob_ptr) {
5165 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5166 				aconnector->base.name);
5167 
5168 		aconnector->base.force = DRM_FORCE_OFF;
5169 		aconnector->base.override_edid = false;
5170 		return;
5171 	}
5172 
5173 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5174 
5175 	aconnector->edid = edid;
5176 
5177 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5178 		aconnector->dc_link,
5179 		(uint8_t *)edid,
5180 		(edid->extensions + 1) * EDID_LENGTH,
5181 		&init_params);
5182 
5183 	if (aconnector->base.force == DRM_FORCE_ON) {
5184 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5185 		aconnector->dc_link->local_sink :
5186 		aconnector->dc_em_sink;
5187 		dc_sink_retain(aconnector->dc_sink);
5188 	}
5189 }
5190 
5191 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5192 {
5193 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5194 
5195 	/*
5196 	 * In case of headless boot with force on for DP managed connector
5197 	 * Those settings have to be != 0 to get initial modeset
5198 	 */
5199 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5200 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5201 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5202 	}
5203 
5204 
5205 	aconnector->base.override_edid = true;
5206 	create_eml_sink(aconnector);
5207 }
5208 
5209 static struct dc_stream_state *
5210 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5211 				const struct drm_display_mode *drm_mode,
5212 				const struct dm_connector_state *dm_state,
5213 				const struct dc_stream_state *old_stream)
5214 {
5215 	struct drm_connector *connector = &aconnector->base;
5216 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5217 	struct dc_stream_state *stream;
5218 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5219 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5220 	enum dc_status dc_result = DC_OK;
5221 
5222 	do {
5223 		stream = create_stream_for_sink(aconnector, drm_mode,
5224 						dm_state, old_stream,
5225 						requested_bpc);
5226 		if (stream == NULL) {
5227 			DRM_ERROR("Failed to create stream for sink!\n");
5228 			break;
5229 		}
5230 
5231 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5232 
5233 		if (dc_result != DC_OK) {
5234 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5235 				      drm_mode->hdisplay,
5236 				      drm_mode->vdisplay,
5237 				      drm_mode->clock,
5238 				      dc_result,
5239 				      dc_status_to_str(dc_result));
5240 
5241 			dc_stream_release(stream);
5242 			stream = NULL;
5243 			requested_bpc -= 2; /* lower bpc to retry validation */
5244 		}
5245 
5246 	} while (stream == NULL && requested_bpc >= 6);
5247 
5248 	return stream;
5249 }
5250 
5251 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5252 				   struct drm_display_mode *mode)
5253 {
5254 	int result = MODE_ERROR;
5255 	struct dc_sink *dc_sink;
5256 	/* TODO: Unhardcode stream count */
5257 	struct dc_stream_state *stream;
5258 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5259 
5260 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5261 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5262 		return result;
5263 
5264 	/*
5265 	 * Only run this the first time mode_valid is called to initilialize
5266 	 * EDID mgmt
5267 	 */
5268 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5269 		!aconnector->dc_em_sink)
5270 		handle_edid_mgmt(aconnector);
5271 
5272 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5273 
5274 	if (dc_sink == NULL) {
5275 		DRM_ERROR("dc_sink is NULL!\n");
5276 		goto fail;
5277 	}
5278 
5279 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5280 	if (stream) {
5281 		dc_stream_release(stream);
5282 		result = MODE_OK;
5283 	}
5284 
5285 fail:
5286 	/* TODO: error handling*/
5287 	return result;
5288 }
5289 
5290 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5291 				struct dc_info_packet *out)
5292 {
5293 	struct hdmi_drm_infoframe frame;
5294 	unsigned char buf[30]; /* 26 + 4 */
5295 	ssize_t len;
5296 	int ret, i;
5297 
5298 	memset(out, 0, sizeof(*out));
5299 
5300 	if (!state->hdr_output_metadata)
5301 		return 0;
5302 
5303 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5304 	if (ret)
5305 		return ret;
5306 
5307 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5308 	if (len < 0)
5309 		return (int)len;
5310 
5311 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5312 	if (len != 30)
5313 		return -EINVAL;
5314 
5315 	/* Prepare the infopacket for DC. */
5316 	switch (state->connector->connector_type) {
5317 	case DRM_MODE_CONNECTOR_HDMIA:
5318 		out->hb0 = 0x87; /* type */
5319 		out->hb1 = 0x01; /* version */
5320 		out->hb2 = 0x1A; /* length */
5321 		out->sb[0] = buf[3]; /* checksum */
5322 		i = 1;
5323 		break;
5324 
5325 	case DRM_MODE_CONNECTOR_DisplayPort:
5326 	case DRM_MODE_CONNECTOR_eDP:
5327 		out->hb0 = 0x00; /* sdp id, zero */
5328 		out->hb1 = 0x87; /* type */
5329 		out->hb2 = 0x1D; /* payload len - 1 */
5330 		out->hb3 = (0x13 << 2); /* sdp version */
5331 		out->sb[0] = 0x01; /* version */
5332 		out->sb[1] = 0x1A; /* length */
5333 		i = 2;
5334 		break;
5335 
5336 	default:
5337 		return -EINVAL;
5338 	}
5339 
5340 	memcpy(&out->sb[i], &buf[4], 26);
5341 	out->valid = true;
5342 
5343 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5344 		       sizeof(out->sb), false);
5345 
5346 	return 0;
5347 }
5348 
5349 static bool
5350 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5351 			  const struct drm_connector_state *new_state)
5352 {
5353 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5354 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5355 
5356 	if (old_blob != new_blob) {
5357 		if (old_blob && new_blob &&
5358 		    old_blob->length == new_blob->length)
5359 			return memcmp(old_blob->data, new_blob->data,
5360 				      old_blob->length);
5361 
5362 		return true;
5363 	}
5364 
5365 	return false;
5366 }
5367 
5368 static int
5369 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5370 				 struct drm_atomic_state *state)
5371 {
5372 	struct drm_connector_state *new_con_state =
5373 		drm_atomic_get_new_connector_state(state, conn);
5374 	struct drm_connector_state *old_con_state =
5375 		drm_atomic_get_old_connector_state(state, conn);
5376 	struct drm_crtc *crtc = new_con_state->crtc;
5377 	struct drm_crtc_state *new_crtc_state;
5378 	int ret;
5379 
5380 	if (!crtc)
5381 		return 0;
5382 
5383 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5384 		struct dc_info_packet hdr_infopacket;
5385 
5386 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5387 		if (ret)
5388 			return ret;
5389 
5390 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5391 		if (IS_ERR(new_crtc_state))
5392 			return PTR_ERR(new_crtc_state);
5393 
5394 		/*
5395 		 * DC considers the stream backends changed if the
5396 		 * static metadata changes. Forcing the modeset also
5397 		 * gives a simple way for userspace to switch from
5398 		 * 8bpc to 10bpc when setting the metadata to enter
5399 		 * or exit HDR.
5400 		 *
5401 		 * Changing the static metadata after it's been
5402 		 * set is permissible, however. So only force a
5403 		 * modeset if we're entering or exiting HDR.
5404 		 */
5405 		new_crtc_state->mode_changed =
5406 			!old_con_state->hdr_output_metadata ||
5407 			!new_con_state->hdr_output_metadata;
5408 	}
5409 
5410 	return 0;
5411 }
5412 
5413 static const struct drm_connector_helper_funcs
5414 amdgpu_dm_connector_helper_funcs = {
5415 	/*
5416 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5417 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5418 	 * are missing after user start lightdm. So we need to renew modes list.
5419 	 * in get_modes call back, not just return the modes count
5420 	 */
5421 	.get_modes = get_modes,
5422 	.mode_valid = amdgpu_dm_connector_mode_valid,
5423 	.atomic_check = amdgpu_dm_connector_atomic_check,
5424 };
5425 
5426 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5427 {
5428 }
5429 
5430 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5431 {
5432 	struct drm_device *dev = new_crtc_state->crtc->dev;
5433 	struct drm_plane *plane;
5434 
5435 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5436 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5437 			return true;
5438 	}
5439 
5440 	return false;
5441 }
5442 
5443 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5444 {
5445 	struct drm_atomic_state *state = new_crtc_state->state;
5446 	struct drm_plane *plane;
5447 	int num_active = 0;
5448 
5449 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5450 		struct drm_plane_state *new_plane_state;
5451 
5452 		/* Cursor planes are "fake". */
5453 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5454 			continue;
5455 
5456 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5457 
5458 		if (!new_plane_state) {
5459 			/*
5460 			 * The plane is enable on the CRTC and hasn't changed
5461 			 * state. This means that it previously passed
5462 			 * validation and is therefore enabled.
5463 			 */
5464 			num_active += 1;
5465 			continue;
5466 		}
5467 
5468 		/* We need a framebuffer to be considered enabled. */
5469 		num_active += (new_plane_state->fb != NULL);
5470 	}
5471 
5472 	return num_active;
5473 }
5474 
5475 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5476 					 struct drm_crtc_state *new_crtc_state)
5477 {
5478 	struct dm_crtc_state *dm_new_crtc_state =
5479 		to_dm_crtc_state(new_crtc_state);
5480 
5481 	dm_new_crtc_state->active_planes = 0;
5482 
5483 	if (!dm_new_crtc_state->stream)
5484 		return;
5485 
5486 	dm_new_crtc_state->active_planes =
5487 		count_crtc_active_planes(new_crtc_state);
5488 }
5489 
5490 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5491 				       struct drm_crtc_state *state)
5492 {
5493 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5494 	struct dc *dc = adev->dm.dc;
5495 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5496 	int ret = -EINVAL;
5497 
5498 	dm_update_crtc_active_planes(crtc, state);
5499 
5500 	if (unlikely(!dm_crtc_state->stream &&
5501 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5502 		WARN_ON(1);
5503 		return ret;
5504 	}
5505 
5506 	/* In some use cases, like reset, no stream is attached */
5507 	if (!dm_crtc_state->stream)
5508 		return 0;
5509 
5510 	/*
5511 	 * We want at least one hardware plane enabled to use
5512 	 * the stream with a cursor enabled.
5513 	 */
5514 	if (state->enable && state->active &&
5515 	    does_crtc_have_active_cursor(state) &&
5516 	    dm_crtc_state->active_planes == 0)
5517 		return -EINVAL;
5518 
5519 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5520 		return 0;
5521 
5522 	return ret;
5523 }
5524 
5525 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5526 				      const struct drm_display_mode *mode,
5527 				      struct drm_display_mode *adjusted_mode)
5528 {
5529 	return true;
5530 }
5531 
5532 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5533 	.disable = dm_crtc_helper_disable,
5534 	.atomic_check = dm_crtc_helper_atomic_check,
5535 	.mode_fixup = dm_crtc_helper_mode_fixup,
5536 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5537 };
5538 
5539 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5540 {
5541 
5542 }
5543 
5544 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5545 {
5546 	switch (display_color_depth) {
5547 		case COLOR_DEPTH_666:
5548 			return 6;
5549 		case COLOR_DEPTH_888:
5550 			return 8;
5551 		case COLOR_DEPTH_101010:
5552 			return 10;
5553 		case COLOR_DEPTH_121212:
5554 			return 12;
5555 		case COLOR_DEPTH_141414:
5556 			return 14;
5557 		case COLOR_DEPTH_161616:
5558 			return 16;
5559 		default:
5560 			break;
5561 		}
5562 	return 0;
5563 }
5564 
5565 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5566 					  struct drm_crtc_state *crtc_state,
5567 					  struct drm_connector_state *conn_state)
5568 {
5569 	struct drm_atomic_state *state = crtc_state->state;
5570 	struct drm_connector *connector = conn_state->connector;
5571 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5572 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5573 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5574 	struct drm_dp_mst_topology_mgr *mst_mgr;
5575 	struct drm_dp_mst_port *mst_port;
5576 	enum dc_color_depth color_depth;
5577 	int clock, bpp = 0;
5578 	bool is_y420 = false;
5579 
5580 	if (!aconnector->port || !aconnector->dc_sink)
5581 		return 0;
5582 
5583 	mst_port = aconnector->port;
5584 	mst_mgr = &aconnector->mst_port->mst_mgr;
5585 
5586 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5587 		return 0;
5588 
5589 	if (!state->duplicated) {
5590 		int max_bpc = conn_state->max_requested_bpc;
5591 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5592 				aconnector->force_yuv420_output;
5593 		color_depth = convert_color_depth_from_display_info(connector,
5594 								    is_y420,
5595 								    max_bpc);
5596 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5597 		clock = adjusted_mode->clock;
5598 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5599 	}
5600 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5601 									   mst_mgr,
5602 									   mst_port,
5603 									   dm_new_connector_state->pbn,
5604 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5605 	if (dm_new_connector_state->vcpi_slots < 0) {
5606 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5607 		return dm_new_connector_state->vcpi_slots;
5608 	}
5609 	return 0;
5610 }
5611 
5612 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5613 	.disable = dm_encoder_helper_disable,
5614 	.atomic_check = dm_encoder_helper_atomic_check
5615 };
5616 
5617 #if defined(CONFIG_DRM_AMD_DC_DCN)
5618 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5619 					    struct dc_state *dc_state)
5620 {
5621 	struct dc_stream_state *stream = NULL;
5622 	struct drm_connector *connector;
5623 	struct drm_connector_state *new_con_state, *old_con_state;
5624 	struct amdgpu_dm_connector *aconnector;
5625 	struct dm_connector_state *dm_conn_state;
5626 	int i, j, clock, bpp;
5627 	int vcpi, pbn_div, pbn = 0;
5628 
5629 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5630 
5631 		aconnector = to_amdgpu_dm_connector(connector);
5632 
5633 		if (!aconnector->port)
5634 			continue;
5635 
5636 		if (!new_con_state || !new_con_state->crtc)
5637 			continue;
5638 
5639 		dm_conn_state = to_dm_connector_state(new_con_state);
5640 
5641 		for (j = 0; j < dc_state->stream_count; j++) {
5642 			stream = dc_state->streams[j];
5643 			if (!stream)
5644 				continue;
5645 
5646 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5647 				break;
5648 
5649 			stream = NULL;
5650 		}
5651 
5652 		if (!stream)
5653 			continue;
5654 
5655 		if (stream->timing.flags.DSC != 1) {
5656 			drm_dp_mst_atomic_enable_dsc(state,
5657 						     aconnector->port,
5658 						     dm_conn_state->pbn,
5659 						     0,
5660 						     false);
5661 			continue;
5662 		}
5663 
5664 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5665 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5666 		clock = stream->timing.pix_clk_100hz / 10;
5667 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5668 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5669 						    aconnector->port,
5670 						    pbn, pbn_div,
5671 						    true);
5672 		if (vcpi < 0)
5673 			return vcpi;
5674 
5675 		dm_conn_state->pbn = pbn;
5676 		dm_conn_state->vcpi_slots = vcpi;
5677 	}
5678 	return 0;
5679 }
5680 #endif
5681 
5682 static void dm_drm_plane_reset(struct drm_plane *plane)
5683 {
5684 	struct dm_plane_state *amdgpu_state = NULL;
5685 
5686 	if (plane->state)
5687 		plane->funcs->atomic_destroy_state(plane, plane->state);
5688 
5689 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5690 	WARN_ON(amdgpu_state == NULL);
5691 
5692 	if (amdgpu_state)
5693 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5694 }
5695 
5696 static struct drm_plane_state *
5697 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5698 {
5699 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5700 
5701 	old_dm_plane_state = to_dm_plane_state(plane->state);
5702 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5703 	if (!dm_plane_state)
5704 		return NULL;
5705 
5706 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5707 
5708 	if (old_dm_plane_state->dc_state) {
5709 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5710 		dc_plane_state_retain(dm_plane_state->dc_state);
5711 	}
5712 
5713 	/* Framebuffer hasn't been updated yet, so retain old flags. */
5714 	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
5715 	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;
5716 
5717 	return &dm_plane_state->base;
5718 }
5719 
5720 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5721 				struct drm_plane_state *state)
5722 {
5723 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5724 
5725 	if (dm_plane_state->dc_state)
5726 		dc_plane_state_release(dm_plane_state->dc_state);
5727 
5728 	drm_atomic_helper_plane_destroy_state(plane, state);
5729 }
5730 
5731 static const struct drm_plane_funcs dm_plane_funcs = {
5732 	.update_plane	= drm_atomic_helper_update_plane,
5733 	.disable_plane	= drm_atomic_helper_disable_plane,
5734 	.destroy	= drm_primary_helper_destroy,
5735 	.reset = dm_drm_plane_reset,
5736 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5737 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5738 };
5739 
5740 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5741 				      struct drm_plane_state *new_state)
5742 {
5743 	struct amdgpu_framebuffer *afb;
5744 	struct drm_gem_object *obj;
5745 	struct amdgpu_device *adev;
5746 	struct amdgpu_bo *rbo;
5747 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5748 	struct list_head list;
5749 	struct ttm_validate_buffer tv;
5750 	struct ww_acquire_ctx ticket;
5751 	uint32_t domain;
5752 	int r;
5753 
5754 	if (!new_state->fb) {
5755 		DRM_DEBUG_DRIVER("No FB bound\n");
5756 		return 0;
5757 	}
5758 
5759 	afb = to_amdgpu_framebuffer(new_state->fb);
5760 	obj = new_state->fb->obj[0];
5761 	rbo = gem_to_amdgpu_bo(obj);
5762 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5763 	INIT_LIST_HEAD(&list);
5764 
5765 	tv.bo = &rbo->tbo;
5766 	tv.num_shared = 1;
5767 	list_add(&tv.head, &list);
5768 
5769 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5770 	if (r) {
5771 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5772 		return r;
5773 	}
5774 
5775 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5776 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5777 	else
5778 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5779 
5780 	r = amdgpu_bo_pin(rbo, domain);
5781 	if (unlikely(r != 0)) {
5782 		if (r != -ERESTARTSYS)
5783 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5784 		ttm_eu_backoff_reservation(&ticket, &list);
5785 		return r;
5786 	}
5787 
5788 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5789 	if (unlikely(r != 0)) {
5790 		amdgpu_bo_unpin(rbo);
5791 		ttm_eu_backoff_reservation(&ticket, &list);
5792 		DRM_ERROR("%p bind failed\n", rbo);
5793 		return r;
5794 	}
5795 
5796 	ttm_eu_backoff_reservation(&ticket, &list);
5797 
5798 	afb->address = amdgpu_bo_gpu_offset(rbo);
5799 
5800 	amdgpu_bo_ref(rbo);
5801 
5802 	/**
5803 	 * We don't do surface updates on planes that have been newly created,
5804 	 * but we also don't have the afb->address during atomic check.
5805 	 *
5806 	 * Fill in buffer attributes depending on the address here, but only on
5807 	 * newly created planes since they're not being used by DC yet and this
5808 	 * won't modify global state.
5809 	 */
5810 	dm_plane_state_old = to_dm_plane_state(plane->state);
5811 	dm_plane_state_new = to_dm_plane_state(new_state);
5812 
5813 	if (dm_plane_state_new->dc_state &&
5814 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5815 		struct dc_plane_state *plane_state =
5816 			dm_plane_state_new->dc_state;
5817 		bool force_disable_dcc = !plane_state->dcc.enable;
5818 
5819 		fill_plane_buffer_attributes(
5820 			adev, afb, plane_state->format, plane_state->rotation,
5821 			dm_plane_state_new->tiling_flags,
5822 			&plane_state->tiling_info, &plane_state->plane_size,
5823 			&plane_state->dcc, &plane_state->address,
5824 			dm_plane_state_new->tmz_surface, force_disable_dcc);
5825 	}
5826 
5827 	return 0;
5828 }
5829 
5830 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5831 				       struct drm_plane_state *old_state)
5832 {
5833 	struct amdgpu_bo *rbo;
5834 	int r;
5835 
5836 	if (!old_state->fb)
5837 		return;
5838 
5839 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5840 	r = amdgpu_bo_reserve(rbo, false);
5841 	if (unlikely(r)) {
5842 		DRM_ERROR("failed to reserve rbo before unpin\n");
5843 		return;
5844 	}
5845 
5846 	amdgpu_bo_unpin(rbo);
5847 	amdgpu_bo_unreserve(rbo);
5848 	amdgpu_bo_unref(&rbo);
5849 }
5850 
5851 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5852 				       struct drm_crtc_state *new_crtc_state)
5853 {
5854 	int max_downscale = 0;
5855 	int max_upscale = INT_MAX;
5856 
5857 	/* TODO: These should be checked against DC plane caps */
5858 	return drm_atomic_helper_check_plane_state(
5859 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5860 }
5861 
5862 static int dm_plane_atomic_check(struct drm_plane *plane,
5863 				 struct drm_plane_state *state)
5864 {
5865 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
5866 	struct dc *dc = adev->dm.dc;
5867 	struct dm_plane_state *dm_plane_state;
5868 	struct dc_scaling_info scaling_info;
5869 	struct drm_crtc_state *new_crtc_state;
5870 	int ret;
5871 
5872 	dm_plane_state = to_dm_plane_state(state);
5873 
5874 	if (!dm_plane_state->dc_state)
5875 		return 0;
5876 
5877 	new_crtc_state =
5878 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5879 	if (!new_crtc_state)
5880 		return -EINVAL;
5881 
5882 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5883 	if (ret)
5884 		return ret;
5885 
5886 	ret = fill_dc_scaling_info(state, &scaling_info);
5887 	if (ret)
5888 		return ret;
5889 
5890 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5891 		return 0;
5892 
5893 	return -EINVAL;
5894 }
5895 
5896 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5897 				       struct drm_plane_state *new_plane_state)
5898 {
5899 	/* Only support async updates on cursor planes. */
5900 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5901 		return -EINVAL;
5902 
5903 	return 0;
5904 }
5905 
5906 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5907 					 struct drm_plane_state *new_state)
5908 {
5909 	struct drm_plane_state *old_state =
5910 		drm_atomic_get_old_plane_state(new_state->state, plane);
5911 
5912 	swap(plane->state->fb, new_state->fb);
5913 
5914 	plane->state->src_x = new_state->src_x;
5915 	plane->state->src_y = new_state->src_y;
5916 	plane->state->src_w = new_state->src_w;
5917 	plane->state->src_h = new_state->src_h;
5918 	plane->state->crtc_x = new_state->crtc_x;
5919 	plane->state->crtc_y = new_state->crtc_y;
5920 	plane->state->crtc_w = new_state->crtc_w;
5921 	plane->state->crtc_h = new_state->crtc_h;
5922 
5923 	handle_cursor_update(plane, old_state);
5924 }
5925 
5926 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5927 	.prepare_fb = dm_plane_helper_prepare_fb,
5928 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5929 	.atomic_check = dm_plane_atomic_check,
5930 	.atomic_async_check = dm_plane_atomic_async_check,
5931 	.atomic_async_update = dm_plane_atomic_async_update
5932 };
5933 
5934 /*
5935  * TODO: these are currently initialized to rgb formats only.
5936  * For future use cases we should either initialize them dynamically based on
5937  * plane capabilities, or initialize this array to all formats, so internal drm
5938  * check will succeed, and let DC implement proper check
5939  */
5940 static const uint32_t rgb_formats[] = {
5941 	DRM_FORMAT_XRGB8888,
5942 	DRM_FORMAT_ARGB8888,
5943 	DRM_FORMAT_RGBA8888,
5944 	DRM_FORMAT_XRGB2101010,
5945 	DRM_FORMAT_XBGR2101010,
5946 	DRM_FORMAT_ARGB2101010,
5947 	DRM_FORMAT_ABGR2101010,
5948 	DRM_FORMAT_XBGR8888,
5949 	DRM_FORMAT_ABGR8888,
5950 	DRM_FORMAT_RGB565,
5951 };
5952 
5953 static const uint32_t overlay_formats[] = {
5954 	DRM_FORMAT_XRGB8888,
5955 	DRM_FORMAT_ARGB8888,
5956 	DRM_FORMAT_RGBA8888,
5957 	DRM_FORMAT_XBGR8888,
5958 	DRM_FORMAT_ABGR8888,
5959 	DRM_FORMAT_RGB565
5960 };
5961 
5962 static const u32 cursor_formats[] = {
5963 	DRM_FORMAT_ARGB8888
5964 };
5965 
5966 static int get_plane_formats(const struct drm_plane *plane,
5967 			     const struct dc_plane_cap *plane_cap,
5968 			     uint32_t *formats, int max_formats)
5969 {
5970 	int i, num_formats = 0;
5971 
5972 	/*
5973 	 * TODO: Query support for each group of formats directly from
5974 	 * DC plane caps. This will require adding more formats to the
5975 	 * caps list.
5976 	 */
5977 
5978 	switch (plane->type) {
5979 	case DRM_PLANE_TYPE_PRIMARY:
5980 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5981 			if (num_formats >= max_formats)
5982 				break;
5983 
5984 			formats[num_formats++] = rgb_formats[i];
5985 		}
5986 
5987 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5988 			formats[num_formats++] = DRM_FORMAT_NV12;
5989 		if (plane_cap && plane_cap->pixel_format_support.p010)
5990 			formats[num_formats++] = DRM_FORMAT_P010;
5991 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5992 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5993 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5994 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5995 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5996 		}
5997 		break;
5998 
5999 	case DRM_PLANE_TYPE_OVERLAY:
6000 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6001 			if (num_formats >= max_formats)
6002 				break;
6003 
6004 			formats[num_formats++] = overlay_formats[i];
6005 		}
6006 		break;
6007 
6008 	case DRM_PLANE_TYPE_CURSOR:
6009 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6010 			if (num_formats >= max_formats)
6011 				break;
6012 
6013 			formats[num_formats++] = cursor_formats[i];
6014 		}
6015 		break;
6016 	}
6017 
6018 	return num_formats;
6019 }
6020 
6021 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6022 				struct drm_plane *plane,
6023 				unsigned long possible_crtcs,
6024 				const struct dc_plane_cap *plane_cap)
6025 {
6026 	uint32_t formats[32];
6027 	int num_formats;
6028 	int res = -EPERM;
6029 	unsigned int supported_rotations;
6030 
6031 	num_formats = get_plane_formats(plane, plane_cap, formats,
6032 					ARRAY_SIZE(formats));
6033 
6034 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6035 				       &dm_plane_funcs, formats, num_formats,
6036 				       NULL, plane->type, NULL);
6037 	if (res)
6038 		return res;
6039 
6040 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6041 	    plane_cap && plane_cap->per_pixel_alpha) {
6042 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6043 					  BIT(DRM_MODE_BLEND_PREMULTI);
6044 
6045 		drm_plane_create_alpha_property(plane);
6046 		drm_plane_create_blend_mode_property(plane, blend_caps);
6047 	}
6048 
6049 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6050 	    plane_cap &&
6051 	    (plane_cap->pixel_format_support.nv12 ||
6052 	     plane_cap->pixel_format_support.p010)) {
6053 		/* This only affects YUV formats. */
6054 		drm_plane_create_color_properties(
6055 			plane,
6056 			BIT(DRM_COLOR_YCBCR_BT601) |
6057 			BIT(DRM_COLOR_YCBCR_BT709) |
6058 			BIT(DRM_COLOR_YCBCR_BT2020),
6059 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6060 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6061 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6062 	}
6063 
6064 	supported_rotations =
6065 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6066 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6067 
6068 	if (dm->adev->asic_type >= CHIP_BONAIRE)
6069 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6070 						   supported_rotations);
6071 
6072 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6073 
6074 	/* Create (reset) the plane state */
6075 	if (plane->funcs->reset)
6076 		plane->funcs->reset(plane);
6077 
6078 	return 0;
6079 }
6080 
6081 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6082 			       struct drm_plane *plane,
6083 			       uint32_t crtc_index)
6084 {
6085 	struct amdgpu_crtc *acrtc = NULL;
6086 	struct drm_plane *cursor_plane;
6087 
6088 	int res = -ENOMEM;
6089 
6090 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6091 	if (!cursor_plane)
6092 		goto fail;
6093 
6094 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6095 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6096 
6097 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6098 	if (!acrtc)
6099 		goto fail;
6100 
6101 	res = drm_crtc_init_with_planes(
6102 			dm->ddev,
6103 			&acrtc->base,
6104 			plane,
6105 			cursor_plane,
6106 			&amdgpu_dm_crtc_funcs, NULL);
6107 
6108 	if (res)
6109 		goto fail;
6110 
6111 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6112 
6113 	/* Create (reset) the plane state */
6114 	if (acrtc->base.funcs->reset)
6115 		acrtc->base.funcs->reset(&acrtc->base);
6116 
6117 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6118 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6119 
6120 	acrtc->crtc_id = crtc_index;
6121 	acrtc->base.enabled = false;
6122 	acrtc->otg_inst = -1;
6123 
6124 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6125 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6126 				   true, MAX_COLOR_LUT_ENTRIES);
6127 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6128 
6129 	return 0;
6130 
6131 fail:
6132 	kfree(acrtc);
6133 	kfree(cursor_plane);
6134 	return res;
6135 }
6136 
6137 
6138 static int to_drm_connector_type(enum signal_type st)
6139 {
6140 	switch (st) {
6141 	case SIGNAL_TYPE_HDMI_TYPE_A:
6142 		return DRM_MODE_CONNECTOR_HDMIA;
6143 	case SIGNAL_TYPE_EDP:
6144 		return DRM_MODE_CONNECTOR_eDP;
6145 	case SIGNAL_TYPE_LVDS:
6146 		return DRM_MODE_CONNECTOR_LVDS;
6147 	case SIGNAL_TYPE_RGB:
6148 		return DRM_MODE_CONNECTOR_VGA;
6149 	case SIGNAL_TYPE_DISPLAY_PORT:
6150 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6151 		return DRM_MODE_CONNECTOR_DisplayPort;
6152 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6153 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6154 		return DRM_MODE_CONNECTOR_DVID;
6155 	case SIGNAL_TYPE_VIRTUAL:
6156 		return DRM_MODE_CONNECTOR_VIRTUAL;
6157 
6158 	default:
6159 		return DRM_MODE_CONNECTOR_Unknown;
6160 	}
6161 }
6162 
6163 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6164 {
6165 	struct drm_encoder *encoder;
6166 
6167 	/* There is only one encoder per connector */
6168 	drm_connector_for_each_possible_encoder(connector, encoder)
6169 		return encoder;
6170 
6171 	return NULL;
6172 }
6173 
6174 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6175 {
6176 	struct drm_encoder *encoder;
6177 	struct amdgpu_encoder *amdgpu_encoder;
6178 
6179 	encoder = amdgpu_dm_connector_to_encoder(connector);
6180 
6181 	if (encoder == NULL)
6182 		return;
6183 
6184 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6185 
6186 	amdgpu_encoder->native_mode.clock = 0;
6187 
6188 	if (!list_empty(&connector->probed_modes)) {
6189 		struct drm_display_mode *preferred_mode = NULL;
6190 
6191 		list_for_each_entry(preferred_mode,
6192 				    &connector->probed_modes,
6193 				    head) {
6194 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6195 				amdgpu_encoder->native_mode = *preferred_mode;
6196 
6197 			break;
6198 		}
6199 
6200 	}
6201 }
6202 
6203 static struct drm_display_mode *
6204 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6205 			     char *name,
6206 			     int hdisplay, int vdisplay)
6207 {
6208 	struct drm_device *dev = encoder->dev;
6209 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6210 	struct drm_display_mode *mode = NULL;
6211 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6212 
6213 	mode = drm_mode_duplicate(dev, native_mode);
6214 
6215 	if (mode == NULL)
6216 		return NULL;
6217 
6218 	mode->hdisplay = hdisplay;
6219 	mode->vdisplay = vdisplay;
6220 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6221 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6222 
6223 	return mode;
6224 
6225 }
6226 
6227 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6228 						 struct drm_connector *connector)
6229 {
6230 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6231 	struct drm_display_mode *mode = NULL;
6232 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6233 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6234 				to_amdgpu_dm_connector(connector);
6235 	int i;
6236 	int n;
6237 	struct mode_size {
6238 		char name[DRM_DISPLAY_MODE_LEN];
6239 		int w;
6240 		int h;
6241 	} common_modes[] = {
6242 		{  "640x480",  640,  480},
6243 		{  "800x600",  800,  600},
6244 		{ "1024x768", 1024,  768},
6245 		{ "1280x720", 1280,  720},
6246 		{ "1280x800", 1280,  800},
6247 		{"1280x1024", 1280, 1024},
6248 		{ "1440x900", 1440,  900},
6249 		{"1680x1050", 1680, 1050},
6250 		{"1600x1200", 1600, 1200},
6251 		{"1920x1080", 1920, 1080},
6252 		{"1920x1200", 1920, 1200}
6253 	};
6254 
6255 	n = ARRAY_SIZE(common_modes);
6256 
6257 	for (i = 0; i < n; i++) {
6258 		struct drm_display_mode *curmode = NULL;
6259 		bool mode_existed = false;
6260 
6261 		if (common_modes[i].w > native_mode->hdisplay ||
6262 		    common_modes[i].h > native_mode->vdisplay ||
6263 		   (common_modes[i].w == native_mode->hdisplay &&
6264 		    common_modes[i].h == native_mode->vdisplay))
6265 			continue;
6266 
6267 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6268 			if (common_modes[i].w == curmode->hdisplay &&
6269 			    common_modes[i].h == curmode->vdisplay) {
6270 				mode_existed = true;
6271 				break;
6272 			}
6273 		}
6274 
6275 		if (mode_existed)
6276 			continue;
6277 
6278 		mode = amdgpu_dm_create_common_mode(encoder,
6279 				common_modes[i].name, common_modes[i].w,
6280 				common_modes[i].h);
6281 		drm_mode_probed_add(connector, mode);
6282 		amdgpu_dm_connector->num_modes++;
6283 	}
6284 }
6285 
6286 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6287 					      struct edid *edid)
6288 {
6289 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6290 			to_amdgpu_dm_connector(connector);
6291 
6292 	if (edid) {
6293 		/* empty probed_modes */
6294 		INIT_LIST_HEAD(&connector->probed_modes);
6295 		amdgpu_dm_connector->num_modes =
6296 				drm_add_edid_modes(connector, edid);
6297 
6298 		/* sorting the probed modes before calling function
6299 		 * amdgpu_dm_get_native_mode() since EDID can have
6300 		 * more than one preferred mode. The modes that are
6301 		 * later in the probed mode list could be of higher
6302 		 * and preferred resolution. For example, 3840x2160
6303 		 * resolution in base EDID preferred timing and 4096x2160
6304 		 * preferred resolution in DID extension block later.
6305 		 */
6306 		drm_mode_sort(&connector->probed_modes);
6307 		amdgpu_dm_get_native_mode(connector);
6308 	} else {
6309 		amdgpu_dm_connector->num_modes = 0;
6310 	}
6311 }
6312 
6313 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6314 {
6315 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6316 			to_amdgpu_dm_connector(connector);
6317 	struct drm_encoder *encoder;
6318 	struct edid *edid = amdgpu_dm_connector->edid;
6319 
6320 	encoder = amdgpu_dm_connector_to_encoder(connector);
6321 
6322 	if (!edid || !drm_edid_is_valid(edid)) {
6323 		amdgpu_dm_connector->num_modes =
6324 				drm_add_modes_noedid(connector, 640, 480);
6325 	} else {
6326 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6327 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6328 	}
6329 	amdgpu_dm_fbc_init(connector);
6330 
6331 	return amdgpu_dm_connector->num_modes;
6332 }
6333 
6334 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6335 				     struct amdgpu_dm_connector *aconnector,
6336 				     int connector_type,
6337 				     struct dc_link *link,
6338 				     int link_index)
6339 {
6340 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6341 
6342 	/*
6343 	 * Some of the properties below require access to state, like bpc.
6344 	 * Allocate some default initial connector state with our reset helper.
6345 	 */
6346 	if (aconnector->base.funcs->reset)
6347 		aconnector->base.funcs->reset(&aconnector->base);
6348 
6349 	aconnector->connector_id = link_index;
6350 	aconnector->dc_link = link;
6351 	aconnector->base.interlace_allowed = false;
6352 	aconnector->base.doublescan_allowed = false;
6353 	aconnector->base.stereo_allowed = false;
6354 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6355 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6356 	aconnector->audio_inst = -1;
6357 	mutex_init(&aconnector->hpd_lock);
6358 
6359 	/*
6360 	 * configure support HPD hot plug connector_>polled default value is 0
6361 	 * which means HPD hot plug not supported
6362 	 */
6363 	switch (connector_type) {
6364 	case DRM_MODE_CONNECTOR_HDMIA:
6365 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6366 		aconnector->base.ycbcr_420_allowed =
6367 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6368 		break;
6369 	case DRM_MODE_CONNECTOR_DisplayPort:
6370 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6371 		aconnector->base.ycbcr_420_allowed =
6372 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6373 		break;
6374 	case DRM_MODE_CONNECTOR_DVID:
6375 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6376 		break;
6377 	default:
6378 		break;
6379 	}
6380 
6381 	drm_object_attach_property(&aconnector->base.base,
6382 				dm->ddev->mode_config.scaling_mode_property,
6383 				DRM_MODE_SCALE_NONE);
6384 
6385 	drm_object_attach_property(&aconnector->base.base,
6386 				adev->mode_info.underscan_property,
6387 				UNDERSCAN_OFF);
6388 	drm_object_attach_property(&aconnector->base.base,
6389 				adev->mode_info.underscan_hborder_property,
6390 				0);
6391 	drm_object_attach_property(&aconnector->base.base,
6392 				adev->mode_info.underscan_vborder_property,
6393 				0);
6394 
6395 	if (!aconnector->mst_port)
6396 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6397 
6398 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6399 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6400 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6401 
6402 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6403 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6404 		drm_object_attach_property(&aconnector->base.base,
6405 				adev->mode_info.abm_level_property, 0);
6406 	}
6407 
6408 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6409 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6410 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6411 		drm_object_attach_property(
6412 			&aconnector->base.base,
6413 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6414 
6415 		if (!aconnector->mst_port)
6416 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6417 
6418 #ifdef CONFIG_DRM_AMD_DC_HDCP
6419 		if (adev->dm.hdcp_workqueue)
6420 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6421 #endif
6422 	}
6423 }
6424 
6425 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6426 			      struct i2c_msg *msgs, int num)
6427 {
6428 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6429 	struct ddc_service *ddc_service = i2c->ddc_service;
6430 	struct i2c_command cmd;
6431 	int i;
6432 	int result = -EIO;
6433 
6434 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6435 
6436 	if (!cmd.payloads)
6437 		return result;
6438 
6439 	cmd.number_of_payloads = num;
6440 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6441 	cmd.speed = 100;
6442 
6443 	for (i = 0; i < num; i++) {
6444 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6445 		cmd.payloads[i].address = msgs[i].addr;
6446 		cmd.payloads[i].length = msgs[i].len;
6447 		cmd.payloads[i].data = msgs[i].buf;
6448 	}
6449 
6450 	if (dc_submit_i2c(
6451 			ddc_service->ctx->dc,
6452 			ddc_service->ddc_pin->hw_info.ddc_channel,
6453 			&cmd))
6454 		result = num;
6455 
6456 	kfree(cmd.payloads);
6457 	return result;
6458 }
6459 
6460 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6461 {
6462 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6463 }
6464 
6465 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6466 	.master_xfer = amdgpu_dm_i2c_xfer,
6467 	.functionality = amdgpu_dm_i2c_func,
6468 };
6469 
6470 static struct amdgpu_i2c_adapter *
6471 create_i2c(struct ddc_service *ddc_service,
6472 	   int link_index,
6473 	   int *res)
6474 {
6475 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6476 	struct amdgpu_i2c_adapter *i2c;
6477 
6478 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6479 	if (!i2c)
6480 		return NULL;
6481 	i2c->base.owner = THIS_MODULE;
6482 	i2c->base.class = I2C_CLASS_DDC;
6483 	i2c->base.dev.parent = &adev->pdev->dev;
6484 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6485 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6486 	i2c_set_adapdata(&i2c->base, i2c);
6487 	i2c->ddc_service = ddc_service;
6488 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6489 
6490 	return i2c;
6491 }
6492 
6493 
6494 /*
6495  * Note: this function assumes that dc_link_detect() was called for the
6496  * dc_link which will be represented by this aconnector.
6497  */
6498 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6499 				    struct amdgpu_dm_connector *aconnector,
6500 				    uint32_t link_index,
6501 				    struct amdgpu_encoder *aencoder)
6502 {
6503 	int res = 0;
6504 	int connector_type;
6505 	struct dc *dc = dm->dc;
6506 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6507 	struct amdgpu_i2c_adapter *i2c;
6508 
6509 	link->priv = aconnector;
6510 
6511 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6512 
6513 	i2c = create_i2c(link->ddc, link->link_index, &res);
6514 	if (!i2c) {
6515 		DRM_ERROR("Failed to create i2c adapter data\n");
6516 		return -ENOMEM;
6517 	}
6518 
6519 	aconnector->i2c = i2c;
6520 	res = i2c_add_adapter(&i2c->base);
6521 
6522 	if (res) {
6523 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6524 		goto out_free;
6525 	}
6526 
6527 	connector_type = to_drm_connector_type(link->connector_signal);
6528 
6529 	res = drm_connector_init_with_ddc(
6530 			dm->ddev,
6531 			&aconnector->base,
6532 			&amdgpu_dm_connector_funcs,
6533 			connector_type,
6534 			&i2c->base);
6535 
6536 	if (res) {
6537 		DRM_ERROR("connector_init failed\n");
6538 		aconnector->connector_id = -1;
6539 		goto out_free;
6540 	}
6541 
6542 	drm_connector_helper_add(
6543 			&aconnector->base,
6544 			&amdgpu_dm_connector_helper_funcs);
6545 
6546 	amdgpu_dm_connector_init_helper(
6547 		dm,
6548 		aconnector,
6549 		connector_type,
6550 		link,
6551 		link_index);
6552 
6553 	drm_connector_attach_encoder(
6554 		&aconnector->base, &aencoder->base);
6555 
6556 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6557 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6558 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6559 
6560 out_free:
6561 	if (res) {
6562 		kfree(i2c);
6563 		aconnector->i2c = NULL;
6564 	}
6565 	return res;
6566 }
6567 
6568 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6569 {
6570 	switch (adev->mode_info.num_crtc) {
6571 	case 1:
6572 		return 0x1;
6573 	case 2:
6574 		return 0x3;
6575 	case 3:
6576 		return 0x7;
6577 	case 4:
6578 		return 0xf;
6579 	case 5:
6580 		return 0x1f;
6581 	case 6:
6582 	default:
6583 		return 0x3f;
6584 	}
6585 }
6586 
6587 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6588 				  struct amdgpu_encoder *aencoder,
6589 				  uint32_t link_index)
6590 {
6591 	struct amdgpu_device *adev = drm_to_adev(dev);
6592 
6593 	int res = drm_encoder_init(dev,
6594 				   &aencoder->base,
6595 				   &amdgpu_dm_encoder_funcs,
6596 				   DRM_MODE_ENCODER_TMDS,
6597 				   NULL);
6598 
6599 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6600 
6601 	if (!res)
6602 		aencoder->encoder_id = link_index;
6603 	else
6604 		aencoder->encoder_id = -1;
6605 
6606 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6607 
6608 	return res;
6609 }
6610 
6611 static void manage_dm_interrupts(struct amdgpu_device *adev,
6612 				 struct amdgpu_crtc *acrtc,
6613 				 bool enable)
6614 {
6615 	/*
6616 	 * We have no guarantee that the frontend index maps to the same
6617 	 * backend index - some even map to more than one.
6618 	 *
6619 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6620 	 */
6621 	int irq_type =
6622 		amdgpu_display_crtc_idx_to_irq_type(
6623 			adev,
6624 			acrtc->crtc_id);
6625 
6626 	if (enable) {
6627 		drm_crtc_vblank_on(&acrtc->base);
6628 		amdgpu_irq_get(
6629 			adev,
6630 			&adev->pageflip_irq,
6631 			irq_type);
6632 	} else {
6633 
6634 		amdgpu_irq_put(
6635 			adev,
6636 			&adev->pageflip_irq,
6637 			irq_type);
6638 		drm_crtc_vblank_off(&acrtc->base);
6639 	}
6640 }
6641 
6642 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6643 				      struct amdgpu_crtc *acrtc)
6644 {
6645 	int irq_type =
6646 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6647 
6648 	/**
6649 	 * This reads the current state for the IRQ and force reapplies
6650 	 * the setting to hardware.
6651 	 */
6652 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6653 }
6654 
6655 static bool
6656 is_scaling_state_different(const struct dm_connector_state *dm_state,
6657 			   const struct dm_connector_state *old_dm_state)
6658 {
6659 	if (dm_state->scaling != old_dm_state->scaling)
6660 		return true;
6661 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6662 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6663 			return true;
6664 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6665 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6666 			return true;
6667 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6668 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6669 		return true;
6670 	return false;
6671 }
6672 
6673 #ifdef CONFIG_DRM_AMD_DC_HDCP
6674 static bool is_content_protection_different(struct drm_connector_state *state,
6675 					    const struct drm_connector_state *old_state,
6676 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6677 {
6678 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6679 
6680 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6681 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6682 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6683 		return true;
6684 	}
6685 
6686 	/* CP is being re enabled, ignore this */
6687 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6688 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6689 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6690 		return false;
6691 	}
6692 
6693 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6694 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6695 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6696 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6697 
6698 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6699 	 * hot-plug, headless s3, dpms
6700 	 */
6701 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6702 	    aconnector->dc_sink != NULL)
6703 		return true;
6704 
6705 	if (old_state->content_protection == state->content_protection)
6706 		return false;
6707 
6708 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6709 		return true;
6710 
6711 	return false;
6712 }
6713 
6714 #endif
6715 static void remove_stream(struct amdgpu_device *adev,
6716 			  struct amdgpu_crtc *acrtc,
6717 			  struct dc_stream_state *stream)
6718 {
6719 	/* this is the update mode case */
6720 
6721 	acrtc->otg_inst = -1;
6722 	acrtc->enabled = false;
6723 }
6724 
6725 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6726 			       struct dc_cursor_position *position)
6727 {
6728 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6729 	int x, y;
6730 	int xorigin = 0, yorigin = 0;
6731 
6732 	position->enable = false;
6733 	position->x = 0;
6734 	position->y = 0;
6735 
6736 	if (!crtc || !plane->state->fb)
6737 		return 0;
6738 
6739 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6740 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6741 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6742 			  __func__,
6743 			  plane->state->crtc_w,
6744 			  plane->state->crtc_h);
6745 		return -EINVAL;
6746 	}
6747 
6748 	x = plane->state->crtc_x;
6749 	y = plane->state->crtc_y;
6750 
6751 	if (x <= -amdgpu_crtc->max_cursor_width ||
6752 	    y <= -amdgpu_crtc->max_cursor_height)
6753 		return 0;
6754 
6755 	if (x < 0) {
6756 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6757 		x = 0;
6758 	}
6759 	if (y < 0) {
6760 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6761 		y = 0;
6762 	}
6763 	position->enable = true;
6764 	position->translate_by_source = true;
6765 	position->x = x;
6766 	position->y = y;
6767 	position->x_hotspot = xorigin;
6768 	position->y_hotspot = yorigin;
6769 
6770 	return 0;
6771 }
6772 
6773 static void handle_cursor_update(struct drm_plane *plane,
6774 				 struct drm_plane_state *old_plane_state)
6775 {
6776 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6777 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6778 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6779 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6780 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6781 	uint64_t address = afb ? afb->address : 0;
6782 	struct dc_cursor_position position;
6783 	struct dc_cursor_attributes attributes;
6784 	int ret;
6785 
6786 	if (!plane->state->fb && !old_plane_state->fb)
6787 		return;
6788 
6789 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6790 			 __func__,
6791 			 amdgpu_crtc->crtc_id,
6792 			 plane->state->crtc_w,
6793 			 plane->state->crtc_h);
6794 
6795 	ret = get_cursor_position(plane, crtc, &position);
6796 	if (ret)
6797 		return;
6798 
6799 	if (!position.enable) {
6800 		/* turn off cursor */
6801 		if (crtc_state && crtc_state->stream) {
6802 			mutex_lock(&adev->dm.dc_lock);
6803 			dc_stream_set_cursor_position(crtc_state->stream,
6804 						      &position);
6805 			mutex_unlock(&adev->dm.dc_lock);
6806 		}
6807 		return;
6808 	}
6809 
6810 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6811 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6812 
6813 	memset(&attributes, 0, sizeof(attributes));
6814 	attributes.address.high_part = upper_32_bits(address);
6815 	attributes.address.low_part  = lower_32_bits(address);
6816 	attributes.width             = plane->state->crtc_w;
6817 	attributes.height            = plane->state->crtc_h;
6818 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6819 	attributes.rotation_angle    = 0;
6820 	attributes.attribute_flags.value = 0;
6821 
6822 	attributes.pitch = attributes.width;
6823 
6824 	if (crtc_state->stream) {
6825 		mutex_lock(&adev->dm.dc_lock);
6826 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6827 							 &attributes))
6828 			DRM_ERROR("DC failed to set cursor attributes\n");
6829 
6830 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6831 						   &position))
6832 			DRM_ERROR("DC failed to set cursor position\n");
6833 		mutex_unlock(&adev->dm.dc_lock);
6834 	}
6835 }
6836 
6837 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6838 {
6839 
6840 	assert_spin_locked(&acrtc->base.dev->event_lock);
6841 	WARN_ON(acrtc->event);
6842 
6843 	acrtc->event = acrtc->base.state->event;
6844 
6845 	/* Set the flip status */
6846 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6847 
6848 	/* Mark this event as consumed */
6849 	acrtc->base.state->event = NULL;
6850 
6851 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6852 						 acrtc->crtc_id);
6853 }
6854 
6855 static void update_freesync_state_on_stream(
6856 	struct amdgpu_display_manager *dm,
6857 	struct dm_crtc_state *new_crtc_state,
6858 	struct dc_stream_state *new_stream,
6859 	struct dc_plane_state *surface,
6860 	u32 flip_timestamp_in_us)
6861 {
6862 	struct mod_vrr_params vrr_params;
6863 	struct dc_info_packet vrr_infopacket = {0};
6864 	struct amdgpu_device *adev = dm->adev;
6865 	unsigned long flags;
6866 
6867 	if (!new_stream)
6868 		return;
6869 
6870 	/*
6871 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6872 	 * For now it's sufficient to just guard against these conditions.
6873 	 */
6874 
6875 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6876 		return;
6877 
6878 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6879 	vrr_params = new_crtc_state->vrr_params;
6880 
6881 	if (surface) {
6882 		mod_freesync_handle_preflip(
6883 			dm->freesync_module,
6884 			surface,
6885 			new_stream,
6886 			flip_timestamp_in_us,
6887 			&vrr_params);
6888 
6889 		if (adev->family < AMDGPU_FAMILY_AI &&
6890 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6891 			mod_freesync_handle_v_update(dm->freesync_module,
6892 						     new_stream, &vrr_params);
6893 
6894 			/* Need to call this before the frame ends. */
6895 			dc_stream_adjust_vmin_vmax(dm->dc,
6896 						   new_crtc_state->stream,
6897 						   &vrr_params.adjust);
6898 		}
6899 	}
6900 
6901 	mod_freesync_build_vrr_infopacket(
6902 		dm->freesync_module,
6903 		new_stream,
6904 		&vrr_params,
6905 		PACKET_TYPE_VRR,
6906 		TRANSFER_FUNC_UNKNOWN,
6907 		&vrr_infopacket);
6908 
6909 	new_crtc_state->freesync_timing_changed |=
6910 		(memcmp(&new_crtc_state->vrr_params.adjust,
6911 			&vrr_params.adjust,
6912 			sizeof(vrr_params.adjust)) != 0);
6913 
6914 	new_crtc_state->freesync_vrr_info_changed |=
6915 		(memcmp(&new_crtc_state->vrr_infopacket,
6916 			&vrr_infopacket,
6917 			sizeof(vrr_infopacket)) != 0);
6918 
6919 	new_crtc_state->vrr_params = vrr_params;
6920 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6921 
6922 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6923 	new_stream->vrr_infopacket = vrr_infopacket;
6924 
6925 	if (new_crtc_state->freesync_vrr_info_changed)
6926 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6927 			      new_crtc_state->base.crtc->base.id,
6928 			      (int)new_crtc_state->base.vrr_enabled,
6929 			      (int)vrr_params.state);
6930 
6931 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6932 }
6933 
6934 static void pre_update_freesync_state_on_stream(
6935 	struct amdgpu_display_manager *dm,
6936 	struct dm_crtc_state *new_crtc_state)
6937 {
6938 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6939 	struct mod_vrr_params vrr_params;
6940 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6941 	struct amdgpu_device *adev = dm->adev;
6942 	unsigned long flags;
6943 
6944 	if (!new_stream)
6945 		return;
6946 
6947 	/*
6948 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6949 	 * For now it's sufficient to just guard against these conditions.
6950 	 */
6951 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6952 		return;
6953 
6954 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6955 	vrr_params = new_crtc_state->vrr_params;
6956 
6957 	if (new_crtc_state->vrr_supported &&
6958 	    config.min_refresh_in_uhz &&
6959 	    config.max_refresh_in_uhz) {
6960 		config.state = new_crtc_state->base.vrr_enabled ?
6961 			VRR_STATE_ACTIVE_VARIABLE :
6962 			VRR_STATE_INACTIVE;
6963 	} else {
6964 		config.state = VRR_STATE_UNSUPPORTED;
6965 	}
6966 
6967 	mod_freesync_build_vrr_params(dm->freesync_module,
6968 				      new_stream,
6969 				      &config, &vrr_params);
6970 
6971 	new_crtc_state->freesync_timing_changed |=
6972 		(memcmp(&new_crtc_state->vrr_params.adjust,
6973 			&vrr_params.adjust,
6974 			sizeof(vrr_params.adjust)) != 0);
6975 
6976 	new_crtc_state->vrr_params = vrr_params;
6977 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6978 }
6979 
6980 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6981 					    struct dm_crtc_state *new_state)
6982 {
6983 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6984 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6985 
6986 	if (!old_vrr_active && new_vrr_active) {
6987 		/* Transition VRR inactive -> active:
6988 		 * While VRR is active, we must not disable vblank irq, as a
6989 		 * reenable after disable would compute bogus vblank/pflip
6990 		 * timestamps if it likely happened inside display front-porch.
6991 		 *
6992 		 * We also need vupdate irq for the actual core vblank handling
6993 		 * at end of vblank.
6994 		 */
6995 		dm_set_vupdate_irq(new_state->base.crtc, true);
6996 		drm_crtc_vblank_get(new_state->base.crtc);
6997 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6998 				 __func__, new_state->base.crtc->base.id);
6999 	} else if (old_vrr_active && !new_vrr_active) {
7000 		/* Transition VRR active -> inactive:
7001 		 * Allow vblank irq disable again for fixed refresh rate.
7002 		 */
7003 		dm_set_vupdate_irq(new_state->base.crtc, false);
7004 		drm_crtc_vblank_put(new_state->base.crtc);
7005 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7006 				 __func__, new_state->base.crtc->base.id);
7007 	}
7008 }
7009 
7010 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7011 {
7012 	struct drm_plane *plane;
7013 	struct drm_plane_state *old_plane_state, *new_plane_state;
7014 	int i;
7015 
7016 	/*
7017 	 * TODO: Make this per-stream so we don't issue redundant updates for
7018 	 * commits with multiple streams.
7019 	 */
7020 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7021 				       new_plane_state, i)
7022 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7023 			handle_cursor_update(plane, old_plane_state);
7024 }
7025 
7026 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7027 				    struct dc_state *dc_state,
7028 				    struct drm_device *dev,
7029 				    struct amdgpu_display_manager *dm,
7030 				    struct drm_crtc *pcrtc,
7031 				    bool wait_for_vblank)
7032 {
7033 	uint32_t i;
7034 	uint64_t timestamp_ns;
7035 	struct drm_plane *plane;
7036 	struct drm_plane_state *old_plane_state, *new_plane_state;
7037 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7038 	struct drm_crtc_state *new_pcrtc_state =
7039 			drm_atomic_get_new_crtc_state(state, pcrtc);
7040 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7041 	struct dm_crtc_state *dm_old_crtc_state =
7042 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7043 	int planes_count = 0, vpos, hpos;
7044 	long r;
7045 	unsigned long flags;
7046 	struct amdgpu_bo *abo;
7047 	uint32_t target_vblank, last_flip_vblank;
7048 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7049 	bool pflip_present = false;
7050 	struct {
7051 		struct dc_surface_update surface_updates[MAX_SURFACES];
7052 		struct dc_plane_info plane_infos[MAX_SURFACES];
7053 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7054 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7055 		struct dc_stream_update stream_update;
7056 	} *bundle;
7057 
7058 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7059 
7060 	if (!bundle) {
7061 		dm_error("Failed to allocate update bundle\n");
7062 		goto cleanup;
7063 	}
7064 
7065 	/*
7066 	 * Disable the cursor first if we're disabling all the planes.
7067 	 * It'll remain on the screen after the planes are re-enabled
7068 	 * if we don't.
7069 	 */
7070 	if (acrtc_state->active_planes == 0)
7071 		amdgpu_dm_commit_cursors(state);
7072 
7073 	/* update planes when needed */
7074 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7075 		struct drm_crtc *crtc = new_plane_state->crtc;
7076 		struct drm_crtc_state *new_crtc_state;
7077 		struct drm_framebuffer *fb = new_plane_state->fb;
7078 		bool plane_needs_flip;
7079 		struct dc_plane_state *dc_plane;
7080 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7081 
7082 		/* Cursor plane is handled after stream updates */
7083 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7084 			continue;
7085 
7086 		if (!fb || !crtc || pcrtc != crtc)
7087 			continue;
7088 
7089 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7090 		if (!new_crtc_state->active)
7091 			continue;
7092 
7093 		dc_plane = dm_new_plane_state->dc_state;
7094 
7095 		bundle->surface_updates[planes_count].surface = dc_plane;
7096 		if (new_pcrtc_state->color_mgmt_changed) {
7097 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7098 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7099 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7100 		}
7101 
7102 		fill_dc_scaling_info(new_plane_state,
7103 				     &bundle->scaling_infos[planes_count]);
7104 
7105 		bundle->surface_updates[planes_count].scaling_info =
7106 			&bundle->scaling_infos[planes_count];
7107 
7108 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7109 
7110 		pflip_present = pflip_present || plane_needs_flip;
7111 
7112 		if (!plane_needs_flip) {
7113 			planes_count += 1;
7114 			continue;
7115 		}
7116 
7117 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7118 
7119 		/*
7120 		 * Wait for all fences on this FB. Do limited wait to avoid
7121 		 * deadlock during GPU reset when this fence will not signal
7122 		 * but we hold reservation lock for the BO.
7123 		 */
7124 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7125 							false,
7126 							msecs_to_jiffies(5000));
7127 		if (unlikely(r <= 0))
7128 			DRM_ERROR("Waiting for fences timed out!");
7129 
7130 		fill_dc_plane_info_and_addr(
7131 			dm->adev, new_plane_state,
7132 			dm_new_plane_state->tiling_flags,
7133 			&bundle->plane_infos[planes_count],
7134 			&bundle->flip_addrs[planes_count].address,
7135 			dm_new_plane_state->tmz_surface, false);
7136 
7137 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7138 				 new_plane_state->plane->index,
7139 				 bundle->plane_infos[planes_count].dcc.enable);
7140 
7141 		bundle->surface_updates[planes_count].plane_info =
7142 			&bundle->plane_infos[planes_count];
7143 
7144 		/*
7145 		 * Only allow immediate flips for fast updates that don't
7146 		 * change FB pitch, DCC state, rotation or mirroing.
7147 		 */
7148 		bundle->flip_addrs[planes_count].flip_immediate =
7149 			crtc->state->async_flip &&
7150 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7151 
7152 		timestamp_ns = ktime_get_ns();
7153 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7154 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7155 		bundle->surface_updates[planes_count].surface = dc_plane;
7156 
7157 		if (!bundle->surface_updates[planes_count].surface) {
7158 			DRM_ERROR("No surface for CRTC: id=%d\n",
7159 					acrtc_attach->crtc_id);
7160 			continue;
7161 		}
7162 
7163 		if (plane == pcrtc->primary)
7164 			update_freesync_state_on_stream(
7165 				dm,
7166 				acrtc_state,
7167 				acrtc_state->stream,
7168 				dc_plane,
7169 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7170 
7171 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7172 				 __func__,
7173 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7174 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7175 
7176 		planes_count += 1;
7177 
7178 	}
7179 
7180 	if (pflip_present) {
7181 		if (!vrr_active) {
7182 			/* Use old throttling in non-vrr fixed refresh rate mode
7183 			 * to keep flip scheduling based on target vblank counts
7184 			 * working in a backwards compatible way, e.g., for
7185 			 * clients using the GLX_OML_sync_control extension or
7186 			 * DRI3/Present extension with defined target_msc.
7187 			 */
7188 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7189 		}
7190 		else {
7191 			/* For variable refresh rate mode only:
7192 			 * Get vblank of last completed flip to avoid > 1 vrr
7193 			 * flips per video frame by use of throttling, but allow
7194 			 * flip programming anywhere in the possibly large
7195 			 * variable vrr vblank interval for fine-grained flip
7196 			 * timing control and more opportunity to avoid stutter
7197 			 * on late submission of flips.
7198 			 */
7199 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7200 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7201 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7202 		}
7203 
7204 		target_vblank = last_flip_vblank + wait_for_vblank;
7205 
7206 		/*
7207 		 * Wait until we're out of the vertical blank period before the one
7208 		 * targeted by the flip
7209 		 */
7210 		while ((acrtc_attach->enabled &&
7211 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7212 							    0, &vpos, &hpos, NULL,
7213 							    NULL, &pcrtc->hwmode)
7214 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7215 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7216 			(int)(target_vblank -
7217 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7218 			usleep_range(1000, 1100);
7219 		}
7220 
7221 		/**
7222 		 * Prepare the flip event for the pageflip interrupt to handle.
7223 		 *
7224 		 * This only works in the case where we've already turned on the
7225 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7226 		 * from 0 -> n planes we have to skip a hardware generated event
7227 		 * and rely on sending it from software.
7228 		 */
7229 		if (acrtc_attach->base.state->event &&
7230 		    acrtc_state->active_planes > 0) {
7231 			drm_crtc_vblank_get(pcrtc);
7232 
7233 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7234 
7235 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7236 			prepare_flip_isr(acrtc_attach);
7237 
7238 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7239 		}
7240 
7241 		if (acrtc_state->stream) {
7242 			if (acrtc_state->freesync_vrr_info_changed)
7243 				bundle->stream_update.vrr_infopacket =
7244 					&acrtc_state->stream->vrr_infopacket;
7245 		}
7246 	}
7247 
7248 	/* Update the planes if changed or disable if we don't have any. */
7249 	if ((planes_count || acrtc_state->active_planes == 0) &&
7250 		acrtc_state->stream) {
7251 		bundle->stream_update.stream = acrtc_state->stream;
7252 		if (new_pcrtc_state->mode_changed) {
7253 			bundle->stream_update.src = acrtc_state->stream->src;
7254 			bundle->stream_update.dst = acrtc_state->stream->dst;
7255 		}
7256 
7257 		if (new_pcrtc_state->color_mgmt_changed) {
7258 			/*
7259 			 * TODO: This isn't fully correct since we've actually
7260 			 * already modified the stream in place.
7261 			 */
7262 			bundle->stream_update.gamut_remap =
7263 				&acrtc_state->stream->gamut_remap_matrix;
7264 			bundle->stream_update.output_csc_transform =
7265 				&acrtc_state->stream->csc_color_matrix;
7266 			bundle->stream_update.out_transfer_func =
7267 				acrtc_state->stream->out_transfer_func;
7268 		}
7269 
7270 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7271 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7272 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7273 
7274 		/*
7275 		 * If FreeSync state on the stream has changed then we need to
7276 		 * re-adjust the min/max bounds now that DC doesn't handle this
7277 		 * as part of commit.
7278 		 */
7279 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7280 		    amdgpu_dm_vrr_active(acrtc_state)) {
7281 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7282 			dc_stream_adjust_vmin_vmax(
7283 				dm->dc, acrtc_state->stream,
7284 				&acrtc_state->vrr_params.adjust);
7285 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7286 		}
7287 		mutex_lock(&dm->dc_lock);
7288 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7289 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7290 			amdgpu_dm_psr_disable(acrtc_state->stream);
7291 
7292 		dc_commit_updates_for_stream(dm->dc,
7293 						     bundle->surface_updates,
7294 						     planes_count,
7295 						     acrtc_state->stream,
7296 						     &bundle->stream_update,
7297 						     dc_state);
7298 
7299 		/**
7300 		 * Enable or disable the interrupts on the backend.
7301 		 *
7302 		 * Most pipes are put into power gating when unused.
7303 		 *
7304 		 * When power gating is enabled on a pipe we lose the
7305 		 * interrupt enablement state when power gating is disabled.
7306 		 *
7307 		 * So we need to update the IRQ control state in hardware
7308 		 * whenever the pipe turns on (since it could be previously
7309 		 * power gated) or off (since some pipes can't be power gated
7310 		 * on some ASICs).
7311 		 */
7312 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7313 			dm_update_pflip_irq_state(drm_to_adev(dev),
7314 						  acrtc_attach);
7315 
7316 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7317 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7318 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7319 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7320 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7321 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7322 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7323 			amdgpu_dm_psr_enable(acrtc_state->stream);
7324 		}
7325 
7326 		mutex_unlock(&dm->dc_lock);
7327 	}
7328 
7329 	/*
7330 	 * Update cursor state *after* programming all the planes.
7331 	 * This avoids redundant programming in the case where we're going
7332 	 * to be disabling a single plane - those pipes are being disabled.
7333 	 */
7334 	if (acrtc_state->active_planes)
7335 		amdgpu_dm_commit_cursors(state);
7336 
7337 cleanup:
7338 	kfree(bundle);
7339 }
7340 
7341 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7342 				   struct drm_atomic_state *state)
7343 {
7344 	struct amdgpu_device *adev = drm_to_adev(dev);
7345 	struct amdgpu_dm_connector *aconnector;
7346 	struct drm_connector *connector;
7347 	struct drm_connector_state *old_con_state, *new_con_state;
7348 	struct drm_crtc_state *new_crtc_state;
7349 	struct dm_crtc_state *new_dm_crtc_state;
7350 	const struct dc_stream_status *status;
7351 	int i, inst;
7352 
7353 	/* Notify device removals. */
7354 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7355 		if (old_con_state->crtc != new_con_state->crtc) {
7356 			/* CRTC changes require notification. */
7357 			goto notify;
7358 		}
7359 
7360 		if (!new_con_state->crtc)
7361 			continue;
7362 
7363 		new_crtc_state = drm_atomic_get_new_crtc_state(
7364 			state, new_con_state->crtc);
7365 
7366 		if (!new_crtc_state)
7367 			continue;
7368 
7369 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7370 			continue;
7371 
7372 	notify:
7373 		aconnector = to_amdgpu_dm_connector(connector);
7374 
7375 		mutex_lock(&adev->dm.audio_lock);
7376 		inst = aconnector->audio_inst;
7377 		aconnector->audio_inst = -1;
7378 		mutex_unlock(&adev->dm.audio_lock);
7379 
7380 		amdgpu_dm_audio_eld_notify(adev, inst);
7381 	}
7382 
7383 	/* Notify audio device additions. */
7384 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7385 		if (!new_con_state->crtc)
7386 			continue;
7387 
7388 		new_crtc_state = drm_atomic_get_new_crtc_state(
7389 			state, new_con_state->crtc);
7390 
7391 		if (!new_crtc_state)
7392 			continue;
7393 
7394 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7395 			continue;
7396 
7397 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7398 		if (!new_dm_crtc_state->stream)
7399 			continue;
7400 
7401 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7402 		if (!status)
7403 			continue;
7404 
7405 		aconnector = to_amdgpu_dm_connector(connector);
7406 
7407 		mutex_lock(&adev->dm.audio_lock);
7408 		inst = status->audio_inst;
7409 		aconnector->audio_inst = inst;
7410 		mutex_unlock(&adev->dm.audio_lock);
7411 
7412 		amdgpu_dm_audio_eld_notify(adev, inst);
7413 	}
7414 }
7415 
7416 /*
7417  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7418  * @crtc_state: the DRM CRTC state
7419  * @stream_state: the DC stream state.
7420  *
7421  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7422  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7423  */
7424 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7425 						struct dc_stream_state *stream_state)
7426 {
7427 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7428 }
7429 
7430 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7431 				   struct drm_atomic_state *state,
7432 				   bool nonblock)
7433 {
7434 	struct drm_crtc *crtc;
7435 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7436 	struct amdgpu_device *adev = drm_to_adev(dev);
7437 	int i;
7438 
7439 	/*
7440 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7441 	 * a modeset, being disabled, or have no active planes.
7442 	 *
7443 	 * It's done in atomic commit rather than commit tail for now since
7444 	 * some of these interrupt handlers access the current CRTC state and
7445 	 * potentially the stream pointer itself.
7446 	 *
7447 	 * Since the atomic state is swapped within atomic commit and not within
7448 	 * commit tail this would leave to new state (that hasn't been committed yet)
7449 	 * being accesssed from within the handlers.
7450 	 *
7451 	 * TODO: Fix this so we can do this in commit tail and not have to block
7452 	 * in atomic check.
7453 	 */
7454 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7455 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7456 
7457 		if (old_crtc_state->active &&
7458 		    (!new_crtc_state->active ||
7459 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7460 			manage_dm_interrupts(adev, acrtc, false);
7461 	}
7462 	/*
7463 	 * Add check here for SoC's that support hardware cursor plane, to
7464 	 * unset legacy_cursor_update
7465 	 */
7466 
7467 	return drm_atomic_helper_commit(dev, state, nonblock);
7468 
7469 	/*TODO Handle EINTR, reenable IRQ*/
7470 }
7471 
7472 /**
7473  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7474  * @state: The atomic state to commit
7475  *
7476  * This will tell DC to commit the constructed DC state from atomic_check,
7477  * programming the hardware. Any failures here implies a hardware failure, since
7478  * atomic check should have filtered anything non-kosher.
7479  */
7480 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7481 {
7482 	struct drm_device *dev = state->dev;
7483 	struct amdgpu_device *adev = drm_to_adev(dev);
7484 	struct amdgpu_display_manager *dm = &adev->dm;
7485 	struct dm_atomic_state *dm_state;
7486 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7487 	uint32_t i, j;
7488 	struct drm_crtc *crtc;
7489 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7490 	unsigned long flags;
7491 	bool wait_for_vblank = true;
7492 	struct drm_connector *connector;
7493 	struct drm_connector_state *old_con_state, *new_con_state;
7494 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7495 	int crtc_disable_count = 0;
7496 	bool mode_set_reset_required = false;
7497 
7498 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7499 	drm_atomic_helper_calc_timestamping_constants(state);
7500 
7501 	dm_state = dm_atomic_get_new_state(state);
7502 	if (dm_state && dm_state->context) {
7503 		dc_state = dm_state->context;
7504 	} else {
7505 		/* No state changes, retain current state. */
7506 		dc_state_temp = dc_create_state(dm->dc);
7507 		ASSERT(dc_state_temp);
7508 		dc_state = dc_state_temp;
7509 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7510 	}
7511 
7512 	/* update changed items */
7513 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7514 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7515 
7516 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7517 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7518 
7519 		DRM_DEBUG_DRIVER(
7520 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7521 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7522 			"connectors_changed:%d\n",
7523 			acrtc->crtc_id,
7524 			new_crtc_state->enable,
7525 			new_crtc_state->active,
7526 			new_crtc_state->planes_changed,
7527 			new_crtc_state->mode_changed,
7528 			new_crtc_state->active_changed,
7529 			new_crtc_state->connectors_changed);
7530 
7531 		/* Copy all transient state flags into dc state */
7532 		if (dm_new_crtc_state->stream) {
7533 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7534 							    dm_new_crtc_state->stream);
7535 		}
7536 
7537 		/* handles headless hotplug case, updating new_state and
7538 		 * aconnector as needed
7539 		 */
7540 
7541 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7542 
7543 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7544 
7545 			if (!dm_new_crtc_state->stream) {
7546 				/*
7547 				 * this could happen because of issues with
7548 				 * userspace notifications delivery.
7549 				 * In this case userspace tries to set mode on
7550 				 * display which is disconnected in fact.
7551 				 * dc_sink is NULL in this case on aconnector.
7552 				 * We expect reset mode will come soon.
7553 				 *
7554 				 * This can also happen when unplug is done
7555 				 * during resume sequence ended
7556 				 *
7557 				 * In this case, we want to pretend we still
7558 				 * have a sink to keep the pipe running so that
7559 				 * hw state is consistent with the sw state
7560 				 */
7561 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7562 						__func__, acrtc->base.base.id);
7563 				continue;
7564 			}
7565 
7566 			if (dm_old_crtc_state->stream)
7567 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7568 
7569 			pm_runtime_get_noresume(dev->dev);
7570 
7571 			acrtc->enabled = true;
7572 			acrtc->hw_mode = new_crtc_state->mode;
7573 			crtc->hwmode = new_crtc_state->mode;
7574 			mode_set_reset_required = true;
7575 		} else if (modereset_required(new_crtc_state)) {
7576 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7577 			/* i.e. reset mode */
7578 			if (dm_old_crtc_state->stream)
7579 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7580 			mode_set_reset_required = true;
7581 		}
7582 	} /* for_each_crtc_in_state() */
7583 
7584 	if (dc_state) {
7585 		/* if there mode set or reset, disable eDP PSR */
7586 		if (mode_set_reset_required)
7587 			amdgpu_dm_psr_disable_all(dm);
7588 
7589 		dm_enable_per_frame_crtc_master_sync(dc_state);
7590 		mutex_lock(&dm->dc_lock);
7591 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7592 		mutex_unlock(&dm->dc_lock);
7593 	}
7594 
7595 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7596 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7597 
7598 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7599 
7600 		if (dm_new_crtc_state->stream != NULL) {
7601 			const struct dc_stream_status *status =
7602 					dc_stream_get_status(dm_new_crtc_state->stream);
7603 
7604 			if (!status)
7605 				status = dc_stream_get_status_from_state(dc_state,
7606 									 dm_new_crtc_state->stream);
7607 
7608 			if (!status)
7609 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7610 			else
7611 				acrtc->otg_inst = status->primary_otg_inst;
7612 		}
7613 	}
7614 #ifdef CONFIG_DRM_AMD_DC_HDCP
7615 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7616 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7617 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7618 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7619 
7620 		new_crtc_state = NULL;
7621 
7622 		if (acrtc)
7623 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7624 
7625 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7626 
7627 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7628 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7629 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7630 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7631 			continue;
7632 		}
7633 
7634 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7635 			hdcp_update_display(
7636 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7637 				new_con_state->hdcp_content_type,
7638 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7639 													 : false);
7640 	}
7641 #endif
7642 
7643 	/* Handle connector state changes */
7644 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7645 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7646 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7647 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7648 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7649 		struct dc_stream_update stream_update;
7650 		struct dc_info_packet hdr_packet;
7651 		struct dc_stream_status *status = NULL;
7652 		bool abm_changed, hdr_changed, scaling_changed;
7653 
7654 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7655 		memset(&stream_update, 0, sizeof(stream_update));
7656 
7657 		if (acrtc) {
7658 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7659 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7660 		}
7661 
7662 		/* Skip any modesets/resets */
7663 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7664 			continue;
7665 
7666 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7667 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7668 
7669 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7670 							     dm_old_con_state);
7671 
7672 		abm_changed = dm_new_crtc_state->abm_level !=
7673 			      dm_old_crtc_state->abm_level;
7674 
7675 		hdr_changed =
7676 			is_hdr_metadata_different(old_con_state, new_con_state);
7677 
7678 		if (!scaling_changed && !abm_changed && !hdr_changed)
7679 			continue;
7680 
7681 		stream_update.stream = dm_new_crtc_state->stream;
7682 		if (scaling_changed) {
7683 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7684 					dm_new_con_state, dm_new_crtc_state->stream);
7685 
7686 			stream_update.src = dm_new_crtc_state->stream->src;
7687 			stream_update.dst = dm_new_crtc_state->stream->dst;
7688 		}
7689 
7690 		if (abm_changed) {
7691 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7692 
7693 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7694 		}
7695 
7696 		if (hdr_changed) {
7697 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7698 			stream_update.hdr_static_metadata = &hdr_packet;
7699 		}
7700 
7701 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7702 		WARN_ON(!status);
7703 		WARN_ON(!status->plane_count);
7704 
7705 		/*
7706 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7707 		 * Here we create an empty update on each plane.
7708 		 * To fix this, DC should permit updating only stream properties.
7709 		 */
7710 		for (j = 0; j < status->plane_count; j++)
7711 			dummy_updates[j].surface = status->plane_states[0];
7712 
7713 
7714 		mutex_lock(&dm->dc_lock);
7715 		dc_commit_updates_for_stream(dm->dc,
7716 						     dummy_updates,
7717 						     status->plane_count,
7718 						     dm_new_crtc_state->stream,
7719 						     &stream_update,
7720 						     dc_state);
7721 		mutex_unlock(&dm->dc_lock);
7722 	}
7723 
7724 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7725 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7726 				      new_crtc_state, i) {
7727 		if (old_crtc_state->active && !new_crtc_state->active)
7728 			crtc_disable_count++;
7729 
7730 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7731 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7732 
7733 		/* Update freesync active state. */
7734 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7735 
7736 		/* Handle vrr on->off / off->on transitions */
7737 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7738 						dm_new_crtc_state);
7739 	}
7740 
7741 	/**
7742 	 * Enable interrupts for CRTCs that are newly enabled or went through
7743 	 * a modeset. It was intentionally deferred until after the front end
7744 	 * state was modified to wait until the OTG was on and so the IRQ
7745 	 * handlers didn't access stale or invalid state.
7746 	 */
7747 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7748 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7749 
7750 		if (new_crtc_state->active &&
7751 		    (!old_crtc_state->active ||
7752 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7753 			manage_dm_interrupts(adev, acrtc, true);
7754 #ifdef CONFIG_DEBUG_FS
7755 			/**
7756 			 * Frontend may have changed so reapply the CRC capture
7757 			 * settings for the stream.
7758 			 */
7759 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7760 
7761 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7762 				amdgpu_dm_crtc_configure_crc_source(
7763 					crtc, dm_new_crtc_state,
7764 					dm_new_crtc_state->crc_src);
7765 			}
7766 #endif
7767 		}
7768 	}
7769 
7770 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7771 		if (new_crtc_state->async_flip)
7772 			wait_for_vblank = false;
7773 
7774 	/* update planes when needed per crtc*/
7775 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7776 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7777 
7778 		if (dm_new_crtc_state->stream)
7779 			amdgpu_dm_commit_planes(state, dc_state, dev,
7780 						dm, crtc, wait_for_vblank);
7781 	}
7782 
7783 	/* Update audio instances for each connector. */
7784 	amdgpu_dm_commit_audio(dev, state);
7785 
7786 	/*
7787 	 * send vblank event on all events not handled in flip and
7788 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7789 	 */
7790 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7791 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7792 
7793 		if (new_crtc_state->event)
7794 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7795 
7796 		new_crtc_state->event = NULL;
7797 	}
7798 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7799 
7800 	/* Signal HW programming completion */
7801 	drm_atomic_helper_commit_hw_done(state);
7802 
7803 	if (wait_for_vblank)
7804 		drm_atomic_helper_wait_for_flip_done(dev, state);
7805 
7806 	drm_atomic_helper_cleanup_planes(dev, state);
7807 
7808 	/*
7809 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7810 	 * so we can put the GPU into runtime suspend if we're not driving any
7811 	 * displays anymore
7812 	 */
7813 	for (i = 0; i < crtc_disable_count; i++)
7814 		pm_runtime_put_autosuspend(dev->dev);
7815 	pm_runtime_mark_last_busy(dev->dev);
7816 
7817 	if (dc_state_temp)
7818 		dc_release_state(dc_state_temp);
7819 }
7820 
7821 
7822 static int dm_force_atomic_commit(struct drm_connector *connector)
7823 {
7824 	int ret = 0;
7825 	struct drm_device *ddev = connector->dev;
7826 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7827 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7828 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7829 	struct drm_connector_state *conn_state;
7830 	struct drm_crtc_state *crtc_state;
7831 	struct drm_plane_state *plane_state;
7832 
7833 	if (!state)
7834 		return -ENOMEM;
7835 
7836 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7837 
7838 	/* Construct an atomic state to restore previous display setting */
7839 
7840 	/*
7841 	 * Attach connectors to drm_atomic_state
7842 	 */
7843 	conn_state = drm_atomic_get_connector_state(state, connector);
7844 
7845 	ret = PTR_ERR_OR_ZERO(conn_state);
7846 	if (ret)
7847 		goto err;
7848 
7849 	/* Attach crtc to drm_atomic_state*/
7850 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7851 
7852 	ret = PTR_ERR_OR_ZERO(crtc_state);
7853 	if (ret)
7854 		goto err;
7855 
7856 	/* force a restore */
7857 	crtc_state->mode_changed = true;
7858 
7859 	/* Attach plane to drm_atomic_state */
7860 	plane_state = drm_atomic_get_plane_state(state, plane);
7861 
7862 	ret = PTR_ERR_OR_ZERO(plane_state);
7863 	if (ret)
7864 		goto err;
7865 
7866 
7867 	/* Call commit internally with the state we just constructed */
7868 	ret = drm_atomic_commit(state);
7869 	if (!ret)
7870 		return 0;
7871 
7872 err:
7873 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7874 	drm_atomic_state_put(state);
7875 
7876 	return ret;
7877 }
7878 
7879 /*
7880  * This function handles all cases when set mode does not come upon hotplug.
7881  * This includes when a display is unplugged then plugged back into the
7882  * same port and when running without usermode desktop manager supprot
7883  */
7884 void dm_restore_drm_connector_state(struct drm_device *dev,
7885 				    struct drm_connector *connector)
7886 {
7887 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7888 	struct amdgpu_crtc *disconnected_acrtc;
7889 	struct dm_crtc_state *acrtc_state;
7890 
7891 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7892 		return;
7893 
7894 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7895 	if (!disconnected_acrtc)
7896 		return;
7897 
7898 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7899 	if (!acrtc_state->stream)
7900 		return;
7901 
7902 	/*
7903 	 * If the previous sink is not released and different from the current,
7904 	 * we deduce we are in a state where we can not rely on usermode call
7905 	 * to turn on the display, so we do it here
7906 	 */
7907 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7908 		dm_force_atomic_commit(&aconnector->base);
7909 }
7910 
7911 /*
7912  * Grabs all modesetting locks to serialize against any blocking commits,
7913  * Waits for completion of all non blocking commits.
7914  */
7915 static int do_aquire_global_lock(struct drm_device *dev,
7916 				 struct drm_atomic_state *state)
7917 {
7918 	struct drm_crtc *crtc;
7919 	struct drm_crtc_commit *commit;
7920 	long ret;
7921 
7922 	/*
7923 	 * Adding all modeset locks to aquire_ctx will
7924 	 * ensure that when the framework release it the
7925 	 * extra locks we are locking here will get released to
7926 	 */
7927 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7928 	if (ret)
7929 		return ret;
7930 
7931 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7932 		spin_lock(&crtc->commit_lock);
7933 		commit = list_first_entry_or_null(&crtc->commit_list,
7934 				struct drm_crtc_commit, commit_entry);
7935 		if (commit)
7936 			drm_crtc_commit_get(commit);
7937 		spin_unlock(&crtc->commit_lock);
7938 
7939 		if (!commit)
7940 			continue;
7941 
7942 		/*
7943 		 * Make sure all pending HW programming completed and
7944 		 * page flips done
7945 		 */
7946 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7947 
7948 		if (ret > 0)
7949 			ret = wait_for_completion_interruptible_timeout(
7950 					&commit->flip_done, 10*HZ);
7951 
7952 		if (ret == 0)
7953 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7954 				  "timed out\n", crtc->base.id, crtc->name);
7955 
7956 		drm_crtc_commit_put(commit);
7957 	}
7958 
7959 	return ret < 0 ? ret : 0;
7960 }
7961 
7962 static void get_freesync_config_for_crtc(
7963 	struct dm_crtc_state *new_crtc_state,
7964 	struct dm_connector_state *new_con_state)
7965 {
7966 	struct mod_freesync_config config = {0};
7967 	struct amdgpu_dm_connector *aconnector =
7968 			to_amdgpu_dm_connector(new_con_state->base.connector);
7969 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7970 	int vrefresh = drm_mode_vrefresh(mode);
7971 
7972 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7973 					vrefresh >= aconnector->min_vfreq &&
7974 					vrefresh <= aconnector->max_vfreq;
7975 
7976 	if (new_crtc_state->vrr_supported) {
7977 		new_crtc_state->stream->ignore_msa_timing_param = true;
7978 		config.state = new_crtc_state->base.vrr_enabled ?
7979 				VRR_STATE_ACTIVE_VARIABLE :
7980 				VRR_STATE_INACTIVE;
7981 		config.min_refresh_in_uhz =
7982 				aconnector->min_vfreq * 1000000;
7983 		config.max_refresh_in_uhz =
7984 				aconnector->max_vfreq * 1000000;
7985 		config.vsif_supported = true;
7986 		config.btr = true;
7987 	}
7988 
7989 	new_crtc_state->freesync_config = config;
7990 }
7991 
7992 static void reset_freesync_config_for_crtc(
7993 	struct dm_crtc_state *new_crtc_state)
7994 {
7995 	new_crtc_state->vrr_supported = false;
7996 
7997 	memset(&new_crtc_state->vrr_params, 0,
7998 	       sizeof(new_crtc_state->vrr_params));
7999 	memset(&new_crtc_state->vrr_infopacket, 0,
8000 	       sizeof(new_crtc_state->vrr_infopacket));
8001 }
8002 
8003 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8004 				struct drm_atomic_state *state,
8005 				struct drm_crtc *crtc,
8006 				struct drm_crtc_state *old_crtc_state,
8007 				struct drm_crtc_state *new_crtc_state,
8008 				bool enable,
8009 				bool *lock_and_validation_needed)
8010 {
8011 	struct dm_atomic_state *dm_state = NULL;
8012 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8013 	struct dc_stream_state *new_stream;
8014 	int ret = 0;
8015 
8016 	/*
8017 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8018 	 * update changed items
8019 	 */
8020 	struct amdgpu_crtc *acrtc = NULL;
8021 	struct amdgpu_dm_connector *aconnector = NULL;
8022 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8023 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8024 
8025 	new_stream = NULL;
8026 
8027 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8028 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8029 	acrtc = to_amdgpu_crtc(crtc);
8030 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8031 
8032 	/* TODO This hack should go away */
8033 	if (aconnector && enable) {
8034 		/* Make sure fake sink is created in plug-in scenario */
8035 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8036 							    &aconnector->base);
8037 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8038 							    &aconnector->base);
8039 
8040 		if (IS_ERR(drm_new_conn_state)) {
8041 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8042 			goto fail;
8043 		}
8044 
8045 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8046 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8047 
8048 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8049 			goto skip_modeset;
8050 
8051 		new_stream = create_validate_stream_for_sink(aconnector,
8052 							     &new_crtc_state->mode,
8053 							     dm_new_conn_state,
8054 							     dm_old_crtc_state->stream);
8055 
8056 		/*
8057 		 * we can have no stream on ACTION_SET if a display
8058 		 * was disconnected during S3, in this case it is not an
8059 		 * error, the OS will be updated after detection, and
8060 		 * will do the right thing on next atomic commit
8061 		 */
8062 
8063 		if (!new_stream) {
8064 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8065 					__func__, acrtc->base.base.id);
8066 			ret = -ENOMEM;
8067 			goto fail;
8068 		}
8069 
8070 		/*
8071 		 * TODO: Check VSDB bits to decide whether this should
8072 		 * be enabled or not.
8073 		 */
8074 		new_stream->triggered_crtc_reset.enabled =
8075 			dm->force_timing_sync;
8076 
8077 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8078 
8079 		ret = fill_hdr_info_packet(drm_new_conn_state,
8080 					   &new_stream->hdr_static_metadata);
8081 		if (ret)
8082 			goto fail;
8083 
8084 		/*
8085 		 * If we already removed the old stream from the context
8086 		 * (and set the new stream to NULL) then we can't reuse
8087 		 * the old stream even if the stream and scaling are unchanged.
8088 		 * We'll hit the BUG_ON and black screen.
8089 		 *
8090 		 * TODO: Refactor this function to allow this check to work
8091 		 * in all conditions.
8092 		 */
8093 		if (dm_new_crtc_state->stream &&
8094 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8095 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8096 			new_crtc_state->mode_changed = false;
8097 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8098 					 new_crtc_state->mode_changed);
8099 		}
8100 	}
8101 
8102 	/* mode_changed flag may get updated above, need to check again */
8103 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8104 		goto skip_modeset;
8105 
8106 	DRM_DEBUG_DRIVER(
8107 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8108 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8109 		"connectors_changed:%d\n",
8110 		acrtc->crtc_id,
8111 		new_crtc_state->enable,
8112 		new_crtc_state->active,
8113 		new_crtc_state->planes_changed,
8114 		new_crtc_state->mode_changed,
8115 		new_crtc_state->active_changed,
8116 		new_crtc_state->connectors_changed);
8117 
8118 	/* Remove stream for any changed/disabled CRTC */
8119 	if (!enable) {
8120 
8121 		if (!dm_old_crtc_state->stream)
8122 			goto skip_modeset;
8123 
8124 		ret = dm_atomic_get_state(state, &dm_state);
8125 		if (ret)
8126 			goto fail;
8127 
8128 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8129 				crtc->base.id);
8130 
8131 		/* i.e. reset mode */
8132 		if (dc_remove_stream_from_ctx(
8133 				dm->dc,
8134 				dm_state->context,
8135 				dm_old_crtc_state->stream) != DC_OK) {
8136 			ret = -EINVAL;
8137 			goto fail;
8138 		}
8139 
8140 		dc_stream_release(dm_old_crtc_state->stream);
8141 		dm_new_crtc_state->stream = NULL;
8142 
8143 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8144 
8145 		*lock_and_validation_needed = true;
8146 
8147 	} else {/* Add stream for any updated/enabled CRTC */
8148 		/*
8149 		 * Quick fix to prevent NULL pointer on new_stream when
8150 		 * added MST connectors not found in existing crtc_state in the chained mode
8151 		 * TODO: need to dig out the root cause of that
8152 		 */
8153 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8154 			goto skip_modeset;
8155 
8156 		if (modereset_required(new_crtc_state))
8157 			goto skip_modeset;
8158 
8159 		if (modeset_required(new_crtc_state, new_stream,
8160 				     dm_old_crtc_state->stream)) {
8161 
8162 			WARN_ON(dm_new_crtc_state->stream);
8163 
8164 			ret = dm_atomic_get_state(state, &dm_state);
8165 			if (ret)
8166 				goto fail;
8167 
8168 			dm_new_crtc_state->stream = new_stream;
8169 
8170 			dc_stream_retain(new_stream);
8171 
8172 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8173 						crtc->base.id);
8174 
8175 			if (dc_add_stream_to_ctx(
8176 					dm->dc,
8177 					dm_state->context,
8178 					dm_new_crtc_state->stream) != DC_OK) {
8179 				ret = -EINVAL;
8180 				goto fail;
8181 			}
8182 
8183 			*lock_and_validation_needed = true;
8184 		}
8185 	}
8186 
8187 skip_modeset:
8188 	/* Release extra reference */
8189 	if (new_stream)
8190 		 dc_stream_release(new_stream);
8191 
8192 	/*
8193 	 * We want to do dc stream updates that do not require a
8194 	 * full modeset below.
8195 	 */
8196 	if (!(enable && aconnector && new_crtc_state->active))
8197 		return 0;
8198 	/*
8199 	 * Given above conditions, the dc state cannot be NULL because:
8200 	 * 1. We're in the process of enabling CRTCs (just been added
8201 	 *    to the dc context, or already is on the context)
8202 	 * 2. Has a valid connector attached, and
8203 	 * 3. Is currently active and enabled.
8204 	 * => The dc stream state currently exists.
8205 	 */
8206 	BUG_ON(dm_new_crtc_state->stream == NULL);
8207 
8208 	/* Scaling or underscan settings */
8209 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8210 		update_stream_scaling_settings(
8211 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8212 
8213 	/* ABM settings */
8214 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8215 
8216 	/*
8217 	 * Color management settings. We also update color properties
8218 	 * when a modeset is needed, to ensure it gets reprogrammed.
8219 	 */
8220 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8221 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8222 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8223 		if (ret)
8224 			goto fail;
8225 	}
8226 
8227 	/* Update Freesync settings. */
8228 	get_freesync_config_for_crtc(dm_new_crtc_state,
8229 				     dm_new_conn_state);
8230 
8231 	return ret;
8232 
8233 fail:
8234 	if (new_stream)
8235 		dc_stream_release(new_stream);
8236 	return ret;
8237 }
8238 
8239 static bool should_reset_plane(struct drm_atomic_state *state,
8240 			       struct drm_plane *plane,
8241 			       struct drm_plane_state *old_plane_state,
8242 			       struct drm_plane_state *new_plane_state)
8243 {
8244 	struct drm_plane *other;
8245 	struct drm_plane_state *old_other_state, *new_other_state;
8246 	struct drm_crtc_state *new_crtc_state;
8247 	int i;
8248 
8249 	/*
8250 	 * TODO: Remove this hack once the checks below are sufficient
8251 	 * enough to determine when we need to reset all the planes on
8252 	 * the stream.
8253 	 */
8254 	if (state->allow_modeset)
8255 		return true;
8256 
8257 	/* Exit early if we know that we're adding or removing the plane. */
8258 	if (old_plane_state->crtc != new_plane_state->crtc)
8259 		return true;
8260 
8261 	/* old crtc == new_crtc == NULL, plane not in context. */
8262 	if (!new_plane_state->crtc)
8263 		return false;
8264 
8265 	new_crtc_state =
8266 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8267 
8268 	if (!new_crtc_state)
8269 		return true;
8270 
8271 	/* CRTC Degamma changes currently require us to recreate planes. */
8272 	if (new_crtc_state->color_mgmt_changed)
8273 		return true;
8274 
8275 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8276 		return true;
8277 
8278 	/*
8279 	 * If there are any new primary or overlay planes being added or
8280 	 * removed then the z-order can potentially change. To ensure
8281 	 * correct z-order and pipe acquisition the current DC architecture
8282 	 * requires us to remove and recreate all existing planes.
8283 	 *
8284 	 * TODO: Come up with a more elegant solution for this.
8285 	 */
8286 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8287 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8288 
8289 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8290 			continue;
8291 
8292 		if (old_other_state->crtc != new_plane_state->crtc &&
8293 		    new_other_state->crtc != new_plane_state->crtc)
8294 			continue;
8295 
8296 		if (old_other_state->crtc != new_other_state->crtc)
8297 			return true;
8298 
8299 		/* Src/dst size and scaling updates. */
8300 		if (old_other_state->src_w != new_other_state->src_w ||
8301 		    old_other_state->src_h != new_other_state->src_h ||
8302 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8303 		    old_other_state->crtc_h != new_other_state->crtc_h)
8304 			return true;
8305 
8306 		/* Rotation / mirroring updates. */
8307 		if (old_other_state->rotation != new_other_state->rotation)
8308 			return true;
8309 
8310 		/* Blending updates. */
8311 		if (old_other_state->pixel_blend_mode !=
8312 		    new_other_state->pixel_blend_mode)
8313 			return true;
8314 
8315 		/* Alpha updates. */
8316 		if (old_other_state->alpha != new_other_state->alpha)
8317 			return true;
8318 
8319 		/* Colorspace changes. */
8320 		if (old_other_state->color_range != new_other_state->color_range ||
8321 		    old_other_state->color_encoding != new_other_state->color_encoding)
8322 			return true;
8323 
8324 		/* Framebuffer checks fall at the end. */
8325 		if (!old_other_state->fb || !new_other_state->fb)
8326 			continue;
8327 
8328 		/* Pixel format changes can require bandwidth updates. */
8329 		if (old_other_state->fb->format != new_other_state->fb->format)
8330 			return true;
8331 
8332 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8333 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8334 
8335 		/* Tiling and DCC changes also require bandwidth updates. */
8336 		if (old_dm_plane_state->tiling_flags !=
8337 		    new_dm_plane_state->tiling_flags)
8338 			return true;
8339 	}
8340 
8341 	return false;
8342 }
8343 
8344 static int dm_update_plane_state(struct dc *dc,
8345 				 struct drm_atomic_state *state,
8346 				 struct drm_plane *plane,
8347 				 struct drm_plane_state *old_plane_state,
8348 				 struct drm_plane_state *new_plane_state,
8349 				 bool enable,
8350 				 bool *lock_and_validation_needed)
8351 {
8352 
8353 	struct dm_atomic_state *dm_state = NULL;
8354 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8355 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8356 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8357 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8358 	struct amdgpu_crtc *new_acrtc;
8359 	bool needs_reset;
8360 	int ret = 0;
8361 
8362 
8363 	new_plane_crtc = new_plane_state->crtc;
8364 	old_plane_crtc = old_plane_state->crtc;
8365 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8366 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8367 
8368 	/*TODO Implement better atomic check for cursor plane */
8369 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8370 		if (!enable || !new_plane_crtc ||
8371 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8372 			return 0;
8373 
8374 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8375 
8376 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8377 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8378 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8379 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8380 			return -EINVAL;
8381 		}
8382 
8383 		return 0;
8384 	}
8385 
8386 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8387 					 new_plane_state);
8388 
8389 	/* Remove any changed/removed planes */
8390 	if (!enable) {
8391 		if (!needs_reset)
8392 			return 0;
8393 
8394 		if (!old_plane_crtc)
8395 			return 0;
8396 
8397 		old_crtc_state = drm_atomic_get_old_crtc_state(
8398 				state, old_plane_crtc);
8399 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8400 
8401 		if (!dm_old_crtc_state->stream)
8402 			return 0;
8403 
8404 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8405 				plane->base.id, old_plane_crtc->base.id);
8406 
8407 		ret = dm_atomic_get_state(state, &dm_state);
8408 		if (ret)
8409 			return ret;
8410 
8411 		if (!dc_remove_plane_from_context(
8412 				dc,
8413 				dm_old_crtc_state->stream,
8414 				dm_old_plane_state->dc_state,
8415 				dm_state->context)) {
8416 
8417 			return -EINVAL;
8418 		}
8419 
8420 
8421 		dc_plane_state_release(dm_old_plane_state->dc_state);
8422 		dm_new_plane_state->dc_state = NULL;
8423 
8424 		*lock_and_validation_needed = true;
8425 
8426 	} else { /* Add new planes */
8427 		struct dc_plane_state *dc_new_plane_state;
8428 
8429 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8430 			return 0;
8431 
8432 		if (!new_plane_crtc)
8433 			return 0;
8434 
8435 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8436 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8437 
8438 		if (!dm_new_crtc_state->stream)
8439 			return 0;
8440 
8441 		if (!needs_reset)
8442 			return 0;
8443 
8444 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8445 		if (ret)
8446 			return ret;
8447 
8448 		WARN_ON(dm_new_plane_state->dc_state);
8449 
8450 		dc_new_plane_state = dc_create_plane_state(dc);
8451 		if (!dc_new_plane_state)
8452 			return -ENOMEM;
8453 
8454 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8455 				plane->base.id, new_plane_crtc->base.id);
8456 
8457 		ret = fill_dc_plane_attributes(
8458 			drm_to_adev(new_plane_crtc->dev),
8459 			dc_new_plane_state,
8460 			new_plane_state,
8461 			new_crtc_state);
8462 		if (ret) {
8463 			dc_plane_state_release(dc_new_plane_state);
8464 			return ret;
8465 		}
8466 
8467 		ret = dm_atomic_get_state(state, &dm_state);
8468 		if (ret) {
8469 			dc_plane_state_release(dc_new_plane_state);
8470 			return ret;
8471 		}
8472 
8473 		/*
8474 		 * Any atomic check errors that occur after this will
8475 		 * not need a release. The plane state will be attached
8476 		 * to the stream, and therefore part of the atomic
8477 		 * state. It'll be released when the atomic state is
8478 		 * cleaned.
8479 		 */
8480 		if (!dc_add_plane_to_context(
8481 				dc,
8482 				dm_new_crtc_state->stream,
8483 				dc_new_plane_state,
8484 				dm_state->context)) {
8485 
8486 			dc_plane_state_release(dc_new_plane_state);
8487 			return -EINVAL;
8488 		}
8489 
8490 		dm_new_plane_state->dc_state = dc_new_plane_state;
8491 
8492 		/* Tell DC to do a full surface update every time there
8493 		 * is a plane change. Inefficient, but works for now.
8494 		 */
8495 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8496 
8497 		*lock_and_validation_needed = true;
8498 	}
8499 
8500 
8501 	return ret;
8502 }
8503 
8504 #if defined(CONFIG_DRM_AMD_DC_DCN)
8505 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8506 {
8507 	struct drm_connector *connector;
8508 	struct drm_connector_state *conn_state;
8509 	struct amdgpu_dm_connector *aconnector = NULL;
8510 	int i;
8511 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8512 		if (conn_state->crtc != crtc)
8513 			continue;
8514 
8515 		aconnector = to_amdgpu_dm_connector(connector);
8516 		if (!aconnector->port || !aconnector->mst_port)
8517 			aconnector = NULL;
8518 		else
8519 			break;
8520 	}
8521 
8522 	if (!aconnector)
8523 		return 0;
8524 
8525 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8526 }
8527 #endif
8528 
8529 /**
8530  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8531  * @dev: The DRM device
8532  * @state: The atomic state to commit
8533  *
8534  * Validate that the given atomic state is programmable by DC into hardware.
8535  * This involves constructing a &struct dc_state reflecting the new hardware
8536  * state we wish to commit, then querying DC to see if it is programmable. It's
8537  * important not to modify the existing DC state. Otherwise, atomic_check
8538  * may unexpectedly commit hardware changes.
8539  *
8540  * When validating the DC state, it's important that the right locks are
8541  * acquired. For full updates case which removes/adds/updates streams on one
8542  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8543  * that any such full update commit will wait for completion of any outstanding
8544  * flip using DRMs synchronization events.
8545  *
8546  * Note that DM adds the affected connectors for all CRTCs in state, when that
8547  * might not seem necessary. This is because DC stream creation requires the
8548  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8549  * be possible but non-trivial - a possible TODO item.
8550  *
8551  * Return: -Error code if validation failed.
8552  */
8553 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8554 				  struct drm_atomic_state *state)
8555 {
8556 	struct amdgpu_device *adev = drm_to_adev(dev);
8557 	struct dm_atomic_state *dm_state = NULL;
8558 	struct dc *dc = adev->dm.dc;
8559 	struct drm_connector *connector;
8560 	struct drm_connector_state *old_con_state, *new_con_state;
8561 	struct drm_crtc *crtc;
8562 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8563 	struct drm_plane *plane;
8564 	struct drm_plane_state *old_plane_state, *new_plane_state;
8565 	enum dc_status status;
8566 	int ret, i;
8567 	bool lock_and_validation_needed = false;
8568 
8569 	ret = drm_atomic_helper_check_modeset(dev, state);
8570 	if (ret)
8571 		goto fail;
8572 
8573 	/* Check connector changes */
8574 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8575 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8576 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8577 
8578 		/* Skip connectors that are disabled or part of modeset already. */
8579 		if (!old_con_state->crtc && !new_con_state->crtc)
8580 			continue;
8581 
8582 		if (!new_con_state->crtc)
8583 			continue;
8584 
8585 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8586 		if (IS_ERR(new_crtc_state)) {
8587 			ret = PTR_ERR(new_crtc_state);
8588 			goto fail;
8589 		}
8590 
8591 		if (dm_old_con_state->abm_level !=
8592 		    dm_new_con_state->abm_level)
8593 			new_crtc_state->connectors_changed = true;
8594 	}
8595 
8596 #if defined(CONFIG_DRM_AMD_DC_DCN)
8597 	if (adev->asic_type >= CHIP_NAVI10) {
8598 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8599 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8600 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8601 				if (ret)
8602 					goto fail;
8603 			}
8604 		}
8605 	}
8606 #endif
8607 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8608 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8609 		    !new_crtc_state->color_mgmt_changed &&
8610 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8611 			continue;
8612 
8613 		if (!new_crtc_state->enable)
8614 			continue;
8615 
8616 		ret = drm_atomic_add_affected_connectors(state, crtc);
8617 		if (ret)
8618 			return ret;
8619 
8620 		ret = drm_atomic_add_affected_planes(state, crtc);
8621 		if (ret)
8622 			goto fail;
8623 	}
8624 
8625 	/*
8626 	 * Add all primary and overlay planes on the CRTC to the state
8627 	 * whenever a plane is enabled to maintain correct z-ordering
8628 	 * and to enable fast surface updates.
8629 	 */
8630 	drm_for_each_crtc(crtc, dev) {
8631 		bool modified = false;
8632 
8633 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8634 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8635 				continue;
8636 
8637 			if (new_plane_state->crtc == crtc ||
8638 			    old_plane_state->crtc == crtc) {
8639 				modified = true;
8640 				break;
8641 			}
8642 		}
8643 
8644 		if (!modified)
8645 			continue;
8646 
8647 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8648 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8649 				continue;
8650 
8651 			new_plane_state =
8652 				drm_atomic_get_plane_state(state, plane);
8653 
8654 			if (IS_ERR(new_plane_state)) {
8655 				ret = PTR_ERR(new_plane_state);
8656 				goto fail;
8657 			}
8658 		}
8659 	}
8660 
8661 	/* Prepass for updating tiling flags on new planes. */
8662 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8663 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8664 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8665 
8666 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8667 				  &new_dm_plane_state->tmz_surface);
8668 		if (ret)
8669 			goto fail;
8670 	}
8671 
8672 	/* Remove exiting planes if they are modified */
8673 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8674 		ret = dm_update_plane_state(dc, state, plane,
8675 					    old_plane_state,
8676 					    new_plane_state,
8677 					    false,
8678 					    &lock_and_validation_needed);
8679 		if (ret)
8680 			goto fail;
8681 	}
8682 
8683 	/* Disable all crtcs which require disable */
8684 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8685 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8686 					   old_crtc_state,
8687 					   new_crtc_state,
8688 					   false,
8689 					   &lock_and_validation_needed);
8690 		if (ret)
8691 			goto fail;
8692 	}
8693 
8694 	/* Enable all crtcs which require enable */
8695 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8696 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8697 					   old_crtc_state,
8698 					   new_crtc_state,
8699 					   true,
8700 					   &lock_and_validation_needed);
8701 		if (ret)
8702 			goto fail;
8703 	}
8704 
8705 	/* Add new/modified planes */
8706 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8707 		ret = dm_update_plane_state(dc, state, plane,
8708 					    old_plane_state,
8709 					    new_plane_state,
8710 					    true,
8711 					    &lock_and_validation_needed);
8712 		if (ret)
8713 			goto fail;
8714 	}
8715 
8716 	/* Run this here since we want to validate the streams we created */
8717 	ret = drm_atomic_helper_check_planes(dev, state);
8718 	if (ret)
8719 		goto fail;
8720 
8721 	if (state->legacy_cursor_update) {
8722 		/*
8723 		 * This is a fast cursor update coming from the plane update
8724 		 * helper, check if it can be done asynchronously for better
8725 		 * performance.
8726 		 */
8727 		state->async_update =
8728 			!drm_atomic_helper_async_check(dev, state);
8729 
8730 		/*
8731 		 * Skip the remaining global validation if this is an async
8732 		 * update. Cursor updates can be done without affecting
8733 		 * state or bandwidth calcs and this avoids the performance
8734 		 * penalty of locking the private state object and
8735 		 * allocating a new dc_state.
8736 		 */
8737 		if (state->async_update)
8738 			return 0;
8739 	}
8740 
8741 	/* Check scaling and underscan changes*/
8742 	/* TODO Removed scaling changes validation due to inability to commit
8743 	 * new stream into context w\o causing full reset. Need to
8744 	 * decide how to handle.
8745 	 */
8746 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8747 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8748 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8749 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8750 
8751 		/* Skip any modesets/resets */
8752 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8753 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8754 			continue;
8755 
8756 		/* Skip any thing not scale or underscan changes */
8757 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8758 			continue;
8759 
8760 		lock_and_validation_needed = true;
8761 	}
8762 
8763 	/**
8764 	 * Streams and planes are reset when there are changes that affect
8765 	 * bandwidth. Anything that affects bandwidth needs to go through
8766 	 * DC global validation to ensure that the configuration can be applied
8767 	 * to hardware.
8768 	 *
8769 	 * We have to currently stall out here in atomic_check for outstanding
8770 	 * commits to finish in this case because our IRQ handlers reference
8771 	 * DRM state directly - we can end up disabling interrupts too early
8772 	 * if we don't.
8773 	 *
8774 	 * TODO: Remove this stall and drop DM state private objects.
8775 	 */
8776 	if (lock_and_validation_needed) {
8777 		ret = dm_atomic_get_state(state, &dm_state);
8778 		if (ret)
8779 			goto fail;
8780 
8781 		ret = do_aquire_global_lock(dev, state);
8782 		if (ret)
8783 			goto fail;
8784 
8785 #if defined(CONFIG_DRM_AMD_DC_DCN)
8786 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8787 			goto fail;
8788 
8789 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8790 		if (ret)
8791 			goto fail;
8792 #endif
8793 
8794 		/*
8795 		 * Perform validation of MST topology in the state:
8796 		 * We need to perform MST atomic check before calling
8797 		 * dc_validate_global_state(), or there is a chance
8798 		 * to get stuck in an infinite loop and hang eventually.
8799 		 */
8800 		ret = drm_dp_mst_atomic_check(state);
8801 		if (ret)
8802 			goto fail;
8803 		status = dc_validate_global_state(dc, dm_state->context, false);
8804 		if (status != DC_OK) {
8805 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8806 				       dc_status_to_str(status), status);
8807 			ret = -EINVAL;
8808 			goto fail;
8809 		}
8810 	} else {
8811 		/*
8812 		 * The commit is a fast update. Fast updates shouldn't change
8813 		 * the DC context, affect global validation, and can have their
8814 		 * commit work done in parallel with other commits not touching
8815 		 * the same resource. If we have a new DC context as part of
8816 		 * the DM atomic state from validation we need to free it and
8817 		 * retain the existing one instead.
8818 		 *
8819 		 * Furthermore, since the DM atomic state only contains the DC
8820 		 * context and can safely be annulled, we can free the state
8821 		 * and clear the associated private object now to free
8822 		 * some memory and avoid a possible use-after-free later.
8823 		 */
8824 
8825 		for (i = 0; i < state->num_private_objs; i++) {
8826 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8827 
8828 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8829 				int j = state->num_private_objs-1;
8830 
8831 				dm_atomic_destroy_state(obj,
8832 						state->private_objs[i].state);
8833 
8834 				/* If i is not at the end of the array then the
8835 				 * last element needs to be moved to where i was
8836 				 * before the array can safely be truncated.
8837 				 */
8838 				if (i != j)
8839 					state->private_objs[i] =
8840 						state->private_objs[j];
8841 
8842 				state->private_objs[j].ptr = NULL;
8843 				state->private_objs[j].state = NULL;
8844 				state->private_objs[j].old_state = NULL;
8845 				state->private_objs[j].new_state = NULL;
8846 
8847 				state->num_private_objs = j;
8848 				break;
8849 			}
8850 		}
8851 	}
8852 
8853 	/* Store the overall update type for use later in atomic check. */
8854 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8855 		struct dm_crtc_state *dm_new_crtc_state =
8856 			to_dm_crtc_state(new_crtc_state);
8857 
8858 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
8859 							 UPDATE_TYPE_FULL :
8860 							 UPDATE_TYPE_FAST;
8861 	}
8862 
8863 	/* Must be success */
8864 	WARN_ON(ret);
8865 	return ret;
8866 
8867 fail:
8868 	if (ret == -EDEADLK)
8869 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8870 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8871 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8872 	else
8873 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8874 
8875 	return ret;
8876 }
8877 
8878 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8879 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8880 {
8881 	uint8_t dpcd_data;
8882 	bool capable = false;
8883 
8884 	if (amdgpu_dm_connector->dc_link &&
8885 		dm_helpers_dp_read_dpcd(
8886 				NULL,
8887 				amdgpu_dm_connector->dc_link,
8888 				DP_DOWN_STREAM_PORT_COUNT,
8889 				&dpcd_data,
8890 				sizeof(dpcd_data))) {
8891 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8892 	}
8893 
8894 	return capable;
8895 }
8896 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8897 					struct edid *edid)
8898 {
8899 	int i;
8900 	bool edid_check_required;
8901 	struct detailed_timing *timing;
8902 	struct detailed_non_pixel *data;
8903 	struct detailed_data_monitor_range *range;
8904 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8905 			to_amdgpu_dm_connector(connector);
8906 	struct dm_connector_state *dm_con_state = NULL;
8907 
8908 	struct drm_device *dev = connector->dev;
8909 	struct amdgpu_device *adev = drm_to_adev(dev);
8910 	bool freesync_capable = false;
8911 
8912 	if (!connector->state) {
8913 		DRM_ERROR("%s - Connector has no state", __func__);
8914 		goto update;
8915 	}
8916 
8917 	if (!edid) {
8918 		dm_con_state = to_dm_connector_state(connector->state);
8919 
8920 		amdgpu_dm_connector->min_vfreq = 0;
8921 		amdgpu_dm_connector->max_vfreq = 0;
8922 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8923 
8924 		goto update;
8925 	}
8926 
8927 	dm_con_state = to_dm_connector_state(connector->state);
8928 
8929 	edid_check_required = false;
8930 	if (!amdgpu_dm_connector->dc_sink) {
8931 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8932 		goto update;
8933 	}
8934 	if (!adev->dm.freesync_module)
8935 		goto update;
8936 	/*
8937 	 * if edid non zero restrict freesync only for dp and edp
8938 	 */
8939 	if (edid) {
8940 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8941 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8942 			edid_check_required = is_dp_capable_without_timing_msa(
8943 						adev->dm.dc,
8944 						amdgpu_dm_connector);
8945 		}
8946 	}
8947 	if (edid_check_required == true && (edid->version > 1 ||
8948 	   (edid->version == 1 && edid->revision > 1))) {
8949 		for (i = 0; i < 4; i++) {
8950 
8951 			timing	= &edid->detailed_timings[i];
8952 			data	= &timing->data.other_data;
8953 			range	= &data->data.range;
8954 			/*
8955 			 * Check if monitor has continuous frequency mode
8956 			 */
8957 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8958 				continue;
8959 			/*
8960 			 * Check for flag range limits only. If flag == 1 then
8961 			 * no additional timing information provided.
8962 			 * Default GTF, GTF Secondary curve and CVT are not
8963 			 * supported
8964 			 */
8965 			if (range->flags != 1)
8966 				continue;
8967 
8968 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8969 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8970 			amdgpu_dm_connector->pixel_clock_mhz =
8971 				range->pixel_clock_mhz * 10;
8972 			break;
8973 		}
8974 
8975 		if (amdgpu_dm_connector->max_vfreq -
8976 		    amdgpu_dm_connector->min_vfreq > 10) {
8977 
8978 			freesync_capable = true;
8979 		}
8980 	}
8981 
8982 update:
8983 	if (dm_con_state)
8984 		dm_con_state->freesync_capable = freesync_capable;
8985 
8986 	if (connector->vrr_capable_property)
8987 		drm_connector_set_vrr_capable_property(connector,
8988 						       freesync_capable);
8989 }
8990 
8991 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8992 {
8993 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8994 
8995 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8996 		return;
8997 	if (link->type == dc_connection_none)
8998 		return;
8999 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9000 					dpcd_data, sizeof(dpcd_data))) {
9001 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9002 
9003 		if (dpcd_data[0] == 0) {
9004 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9005 			link->psr_settings.psr_feature_enabled = false;
9006 		} else {
9007 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9008 			link->psr_settings.psr_feature_enabled = true;
9009 		}
9010 
9011 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9012 	}
9013 }
9014 
9015 /*
9016  * amdgpu_dm_link_setup_psr() - configure psr link
9017  * @stream: stream state
9018  *
9019  * Return: true if success
9020  */
9021 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9022 {
9023 	struct dc_link *link = NULL;
9024 	struct psr_config psr_config = {0};
9025 	struct psr_context psr_context = {0};
9026 	bool ret = false;
9027 
9028 	if (stream == NULL)
9029 		return false;
9030 
9031 	link = stream->link;
9032 
9033 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9034 
9035 	if (psr_config.psr_version > 0) {
9036 		psr_config.psr_exit_link_training_required = 0x1;
9037 		psr_config.psr_frame_capture_indication_req = 0;
9038 		psr_config.psr_rfb_setup_time = 0x37;
9039 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9040 		psr_config.allow_smu_optimizations = 0x0;
9041 
9042 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9043 
9044 	}
9045 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9046 
9047 	return ret;
9048 }
9049 
9050 /*
9051  * amdgpu_dm_psr_enable() - enable psr f/w
9052  * @stream: stream state
9053  *
9054  * Return: true if success
9055  */
9056 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9057 {
9058 	struct dc_link *link = stream->link;
9059 	unsigned int vsync_rate_hz = 0;
9060 	struct dc_static_screen_params params = {0};
9061 	/* Calculate number of static frames before generating interrupt to
9062 	 * enter PSR.
9063 	 */
9064 	// Init fail safe of 2 frames static
9065 	unsigned int num_frames_static = 2;
9066 
9067 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9068 
9069 	vsync_rate_hz = div64_u64(div64_u64((
9070 			stream->timing.pix_clk_100hz * 100),
9071 			stream->timing.v_total),
9072 			stream->timing.h_total);
9073 
9074 	/* Round up
9075 	 * Calculate number of frames such that at least 30 ms of time has
9076 	 * passed.
9077 	 */
9078 	if (vsync_rate_hz != 0) {
9079 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9080 		num_frames_static = (30000 / frame_time_microsec) + 1;
9081 	}
9082 
9083 	params.triggers.cursor_update = true;
9084 	params.triggers.overlay_update = true;
9085 	params.triggers.surface_update = true;
9086 	params.num_frames = num_frames_static;
9087 
9088 	dc_stream_set_static_screen_params(link->ctx->dc,
9089 					   &stream, 1,
9090 					   &params);
9091 
9092 	return dc_link_set_psr_allow_active(link, true, false);
9093 }
9094 
9095 /*
9096  * amdgpu_dm_psr_disable() - disable psr f/w
9097  * @stream:  stream state
9098  *
9099  * Return: true if success
9100  */
9101 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9102 {
9103 
9104 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9105 
9106 	return dc_link_set_psr_allow_active(stream->link, false, true);
9107 }
9108 
9109 /*
9110  * amdgpu_dm_psr_disable() - disable psr f/w
9111  * if psr is enabled on any stream
9112  *
9113  * Return: true if success
9114  */
9115 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9116 {
9117 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9118 	return dc_set_psr_allow_active(dm->dc, false);
9119 }
9120 
9121 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9122 {
9123 	struct amdgpu_device *adev = drm_to_adev(dev);
9124 	struct dc *dc = adev->dm.dc;
9125 	int i;
9126 
9127 	mutex_lock(&adev->dm.dc_lock);
9128 	if (dc->current_state) {
9129 		for (i = 0; i < dc->current_state->stream_count; ++i)
9130 			dc->current_state->streams[i]
9131 				->triggered_crtc_reset.enabled =
9132 				adev->dm.force_timing_sync;
9133 
9134 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9135 		dc_trigger_sync(dc, dc->current_state);
9136 	}
9137 	mutex_unlock(&adev->dm.dc_lock);
9138 }
9139