1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
120  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 /*
131  * initializes drm_device display related structures, based on the information
132  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
133  * drm_encoder, drm_mode_config
134  *
135  * Returns 0 on success
136  */
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
140 
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
142 				struct drm_plane *plane,
143 				unsigned long possible_crtcs,
144 				const struct dc_plane_cap *plane_cap);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
146 			       struct drm_plane *plane,
147 			       uint32_t link_index);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
149 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
150 				    uint32_t link_index,
151 				    struct amdgpu_encoder *amdgpu_encoder);
152 static int amdgpu_dm_encoder_init(struct drm_device *dev,
153 				  struct amdgpu_encoder *aencoder,
154 				  uint32_t link_index);
155 
156 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
157 
158 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
159 				   struct drm_atomic_state *state,
160 				   bool nonblock);
161 
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
163 
164 static int amdgpu_dm_atomic_check(struct drm_device *dev,
165 				  struct drm_atomic_state *state);
166 
167 static void handle_cursor_update(struct drm_plane *plane,
168 				 struct drm_plane_state *old_plane_state);
169 
170 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
174 
175 
176 /*
177  * dm_vblank_get_counter
178  *
179  * @brief
180  * Get counter for number of vertical blanks
181  *
182  * @param
183  * struct amdgpu_device *adev - [in] desired amdgpu device
184  * int disp_idx - [in] which CRTC to get the counter from
185  *
186  * @return
187  * Counter for vertical blanks
188  */
189 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
190 {
191 	if (crtc >= adev->mode_info.num_crtc)
192 		return 0;
193 	else {
194 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
195 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
196 				acrtc->base.state);
197 
198 
199 		if (acrtc_state->stream == NULL) {
200 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
201 				  crtc);
202 			return 0;
203 		}
204 
205 		return dc_stream_get_vblank_counter(acrtc_state->stream);
206 	}
207 }
208 
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
210 				  u32 *vbl, u32 *position)
211 {
212 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
213 
214 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
215 		return -EINVAL;
216 	else {
217 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
218 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
219 						acrtc->base.state);
220 
221 		if (acrtc_state->stream ==  NULL) {
222 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
223 				  crtc);
224 			return 0;
225 		}
226 
227 		/*
228 		 * TODO rework base driver to use values directly.
229 		 * for now parse it back into reg-format
230 		 */
231 		dc_stream_get_scanoutpos(acrtc_state->stream,
232 					 &v_blank_start,
233 					 &v_blank_end,
234 					 &h_position,
235 					 &v_position);
236 
237 		*position = v_position | (h_position << 16);
238 		*vbl = v_blank_start | (v_blank_end << 16);
239 	}
240 
241 	return 0;
242 }
243 
244 static bool dm_is_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return true;
248 }
249 
250 static int dm_wait_for_idle(void *handle)
251 {
252 	/* XXX todo */
253 	return 0;
254 }
255 
256 static bool dm_check_soft_reset(void *handle)
257 {
258 	return false;
259 }
260 
261 static int dm_soft_reset(void *handle)
262 {
263 	/* XXX todo */
264 	return 0;
265 }
266 
267 static struct amdgpu_crtc *
268 get_crtc_by_otg_inst(struct amdgpu_device *adev,
269 		     int otg_inst)
270 {
271 	struct drm_device *dev = adev->ddev;
272 	struct drm_crtc *crtc;
273 	struct amdgpu_crtc *amdgpu_crtc;
274 
275 	if (otg_inst == -1) {
276 		WARN_ON(1);
277 		return adev->mode_info.crtcs[0];
278 	}
279 
280 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
281 		amdgpu_crtc = to_amdgpu_crtc(crtc);
282 
283 		if (amdgpu_crtc->otg_inst == otg_inst)
284 			return amdgpu_crtc;
285 	}
286 
287 	return NULL;
288 }
289 
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
291 {
292 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
293 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
294 }
295 
296 /**
297  * dm_pflip_high_irq() - Handle pageflip interrupt
298  * @interrupt_params: ignored
299  *
300  * Handles the pageflip interrupt by notifying all interested parties
301  * that the pageflip has been completed.
302  */
303 static void dm_pflip_high_irq(void *interrupt_params)
304 {
305 	struct amdgpu_crtc *amdgpu_crtc;
306 	struct common_irq_params *irq_params = interrupt_params;
307 	struct amdgpu_device *adev = irq_params->adev;
308 	unsigned long flags;
309 	struct drm_pending_vblank_event *e;
310 	struct dm_crtc_state *acrtc_state;
311 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
312 	bool vrr_active;
313 
314 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
315 
316 	/* IRQ could occur when in initial stage */
317 	/* TODO work and BO cleanup */
318 	if (amdgpu_crtc == NULL) {
319 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
320 		return;
321 	}
322 
323 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
324 
325 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
326 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
327 						 amdgpu_crtc->pflip_status,
328 						 AMDGPU_FLIP_SUBMITTED,
329 						 amdgpu_crtc->crtc_id,
330 						 amdgpu_crtc);
331 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
332 		return;
333 	}
334 
335 	/* page flip completed. */
336 	e = amdgpu_crtc->event;
337 	amdgpu_crtc->event = NULL;
338 
339 	if (!e)
340 		WARN_ON(1);
341 
342 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
343 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
344 
345 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
346 	if (!vrr_active ||
347 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
348 				      &v_blank_end, &hpos, &vpos) ||
349 	    (vpos < v_blank_start)) {
350 		/* Update to correct count and vblank timestamp if racing with
351 		 * vblank irq. This also updates to the correct vblank timestamp
352 		 * even in VRR mode, as scanout is past the front-porch atm.
353 		 */
354 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
355 
356 		/* Wake up userspace by sending the pageflip event with proper
357 		 * count and timestamp of vblank of flip completion.
358 		 */
359 		if (e) {
360 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
361 
362 			/* Event sent, so done with vblank for this flip */
363 			drm_crtc_vblank_put(&amdgpu_crtc->base);
364 		}
365 	} else if (e) {
366 		/* VRR active and inside front-porch: vblank count and
367 		 * timestamp for pageflip event will only be up to date after
368 		 * drm_crtc_handle_vblank() has been executed from late vblank
369 		 * irq handler after start of back-porch (vline 0). We queue the
370 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
371 		 * updated timestamp and count, once it runs after us.
372 		 *
373 		 * We need to open-code this instead of using the helper
374 		 * drm_crtc_arm_vblank_event(), as that helper would
375 		 * call drm_crtc_accurate_vblank_count(), which we must
376 		 * not call in VRR mode while we are in front-porch!
377 		 */
378 
379 		/* sequence will be replaced by real count during send-out. */
380 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
381 		e->pipe = amdgpu_crtc->crtc_id;
382 
383 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
384 		e = NULL;
385 	}
386 
387 	/* Keep track of vblank of this flip for flip throttling. We use the
388 	 * cooked hw counter, as that one incremented at start of this vblank
389 	 * of pageflip completion, so last_flip_vblank is the forbidden count
390 	 * for queueing new pageflips if vsync + VRR is enabled.
391 	 */
392 	amdgpu_crtc->last_flip_vblank =
393 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
394 
395 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
396 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
397 
398 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
400 			 vrr_active, (int) !e);
401 }
402 
403 static void dm_vupdate_high_irq(void *interrupt_params)
404 {
405 	struct common_irq_params *irq_params = interrupt_params;
406 	struct amdgpu_device *adev = irq_params->adev;
407 	struct amdgpu_crtc *acrtc;
408 	struct dm_crtc_state *acrtc_state;
409 	unsigned long flags;
410 
411 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
412 
413 	if (acrtc) {
414 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
415 
416 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
417 			      acrtc->crtc_id,
418 			      amdgpu_dm_vrr_active(acrtc_state));
419 
420 		/* Core vblank handling is done here after end of front-porch in
421 		 * vrr mode, as vblank timestamping will give valid results
422 		 * while now done after front-porch. This will also deliver
423 		 * page-flip completion events that have been queued to us
424 		 * if a pageflip happened inside front-porch.
425 		 */
426 		if (amdgpu_dm_vrr_active(acrtc_state)) {
427 			drm_crtc_handle_vblank(&acrtc->base);
428 
429 			/* BTR processing for pre-DCE12 ASICs */
430 			if (acrtc_state->stream &&
431 			    adev->family < AMDGPU_FAMILY_AI) {
432 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
433 				mod_freesync_handle_v_update(
434 				    adev->dm.freesync_module,
435 				    acrtc_state->stream,
436 				    &acrtc_state->vrr_params);
437 
438 				dc_stream_adjust_vmin_vmax(
439 				    adev->dm.dc,
440 				    acrtc_state->stream,
441 				    &acrtc_state->vrr_params.adjust);
442 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
443 			}
444 		}
445 	}
446 }
447 
448 /**
449  * dm_crtc_high_irq() - Handles CRTC interrupt
450  * @interrupt_params: used for determining the CRTC instance
451  *
452  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
453  * event handler.
454  */
455 static void dm_crtc_high_irq(void *interrupt_params)
456 {
457 	struct common_irq_params *irq_params = interrupt_params;
458 	struct amdgpu_device *adev = irq_params->adev;
459 	struct amdgpu_crtc *acrtc;
460 	struct dm_crtc_state *acrtc_state;
461 	unsigned long flags;
462 
463 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
464 	if (!acrtc)
465 		return;
466 
467 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
468 
469 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
470 			 amdgpu_dm_vrr_active(acrtc_state),
471 			 acrtc_state->active_planes);
472 
473 	/**
474 	 * Core vblank handling at start of front-porch is only possible
475 	 * in non-vrr mode, as only there vblank timestamping will give
476 	 * valid results while done in front-porch. Otherwise defer it
477 	 * to dm_vupdate_high_irq after end of front-porch.
478 	 */
479 	if (!amdgpu_dm_vrr_active(acrtc_state))
480 		drm_crtc_handle_vblank(&acrtc->base);
481 
482 	/**
483 	 * Following stuff must happen at start of vblank, for crc
484 	 * computation and below-the-range btr support in vrr mode.
485 	 */
486 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
487 
488 	/* BTR updates need to happen before VUPDATE on Vega and above. */
489 	if (adev->family < AMDGPU_FAMILY_AI)
490 		return;
491 
492 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
493 
494 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
495 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
496 		mod_freesync_handle_v_update(adev->dm.freesync_module,
497 					     acrtc_state->stream,
498 					     &acrtc_state->vrr_params);
499 
500 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
501 					   &acrtc_state->vrr_params.adjust);
502 	}
503 
504 	/*
505 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
506 	 * In that case, pageflip completion interrupts won't fire and pageflip
507 	 * completion events won't get delivered. Prevent this by sending
508 	 * pending pageflip events from here if a flip is still pending.
509 	 *
510 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
511 	 * avoid race conditions between flip programming and completion,
512 	 * which could cause too early flip completion events.
513 	 */
514 	if (adev->family >= AMDGPU_FAMILY_RV &&
515 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
516 	    acrtc_state->active_planes == 0) {
517 		if (acrtc->event) {
518 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
519 			acrtc->event = NULL;
520 			drm_crtc_vblank_put(&acrtc->base);
521 		}
522 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
523 	}
524 
525 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
526 }
527 
528 static int dm_set_clockgating_state(void *handle,
529 		  enum amd_clockgating_state state)
530 {
531 	return 0;
532 }
533 
534 static int dm_set_powergating_state(void *handle,
535 		  enum amd_powergating_state state)
536 {
537 	return 0;
538 }
539 
540 /* Prototypes of private functions */
541 static int dm_early_init(void* handle);
542 
543 /* Allocate memory for FBC compressed data  */
544 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
545 {
546 	struct drm_device *dev = connector->dev;
547 	struct amdgpu_device *adev = dev->dev_private;
548 	struct dm_comressor_info *compressor = &adev->dm.compressor;
549 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
550 	struct drm_display_mode *mode;
551 	unsigned long max_size = 0;
552 
553 	if (adev->dm.dc->fbc_compressor == NULL)
554 		return;
555 
556 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
557 		return;
558 
559 	if (compressor->bo_ptr)
560 		return;
561 
562 
563 	list_for_each_entry(mode, &connector->modes, head) {
564 		if (max_size < mode->htotal * mode->vtotal)
565 			max_size = mode->htotal * mode->vtotal;
566 	}
567 
568 	if (max_size) {
569 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
570 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
571 			    &compressor->gpu_addr, &compressor->cpu_addr);
572 
573 		if (r)
574 			DRM_ERROR("DM: Failed to initialize FBC\n");
575 		else {
576 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
577 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
578 		}
579 
580 	}
581 
582 }
583 
584 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
585 					  int pipe, bool *enabled,
586 					  unsigned char *buf, int max_bytes)
587 {
588 	struct drm_device *dev = dev_get_drvdata(kdev);
589 	struct amdgpu_device *adev = dev->dev_private;
590 	struct drm_connector *connector;
591 	struct drm_connector_list_iter conn_iter;
592 	struct amdgpu_dm_connector *aconnector;
593 	int ret = 0;
594 
595 	*enabled = false;
596 
597 	mutex_lock(&adev->dm.audio_lock);
598 
599 	drm_connector_list_iter_begin(dev, &conn_iter);
600 	drm_for_each_connector_iter(connector, &conn_iter) {
601 		aconnector = to_amdgpu_dm_connector(connector);
602 		if (aconnector->audio_inst != port)
603 			continue;
604 
605 		*enabled = true;
606 		ret = drm_eld_size(connector->eld);
607 		memcpy(buf, connector->eld, min(max_bytes, ret));
608 
609 		break;
610 	}
611 	drm_connector_list_iter_end(&conn_iter);
612 
613 	mutex_unlock(&adev->dm.audio_lock);
614 
615 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
616 
617 	return ret;
618 }
619 
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
621 	.get_eld = amdgpu_dm_audio_component_get_eld,
622 };
623 
624 static int amdgpu_dm_audio_component_bind(struct device *kdev,
625 				       struct device *hda_kdev, void *data)
626 {
627 	struct drm_device *dev = dev_get_drvdata(kdev);
628 	struct amdgpu_device *adev = dev->dev_private;
629 	struct drm_audio_component *acomp = data;
630 
631 	acomp->ops = &amdgpu_dm_audio_component_ops;
632 	acomp->dev = kdev;
633 	adev->dm.audio_component = acomp;
634 
635 	return 0;
636 }
637 
638 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
639 					  struct device *hda_kdev, void *data)
640 {
641 	struct drm_device *dev = dev_get_drvdata(kdev);
642 	struct amdgpu_device *adev = dev->dev_private;
643 	struct drm_audio_component *acomp = data;
644 
645 	acomp->ops = NULL;
646 	acomp->dev = NULL;
647 	adev->dm.audio_component = NULL;
648 }
649 
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
651 	.bind	= amdgpu_dm_audio_component_bind,
652 	.unbind	= amdgpu_dm_audio_component_unbind,
653 };
654 
655 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
656 {
657 	int i, ret;
658 
659 	if (!amdgpu_audio)
660 		return 0;
661 
662 	adev->mode_info.audio.enabled = true;
663 
664 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
665 
666 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
667 		adev->mode_info.audio.pin[i].channels = -1;
668 		adev->mode_info.audio.pin[i].rate = -1;
669 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
670 		adev->mode_info.audio.pin[i].status_bits = 0;
671 		adev->mode_info.audio.pin[i].category_code = 0;
672 		adev->mode_info.audio.pin[i].connected = false;
673 		adev->mode_info.audio.pin[i].id =
674 			adev->dm.dc->res_pool->audios[i]->inst;
675 		adev->mode_info.audio.pin[i].offset = 0;
676 	}
677 
678 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
679 	if (ret < 0)
680 		return ret;
681 
682 	adev->dm.audio_registered = true;
683 
684 	return 0;
685 }
686 
687 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
688 {
689 	if (!amdgpu_audio)
690 		return;
691 
692 	if (!adev->mode_info.audio.enabled)
693 		return;
694 
695 	if (adev->dm.audio_registered) {
696 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
697 		adev->dm.audio_registered = false;
698 	}
699 
700 	/* TODO: Disable audio? */
701 
702 	adev->mode_info.audio.enabled = false;
703 }
704 
705 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
706 {
707 	struct drm_audio_component *acomp = adev->dm.audio_component;
708 
709 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
710 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
711 
712 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
713 						 pin, -1);
714 	}
715 }
716 
717 static int dm_dmub_hw_init(struct amdgpu_device *adev)
718 {
719 	const struct dmcub_firmware_header_v1_0 *hdr;
720 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
721 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
722 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
723 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
724 	struct abm *abm = adev->dm.dc->res_pool->abm;
725 	struct dmub_srv_hw_params hw_params;
726 	enum dmub_status status;
727 	const unsigned char *fw_inst_const, *fw_bss_data;
728 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
729 	bool has_hw_support;
730 
731 	if (!dmub_srv)
732 		/* DMUB isn't supported on the ASIC. */
733 		return 0;
734 
735 	if (!fb_info) {
736 		DRM_ERROR("No framebuffer info for DMUB service.\n");
737 		return -EINVAL;
738 	}
739 
740 	if (!dmub_fw) {
741 		/* Firmware required for DMUB support. */
742 		DRM_ERROR("No firmware provided for DMUB.\n");
743 		return -EINVAL;
744 	}
745 
746 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
747 	if (status != DMUB_STATUS_OK) {
748 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
749 		return -EINVAL;
750 	}
751 
752 	if (!has_hw_support) {
753 		DRM_INFO("DMUB unsupported on ASIC\n");
754 		return 0;
755 	}
756 
757 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
758 
759 	fw_inst_const = dmub_fw->data +
760 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
761 			PSP_HEADER_BYTES;
762 
763 	fw_bss_data = dmub_fw->data +
764 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765 		      le32_to_cpu(hdr->inst_const_bytes);
766 
767 	/* Copy firmware and bios info into FB memory. */
768 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
769 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
770 
771 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
772 
773 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
774 	 * amdgpu_ucode_init_single_fw will load dmub firmware
775 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
776 	 * will be done by dm_dmub_hw_init
777 	 */
778 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
779 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
780 				fw_inst_const_size);
781 	}
782 
783 	if (fw_bss_data_size)
784 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
785 		       fw_bss_data, fw_bss_data_size);
786 
787 	/* Copy firmware bios info into FB memory. */
788 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
789 	       adev->bios_size);
790 
791 	/* Reset regions that need to be reset. */
792 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
793 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
794 
795 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
796 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
797 
798 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
799 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
800 
801 	/* Initialize hardware. */
802 	memset(&hw_params, 0, sizeof(hw_params));
803 	hw_params.fb_base = adev->gmc.fb_start;
804 	hw_params.fb_offset = adev->gmc.aper_base;
805 
806 	/* backdoor load firmware and trigger dmub running */
807 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
808 		hw_params.load_inst_const = true;
809 
810 	if (dmcu)
811 		hw_params.psp_version = dmcu->psp_version;
812 
813 	for (i = 0; i < fb_info->num_fb; ++i)
814 		hw_params.fb[i] = &fb_info->fb[i];
815 
816 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
817 	if (status != DMUB_STATUS_OK) {
818 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
819 		return -EINVAL;
820 	}
821 
822 	/* Wait for firmware load to finish. */
823 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
824 	if (status != DMUB_STATUS_OK)
825 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
826 
827 	/* Init DMCU and ABM if available. */
828 	if (dmcu && abm) {
829 		dmcu->funcs->dmcu_init(dmcu);
830 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
831 	}
832 
833 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
834 	if (!adev->dm.dc->ctx->dmub_srv) {
835 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
836 		return -ENOMEM;
837 	}
838 
839 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840 		 adev->dm.dmcub_fw_version);
841 
842 	return 0;
843 }
844 
845 static int amdgpu_dm_init(struct amdgpu_device *adev)
846 {
847 	struct dc_init_data init_data;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849 	struct dc_callback_init init_params;
850 #endif
851 	int r;
852 
853 	adev->dm.ddev = adev->ddev;
854 	adev->dm.adev = adev;
855 
856 	/* Zero all the fields */
857 	memset(&init_data, 0, sizeof(init_data));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859 	memset(&init_params, 0, sizeof(init_params));
860 #endif
861 
862 	mutex_init(&adev->dm.dc_lock);
863 	mutex_init(&adev->dm.audio_lock);
864 
865 	if(amdgpu_dm_irq_init(adev)) {
866 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
867 		goto error;
868 	}
869 
870 	init_data.asic_id.chip_family = adev->family;
871 
872 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
873 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
874 
875 	init_data.asic_id.vram_width = adev->gmc.vram_width;
876 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
877 	init_data.asic_id.atombios_base_address =
878 		adev->mode_info.atom_context->bios;
879 
880 	init_data.driver = adev;
881 
882 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
883 
884 	if (!adev->dm.cgs_device) {
885 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
886 		goto error;
887 	}
888 
889 	init_data.cgs_device = adev->dm.cgs_device;
890 
891 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
892 
893 	switch (adev->asic_type) {
894 	case CHIP_CARRIZO:
895 	case CHIP_STONEY:
896 	case CHIP_RAVEN:
897 	case CHIP_RENOIR:
898 		init_data.flags.gpu_vm_support = true;
899 		break;
900 	default:
901 		break;
902 	}
903 
904 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
905 		init_data.flags.fbc_support = true;
906 
907 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
908 		init_data.flags.multi_mon_pp_mclk_switch = true;
909 
910 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
911 		init_data.flags.disable_fractional_pwm = true;
912 
913 	init_data.flags.power_down_display_on_boot = true;
914 
915 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
916 
917 	/* Display Core create. */
918 	adev->dm.dc = dc_create(&init_data);
919 
920 	if (adev->dm.dc) {
921 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
922 	} else {
923 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
924 		goto error;
925 	}
926 
927 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
928 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
929 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
930 	}
931 
932 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
933 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
934 
935 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
936 		adev->dm.dc->debug.disable_stutter = true;
937 
938 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
939 		adev->dm.dc->debug.disable_dsc = true;
940 
941 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
942 		adev->dm.dc->debug.disable_clock_gate = true;
943 
944 	r = dm_dmub_hw_init(adev);
945 	if (r) {
946 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
947 		goto error;
948 	}
949 
950 	dc_hardware_init(adev->dm.dc);
951 
952 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
953 	if (!adev->dm.freesync_module) {
954 		DRM_ERROR(
955 		"amdgpu: failed to initialize freesync_module.\n");
956 	} else
957 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
958 				adev->dm.freesync_module);
959 
960 	amdgpu_dm_init_color_mod();
961 
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963 	if (adev->asic_type >= CHIP_RAVEN) {
964 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
965 
966 		if (!adev->dm.hdcp_workqueue)
967 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
968 		else
969 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
970 
971 		dc_init_callbacks(adev->dm.dc, &init_params);
972 	}
973 #endif
974 	if (amdgpu_dm_initialize_drm_device(adev)) {
975 		DRM_ERROR(
976 		"amdgpu: failed to initialize sw for display support.\n");
977 		goto error;
978 	}
979 
980 	/* Update the actual used number of crtc */
981 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
982 
983 	/* create fake encoders for MST */
984 	dm_dp_create_fake_mst_encoders(adev);
985 
986 	/* TODO: Add_display_info? */
987 
988 	/* TODO use dynamic cursor width */
989 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
990 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
991 
992 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
993 		DRM_ERROR(
994 		"amdgpu: failed to initialize sw for display support.\n");
995 		goto error;
996 	}
997 
998 	DRM_DEBUG_DRIVER("KMS initialized.\n");
999 
1000 	return 0;
1001 error:
1002 	amdgpu_dm_fini(adev);
1003 
1004 	return -EINVAL;
1005 }
1006 
1007 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1008 {
1009 	int i;
1010 
1011 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1012 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1013 	}
1014 
1015 	amdgpu_dm_audio_fini(adev);
1016 
1017 	amdgpu_dm_destroy_drm_device(&adev->dm);
1018 
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020 	if (adev->dm.hdcp_workqueue) {
1021 		hdcp_destroy(adev->dm.hdcp_workqueue);
1022 		adev->dm.hdcp_workqueue = NULL;
1023 	}
1024 
1025 	if (adev->dm.dc)
1026 		dc_deinit_callbacks(adev->dm.dc);
1027 #endif
1028 	if (adev->dm.dc->ctx->dmub_srv) {
1029 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1030 		adev->dm.dc->ctx->dmub_srv = NULL;
1031 	}
1032 
1033 	if (adev->dm.dmub_bo)
1034 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1035 				      &adev->dm.dmub_bo_gpu_addr,
1036 				      &adev->dm.dmub_bo_cpu_addr);
1037 
1038 	/* DC Destroy TODO: Replace destroy DAL */
1039 	if (adev->dm.dc)
1040 		dc_destroy(&adev->dm.dc);
1041 	/*
1042 	 * TODO: pageflip, vlank interrupt
1043 	 *
1044 	 * amdgpu_dm_irq_fini(adev);
1045 	 */
1046 
1047 	if (adev->dm.cgs_device) {
1048 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1049 		adev->dm.cgs_device = NULL;
1050 	}
1051 	if (adev->dm.freesync_module) {
1052 		mod_freesync_destroy(adev->dm.freesync_module);
1053 		adev->dm.freesync_module = NULL;
1054 	}
1055 
1056 	mutex_destroy(&adev->dm.audio_lock);
1057 	mutex_destroy(&adev->dm.dc_lock);
1058 
1059 	return;
1060 }
1061 
1062 static int load_dmcu_fw(struct amdgpu_device *adev)
1063 {
1064 	const char *fw_name_dmcu = NULL;
1065 	int r;
1066 	const struct dmcu_firmware_header_v1_0 *hdr;
1067 
1068 	switch(adev->asic_type) {
1069 	case CHIP_BONAIRE:
1070 	case CHIP_HAWAII:
1071 	case CHIP_KAVERI:
1072 	case CHIP_KABINI:
1073 	case CHIP_MULLINS:
1074 	case CHIP_TONGA:
1075 	case CHIP_FIJI:
1076 	case CHIP_CARRIZO:
1077 	case CHIP_STONEY:
1078 	case CHIP_POLARIS11:
1079 	case CHIP_POLARIS10:
1080 	case CHIP_POLARIS12:
1081 	case CHIP_VEGAM:
1082 	case CHIP_VEGA10:
1083 	case CHIP_VEGA12:
1084 	case CHIP_VEGA20:
1085 	case CHIP_NAVI10:
1086 	case CHIP_NAVI14:
1087 	case CHIP_RENOIR:
1088 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1089 	case CHIP_SIENNA_CICHLID:
1090 	case CHIP_NAVY_FLOUNDER:
1091 #endif
1092 		return 0;
1093 	case CHIP_NAVI12:
1094 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1095 		break;
1096 	case CHIP_RAVEN:
1097 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1098 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1099 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1100 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1101 		else
1102 			return 0;
1103 		break;
1104 	default:
1105 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1106 		return -EINVAL;
1107 	}
1108 
1109 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1110 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1111 		return 0;
1112 	}
1113 
1114 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1115 	if (r == -ENOENT) {
1116 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1117 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1118 		adev->dm.fw_dmcu = NULL;
1119 		return 0;
1120 	}
1121 	if (r) {
1122 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1123 			fw_name_dmcu);
1124 		return r;
1125 	}
1126 
1127 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1128 	if (r) {
1129 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1130 			fw_name_dmcu);
1131 		release_firmware(adev->dm.fw_dmcu);
1132 		adev->dm.fw_dmcu = NULL;
1133 		return r;
1134 	}
1135 
1136 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1137 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1138 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1139 	adev->firmware.fw_size +=
1140 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1141 
1142 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1143 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1144 	adev->firmware.fw_size +=
1145 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1146 
1147 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1148 
1149 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1150 
1151 	return 0;
1152 }
1153 
1154 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1155 {
1156 	struct amdgpu_device *adev = ctx;
1157 
1158 	return dm_read_reg(adev->dm.dc->ctx, address);
1159 }
1160 
1161 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1162 				     uint32_t value)
1163 {
1164 	struct amdgpu_device *adev = ctx;
1165 
1166 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1167 }
1168 
1169 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1170 {
1171 	struct dmub_srv_create_params create_params;
1172 	struct dmub_srv_region_params region_params;
1173 	struct dmub_srv_region_info region_info;
1174 	struct dmub_srv_fb_params fb_params;
1175 	struct dmub_srv_fb_info *fb_info;
1176 	struct dmub_srv *dmub_srv;
1177 	const struct dmcub_firmware_header_v1_0 *hdr;
1178 	const char *fw_name_dmub;
1179 	enum dmub_asic dmub_asic;
1180 	enum dmub_status status;
1181 	int r;
1182 
1183 	switch (adev->asic_type) {
1184 	case CHIP_RENOIR:
1185 		dmub_asic = DMUB_ASIC_DCN21;
1186 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1187 		break;
1188 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1189 	case CHIP_SIENNA_CICHLID:
1190 		dmub_asic = DMUB_ASIC_DCN30;
1191 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1192 		break;
1193 	case CHIP_NAVY_FLOUNDER:
1194 		dmub_asic = DMUB_ASIC_DCN30;
1195 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1196 		break;
1197 #endif
1198 
1199 	default:
1200 		/* ASIC doesn't support DMUB. */
1201 		return 0;
1202 	}
1203 
1204 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1205 	if (r) {
1206 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1207 		return 0;
1208 	}
1209 
1210 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1211 	if (r) {
1212 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1213 		return 0;
1214 	}
1215 
1216 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1217 
1218 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1219 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1220 			AMDGPU_UCODE_ID_DMCUB;
1221 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1222 			adev->dm.dmub_fw;
1223 		adev->firmware.fw_size +=
1224 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1225 
1226 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1227 			 adev->dm.dmcub_fw_version);
1228 	}
1229 
1230 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1231 
1232 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1233 	dmub_srv = adev->dm.dmub_srv;
1234 
1235 	if (!dmub_srv) {
1236 		DRM_ERROR("Failed to allocate DMUB service!\n");
1237 		return -ENOMEM;
1238 	}
1239 
1240 	memset(&create_params, 0, sizeof(create_params));
1241 	create_params.user_ctx = adev;
1242 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1243 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1244 	create_params.asic = dmub_asic;
1245 
1246 	/* Create the DMUB service. */
1247 	status = dmub_srv_create(dmub_srv, &create_params);
1248 	if (status != DMUB_STATUS_OK) {
1249 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1250 		return -EINVAL;
1251 	}
1252 
1253 	/* Calculate the size of all the regions for the DMUB service. */
1254 	memset(&region_params, 0, sizeof(region_params));
1255 
1256 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1257 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1258 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1259 	region_params.vbios_size = adev->bios_size;
1260 	region_params.fw_bss_data = region_params.bss_data_size ?
1261 		adev->dm.dmub_fw->data +
1262 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1263 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1264 	region_params.fw_inst_const =
1265 		adev->dm.dmub_fw->data +
1266 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267 		PSP_HEADER_BYTES;
1268 
1269 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270 					   &region_info);
1271 
1272 	if (status != DMUB_STATUS_OK) {
1273 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274 		return -EINVAL;
1275 	}
1276 
1277 	/*
1278 	 * Allocate a framebuffer based on the total size of all the regions.
1279 	 * TODO: Move this into GART.
1280 	 */
1281 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283 				    &adev->dm.dmub_bo_gpu_addr,
1284 				    &adev->dm.dmub_bo_cpu_addr);
1285 	if (r)
1286 		return r;
1287 
1288 	/* Rebase the regions on the framebuffer address. */
1289 	memset(&fb_params, 0, sizeof(fb_params));
1290 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292 	fb_params.region_info = &region_info;
1293 
1294 	adev->dm.dmub_fb_info =
1295 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296 	fb_info = adev->dm.dmub_fb_info;
1297 
1298 	if (!fb_info) {
1299 		DRM_ERROR(
1300 			"Failed to allocate framebuffer info for DMUB service!\n");
1301 		return -ENOMEM;
1302 	}
1303 
1304 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305 	if (status != DMUB_STATUS_OK) {
1306 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307 		return -EINVAL;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static int dm_sw_init(void *handle)
1314 {
1315 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316 	int r;
1317 
1318 	r = dm_dmub_sw_init(adev);
1319 	if (r)
1320 		return r;
1321 
1322 	return load_dmcu_fw(adev);
1323 }
1324 
1325 static int dm_sw_fini(void *handle)
1326 {
1327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 
1329 	kfree(adev->dm.dmub_fb_info);
1330 	adev->dm.dmub_fb_info = NULL;
1331 
1332 	if (adev->dm.dmub_srv) {
1333 		dmub_srv_destroy(adev->dm.dmub_srv);
1334 		adev->dm.dmub_srv = NULL;
1335 	}
1336 
1337 	release_firmware(adev->dm.dmub_fw);
1338 	adev->dm.dmub_fw = NULL;
1339 
1340 	release_firmware(adev->dm.fw_dmcu);
1341 	adev->dm.fw_dmcu = NULL;
1342 
1343 	return 0;
1344 }
1345 
1346 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1347 {
1348 	struct amdgpu_dm_connector *aconnector;
1349 	struct drm_connector *connector;
1350 	struct drm_connector_list_iter iter;
1351 	int ret = 0;
1352 
1353 	drm_connector_list_iter_begin(dev, &iter);
1354 	drm_for_each_connector_iter(connector, &iter) {
1355 		aconnector = to_amdgpu_dm_connector(connector);
1356 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1357 		    aconnector->mst_mgr.aux) {
1358 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1359 					 aconnector,
1360 					 aconnector->base.base.id);
1361 
1362 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1363 			if (ret < 0) {
1364 				DRM_ERROR("DM_MST: Failed to start MST\n");
1365 				aconnector->dc_link->type =
1366 					dc_connection_single;
1367 				break;
1368 			}
1369 		}
1370 	}
1371 	drm_connector_list_iter_end(&iter);
1372 
1373 	return ret;
1374 }
1375 
1376 static int dm_late_init(void *handle)
1377 {
1378 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1379 
1380 	struct dmcu_iram_parameters params;
1381 	unsigned int linear_lut[16];
1382 	int i;
1383 	struct dmcu *dmcu = NULL;
1384 	bool ret = true;
1385 
1386 	if (!adev->dm.fw_dmcu)
1387 		return detect_mst_link_for_all_connectors(adev->ddev);
1388 
1389 	dmcu = adev->dm.dc->res_pool->dmcu;
1390 
1391 	for (i = 0; i < 16; i++)
1392 		linear_lut[i] = 0xFFFF * i / 15;
1393 
1394 	params.set = 0;
1395 	params.backlight_ramping_start = 0xCCCC;
1396 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1397 	params.backlight_lut_array_size = 16;
1398 	params.backlight_lut_array = linear_lut;
1399 
1400 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1401 	 * 0xFFFF x 0.01 = 0x28F
1402 	 */
1403 	params.min_abm_backlight = 0x28F;
1404 
1405 	/* In the case where abm is implemented on dmcub,
1406 	 * dmcu object will be null.
1407 	 * ABM 2.4 and up are implemented on dmcub.
1408 	 */
1409 	if (dmcu)
1410 		ret = dmcu_load_iram(dmcu, params);
1411 	else if (adev->dm.dc->ctx->dmub_srv)
1412 		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1413 
1414 	if (!ret)
1415 		return -EINVAL;
1416 
1417 	return detect_mst_link_for_all_connectors(adev->ddev);
1418 }
1419 
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1421 {
1422 	struct amdgpu_dm_connector *aconnector;
1423 	struct drm_connector *connector;
1424 	struct drm_connector_list_iter iter;
1425 	struct drm_dp_mst_topology_mgr *mgr;
1426 	int ret;
1427 	bool need_hotplug = false;
1428 
1429 	drm_connector_list_iter_begin(dev, &iter);
1430 	drm_for_each_connector_iter(connector, &iter) {
1431 		aconnector = to_amdgpu_dm_connector(connector);
1432 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433 		    aconnector->mst_port)
1434 			continue;
1435 
1436 		mgr = &aconnector->mst_mgr;
1437 
1438 		if (suspend) {
1439 			drm_dp_mst_topology_mgr_suspend(mgr);
1440 		} else {
1441 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1442 			if (ret < 0) {
1443 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444 				need_hotplug = true;
1445 			}
1446 		}
1447 	}
1448 	drm_connector_list_iter_end(&iter);
1449 
1450 	if (need_hotplug)
1451 		drm_kms_helper_hotplug_event(dev);
1452 }
1453 
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1455 {
1456 	struct smu_context *smu = &adev->smu;
1457 	int ret = 0;
1458 
1459 	if (!is_support_sw_smu(adev))
1460 		return 0;
1461 
1462 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1463 	 * on window driver dc implementation.
1464 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1465 	 * should be passed to smu during boot up and resume from s3.
1466 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1467 	 * dcn20_resource_construct
1468 	 * then call pplib functions below to pass the settings to smu:
1469 	 * smu_set_watermarks_for_clock_ranges
1470 	 * smu_set_watermarks_table
1471 	 * navi10_set_watermarks_table
1472 	 * smu_write_watermarks_table
1473 	 *
1474 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1475 	 * dc has implemented different flow for window driver:
1476 	 * dc_hardware_init / dc_set_power_state
1477 	 * dcn10_init_hw
1478 	 * notify_wm_ranges
1479 	 * set_wm_ranges
1480 	 * -- Linux
1481 	 * smu_set_watermarks_for_clock_ranges
1482 	 * renoir_set_watermarks_table
1483 	 * smu_write_watermarks_table
1484 	 *
1485 	 * For Linux,
1486 	 * dc_hardware_init -> amdgpu_dm_init
1487 	 * dc_set_power_state --> dm_resume
1488 	 *
1489 	 * therefore, this function apply to navi10/12/14 but not Renoir
1490 	 * *
1491 	 */
1492 	switch(adev->asic_type) {
1493 	case CHIP_NAVI10:
1494 	case CHIP_NAVI14:
1495 	case CHIP_NAVI12:
1496 		break;
1497 	default:
1498 		return 0;
1499 	}
1500 
1501 	ret = smu_write_watermarks_table(smu);
1502 	if (ret) {
1503 		DRM_ERROR("Failed to update WMTABLE!\n");
1504 		return ret;
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 /**
1511  * dm_hw_init() - Initialize DC device
1512  * @handle: The base driver device containing the amdgpu_dm device.
1513  *
1514  * Initialize the &struct amdgpu_display_manager device. This involves calling
1515  * the initializers of each DM component, then populating the struct with them.
1516  *
1517  * Although the function implies hardware initialization, both hardware and
1518  * software are initialized here. Splitting them out to their relevant init
1519  * hooks is a future TODO item.
1520  *
1521  * Some notable things that are initialized here:
1522  *
1523  * - Display Core, both software and hardware
1524  * - DC modules that we need (freesync and color management)
1525  * - DRM software states
1526  * - Interrupt sources and handlers
1527  * - Vblank support
1528  * - Debug FS entries, if enabled
1529  */
1530 static int dm_hw_init(void *handle)
1531 {
1532 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1533 	/* Create DAL display manager */
1534 	amdgpu_dm_init(adev);
1535 	amdgpu_dm_hpd_init(adev);
1536 
1537 	return 0;
1538 }
1539 
1540 /**
1541  * dm_hw_fini() - Teardown DC device
1542  * @handle: The base driver device containing the amdgpu_dm device.
1543  *
1544  * Teardown components within &struct amdgpu_display_manager that require
1545  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1546  * were loaded. Also flush IRQ workqueues and disable them.
1547  */
1548 static int dm_hw_fini(void *handle)
1549 {
1550 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1551 
1552 	amdgpu_dm_hpd_fini(adev);
1553 
1554 	amdgpu_dm_irq_fini(adev);
1555 	amdgpu_dm_fini(adev);
1556 	return 0;
1557 }
1558 
1559 
1560 static int dm_enable_vblank(struct drm_crtc *crtc);
1561 static void dm_disable_vblank(struct drm_crtc *crtc);
1562 
1563 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1564 				 struct dc_state *state, bool enable)
1565 {
1566 	enum dc_irq_source irq_source;
1567 	struct amdgpu_crtc *acrtc;
1568 	int rc = -EBUSY;
1569 	int i = 0;
1570 
1571 	for (i = 0; i < state->stream_count; i++) {
1572 		acrtc = get_crtc_by_otg_inst(
1573 				adev, state->stream_status[i].primary_otg_inst);
1574 
1575 		if (acrtc && state->stream_status[i].plane_count != 0) {
1576 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1577 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1578 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1579 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1580 			if (rc)
1581 				DRM_WARN("Failed to %s pflip interrupts\n",
1582 					 enable ? "enable" : "disable");
1583 
1584 			if (enable) {
1585 				rc = dm_enable_vblank(&acrtc->base);
1586 				if (rc)
1587 					DRM_WARN("Failed to enable vblank interrupts\n");
1588 			} else {
1589 				dm_disable_vblank(&acrtc->base);
1590 			}
1591 
1592 		}
1593 	}
1594 
1595 }
1596 
1597 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1598 {
1599 	struct dc_state *context = NULL;
1600 	enum dc_status res = DC_ERROR_UNEXPECTED;
1601 	int i;
1602 	struct dc_stream_state *del_streams[MAX_PIPES];
1603 	int del_streams_count = 0;
1604 
1605 	memset(del_streams, 0, sizeof(del_streams));
1606 
1607 	context = dc_create_state(dc);
1608 	if (context == NULL)
1609 		goto context_alloc_fail;
1610 
1611 	dc_resource_state_copy_construct_current(dc, context);
1612 
1613 	/* First remove from context all streams */
1614 	for (i = 0; i < context->stream_count; i++) {
1615 		struct dc_stream_state *stream = context->streams[i];
1616 
1617 		del_streams[del_streams_count++] = stream;
1618 	}
1619 
1620 	/* Remove all planes for removed streams and then remove the streams */
1621 	for (i = 0; i < del_streams_count; i++) {
1622 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1623 			res = DC_FAIL_DETACH_SURFACES;
1624 			goto fail;
1625 		}
1626 
1627 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1628 		if (res != DC_OK)
1629 			goto fail;
1630 	}
1631 
1632 
1633 	res = dc_validate_global_state(dc, context, false);
1634 
1635 	if (res != DC_OK) {
1636 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1637 		goto fail;
1638 	}
1639 
1640 	res = dc_commit_state(dc, context);
1641 
1642 fail:
1643 	dc_release_state(context);
1644 
1645 context_alloc_fail:
1646 	return res;
1647 }
1648 
1649 static int dm_suspend(void *handle)
1650 {
1651 	struct amdgpu_device *adev = handle;
1652 	struct amdgpu_display_manager *dm = &adev->dm;
1653 	int ret = 0;
1654 
1655 	if (amdgpu_in_reset(adev)) {
1656 		mutex_lock(&dm->dc_lock);
1657 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1658 
1659 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1660 
1661 		amdgpu_dm_commit_zero_streams(dm->dc);
1662 
1663 		amdgpu_dm_irq_suspend(adev);
1664 
1665 		return ret;
1666 	}
1667 
1668 	WARN_ON(adev->dm.cached_state);
1669 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1670 
1671 	s3_handle_mst(adev->ddev, true);
1672 
1673 	amdgpu_dm_irq_suspend(adev);
1674 
1675 
1676 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1677 
1678 	return 0;
1679 }
1680 
1681 static struct amdgpu_dm_connector *
1682 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1683 					     struct drm_crtc *crtc)
1684 {
1685 	uint32_t i;
1686 	struct drm_connector_state *new_con_state;
1687 	struct drm_connector *connector;
1688 	struct drm_crtc *crtc_from_state;
1689 
1690 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1691 		crtc_from_state = new_con_state->crtc;
1692 
1693 		if (crtc_from_state == crtc)
1694 			return to_amdgpu_dm_connector(connector);
1695 	}
1696 
1697 	return NULL;
1698 }
1699 
1700 static void emulated_link_detect(struct dc_link *link)
1701 {
1702 	struct dc_sink_init_data sink_init_data = { 0 };
1703 	struct display_sink_capability sink_caps = { 0 };
1704 	enum dc_edid_status edid_status;
1705 	struct dc_context *dc_ctx = link->ctx;
1706 	struct dc_sink *sink = NULL;
1707 	struct dc_sink *prev_sink = NULL;
1708 
1709 	link->type = dc_connection_none;
1710 	prev_sink = link->local_sink;
1711 
1712 	if (prev_sink != NULL)
1713 		dc_sink_retain(prev_sink);
1714 
1715 	switch (link->connector_signal) {
1716 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1717 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1718 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1719 		break;
1720 	}
1721 
1722 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1723 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1725 		break;
1726 	}
1727 
1728 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1729 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1731 		break;
1732 	}
1733 
1734 	case SIGNAL_TYPE_LVDS: {
1735 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1737 		break;
1738 	}
1739 
1740 	case SIGNAL_TYPE_EDP: {
1741 		sink_caps.transaction_type =
1742 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1743 		sink_caps.signal = SIGNAL_TYPE_EDP;
1744 		break;
1745 	}
1746 
1747 	case SIGNAL_TYPE_DISPLAY_PORT: {
1748 		sink_caps.transaction_type =
1749 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1750 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1751 		break;
1752 	}
1753 
1754 	default:
1755 		DC_ERROR("Invalid connector type! signal:%d\n",
1756 			link->connector_signal);
1757 		return;
1758 	}
1759 
1760 	sink_init_data.link = link;
1761 	sink_init_data.sink_signal = sink_caps.signal;
1762 
1763 	sink = dc_sink_create(&sink_init_data);
1764 	if (!sink) {
1765 		DC_ERROR("Failed to create sink!\n");
1766 		return;
1767 	}
1768 
1769 	/* dc_sink_create returns a new reference */
1770 	link->local_sink = sink;
1771 
1772 	edid_status = dm_helpers_read_local_edid(
1773 			link->ctx,
1774 			link,
1775 			sink);
1776 
1777 	if (edid_status != EDID_OK)
1778 		DC_ERROR("Failed to read EDID");
1779 
1780 }
1781 
1782 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1783 				     struct amdgpu_display_manager *dm)
1784 {
1785 	struct {
1786 		struct dc_surface_update surface_updates[MAX_SURFACES];
1787 		struct dc_plane_info plane_infos[MAX_SURFACES];
1788 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1789 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1790 		struct dc_stream_update stream_update;
1791 	} * bundle;
1792 	int k, m;
1793 
1794 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1795 
1796 	if (!bundle) {
1797 		dm_error("Failed to allocate update bundle\n");
1798 		goto cleanup;
1799 	}
1800 
1801 	for (k = 0; k < dc_state->stream_count; k++) {
1802 		bundle->stream_update.stream = dc_state->streams[k];
1803 
1804 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1805 			bundle->surface_updates[m].surface =
1806 				dc_state->stream_status->plane_states[m];
1807 			bundle->surface_updates[m].surface->force_full_update =
1808 				true;
1809 		}
1810 		dc_commit_updates_for_stream(
1811 			dm->dc, bundle->surface_updates,
1812 			dc_state->stream_status->plane_count,
1813 			dc_state->streams[k], &bundle->stream_update, dc_state);
1814 	}
1815 
1816 cleanup:
1817 	kfree(bundle);
1818 
1819 	return;
1820 }
1821 
1822 static int dm_resume(void *handle)
1823 {
1824 	struct amdgpu_device *adev = handle;
1825 	struct drm_device *ddev = adev->ddev;
1826 	struct amdgpu_display_manager *dm = &adev->dm;
1827 	struct amdgpu_dm_connector *aconnector;
1828 	struct drm_connector *connector;
1829 	struct drm_connector_list_iter iter;
1830 	struct drm_crtc *crtc;
1831 	struct drm_crtc_state *new_crtc_state;
1832 	struct dm_crtc_state *dm_new_crtc_state;
1833 	struct drm_plane *plane;
1834 	struct drm_plane_state *new_plane_state;
1835 	struct dm_plane_state *dm_new_plane_state;
1836 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1837 	enum dc_connection_type new_connection_type = dc_connection_none;
1838 	struct dc_state *dc_state;
1839 	int i, r, j;
1840 
1841 	if (amdgpu_in_reset(adev)) {
1842 		dc_state = dm->cached_dc_state;
1843 
1844 		r = dm_dmub_hw_init(adev);
1845 		if (r)
1846 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1847 
1848 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1849 		dc_resume(dm->dc);
1850 
1851 		amdgpu_dm_irq_resume_early(adev);
1852 
1853 		for (i = 0; i < dc_state->stream_count; i++) {
1854 			dc_state->streams[i]->mode_changed = true;
1855 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
1856 				dc_state->stream_status->plane_states[j]->update_flags.raw
1857 					= 0xffffffff;
1858 			}
1859 		}
1860 
1861 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1862 
1863 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1864 
1865 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1866 
1867 		dc_release_state(dm->cached_dc_state);
1868 		dm->cached_dc_state = NULL;
1869 
1870 		amdgpu_dm_irq_resume_late(adev);
1871 
1872 		mutex_unlock(&dm->dc_lock);
1873 
1874 		return 0;
1875 	}
1876 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1877 	dc_release_state(dm_state->context);
1878 	dm_state->context = dc_create_state(dm->dc);
1879 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1880 	dc_resource_state_construct(dm->dc, dm_state->context);
1881 
1882 	/* Before powering on DC we need to re-initialize DMUB. */
1883 	r = dm_dmub_hw_init(adev);
1884 	if (r)
1885 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1886 
1887 	/* power on hardware */
1888 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1889 
1890 	/* program HPD filter */
1891 	dc_resume(dm->dc);
1892 
1893 	/*
1894 	 * early enable HPD Rx IRQ, should be done before set mode as short
1895 	 * pulse interrupts are used for MST
1896 	 */
1897 	amdgpu_dm_irq_resume_early(adev);
1898 
1899 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1900 	s3_handle_mst(ddev, false);
1901 
1902 	/* Do detection*/
1903 	drm_connector_list_iter_begin(ddev, &iter);
1904 	drm_for_each_connector_iter(connector, &iter) {
1905 		aconnector = to_amdgpu_dm_connector(connector);
1906 
1907 		/*
1908 		 * this is the case when traversing through already created
1909 		 * MST connectors, should be skipped
1910 		 */
1911 		if (aconnector->mst_port)
1912 			continue;
1913 
1914 		mutex_lock(&aconnector->hpd_lock);
1915 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1916 			DRM_ERROR("KMS: Failed to detect connector\n");
1917 
1918 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1919 			emulated_link_detect(aconnector->dc_link);
1920 		else
1921 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1922 
1923 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1924 			aconnector->fake_enable = false;
1925 
1926 		if (aconnector->dc_sink)
1927 			dc_sink_release(aconnector->dc_sink);
1928 		aconnector->dc_sink = NULL;
1929 		amdgpu_dm_update_connector_after_detect(aconnector);
1930 		mutex_unlock(&aconnector->hpd_lock);
1931 	}
1932 	drm_connector_list_iter_end(&iter);
1933 
1934 	/* Force mode set in atomic commit */
1935 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1936 		new_crtc_state->active_changed = true;
1937 
1938 	/*
1939 	 * atomic_check is expected to create the dc states. We need to release
1940 	 * them here, since they were duplicated as part of the suspend
1941 	 * procedure.
1942 	 */
1943 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1944 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1945 		if (dm_new_crtc_state->stream) {
1946 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1947 			dc_stream_release(dm_new_crtc_state->stream);
1948 			dm_new_crtc_state->stream = NULL;
1949 		}
1950 	}
1951 
1952 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1953 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1954 		if (dm_new_plane_state->dc_state) {
1955 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1956 			dc_plane_state_release(dm_new_plane_state->dc_state);
1957 			dm_new_plane_state->dc_state = NULL;
1958 		}
1959 	}
1960 
1961 	drm_atomic_helper_resume(ddev, dm->cached_state);
1962 
1963 	dm->cached_state = NULL;
1964 
1965 	amdgpu_dm_irq_resume_late(adev);
1966 
1967 	amdgpu_dm_smu_write_watermarks_table(adev);
1968 
1969 	return 0;
1970 }
1971 
1972 /**
1973  * DOC: DM Lifecycle
1974  *
1975  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1976  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1977  * the base driver's device list to be initialized and torn down accordingly.
1978  *
1979  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1980  */
1981 
1982 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1983 	.name = "dm",
1984 	.early_init = dm_early_init,
1985 	.late_init = dm_late_init,
1986 	.sw_init = dm_sw_init,
1987 	.sw_fini = dm_sw_fini,
1988 	.hw_init = dm_hw_init,
1989 	.hw_fini = dm_hw_fini,
1990 	.suspend = dm_suspend,
1991 	.resume = dm_resume,
1992 	.is_idle = dm_is_idle,
1993 	.wait_for_idle = dm_wait_for_idle,
1994 	.check_soft_reset = dm_check_soft_reset,
1995 	.soft_reset = dm_soft_reset,
1996 	.set_clockgating_state = dm_set_clockgating_state,
1997 	.set_powergating_state = dm_set_powergating_state,
1998 };
1999 
2000 const struct amdgpu_ip_block_version dm_ip_block =
2001 {
2002 	.type = AMD_IP_BLOCK_TYPE_DCE,
2003 	.major = 1,
2004 	.minor = 0,
2005 	.rev = 0,
2006 	.funcs = &amdgpu_dm_funcs,
2007 };
2008 
2009 
2010 /**
2011  * DOC: atomic
2012  *
2013  * *WIP*
2014  */
2015 
2016 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2017 	.fb_create = amdgpu_display_user_framebuffer_create,
2018 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2019 	.atomic_check = amdgpu_dm_atomic_check,
2020 	.atomic_commit = amdgpu_dm_atomic_commit,
2021 };
2022 
2023 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2024 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2025 };
2026 
2027 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2028 {
2029 	u32 max_cll, min_cll, max, min, q, r;
2030 	struct amdgpu_dm_backlight_caps *caps;
2031 	struct amdgpu_display_manager *dm;
2032 	struct drm_connector *conn_base;
2033 	struct amdgpu_device *adev;
2034 	struct dc_link *link = NULL;
2035 	static const u8 pre_computed_values[] = {
2036 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2037 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2038 
2039 	if (!aconnector || !aconnector->dc_link)
2040 		return;
2041 
2042 	link = aconnector->dc_link;
2043 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2044 		return;
2045 
2046 	conn_base = &aconnector->base;
2047 	adev = conn_base->dev->dev_private;
2048 	dm = &adev->dm;
2049 	caps = &dm->backlight_caps;
2050 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2051 	caps->aux_support = false;
2052 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2053 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2054 
2055 	if (caps->ext_caps->bits.oled == 1 ||
2056 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2057 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2058 		caps->aux_support = true;
2059 
2060 	/* From the specification (CTA-861-G), for calculating the maximum
2061 	 * luminance we need to use:
2062 	 *	Luminance = 50*2**(CV/32)
2063 	 * Where CV is a one-byte value.
2064 	 * For calculating this expression we may need float point precision;
2065 	 * to avoid this complexity level, we take advantage that CV is divided
2066 	 * by a constant. From the Euclids division algorithm, we know that CV
2067 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2068 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2069 	 * need to pre-compute the value of r/32. For pre-computing the values
2070 	 * We just used the following Ruby line:
2071 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2072 	 * The results of the above expressions can be verified at
2073 	 * pre_computed_values.
2074 	 */
2075 	q = max_cll >> 5;
2076 	r = max_cll % 32;
2077 	max = (1 << q) * pre_computed_values[r];
2078 
2079 	// min luminance: maxLum * (CV/255)^2 / 100
2080 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2081 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2082 
2083 	caps->aux_max_input_signal = max;
2084 	caps->aux_min_input_signal = min;
2085 }
2086 
2087 void amdgpu_dm_update_connector_after_detect(
2088 		struct amdgpu_dm_connector *aconnector)
2089 {
2090 	struct drm_connector *connector = &aconnector->base;
2091 	struct drm_device *dev = connector->dev;
2092 	struct dc_sink *sink;
2093 
2094 	/* MST handled by drm_mst framework */
2095 	if (aconnector->mst_mgr.mst_state == true)
2096 		return;
2097 
2098 
2099 	sink = aconnector->dc_link->local_sink;
2100 	if (sink)
2101 		dc_sink_retain(sink);
2102 
2103 	/*
2104 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2105 	 * the connector sink is set to either fake or physical sink depends on link status.
2106 	 * Skip if already done during boot.
2107 	 */
2108 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2109 			&& aconnector->dc_em_sink) {
2110 
2111 		/*
2112 		 * For S3 resume with headless use eml_sink to fake stream
2113 		 * because on resume connector->sink is set to NULL
2114 		 */
2115 		mutex_lock(&dev->mode_config.mutex);
2116 
2117 		if (sink) {
2118 			if (aconnector->dc_sink) {
2119 				amdgpu_dm_update_freesync_caps(connector, NULL);
2120 				/*
2121 				 * retain and release below are used to
2122 				 * bump up refcount for sink because the link doesn't point
2123 				 * to it anymore after disconnect, so on next crtc to connector
2124 				 * reshuffle by UMD we will get into unwanted dc_sink release
2125 				 */
2126 				dc_sink_release(aconnector->dc_sink);
2127 			}
2128 			aconnector->dc_sink = sink;
2129 			dc_sink_retain(aconnector->dc_sink);
2130 			amdgpu_dm_update_freesync_caps(connector,
2131 					aconnector->edid);
2132 		} else {
2133 			amdgpu_dm_update_freesync_caps(connector, NULL);
2134 			if (!aconnector->dc_sink) {
2135 				aconnector->dc_sink = aconnector->dc_em_sink;
2136 				dc_sink_retain(aconnector->dc_sink);
2137 			}
2138 		}
2139 
2140 		mutex_unlock(&dev->mode_config.mutex);
2141 
2142 		if (sink)
2143 			dc_sink_release(sink);
2144 		return;
2145 	}
2146 
2147 	/*
2148 	 * TODO: temporary guard to look for proper fix
2149 	 * if this sink is MST sink, we should not do anything
2150 	 */
2151 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2152 		dc_sink_release(sink);
2153 		return;
2154 	}
2155 
2156 	if (aconnector->dc_sink == sink) {
2157 		/*
2158 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2159 		 * Do nothing!!
2160 		 */
2161 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2162 				aconnector->connector_id);
2163 		if (sink)
2164 			dc_sink_release(sink);
2165 		return;
2166 	}
2167 
2168 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2169 		aconnector->connector_id, aconnector->dc_sink, sink);
2170 
2171 	mutex_lock(&dev->mode_config.mutex);
2172 
2173 	/*
2174 	 * 1. Update status of the drm connector
2175 	 * 2. Send an event and let userspace tell us what to do
2176 	 */
2177 	if (sink) {
2178 		/*
2179 		 * TODO: check if we still need the S3 mode update workaround.
2180 		 * If yes, put it here.
2181 		 */
2182 		if (aconnector->dc_sink)
2183 			amdgpu_dm_update_freesync_caps(connector, NULL);
2184 
2185 		aconnector->dc_sink = sink;
2186 		dc_sink_retain(aconnector->dc_sink);
2187 		if (sink->dc_edid.length == 0) {
2188 			aconnector->edid = NULL;
2189 			if (aconnector->dc_link->aux_mode) {
2190 				drm_dp_cec_unset_edid(
2191 					&aconnector->dm_dp_aux.aux);
2192 			}
2193 		} else {
2194 			aconnector->edid =
2195 				(struct edid *)sink->dc_edid.raw_edid;
2196 
2197 			drm_connector_update_edid_property(connector,
2198 							   aconnector->edid);
2199 
2200 			if (aconnector->dc_link->aux_mode)
2201 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2202 						    aconnector->edid);
2203 		}
2204 
2205 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2206 		update_connector_ext_caps(aconnector);
2207 	} else {
2208 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2209 		amdgpu_dm_update_freesync_caps(connector, NULL);
2210 		drm_connector_update_edid_property(connector, NULL);
2211 		aconnector->num_modes = 0;
2212 		dc_sink_release(aconnector->dc_sink);
2213 		aconnector->dc_sink = NULL;
2214 		aconnector->edid = NULL;
2215 #ifdef CONFIG_DRM_AMD_DC_HDCP
2216 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2217 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2218 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2219 #endif
2220 	}
2221 
2222 	mutex_unlock(&dev->mode_config.mutex);
2223 
2224 	if (sink)
2225 		dc_sink_release(sink);
2226 }
2227 
2228 static void handle_hpd_irq(void *param)
2229 {
2230 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2231 	struct drm_connector *connector = &aconnector->base;
2232 	struct drm_device *dev = connector->dev;
2233 	enum dc_connection_type new_connection_type = dc_connection_none;
2234 #ifdef CONFIG_DRM_AMD_DC_HDCP
2235 	struct amdgpu_device *adev = dev->dev_private;
2236 #endif
2237 
2238 	/*
2239 	 * In case of failure or MST no need to update connector status or notify the OS
2240 	 * since (for MST case) MST does this in its own context.
2241 	 */
2242 	mutex_lock(&aconnector->hpd_lock);
2243 
2244 #ifdef CONFIG_DRM_AMD_DC_HDCP
2245 	if (adev->dm.hdcp_workqueue)
2246 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2247 #endif
2248 	if (aconnector->fake_enable)
2249 		aconnector->fake_enable = false;
2250 
2251 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2252 		DRM_ERROR("KMS: Failed to detect connector\n");
2253 
2254 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2255 		emulated_link_detect(aconnector->dc_link);
2256 
2257 
2258 		drm_modeset_lock_all(dev);
2259 		dm_restore_drm_connector_state(dev, connector);
2260 		drm_modeset_unlock_all(dev);
2261 
2262 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2263 			drm_kms_helper_hotplug_event(dev);
2264 
2265 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2266 		amdgpu_dm_update_connector_after_detect(aconnector);
2267 
2268 
2269 		drm_modeset_lock_all(dev);
2270 		dm_restore_drm_connector_state(dev, connector);
2271 		drm_modeset_unlock_all(dev);
2272 
2273 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2274 			drm_kms_helper_hotplug_event(dev);
2275 	}
2276 	mutex_unlock(&aconnector->hpd_lock);
2277 
2278 }
2279 
2280 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2281 {
2282 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2283 	uint8_t dret;
2284 	bool new_irq_handled = false;
2285 	int dpcd_addr;
2286 	int dpcd_bytes_to_read;
2287 
2288 	const int max_process_count = 30;
2289 	int process_count = 0;
2290 
2291 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2292 
2293 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2294 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2295 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2296 		dpcd_addr = DP_SINK_COUNT;
2297 	} else {
2298 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2299 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2300 		dpcd_addr = DP_SINK_COUNT_ESI;
2301 	}
2302 
2303 	dret = drm_dp_dpcd_read(
2304 		&aconnector->dm_dp_aux.aux,
2305 		dpcd_addr,
2306 		esi,
2307 		dpcd_bytes_to_read);
2308 
2309 	while (dret == dpcd_bytes_to_read &&
2310 		process_count < max_process_count) {
2311 		uint8_t retry;
2312 		dret = 0;
2313 
2314 		process_count++;
2315 
2316 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2317 		/* handle HPD short pulse irq */
2318 		if (aconnector->mst_mgr.mst_state)
2319 			drm_dp_mst_hpd_irq(
2320 				&aconnector->mst_mgr,
2321 				esi,
2322 				&new_irq_handled);
2323 
2324 		if (new_irq_handled) {
2325 			/* ACK at DPCD to notify down stream */
2326 			const int ack_dpcd_bytes_to_write =
2327 				dpcd_bytes_to_read - 1;
2328 
2329 			for (retry = 0; retry < 3; retry++) {
2330 				uint8_t wret;
2331 
2332 				wret = drm_dp_dpcd_write(
2333 					&aconnector->dm_dp_aux.aux,
2334 					dpcd_addr + 1,
2335 					&esi[1],
2336 					ack_dpcd_bytes_to_write);
2337 				if (wret == ack_dpcd_bytes_to_write)
2338 					break;
2339 			}
2340 
2341 			/* check if there is new irq to be handled */
2342 			dret = drm_dp_dpcd_read(
2343 				&aconnector->dm_dp_aux.aux,
2344 				dpcd_addr,
2345 				esi,
2346 				dpcd_bytes_to_read);
2347 
2348 			new_irq_handled = false;
2349 		} else {
2350 			break;
2351 		}
2352 	}
2353 
2354 	if (process_count == max_process_count)
2355 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2356 }
2357 
2358 static void handle_hpd_rx_irq(void *param)
2359 {
2360 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2361 	struct drm_connector *connector = &aconnector->base;
2362 	struct drm_device *dev = connector->dev;
2363 	struct dc_link *dc_link = aconnector->dc_link;
2364 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2365 	enum dc_connection_type new_connection_type = dc_connection_none;
2366 #ifdef CONFIG_DRM_AMD_DC_HDCP
2367 	union hpd_irq_data hpd_irq_data;
2368 	struct amdgpu_device *adev = dev->dev_private;
2369 
2370 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2371 #endif
2372 
2373 	/*
2374 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2375 	 * conflict, after implement i2c helper, this mutex should be
2376 	 * retired.
2377 	 */
2378 	if (dc_link->type != dc_connection_mst_branch)
2379 		mutex_lock(&aconnector->hpd_lock);
2380 
2381 
2382 #ifdef CONFIG_DRM_AMD_DC_HDCP
2383 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2384 #else
2385 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2386 #endif
2387 			!is_mst_root_connector) {
2388 		/* Downstream Port status changed. */
2389 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2390 			DRM_ERROR("KMS: Failed to detect connector\n");
2391 
2392 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2393 			emulated_link_detect(dc_link);
2394 
2395 			if (aconnector->fake_enable)
2396 				aconnector->fake_enable = false;
2397 
2398 			amdgpu_dm_update_connector_after_detect(aconnector);
2399 
2400 
2401 			drm_modeset_lock_all(dev);
2402 			dm_restore_drm_connector_state(dev, connector);
2403 			drm_modeset_unlock_all(dev);
2404 
2405 			drm_kms_helper_hotplug_event(dev);
2406 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2407 
2408 			if (aconnector->fake_enable)
2409 				aconnector->fake_enable = false;
2410 
2411 			amdgpu_dm_update_connector_after_detect(aconnector);
2412 
2413 
2414 			drm_modeset_lock_all(dev);
2415 			dm_restore_drm_connector_state(dev, connector);
2416 			drm_modeset_unlock_all(dev);
2417 
2418 			drm_kms_helper_hotplug_event(dev);
2419 		}
2420 	}
2421 #ifdef CONFIG_DRM_AMD_DC_HDCP
2422 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2423 		if (adev->dm.hdcp_workqueue)
2424 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2425 	}
2426 #endif
2427 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2428 	    (dc_link->type == dc_connection_mst_branch))
2429 		dm_handle_hpd_rx_irq(aconnector);
2430 
2431 	if (dc_link->type != dc_connection_mst_branch) {
2432 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2433 		mutex_unlock(&aconnector->hpd_lock);
2434 	}
2435 }
2436 
2437 static void register_hpd_handlers(struct amdgpu_device *adev)
2438 {
2439 	struct drm_device *dev = adev->ddev;
2440 	struct drm_connector *connector;
2441 	struct amdgpu_dm_connector *aconnector;
2442 	const struct dc_link *dc_link;
2443 	struct dc_interrupt_params int_params = {0};
2444 
2445 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2446 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2447 
2448 	list_for_each_entry(connector,
2449 			&dev->mode_config.connector_list, head)	{
2450 
2451 		aconnector = to_amdgpu_dm_connector(connector);
2452 		dc_link = aconnector->dc_link;
2453 
2454 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2455 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2456 			int_params.irq_source = dc_link->irq_source_hpd;
2457 
2458 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2459 					handle_hpd_irq,
2460 					(void *) aconnector);
2461 		}
2462 
2463 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2464 
2465 			/* Also register for DP short pulse (hpd_rx). */
2466 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2467 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2468 
2469 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2470 					handle_hpd_rx_irq,
2471 					(void *) aconnector);
2472 		}
2473 	}
2474 }
2475 
2476 /* Register IRQ sources and initialize IRQ callbacks */
2477 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2478 {
2479 	struct dc *dc = adev->dm.dc;
2480 	struct common_irq_params *c_irq_params;
2481 	struct dc_interrupt_params int_params = {0};
2482 	int r;
2483 	int i;
2484 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2485 
2486 	if (adev->asic_type >= CHIP_VEGA10)
2487 		client_id = SOC15_IH_CLIENTID_DCE;
2488 
2489 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2490 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2491 
2492 	/*
2493 	 * Actions of amdgpu_irq_add_id():
2494 	 * 1. Register a set() function with base driver.
2495 	 *    Base driver will call set() function to enable/disable an
2496 	 *    interrupt in DC hardware.
2497 	 * 2. Register amdgpu_dm_irq_handler().
2498 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2499 	 *    coming from DC hardware.
2500 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2501 	 *    for acknowledging and handling. */
2502 
2503 	/* Use VBLANK interrupt */
2504 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2505 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2506 		if (r) {
2507 			DRM_ERROR("Failed to add crtc irq id!\n");
2508 			return r;
2509 		}
2510 
2511 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2512 		int_params.irq_source =
2513 			dc_interrupt_to_irq_source(dc, i, 0);
2514 
2515 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2516 
2517 		c_irq_params->adev = adev;
2518 		c_irq_params->irq_src = int_params.irq_source;
2519 
2520 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2521 				dm_crtc_high_irq, c_irq_params);
2522 	}
2523 
2524 	/* Use VUPDATE interrupt */
2525 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2526 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2527 		if (r) {
2528 			DRM_ERROR("Failed to add vupdate irq id!\n");
2529 			return r;
2530 		}
2531 
2532 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2533 		int_params.irq_source =
2534 			dc_interrupt_to_irq_source(dc, i, 0);
2535 
2536 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2537 
2538 		c_irq_params->adev = adev;
2539 		c_irq_params->irq_src = int_params.irq_source;
2540 
2541 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2542 				dm_vupdate_high_irq, c_irq_params);
2543 	}
2544 
2545 	/* Use GRPH_PFLIP interrupt */
2546 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2547 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2548 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2549 		if (r) {
2550 			DRM_ERROR("Failed to add page flip irq id!\n");
2551 			return r;
2552 		}
2553 
2554 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2555 		int_params.irq_source =
2556 			dc_interrupt_to_irq_source(dc, i, 0);
2557 
2558 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2559 
2560 		c_irq_params->adev = adev;
2561 		c_irq_params->irq_src = int_params.irq_source;
2562 
2563 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2564 				dm_pflip_high_irq, c_irq_params);
2565 
2566 	}
2567 
2568 	/* HPD */
2569 	r = amdgpu_irq_add_id(adev, client_id,
2570 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2571 	if (r) {
2572 		DRM_ERROR("Failed to add hpd irq id!\n");
2573 		return r;
2574 	}
2575 
2576 	register_hpd_handlers(adev);
2577 
2578 	return 0;
2579 }
2580 
2581 #if defined(CONFIG_DRM_AMD_DC_DCN)
2582 /* Register IRQ sources and initialize IRQ callbacks */
2583 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2584 {
2585 	struct dc *dc = adev->dm.dc;
2586 	struct common_irq_params *c_irq_params;
2587 	struct dc_interrupt_params int_params = {0};
2588 	int r;
2589 	int i;
2590 
2591 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2592 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2593 
2594 	/*
2595 	 * Actions of amdgpu_irq_add_id():
2596 	 * 1. Register a set() function with base driver.
2597 	 *    Base driver will call set() function to enable/disable an
2598 	 *    interrupt in DC hardware.
2599 	 * 2. Register amdgpu_dm_irq_handler().
2600 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2601 	 *    coming from DC hardware.
2602 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2603 	 *    for acknowledging and handling.
2604 	 */
2605 
2606 	/* Use VSTARTUP interrupt */
2607 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2608 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2609 			i++) {
2610 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2611 
2612 		if (r) {
2613 			DRM_ERROR("Failed to add crtc irq id!\n");
2614 			return r;
2615 		}
2616 
2617 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2618 		int_params.irq_source =
2619 			dc_interrupt_to_irq_source(dc, i, 0);
2620 
2621 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2622 
2623 		c_irq_params->adev = adev;
2624 		c_irq_params->irq_src = int_params.irq_source;
2625 
2626 		amdgpu_dm_irq_register_interrupt(
2627 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2628 	}
2629 
2630 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2631 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2632 	 * to trigger at end of each vblank, regardless of state of the lock,
2633 	 * matching DCE behaviour.
2634 	 */
2635 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2636 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2637 	     i++) {
2638 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2639 
2640 		if (r) {
2641 			DRM_ERROR("Failed to add vupdate irq id!\n");
2642 			return r;
2643 		}
2644 
2645 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2646 		int_params.irq_source =
2647 			dc_interrupt_to_irq_source(dc, i, 0);
2648 
2649 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2650 
2651 		c_irq_params->adev = adev;
2652 		c_irq_params->irq_src = int_params.irq_source;
2653 
2654 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2655 				dm_vupdate_high_irq, c_irq_params);
2656 	}
2657 
2658 	/* Use GRPH_PFLIP interrupt */
2659 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2660 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2661 			i++) {
2662 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2663 		if (r) {
2664 			DRM_ERROR("Failed to add page flip irq id!\n");
2665 			return r;
2666 		}
2667 
2668 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2669 		int_params.irq_source =
2670 			dc_interrupt_to_irq_source(dc, i, 0);
2671 
2672 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2673 
2674 		c_irq_params->adev = adev;
2675 		c_irq_params->irq_src = int_params.irq_source;
2676 
2677 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2678 				dm_pflip_high_irq, c_irq_params);
2679 
2680 	}
2681 
2682 	/* HPD */
2683 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2684 			&adev->hpd_irq);
2685 	if (r) {
2686 		DRM_ERROR("Failed to add hpd irq id!\n");
2687 		return r;
2688 	}
2689 
2690 	register_hpd_handlers(adev);
2691 
2692 	return 0;
2693 }
2694 #endif
2695 
2696 /*
2697  * Acquires the lock for the atomic state object and returns
2698  * the new atomic state.
2699  *
2700  * This should only be called during atomic check.
2701  */
2702 static int dm_atomic_get_state(struct drm_atomic_state *state,
2703 			       struct dm_atomic_state **dm_state)
2704 {
2705 	struct drm_device *dev = state->dev;
2706 	struct amdgpu_device *adev = dev->dev_private;
2707 	struct amdgpu_display_manager *dm = &adev->dm;
2708 	struct drm_private_state *priv_state;
2709 
2710 	if (*dm_state)
2711 		return 0;
2712 
2713 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2714 	if (IS_ERR(priv_state))
2715 		return PTR_ERR(priv_state);
2716 
2717 	*dm_state = to_dm_atomic_state(priv_state);
2718 
2719 	return 0;
2720 }
2721 
2722 static struct dm_atomic_state *
2723 dm_atomic_get_new_state(struct drm_atomic_state *state)
2724 {
2725 	struct drm_device *dev = state->dev;
2726 	struct amdgpu_device *adev = dev->dev_private;
2727 	struct amdgpu_display_manager *dm = &adev->dm;
2728 	struct drm_private_obj *obj;
2729 	struct drm_private_state *new_obj_state;
2730 	int i;
2731 
2732 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2733 		if (obj->funcs == dm->atomic_obj.funcs)
2734 			return to_dm_atomic_state(new_obj_state);
2735 	}
2736 
2737 	return NULL;
2738 }
2739 
2740 static struct dm_atomic_state *
2741 dm_atomic_get_old_state(struct drm_atomic_state *state)
2742 {
2743 	struct drm_device *dev = state->dev;
2744 	struct amdgpu_device *adev = dev->dev_private;
2745 	struct amdgpu_display_manager *dm = &adev->dm;
2746 	struct drm_private_obj *obj;
2747 	struct drm_private_state *old_obj_state;
2748 	int i;
2749 
2750 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2751 		if (obj->funcs == dm->atomic_obj.funcs)
2752 			return to_dm_atomic_state(old_obj_state);
2753 	}
2754 
2755 	return NULL;
2756 }
2757 
2758 static struct drm_private_state *
2759 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2760 {
2761 	struct dm_atomic_state *old_state, *new_state;
2762 
2763 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2764 	if (!new_state)
2765 		return NULL;
2766 
2767 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2768 
2769 	old_state = to_dm_atomic_state(obj->state);
2770 
2771 	if (old_state && old_state->context)
2772 		new_state->context = dc_copy_state(old_state->context);
2773 
2774 	if (!new_state->context) {
2775 		kfree(new_state);
2776 		return NULL;
2777 	}
2778 
2779 	return &new_state->base;
2780 }
2781 
2782 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2783 				    struct drm_private_state *state)
2784 {
2785 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2786 
2787 	if (dm_state && dm_state->context)
2788 		dc_release_state(dm_state->context);
2789 
2790 	kfree(dm_state);
2791 }
2792 
2793 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2794 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2795 	.atomic_destroy_state = dm_atomic_destroy_state,
2796 };
2797 
2798 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2799 {
2800 	struct dm_atomic_state *state;
2801 	int r;
2802 
2803 	adev->mode_info.mode_config_initialized = true;
2804 
2805 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2806 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2807 
2808 	adev->ddev->mode_config.max_width = 16384;
2809 	adev->ddev->mode_config.max_height = 16384;
2810 
2811 	adev->ddev->mode_config.preferred_depth = 24;
2812 	adev->ddev->mode_config.prefer_shadow = 1;
2813 	/* indicates support for immediate flip */
2814 	adev->ddev->mode_config.async_page_flip = true;
2815 
2816 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2817 
2818 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2819 	if (!state)
2820 		return -ENOMEM;
2821 
2822 	state->context = dc_create_state(adev->dm.dc);
2823 	if (!state->context) {
2824 		kfree(state);
2825 		return -ENOMEM;
2826 	}
2827 
2828 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2829 
2830 	drm_atomic_private_obj_init(adev->ddev,
2831 				    &adev->dm.atomic_obj,
2832 				    &state->base,
2833 				    &dm_atomic_state_funcs);
2834 
2835 	r = amdgpu_display_modeset_create_props(adev);
2836 	if (r)
2837 		return r;
2838 
2839 	r = amdgpu_dm_audio_init(adev);
2840 	if (r)
2841 		return r;
2842 
2843 	return 0;
2844 }
2845 
2846 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2847 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2848 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2849 
2850 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2851 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2852 
2853 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2854 {
2855 #if defined(CONFIG_ACPI)
2856 	struct amdgpu_dm_backlight_caps caps;
2857 
2858 	if (dm->backlight_caps.caps_valid)
2859 		return;
2860 
2861 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2862 	if (caps.caps_valid) {
2863 		dm->backlight_caps.caps_valid = true;
2864 		if (caps.aux_support)
2865 			return;
2866 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2867 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2868 	} else {
2869 		dm->backlight_caps.min_input_signal =
2870 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2871 		dm->backlight_caps.max_input_signal =
2872 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2873 	}
2874 #else
2875 	if (dm->backlight_caps.aux_support)
2876 		return;
2877 
2878 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2879 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2880 #endif
2881 }
2882 
2883 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2884 {
2885 	bool rc;
2886 
2887 	if (!link)
2888 		return 1;
2889 
2890 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2891 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2892 
2893 	return rc ? 0 : 1;
2894 }
2895 
2896 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2897 			      const uint32_t user_brightness)
2898 {
2899 	u32 min, max, conversion_pace;
2900 	u32 brightness = user_brightness;
2901 
2902 	if (!caps)
2903 		goto out;
2904 
2905 	if (!caps->aux_support) {
2906 		max = caps->max_input_signal;
2907 		min = caps->min_input_signal;
2908 		/*
2909 		 * The brightness input is in the range 0-255
2910 		 * It needs to be rescaled to be between the
2911 		 * requested min and max input signal
2912 		 * It also needs to be scaled up by 0x101 to
2913 		 * match the DC interface which has a range of
2914 		 * 0 to 0xffff
2915 		 */
2916 		conversion_pace = 0x101;
2917 		brightness =
2918 			user_brightness
2919 			* conversion_pace
2920 			* (max - min)
2921 			/ AMDGPU_MAX_BL_LEVEL
2922 			+ min * conversion_pace;
2923 	} else {
2924 		/* TODO
2925 		 * We are doing a linear interpolation here, which is OK but
2926 		 * does not provide the optimal result. We probably want
2927 		 * something close to the Perceptual Quantizer (PQ) curve.
2928 		 */
2929 		max = caps->aux_max_input_signal;
2930 		min = caps->aux_min_input_signal;
2931 
2932 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2933 			       + user_brightness * max;
2934 		// Multiple the value by 1000 since we use millinits
2935 		brightness *= 1000;
2936 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2937 	}
2938 
2939 out:
2940 	return brightness;
2941 }
2942 
2943 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2944 {
2945 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2946 	struct amdgpu_dm_backlight_caps caps;
2947 	struct dc_link *link = NULL;
2948 	u32 brightness;
2949 	bool rc;
2950 
2951 	amdgpu_dm_update_backlight_caps(dm);
2952 	caps = dm->backlight_caps;
2953 
2954 	link = (struct dc_link *)dm->backlight_link;
2955 
2956 	brightness = convert_brightness(&caps, bd->props.brightness);
2957 	// Change brightness based on AUX property
2958 	if (caps.aux_support)
2959 		return set_backlight_via_aux(link, brightness);
2960 
2961 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2962 
2963 	return rc ? 0 : 1;
2964 }
2965 
2966 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2967 {
2968 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2969 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2970 
2971 	if (ret == DC_ERROR_UNEXPECTED)
2972 		return bd->props.brightness;
2973 	return ret;
2974 }
2975 
2976 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2977 	.options = BL_CORE_SUSPENDRESUME,
2978 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2979 	.update_status	= amdgpu_dm_backlight_update_status,
2980 };
2981 
2982 static void
2983 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2984 {
2985 	char bl_name[16];
2986 	struct backlight_properties props = { 0 };
2987 
2988 	amdgpu_dm_update_backlight_caps(dm);
2989 
2990 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2991 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2992 	props.type = BACKLIGHT_RAW;
2993 
2994 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2995 			dm->adev->ddev->primary->index);
2996 
2997 	dm->backlight_dev = backlight_device_register(bl_name,
2998 			dm->adev->ddev->dev,
2999 			dm,
3000 			&amdgpu_dm_backlight_ops,
3001 			&props);
3002 
3003 	if (IS_ERR(dm->backlight_dev))
3004 		DRM_ERROR("DM: Backlight registration failed!\n");
3005 	else
3006 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3007 }
3008 
3009 #endif
3010 
3011 static int initialize_plane(struct amdgpu_display_manager *dm,
3012 			    struct amdgpu_mode_info *mode_info, int plane_id,
3013 			    enum drm_plane_type plane_type,
3014 			    const struct dc_plane_cap *plane_cap)
3015 {
3016 	struct drm_plane *plane;
3017 	unsigned long possible_crtcs;
3018 	int ret = 0;
3019 
3020 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3021 	if (!plane) {
3022 		DRM_ERROR("KMS: Failed to allocate plane\n");
3023 		return -ENOMEM;
3024 	}
3025 	plane->type = plane_type;
3026 
3027 	/*
3028 	 * HACK: IGT tests expect that the primary plane for a CRTC
3029 	 * can only have one possible CRTC. Only expose support for
3030 	 * any CRTC if they're not going to be used as a primary plane
3031 	 * for a CRTC - like overlay or underlay planes.
3032 	 */
3033 	possible_crtcs = 1 << plane_id;
3034 	if (plane_id >= dm->dc->caps.max_streams)
3035 		possible_crtcs = 0xff;
3036 
3037 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3038 
3039 	if (ret) {
3040 		DRM_ERROR("KMS: Failed to initialize plane\n");
3041 		kfree(plane);
3042 		return ret;
3043 	}
3044 
3045 	if (mode_info)
3046 		mode_info->planes[plane_id] = plane;
3047 
3048 	return ret;
3049 }
3050 
3051 
3052 static void register_backlight_device(struct amdgpu_display_manager *dm,
3053 				      struct dc_link *link)
3054 {
3055 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3056 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3057 
3058 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3059 	    link->type != dc_connection_none) {
3060 		/*
3061 		 * Event if registration failed, we should continue with
3062 		 * DM initialization because not having a backlight control
3063 		 * is better then a black screen.
3064 		 */
3065 		amdgpu_dm_register_backlight_device(dm);
3066 
3067 		if (dm->backlight_dev)
3068 			dm->backlight_link = link;
3069 	}
3070 #endif
3071 }
3072 
3073 
3074 /*
3075  * In this architecture, the association
3076  * connector -> encoder -> crtc
3077  * id not really requried. The crtc and connector will hold the
3078  * display_index as an abstraction to use with DAL component
3079  *
3080  * Returns 0 on success
3081  */
3082 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3083 {
3084 	struct amdgpu_display_manager *dm = &adev->dm;
3085 	int32_t i;
3086 	struct amdgpu_dm_connector *aconnector = NULL;
3087 	struct amdgpu_encoder *aencoder = NULL;
3088 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3089 	uint32_t link_cnt;
3090 	int32_t primary_planes;
3091 	enum dc_connection_type new_connection_type = dc_connection_none;
3092 	const struct dc_plane_cap *plane;
3093 
3094 	link_cnt = dm->dc->caps.max_links;
3095 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3096 		DRM_ERROR("DM: Failed to initialize mode config\n");
3097 		return -EINVAL;
3098 	}
3099 
3100 	/* There is one primary plane per CRTC */
3101 	primary_planes = dm->dc->caps.max_streams;
3102 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3103 
3104 	/*
3105 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3106 	 * Order is reversed to match iteration order in atomic check.
3107 	 */
3108 	for (i = (primary_planes - 1); i >= 0; i--) {
3109 		plane = &dm->dc->caps.planes[i];
3110 
3111 		if (initialize_plane(dm, mode_info, i,
3112 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3113 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3114 			goto fail;
3115 		}
3116 	}
3117 
3118 	/*
3119 	 * Initialize overlay planes, index starting after primary planes.
3120 	 * These planes have a higher DRM index than the primary planes since
3121 	 * they should be considered as having a higher z-order.
3122 	 * Order is reversed to match iteration order in atomic check.
3123 	 *
3124 	 * Only support DCN for now, and only expose one so we don't encourage
3125 	 * userspace to use up all the pipes.
3126 	 */
3127 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3128 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3129 
3130 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3131 			continue;
3132 
3133 		if (!plane->blends_with_above || !plane->blends_with_below)
3134 			continue;
3135 
3136 		if (!plane->pixel_format_support.argb8888)
3137 			continue;
3138 
3139 		if (initialize_plane(dm, NULL, primary_planes + i,
3140 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3141 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3142 			goto fail;
3143 		}
3144 
3145 		/* Only create one overlay plane. */
3146 		break;
3147 	}
3148 
3149 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3150 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3151 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3152 			goto fail;
3153 		}
3154 
3155 	dm->display_indexes_num = dm->dc->caps.max_streams;
3156 
3157 	/* loops over all connectors on the board */
3158 	for (i = 0; i < link_cnt; i++) {
3159 		struct dc_link *link = NULL;
3160 
3161 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3162 			DRM_ERROR(
3163 				"KMS: Cannot support more than %d display indexes\n",
3164 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3165 			continue;
3166 		}
3167 
3168 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3169 		if (!aconnector)
3170 			goto fail;
3171 
3172 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3173 		if (!aencoder)
3174 			goto fail;
3175 
3176 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3177 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3178 			goto fail;
3179 		}
3180 
3181 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3182 			DRM_ERROR("KMS: Failed to initialize connector\n");
3183 			goto fail;
3184 		}
3185 
3186 		link = dc_get_link_at_index(dm->dc, i);
3187 
3188 		if (!dc_link_detect_sink(link, &new_connection_type))
3189 			DRM_ERROR("KMS: Failed to detect connector\n");
3190 
3191 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3192 			emulated_link_detect(link);
3193 			amdgpu_dm_update_connector_after_detect(aconnector);
3194 
3195 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3196 			amdgpu_dm_update_connector_after_detect(aconnector);
3197 			register_backlight_device(dm, link);
3198 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3199 				amdgpu_dm_set_psr_caps(link);
3200 		}
3201 
3202 
3203 	}
3204 
3205 	/* Software is initialized. Now we can register interrupt handlers. */
3206 	switch (adev->asic_type) {
3207 	case CHIP_BONAIRE:
3208 	case CHIP_HAWAII:
3209 	case CHIP_KAVERI:
3210 	case CHIP_KABINI:
3211 	case CHIP_MULLINS:
3212 	case CHIP_TONGA:
3213 	case CHIP_FIJI:
3214 	case CHIP_CARRIZO:
3215 	case CHIP_STONEY:
3216 	case CHIP_POLARIS11:
3217 	case CHIP_POLARIS10:
3218 	case CHIP_POLARIS12:
3219 	case CHIP_VEGAM:
3220 	case CHIP_VEGA10:
3221 	case CHIP_VEGA12:
3222 	case CHIP_VEGA20:
3223 		if (dce110_register_irq_handlers(dm->adev)) {
3224 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3225 			goto fail;
3226 		}
3227 		break;
3228 #if defined(CONFIG_DRM_AMD_DC_DCN)
3229 	case CHIP_RAVEN:
3230 	case CHIP_NAVI12:
3231 	case CHIP_NAVI10:
3232 	case CHIP_NAVI14:
3233 	case CHIP_RENOIR:
3234 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3235 	case CHIP_SIENNA_CICHLID:
3236 	case CHIP_NAVY_FLOUNDER:
3237 #endif
3238 		if (dcn10_register_irq_handlers(dm->adev)) {
3239 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3240 			goto fail;
3241 		}
3242 		break;
3243 #endif
3244 	default:
3245 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3246 		goto fail;
3247 	}
3248 
3249 	/* No userspace support. */
3250 	dm->dc->debug.disable_tri_buf = true;
3251 
3252 	return 0;
3253 fail:
3254 	kfree(aencoder);
3255 	kfree(aconnector);
3256 
3257 	return -EINVAL;
3258 }
3259 
3260 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3261 {
3262 	drm_mode_config_cleanup(dm->ddev);
3263 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3264 	return;
3265 }
3266 
3267 /******************************************************************************
3268  * amdgpu_display_funcs functions
3269  *****************************************************************************/
3270 
3271 /*
3272  * dm_bandwidth_update - program display watermarks
3273  *
3274  * @adev: amdgpu_device pointer
3275  *
3276  * Calculate and program the display watermarks and line buffer allocation.
3277  */
3278 static void dm_bandwidth_update(struct amdgpu_device *adev)
3279 {
3280 	/* TODO: implement later */
3281 }
3282 
3283 static const struct amdgpu_display_funcs dm_display_funcs = {
3284 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3285 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3286 	.backlight_set_level = NULL, /* never called for DC */
3287 	.backlight_get_level = NULL, /* never called for DC */
3288 	.hpd_sense = NULL,/* called unconditionally */
3289 	.hpd_set_polarity = NULL, /* called unconditionally */
3290 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3291 	.page_flip_get_scanoutpos =
3292 		dm_crtc_get_scanoutpos,/* called unconditionally */
3293 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3294 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3295 };
3296 
3297 #if defined(CONFIG_DEBUG_KERNEL_DC)
3298 
3299 static ssize_t s3_debug_store(struct device *device,
3300 			      struct device_attribute *attr,
3301 			      const char *buf,
3302 			      size_t count)
3303 {
3304 	int ret;
3305 	int s3_state;
3306 	struct drm_device *drm_dev = dev_get_drvdata(device);
3307 	struct amdgpu_device *adev = drm_dev->dev_private;
3308 
3309 	ret = kstrtoint(buf, 0, &s3_state);
3310 
3311 	if (ret == 0) {
3312 		if (s3_state) {
3313 			dm_resume(adev);
3314 			drm_kms_helper_hotplug_event(adev->ddev);
3315 		} else
3316 			dm_suspend(adev);
3317 	}
3318 
3319 	return ret == 0 ? count : 0;
3320 }
3321 
3322 DEVICE_ATTR_WO(s3_debug);
3323 
3324 #endif
3325 
3326 static int dm_early_init(void *handle)
3327 {
3328 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3329 
3330 	switch (adev->asic_type) {
3331 	case CHIP_BONAIRE:
3332 	case CHIP_HAWAII:
3333 		adev->mode_info.num_crtc = 6;
3334 		adev->mode_info.num_hpd = 6;
3335 		adev->mode_info.num_dig = 6;
3336 		break;
3337 	case CHIP_KAVERI:
3338 		adev->mode_info.num_crtc = 4;
3339 		adev->mode_info.num_hpd = 6;
3340 		adev->mode_info.num_dig = 7;
3341 		break;
3342 	case CHIP_KABINI:
3343 	case CHIP_MULLINS:
3344 		adev->mode_info.num_crtc = 2;
3345 		adev->mode_info.num_hpd = 6;
3346 		adev->mode_info.num_dig = 6;
3347 		break;
3348 	case CHIP_FIJI:
3349 	case CHIP_TONGA:
3350 		adev->mode_info.num_crtc = 6;
3351 		adev->mode_info.num_hpd = 6;
3352 		adev->mode_info.num_dig = 7;
3353 		break;
3354 	case CHIP_CARRIZO:
3355 		adev->mode_info.num_crtc = 3;
3356 		adev->mode_info.num_hpd = 6;
3357 		adev->mode_info.num_dig = 9;
3358 		break;
3359 	case CHIP_STONEY:
3360 		adev->mode_info.num_crtc = 2;
3361 		adev->mode_info.num_hpd = 6;
3362 		adev->mode_info.num_dig = 9;
3363 		break;
3364 	case CHIP_POLARIS11:
3365 	case CHIP_POLARIS12:
3366 		adev->mode_info.num_crtc = 5;
3367 		adev->mode_info.num_hpd = 5;
3368 		adev->mode_info.num_dig = 5;
3369 		break;
3370 	case CHIP_POLARIS10:
3371 	case CHIP_VEGAM:
3372 		adev->mode_info.num_crtc = 6;
3373 		adev->mode_info.num_hpd = 6;
3374 		adev->mode_info.num_dig = 6;
3375 		break;
3376 	case CHIP_VEGA10:
3377 	case CHIP_VEGA12:
3378 	case CHIP_VEGA20:
3379 		adev->mode_info.num_crtc = 6;
3380 		adev->mode_info.num_hpd = 6;
3381 		adev->mode_info.num_dig = 6;
3382 		break;
3383 #if defined(CONFIG_DRM_AMD_DC_DCN)
3384 	case CHIP_RAVEN:
3385 		adev->mode_info.num_crtc = 4;
3386 		adev->mode_info.num_hpd = 4;
3387 		adev->mode_info.num_dig = 4;
3388 		break;
3389 #endif
3390 	case CHIP_NAVI10:
3391 	case CHIP_NAVI12:
3392 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3393 	case CHIP_SIENNA_CICHLID:
3394 	case CHIP_NAVY_FLOUNDER:
3395 #endif
3396 		adev->mode_info.num_crtc = 6;
3397 		adev->mode_info.num_hpd = 6;
3398 		adev->mode_info.num_dig = 6;
3399 		break;
3400 	case CHIP_NAVI14:
3401 		adev->mode_info.num_crtc = 5;
3402 		adev->mode_info.num_hpd = 5;
3403 		adev->mode_info.num_dig = 5;
3404 		break;
3405 	case CHIP_RENOIR:
3406 		adev->mode_info.num_crtc = 4;
3407 		adev->mode_info.num_hpd = 4;
3408 		adev->mode_info.num_dig = 4;
3409 		break;
3410 	default:
3411 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3412 		return -EINVAL;
3413 	}
3414 
3415 	amdgpu_dm_set_irq_funcs(adev);
3416 
3417 	if (adev->mode_info.funcs == NULL)
3418 		adev->mode_info.funcs = &dm_display_funcs;
3419 
3420 	/*
3421 	 * Note: Do NOT change adev->audio_endpt_rreg and
3422 	 * adev->audio_endpt_wreg because they are initialised in
3423 	 * amdgpu_device_init()
3424 	 */
3425 #if defined(CONFIG_DEBUG_KERNEL_DC)
3426 	device_create_file(
3427 		adev->ddev->dev,
3428 		&dev_attr_s3_debug);
3429 #endif
3430 
3431 	return 0;
3432 }
3433 
3434 static bool modeset_required(struct drm_crtc_state *crtc_state,
3435 			     struct dc_stream_state *new_stream,
3436 			     struct dc_stream_state *old_stream)
3437 {
3438 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3439 }
3440 
3441 static bool modereset_required(struct drm_crtc_state *crtc_state)
3442 {
3443 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3444 }
3445 
3446 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3447 {
3448 	drm_encoder_cleanup(encoder);
3449 	kfree(encoder);
3450 }
3451 
3452 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3453 	.destroy = amdgpu_dm_encoder_destroy,
3454 };
3455 
3456 
3457 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3458 				struct dc_scaling_info *scaling_info)
3459 {
3460 	int scale_w, scale_h;
3461 
3462 	memset(scaling_info, 0, sizeof(*scaling_info));
3463 
3464 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3465 	scaling_info->src_rect.x = state->src_x >> 16;
3466 	scaling_info->src_rect.y = state->src_y >> 16;
3467 
3468 	scaling_info->src_rect.width = state->src_w >> 16;
3469 	if (scaling_info->src_rect.width == 0)
3470 		return -EINVAL;
3471 
3472 	scaling_info->src_rect.height = state->src_h >> 16;
3473 	if (scaling_info->src_rect.height == 0)
3474 		return -EINVAL;
3475 
3476 	scaling_info->dst_rect.x = state->crtc_x;
3477 	scaling_info->dst_rect.y = state->crtc_y;
3478 
3479 	if (state->crtc_w == 0)
3480 		return -EINVAL;
3481 
3482 	scaling_info->dst_rect.width = state->crtc_w;
3483 
3484 	if (state->crtc_h == 0)
3485 		return -EINVAL;
3486 
3487 	scaling_info->dst_rect.height = state->crtc_h;
3488 
3489 	/* DRM doesn't specify clipping on destination output. */
3490 	scaling_info->clip_rect = scaling_info->dst_rect;
3491 
3492 	/* TODO: Validate scaling per-format with DC plane caps */
3493 	scale_w = scaling_info->dst_rect.width * 1000 /
3494 		  scaling_info->src_rect.width;
3495 
3496 	if (scale_w < 250 || scale_w > 16000)
3497 		return -EINVAL;
3498 
3499 	scale_h = scaling_info->dst_rect.height * 1000 /
3500 		  scaling_info->src_rect.height;
3501 
3502 	if (scale_h < 250 || scale_h > 16000)
3503 		return -EINVAL;
3504 
3505 	/*
3506 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3507 	 * assume reasonable defaults based on the format.
3508 	 */
3509 
3510 	return 0;
3511 }
3512 
3513 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3514 		       uint64_t *tiling_flags, bool *tmz_surface)
3515 {
3516 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3517 	int r = amdgpu_bo_reserve(rbo, false);
3518 
3519 	if (unlikely(r)) {
3520 		/* Don't show error message when returning -ERESTARTSYS */
3521 		if (r != -ERESTARTSYS)
3522 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3523 		return r;
3524 	}
3525 
3526 	if (tiling_flags)
3527 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3528 
3529 	if (tmz_surface)
3530 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3531 
3532 	amdgpu_bo_unreserve(rbo);
3533 
3534 	return r;
3535 }
3536 
3537 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3538 {
3539 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3540 
3541 	return offset ? (address + offset * 256) : 0;
3542 }
3543 
3544 static int
3545 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3546 			  const struct amdgpu_framebuffer *afb,
3547 			  const enum surface_pixel_format format,
3548 			  const enum dc_rotation_angle rotation,
3549 			  const struct plane_size *plane_size,
3550 			  const union dc_tiling_info *tiling_info,
3551 			  const uint64_t info,
3552 			  struct dc_plane_dcc_param *dcc,
3553 			  struct dc_plane_address *address,
3554 			  bool force_disable_dcc)
3555 {
3556 	struct dc *dc = adev->dm.dc;
3557 	struct dc_dcc_surface_param input;
3558 	struct dc_surface_dcc_cap output;
3559 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3560 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3561 	uint64_t dcc_address;
3562 
3563 	memset(&input, 0, sizeof(input));
3564 	memset(&output, 0, sizeof(output));
3565 
3566 	if (force_disable_dcc)
3567 		return 0;
3568 
3569 	if (!offset)
3570 		return 0;
3571 
3572 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3573 		return 0;
3574 
3575 	if (!dc->cap_funcs.get_dcc_compression_cap)
3576 		return -EINVAL;
3577 
3578 	input.format = format;
3579 	input.surface_size.width = plane_size->surface_size.width;
3580 	input.surface_size.height = plane_size->surface_size.height;
3581 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3582 
3583 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3584 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3585 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3586 		input.scan = SCAN_DIRECTION_VERTICAL;
3587 
3588 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3589 		return -EINVAL;
3590 
3591 	if (!output.capable)
3592 		return -EINVAL;
3593 
3594 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3595 		return -EINVAL;
3596 
3597 	dcc->enable = 1;
3598 	dcc->meta_pitch =
3599 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3600 	dcc->independent_64b_blks = i64b;
3601 
3602 	dcc_address = get_dcc_address(afb->address, info);
3603 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3604 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3605 
3606 	return 0;
3607 }
3608 
3609 static int
3610 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3611 			     const struct amdgpu_framebuffer *afb,
3612 			     const enum surface_pixel_format format,
3613 			     const enum dc_rotation_angle rotation,
3614 			     const uint64_t tiling_flags,
3615 			     union dc_tiling_info *tiling_info,
3616 			     struct plane_size *plane_size,
3617 			     struct dc_plane_dcc_param *dcc,
3618 			     struct dc_plane_address *address,
3619 			     bool tmz_surface,
3620 			     bool force_disable_dcc)
3621 {
3622 	const struct drm_framebuffer *fb = &afb->base;
3623 	int ret;
3624 
3625 	memset(tiling_info, 0, sizeof(*tiling_info));
3626 	memset(plane_size, 0, sizeof(*plane_size));
3627 	memset(dcc, 0, sizeof(*dcc));
3628 	memset(address, 0, sizeof(*address));
3629 
3630 	address->tmz_surface = tmz_surface;
3631 
3632 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3633 		plane_size->surface_size.x = 0;
3634 		plane_size->surface_size.y = 0;
3635 		plane_size->surface_size.width = fb->width;
3636 		plane_size->surface_size.height = fb->height;
3637 		plane_size->surface_pitch =
3638 			fb->pitches[0] / fb->format->cpp[0];
3639 
3640 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3641 		address->grph.addr.low_part = lower_32_bits(afb->address);
3642 		address->grph.addr.high_part = upper_32_bits(afb->address);
3643 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3644 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3645 
3646 		plane_size->surface_size.x = 0;
3647 		plane_size->surface_size.y = 0;
3648 		plane_size->surface_size.width = fb->width;
3649 		plane_size->surface_size.height = fb->height;
3650 		plane_size->surface_pitch =
3651 			fb->pitches[0] / fb->format->cpp[0];
3652 
3653 		plane_size->chroma_size.x = 0;
3654 		plane_size->chroma_size.y = 0;
3655 		/* TODO: set these based on surface format */
3656 		plane_size->chroma_size.width = fb->width / 2;
3657 		plane_size->chroma_size.height = fb->height / 2;
3658 
3659 		plane_size->chroma_pitch =
3660 			fb->pitches[1] / fb->format->cpp[1];
3661 
3662 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3663 		address->video_progressive.luma_addr.low_part =
3664 			lower_32_bits(afb->address);
3665 		address->video_progressive.luma_addr.high_part =
3666 			upper_32_bits(afb->address);
3667 		address->video_progressive.chroma_addr.low_part =
3668 			lower_32_bits(chroma_addr);
3669 		address->video_progressive.chroma_addr.high_part =
3670 			upper_32_bits(chroma_addr);
3671 	}
3672 
3673 	/* Fill GFX8 params */
3674 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3675 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3676 
3677 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3678 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3679 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3680 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3681 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3682 
3683 		/* XXX fix me for VI */
3684 		tiling_info->gfx8.num_banks = num_banks;
3685 		tiling_info->gfx8.array_mode =
3686 				DC_ARRAY_2D_TILED_THIN1;
3687 		tiling_info->gfx8.tile_split = tile_split;
3688 		tiling_info->gfx8.bank_width = bankw;
3689 		tiling_info->gfx8.bank_height = bankh;
3690 		tiling_info->gfx8.tile_aspect = mtaspect;
3691 		tiling_info->gfx8.tile_mode =
3692 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3693 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3694 			== DC_ARRAY_1D_TILED_THIN1) {
3695 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3696 	}
3697 
3698 	tiling_info->gfx8.pipe_config =
3699 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3700 
3701 	if (adev->asic_type == CHIP_VEGA10 ||
3702 	    adev->asic_type == CHIP_VEGA12 ||
3703 	    adev->asic_type == CHIP_VEGA20 ||
3704 	    adev->asic_type == CHIP_NAVI10 ||
3705 	    adev->asic_type == CHIP_NAVI14 ||
3706 	    adev->asic_type == CHIP_NAVI12 ||
3707 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3708 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3709 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3710 #endif
3711 	    adev->asic_type == CHIP_RENOIR ||
3712 	    adev->asic_type == CHIP_RAVEN) {
3713 		/* Fill GFX9 params */
3714 		tiling_info->gfx9.num_pipes =
3715 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3716 		tiling_info->gfx9.num_banks =
3717 			adev->gfx.config.gb_addr_config_fields.num_banks;
3718 		tiling_info->gfx9.pipe_interleave =
3719 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3720 		tiling_info->gfx9.num_shader_engines =
3721 			adev->gfx.config.gb_addr_config_fields.num_se;
3722 		tiling_info->gfx9.max_compressed_frags =
3723 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3724 		tiling_info->gfx9.num_rb_per_se =
3725 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3726 		tiling_info->gfx9.swizzle =
3727 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3728 		tiling_info->gfx9.shaderEnable = 1;
3729 
3730 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3731 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3732 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3733 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3734 #endif
3735 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3736 						plane_size, tiling_info,
3737 						tiling_flags, dcc, address,
3738 						force_disable_dcc);
3739 		if (ret)
3740 			return ret;
3741 	}
3742 
3743 	return 0;
3744 }
3745 
3746 static void
3747 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3748 			       bool *per_pixel_alpha, bool *global_alpha,
3749 			       int *global_alpha_value)
3750 {
3751 	*per_pixel_alpha = false;
3752 	*global_alpha = false;
3753 	*global_alpha_value = 0xff;
3754 
3755 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3756 		return;
3757 
3758 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3759 		static const uint32_t alpha_formats[] = {
3760 			DRM_FORMAT_ARGB8888,
3761 			DRM_FORMAT_RGBA8888,
3762 			DRM_FORMAT_ABGR8888,
3763 		};
3764 		uint32_t format = plane_state->fb->format->format;
3765 		unsigned int i;
3766 
3767 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3768 			if (format == alpha_formats[i]) {
3769 				*per_pixel_alpha = true;
3770 				break;
3771 			}
3772 		}
3773 	}
3774 
3775 	if (plane_state->alpha < 0xffff) {
3776 		*global_alpha = true;
3777 		*global_alpha_value = plane_state->alpha >> 8;
3778 	}
3779 }
3780 
3781 static int
3782 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3783 			    const enum surface_pixel_format format,
3784 			    enum dc_color_space *color_space)
3785 {
3786 	bool full_range;
3787 
3788 	*color_space = COLOR_SPACE_SRGB;
3789 
3790 	/* DRM color properties only affect non-RGB formats. */
3791 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3792 		return 0;
3793 
3794 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3795 
3796 	switch (plane_state->color_encoding) {
3797 	case DRM_COLOR_YCBCR_BT601:
3798 		if (full_range)
3799 			*color_space = COLOR_SPACE_YCBCR601;
3800 		else
3801 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3802 		break;
3803 
3804 	case DRM_COLOR_YCBCR_BT709:
3805 		if (full_range)
3806 			*color_space = COLOR_SPACE_YCBCR709;
3807 		else
3808 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3809 		break;
3810 
3811 	case DRM_COLOR_YCBCR_BT2020:
3812 		if (full_range)
3813 			*color_space = COLOR_SPACE_2020_YCBCR;
3814 		else
3815 			return -EINVAL;
3816 		break;
3817 
3818 	default:
3819 		return -EINVAL;
3820 	}
3821 
3822 	return 0;
3823 }
3824 
3825 static int
3826 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3827 			    const struct drm_plane_state *plane_state,
3828 			    const uint64_t tiling_flags,
3829 			    struct dc_plane_info *plane_info,
3830 			    struct dc_plane_address *address,
3831 			    bool tmz_surface,
3832 			    bool force_disable_dcc)
3833 {
3834 	const struct drm_framebuffer *fb = plane_state->fb;
3835 	const struct amdgpu_framebuffer *afb =
3836 		to_amdgpu_framebuffer(plane_state->fb);
3837 	struct drm_format_name_buf format_name;
3838 	int ret;
3839 
3840 	memset(plane_info, 0, sizeof(*plane_info));
3841 
3842 	switch (fb->format->format) {
3843 	case DRM_FORMAT_C8:
3844 		plane_info->format =
3845 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3846 		break;
3847 	case DRM_FORMAT_RGB565:
3848 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3849 		break;
3850 	case DRM_FORMAT_XRGB8888:
3851 	case DRM_FORMAT_ARGB8888:
3852 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3853 		break;
3854 	case DRM_FORMAT_XRGB2101010:
3855 	case DRM_FORMAT_ARGB2101010:
3856 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3857 		break;
3858 	case DRM_FORMAT_XBGR2101010:
3859 	case DRM_FORMAT_ABGR2101010:
3860 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3861 		break;
3862 	case DRM_FORMAT_XBGR8888:
3863 	case DRM_FORMAT_ABGR8888:
3864 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3865 		break;
3866 	case DRM_FORMAT_NV21:
3867 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3868 		break;
3869 	case DRM_FORMAT_NV12:
3870 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3871 		break;
3872 	case DRM_FORMAT_P010:
3873 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3874 		break;
3875 	case DRM_FORMAT_XRGB16161616F:
3876 	case DRM_FORMAT_ARGB16161616F:
3877 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3878 		break;
3879 	case DRM_FORMAT_XBGR16161616F:
3880 	case DRM_FORMAT_ABGR16161616F:
3881 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3882 		break;
3883 	default:
3884 		DRM_ERROR(
3885 			"Unsupported screen format %s\n",
3886 			drm_get_format_name(fb->format->format, &format_name));
3887 		return -EINVAL;
3888 	}
3889 
3890 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3891 	case DRM_MODE_ROTATE_0:
3892 		plane_info->rotation = ROTATION_ANGLE_0;
3893 		break;
3894 	case DRM_MODE_ROTATE_90:
3895 		plane_info->rotation = ROTATION_ANGLE_90;
3896 		break;
3897 	case DRM_MODE_ROTATE_180:
3898 		plane_info->rotation = ROTATION_ANGLE_180;
3899 		break;
3900 	case DRM_MODE_ROTATE_270:
3901 		plane_info->rotation = ROTATION_ANGLE_270;
3902 		break;
3903 	default:
3904 		plane_info->rotation = ROTATION_ANGLE_0;
3905 		break;
3906 	}
3907 
3908 	plane_info->visible = true;
3909 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3910 
3911 	plane_info->layer_index = 0;
3912 
3913 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3914 					  &plane_info->color_space);
3915 	if (ret)
3916 		return ret;
3917 
3918 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3919 					   plane_info->rotation, tiling_flags,
3920 					   &plane_info->tiling_info,
3921 					   &plane_info->plane_size,
3922 					   &plane_info->dcc, address, tmz_surface,
3923 					   force_disable_dcc);
3924 	if (ret)
3925 		return ret;
3926 
3927 	fill_blending_from_plane_state(
3928 		plane_state, &plane_info->per_pixel_alpha,
3929 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3930 
3931 	return 0;
3932 }
3933 
3934 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3935 				    struct dc_plane_state *dc_plane_state,
3936 				    struct drm_plane_state *plane_state,
3937 				    struct drm_crtc_state *crtc_state)
3938 {
3939 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3940 	const struct amdgpu_framebuffer *amdgpu_fb =
3941 		to_amdgpu_framebuffer(plane_state->fb);
3942 	struct dc_scaling_info scaling_info;
3943 	struct dc_plane_info plane_info;
3944 	uint64_t tiling_flags;
3945 	int ret;
3946 	bool tmz_surface = false;
3947 	bool force_disable_dcc = false;
3948 
3949 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3950 	if (ret)
3951 		return ret;
3952 
3953 	dc_plane_state->src_rect = scaling_info.src_rect;
3954 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3955 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3956 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3957 
3958 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3959 	if (ret)
3960 		return ret;
3961 
3962 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3963 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3964 					  &plane_info,
3965 					  &dc_plane_state->address,
3966 					  tmz_surface,
3967 					  force_disable_dcc);
3968 	if (ret)
3969 		return ret;
3970 
3971 	dc_plane_state->format = plane_info.format;
3972 	dc_plane_state->color_space = plane_info.color_space;
3973 	dc_plane_state->format = plane_info.format;
3974 	dc_plane_state->plane_size = plane_info.plane_size;
3975 	dc_plane_state->rotation = plane_info.rotation;
3976 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3977 	dc_plane_state->stereo_format = plane_info.stereo_format;
3978 	dc_plane_state->tiling_info = plane_info.tiling_info;
3979 	dc_plane_state->visible = plane_info.visible;
3980 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3981 	dc_plane_state->global_alpha = plane_info.global_alpha;
3982 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3983 	dc_plane_state->dcc = plane_info.dcc;
3984 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3985 
3986 	/*
3987 	 * Always set input transfer function, since plane state is refreshed
3988 	 * every time.
3989 	 */
3990 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3991 	if (ret)
3992 		return ret;
3993 
3994 	return 0;
3995 }
3996 
3997 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3998 					   const struct dm_connector_state *dm_state,
3999 					   struct dc_stream_state *stream)
4000 {
4001 	enum amdgpu_rmx_type rmx_type;
4002 
4003 	struct rect src = { 0 }; /* viewport in composition space*/
4004 	struct rect dst = { 0 }; /* stream addressable area */
4005 
4006 	/* no mode. nothing to be done */
4007 	if (!mode)
4008 		return;
4009 
4010 	/* Full screen scaling by default */
4011 	src.width = mode->hdisplay;
4012 	src.height = mode->vdisplay;
4013 	dst.width = stream->timing.h_addressable;
4014 	dst.height = stream->timing.v_addressable;
4015 
4016 	if (dm_state) {
4017 		rmx_type = dm_state->scaling;
4018 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4019 			if (src.width * dst.height <
4020 					src.height * dst.width) {
4021 				/* height needs less upscaling/more downscaling */
4022 				dst.width = src.width *
4023 						dst.height / src.height;
4024 			} else {
4025 				/* width needs less upscaling/more downscaling */
4026 				dst.height = src.height *
4027 						dst.width / src.width;
4028 			}
4029 		} else if (rmx_type == RMX_CENTER) {
4030 			dst = src;
4031 		}
4032 
4033 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4034 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4035 
4036 		if (dm_state->underscan_enable) {
4037 			dst.x += dm_state->underscan_hborder / 2;
4038 			dst.y += dm_state->underscan_vborder / 2;
4039 			dst.width -= dm_state->underscan_hborder;
4040 			dst.height -= dm_state->underscan_vborder;
4041 		}
4042 	}
4043 
4044 	stream->src = src;
4045 	stream->dst = dst;
4046 
4047 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4048 			dst.x, dst.y, dst.width, dst.height);
4049 
4050 }
4051 
4052 static enum dc_color_depth
4053 convert_color_depth_from_display_info(const struct drm_connector *connector,
4054 				      bool is_y420, int requested_bpc)
4055 {
4056 	uint8_t bpc;
4057 
4058 	if (is_y420) {
4059 		bpc = 8;
4060 
4061 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4062 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4063 			bpc = 16;
4064 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4065 			bpc = 12;
4066 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4067 			bpc = 10;
4068 	} else {
4069 		bpc = (uint8_t)connector->display_info.bpc;
4070 		/* Assume 8 bpc by default if no bpc is specified. */
4071 		bpc = bpc ? bpc : 8;
4072 	}
4073 
4074 	if (requested_bpc > 0) {
4075 		/*
4076 		 * Cap display bpc based on the user requested value.
4077 		 *
4078 		 * The value for state->max_bpc may not correctly updated
4079 		 * depending on when the connector gets added to the state
4080 		 * or if this was called outside of atomic check, so it
4081 		 * can't be used directly.
4082 		 */
4083 		bpc = min_t(u8, bpc, requested_bpc);
4084 
4085 		/* Round down to the nearest even number. */
4086 		bpc = bpc - (bpc & 1);
4087 	}
4088 
4089 	switch (bpc) {
4090 	case 0:
4091 		/*
4092 		 * Temporary Work around, DRM doesn't parse color depth for
4093 		 * EDID revision before 1.4
4094 		 * TODO: Fix edid parsing
4095 		 */
4096 		return COLOR_DEPTH_888;
4097 	case 6:
4098 		return COLOR_DEPTH_666;
4099 	case 8:
4100 		return COLOR_DEPTH_888;
4101 	case 10:
4102 		return COLOR_DEPTH_101010;
4103 	case 12:
4104 		return COLOR_DEPTH_121212;
4105 	case 14:
4106 		return COLOR_DEPTH_141414;
4107 	case 16:
4108 		return COLOR_DEPTH_161616;
4109 	default:
4110 		return COLOR_DEPTH_UNDEFINED;
4111 	}
4112 }
4113 
4114 static enum dc_aspect_ratio
4115 get_aspect_ratio(const struct drm_display_mode *mode_in)
4116 {
4117 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4118 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4119 }
4120 
4121 static enum dc_color_space
4122 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4123 {
4124 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4125 
4126 	switch (dc_crtc_timing->pixel_encoding)	{
4127 	case PIXEL_ENCODING_YCBCR422:
4128 	case PIXEL_ENCODING_YCBCR444:
4129 	case PIXEL_ENCODING_YCBCR420:
4130 	{
4131 		/*
4132 		 * 27030khz is the separation point between HDTV and SDTV
4133 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4134 		 * respectively
4135 		 */
4136 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4137 			if (dc_crtc_timing->flags.Y_ONLY)
4138 				color_space =
4139 					COLOR_SPACE_YCBCR709_LIMITED;
4140 			else
4141 				color_space = COLOR_SPACE_YCBCR709;
4142 		} else {
4143 			if (dc_crtc_timing->flags.Y_ONLY)
4144 				color_space =
4145 					COLOR_SPACE_YCBCR601_LIMITED;
4146 			else
4147 				color_space = COLOR_SPACE_YCBCR601;
4148 		}
4149 
4150 	}
4151 	break;
4152 	case PIXEL_ENCODING_RGB:
4153 		color_space = COLOR_SPACE_SRGB;
4154 		break;
4155 
4156 	default:
4157 		WARN_ON(1);
4158 		break;
4159 	}
4160 
4161 	return color_space;
4162 }
4163 
4164 static bool adjust_colour_depth_from_display_info(
4165 	struct dc_crtc_timing *timing_out,
4166 	const struct drm_display_info *info)
4167 {
4168 	enum dc_color_depth depth = timing_out->display_color_depth;
4169 	int normalized_clk;
4170 	do {
4171 		normalized_clk = timing_out->pix_clk_100hz / 10;
4172 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4173 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4174 			normalized_clk /= 2;
4175 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4176 		switch (depth) {
4177 		case COLOR_DEPTH_888:
4178 			break;
4179 		case COLOR_DEPTH_101010:
4180 			normalized_clk = (normalized_clk * 30) / 24;
4181 			break;
4182 		case COLOR_DEPTH_121212:
4183 			normalized_clk = (normalized_clk * 36) / 24;
4184 			break;
4185 		case COLOR_DEPTH_161616:
4186 			normalized_clk = (normalized_clk * 48) / 24;
4187 			break;
4188 		default:
4189 			/* The above depths are the only ones valid for HDMI. */
4190 			return false;
4191 		}
4192 		if (normalized_clk <= info->max_tmds_clock) {
4193 			timing_out->display_color_depth = depth;
4194 			return true;
4195 		}
4196 	} while (--depth > COLOR_DEPTH_666);
4197 	return false;
4198 }
4199 
4200 static void fill_stream_properties_from_drm_display_mode(
4201 	struct dc_stream_state *stream,
4202 	const struct drm_display_mode *mode_in,
4203 	const struct drm_connector *connector,
4204 	const struct drm_connector_state *connector_state,
4205 	const struct dc_stream_state *old_stream,
4206 	int requested_bpc)
4207 {
4208 	struct dc_crtc_timing *timing_out = &stream->timing;
4209 	const struct drm_display_info *info = &connector->display_info;
4210 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4211 	struct hdmi_vendor_infoframe hv_frame;
4212 	struct hdmi_avi_infoframe avi_frame;
4213 
4214 	memset(&hv_frame, 0, sizeof(hv_frame));
4215 	memset(&avi_frame, 0, sizeof(avi_frame));
4216 
4217 	timing_out->h_border_left = 0;
4218 	timing_out->h_border_right = 0;
4219 	timing_out->v_border_top = 0;
4220 	timing_out->v_border_bottom = 0;
4221 	/* TODO: un-hardcode */
4222 	if (drm_mode_is_420_only(info, mode_in)
4223 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4224 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4225 	else if (drm_mode_is_420_also(info, mode_in)
4226 			&& aconnector->force_yuv420_output)
4227 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4228 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4229 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4230 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4231 	else
4232 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4233 
4234 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4235 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4236 		connector,
4237 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4238 		requested_bpc);
4239 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4240 	timing_out->hdmi_vic = 0;
4241 
4242 	if(old_stream) {
4243 		timing_out->vic = old_stream->timing.vic;
4244 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4245 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4246 	} else {
4247 		timing_out->vic = drm_match_cea_mode(mode_in);
4248 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4249 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4250 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4251 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4252 	}
4253 
4254 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4255 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4256 		timing_out->vic = avi_frame.video_code;
4257 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4258 		timing_out->hdmi_vic = hv_frame.vic;
4259 	}
4260 
4261 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4262 	timing_out->h_total = mode_in->crtc_htotal;
4263 	timing_out->h_sync_width =
4264 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4265 	timing_out->h_front_porch =
4266 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4267 	timing_out->v_total = mode_in->crtc_vtotal;
4268 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4269 	timing_out->v_front_porch =
4270 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4271 	timing_out->v_sync_width =
4272 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4273 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4274 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4275 
4276 	stream->output_color_space = get_output_color_space(timing_out);
4277 
4278 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4279 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4280 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4281 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4282 		    drm_mode_is_420_also(info, mode_in) &&
4283 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4284 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4285 			adjust_colour_depth_from_display_info(timing_out, info);
4286 		}
4287 	}
4288 }
4289 
4290 static void fill_audio_info(struct audio_info *audio_info,
4291 			    const struct drm_connector *drm_connector,
4292 			    const struct dc_sink *dc_sink)
4293 {
4294 	int i = 0;
4295 	int cea_revision = 0;
4296 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4297 
4298 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4299 	audio_info->product_id = edid_caps->product_id;
4300 
4301 	cea_revision = drm_connector->display_info.cea_rev;
4302 
4303 	strscpy(audio_info->display_name,
4304 		edid_caps->display_name,
4305 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4306 
4307 	if (cea_revision >= 3) {
4308 		audio_info->mode_count = edid_caps->audio_mode_count;
4309 
4310 		for (i = 0; i < audio_info->mode_count; ++i) {
4311 			audio_info->modes[i].format_code =
4312 					(enum audio_format_code)
4313 					(edid_caps->audio_modes[i].format_code);
4314 			audio_info->modes[i].channel_count =
4315 					edid_caps->audio_modes[i].channel_count;
4316 			audio_info->modes[i].sample_rates.all =
4317 					edid_caps->audio_modes[i].sample_rate;
4318 			audio_info->modes[i].sample_size =
4319 					edid_caps->audio_modes[i].sample_size;
4320 		}
4321 	}
4322 
4323 	audio_info->flags.all = edid_caps->speaker_flags;
4324 
4325 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4326 	if (drm_connector->latency_present[0]) {
4327 		audio_info->video_latency = drm_connector->video_latency[0];
4328 		audio_info->audio_latency = drm_connector->audio_latency[0];
4329 	}
4330 
4331 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4332 
4333 }
4334 
4335 static void
4336 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4337 				      struct drm_display_mode *dst_mode)
4338 {
4339 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4340 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4341 	dst_mode->crtc_clock = src_mode->crtc_clock;
4342 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4343 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4344 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4345 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4346 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4347 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4348 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4349 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4350 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4351 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4352 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4353 }
4354 
4355 static void
4356 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4357 					const struct drm_display_mode *native_mode,
4358 					bool scale_enabled)
4359 {
4360 	if (scale_enabled) {
4361 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4362 	} else if (native_mode->clock == drm_mode->clock &&
4363 			native_mode->htotal == drm_mode->htotal &&
4364 			native_mode->vtotal == drm_mode->vtotal) {
4365 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4366 	} else {
4367 		/* no scaling nor amdgpu inserted, no need to patch */
4368 	}
4369 }
4370 
4371 static struct dc_sink *
4372 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4373 {
4374 	struct dc_sink_init_data sink_init_data = { 0 };
4375 	struct dc_sink *sink = NULL;
4376 	sink_init_data.link = aconnector->dc_link;
4377 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4378 
4379 	sink = dc_sink_create(&sink_init_data);
4380 	if (!sink) {
4381 		DRM_ERROR("Failed to create sink!\n");
4382 		return NULL;
4383 	}
4384 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4385 
4386 	return sink;
4387 }
4388 
4389 static void set_multisync_trigger_params(
4390 		struct dc_stream_state *stream)
4391 {
4392 	if (stream->triggered_crtc_reset.enabled) {
4393 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4394 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4395 	}
4396 }
4397 
4398 static void set_master_stream(struct dc_stream_state *stream_set[],
4399 			      int stream_count)
4400 {
4401 	int j, highest_rfr = 0, master_stream = 0;
4402 
4403 	for (j = 0;  j < stream_count; j++) {
4404 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4405 			int refresh_rate = 0;
4406 
4407 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4408 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4409 			if (refresh_rate > highest_rfr) {
4410 				highest_rfr = refresh_rate;
4411 				master_stream = j;
4412 			}
4413 		}
4414 	}
4415 	for (j = 0;  j < stream_count; j++) {
4416 		if (stream_set[j])
4417 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4418 	}
4419 }
4420 
4421 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4422 {
4423 	int i = 0;
4424 
4425 	if (context->stream_count < 2)
4426 		return;
4427 	for (i = 0; i < context->stream_count ; i++) {
4428 		if (!context->streams[i])
4429 			continue;
4430 		/*
4431 		 * TODO: add a function to read AMD VSDB bits and set
4432 		 * crtc_sync_master.multi_sync_enabled flag
4433 		 * For now it's set to false
4434 		 */
4435 		set_multisync_trigger_params(context->streams[i]);
4436 	}
4437 	set_master_stream(context->streams, context->stream_count);
4438 }
4439 
4440 static struct dc_stream_state *
4441 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4442 		       const struct drm_display_mode *drm_mode,
4443 		       const struct dm_connector_state *dm_state,
4444 		       const struct dc_stream_state *old_stream,
4445 		       int requested_bpc)
4446 {
4447 	struct drm_display_mode *preferred_mode = NULL;
4448 	struct drm_connector *drm_connector;
4449 	const struct drm_connector_state *con_state =
4450 		dm_state ? &dm_state->base : NULL;
4451 	struct dc_stream_state *stream = NULL;
4452 	struct drm_display_mode mode = *drm_mode;
4453 	bool native_mode_found = false;
4454 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4455 	int mode_refresh;
4456 	int preferred_refresh = 0;
4457 #if defined(CONFIG_DRM_AMD_DC_DCN)
4458 	struct dsc_dec_dpcd_caps dsc_caps;
4459 #endif
4460 	uint32_t link_bandwidth_kbps;
4461 
4462 	struct dc_sink *sink = NULL;
4463 	if (aconnector == NULL) {
4464 		DRM_ERROR("aconnector is NULL!\n");
4465 		return stream;
4466 	}
4467 
4468 	drm_connector = &aconnector->base;
4469 
4470 	if (!aconnector->dc_sink) {
4471 		sink = create_fake_sink(aconnector);
4472 		if (!sink)
4473 			return stream;
4474 	} else {
4475 		sink = aconnector->dc_sink;
4476 		dc_sink_retain(sink);
4477 	}
4478 
4479 	stream = dc_create_stream_for_sink(sink);
4480 
4481 	if (stream == NULL) {
4482 		DRM_ERROR("Failed to create stream for sink!\n");
4483 		goto finish;
4484 	}
4485 
4486 	stream->dm_stream_context = aconnector;
4487 
4488 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4489 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4490 
4491 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4492 		/* Search for preferred mode */
4493 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4494 			native_mode_found = true;
4495 			break;
4496 		}
4497 	}
4498 	if (!native_mode_found)
4499 		preferred_mode = list_first_entry_or_null(
4500 				&aconnector->base.modes,
4501 				struct drm_display_mode,
4502 				head);
4503 
4504 	mode_refresh = drm_mode_vrefresh(&mode);
4505 
4506 	if (preferred_mode == NULL) {
4507 		/*
4508 		 * This may not be an error, the use case is when we have no
4509 		 * usermode calls to reset and set mode upon hotplug. In this
4510 		 * case, we call set mode ourselves to restore the previous mode
4511 		 * and the modelist may not be filled in in time.
4512 		 */
4513 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4514 	} else {
4515 		decide_crtc_timing_for_drm_display_mode(
4516 				&mode, preferred_mode,
4517 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4518 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4519 	}
4520 
4521 	if (!dm_state)
4522 		drm_mode_set_crtcinfo(&mode, 0);
4523 
4524 	/*
4525 	* If scaling is enabled and refresh rate didn't change
4526 	* we copy the vic and polarities of the old timings
4527 	*/
4528 	if (!scale || mode_refresh != preferred_refresh)
4529 		fill_stream_properties_from_drm_display_mode(stream,
4530 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4531 	else
4532 		fill_stream_properties_from_drm_display_mode(stream,
4533 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4534 
4535 	stream->timing.flags.DSC = 0;
4536 
4537 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4538 #if defined(CONFIG_DRM_AMD_DC_DCN)
4539 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4540 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4541 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4542 				      &dsc_caps);
4543 #endif
4544 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4545 							     dc_link_get_link_cap(aconnector->dc_link));
4546 
4547 #if defined(CONFIG_DRM_AMD_DC_DCN)
4548 		if (dsc_caps.is_dsc_supported)
4549 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4550 						  &dsc_caps,
4551 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4552 						  link_bandwidth_kbps,
4553 						  &stream->timing,
4554 						  &stream->timing.dsc_cfg))
4555 				stream->timing.flags.DSC = 1;
4556 #endif
4557 	}
4558 
4559 	update_stream_scaling_settings(&mode, dm_state, stream);
4560 
4561 	fill_audio_info(
4562 		&stream->audio_info,
4563 		drm_connector,
4564 		sink);
4565 
4566 	update_stream_signal(stream, sink);
4567 
4568 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4569 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4570 	if (stream->link->psr_settings.psr_feature_enabled) {
4571 		//
4572 		// should decide stream support vsc sdp colorimetry capability
4573 		// before building vsc info packet
4574 		//
4575 		stream->use_vsc_sdp_for_colorimetry = false;
4576 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4577 			stream->use_vsc_sdp_for_colorimetry =
4578 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4579 		} else {
4580 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4581 				stream->use_vsc_sdp_for_colorimetry = true;
4582 		}
4583 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4584 	}
4585 finish:
4586 	dc_sink_release(sink);
4587 
4588 	return stream;
4589 }
4590 
4591 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4592 {
4593 	drm_crtc_cleanup(crtc);
4594 	kfree(crtc);
4595 }
4596 
4597 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4598 				  struct drm_crtc_state *state)
4599 {
4600 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4601 
4602 	/* TODO Destroy dc_stream objects are stream object is flattened */
4603 	if (cur->stream)
4604 		dc_stream_release(cur->stream);
4605 
4606 
4607 	__drm_atomic_helper_crtc_destroy_state(state);
4608 
4609 
4610 	kfree(state);
4611 }
4612 
4613 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4614 {
4615 	struct dm_crtc_state *state;
4616 
4617 	if (crtc->state)
4618 		dm_crtc_destroy_state(crtc, crtc->state);
4619 
4620 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4621 	if (WARN_ON(!state))
4622 		return;
4623 
4624 	crtc->state = &state->base;
4625 	crtc->state->crtc = crtc;
4626 
4627 }
4628 
4629 static struct drm_crtc_state *
4630 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4631 {
4632 	struct dm_crtc_state *state, *cur;
4633 
4634 	cur = to_dm_crtc_state(crtc->state);
4635 
4636 	if (WARN_ON(!crtc->state))
4637 		return NULL;
4638 
4639 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4640 	if (!state)
4641 		return NULL;
4642 
4643 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4644 
4645 	if (cur->stream) {
4646 		state->stream = cur->stream;
4647 		dc_stream_retain(state->stream);
4648 	}
4649 
4650 	state->active_planes = cur->active_planes;
4651 	state->vrr_params = cur->vrr_params;
4652 	state->vrr_infopacket = cur->vrr_infopacket;
4653 	state->abm_level = cur->abm_level;
4654 	state->vrr_supported = cur->vrr_supported;
4655 	state->freesync_config = cur->freesync_config;
4656 	state->crc_src = cur->crc_src;
4657 	state->cm_has_degamma = cur->cm_has_degamma;
4658 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4659 
4660 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4661 
4662 	return &state->base;
4663 }
4664 
4665 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4666 {
4667 	enum dc_irq_source irq_source;
4668 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4669 	struct amdgpu_device *adev = crtc->dev->dev_private;
4670 	int rc;
4671 
4672 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4673 
4674 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4675 
4676 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4677 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4678 	return rc;
4679 }
4680 
4681 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4682 {
4683 	enum dc_irq_source irq_source;
4684 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4685 	struct amdgpu_device *adev = crtc->dev->dev_private;
4686 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4687 	int rc = 0;
4688 
4689 	if (enable) {
4690 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4691 		if (amdgpu_dm_vrr_active(acrtc_state))
4692 			rc = dm_set_vupdate_irq(crtc, true);
4693 	} else {
4694 		/* vblank irq off -> vupdate irq off */
4695 		rc = dm_set_vupdate_irq(crtc, false);
4696 	}
4697 
4698 	if (rc)
4699 		return rc;
4700 
4701 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4702 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4703 }
4704 
4705 static int dm_enable_vblank(struct drm_crtc *crtc)
4706 {
4707 	return dm_set_vblank(crtc, true);
4708 }
4709 
4710 static void dm_disable_vblank(struct drm_crtc *crtc)
4711 {
4712 	dm_set_vblank(crtc, false);
4713 }
4714 
4715 /* Implemented only the options currently availible for the driver */
4716 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4717 	.reset = dm_crtc_reset_state,
4718 	.destroy = amdgpu_dm_crtc_destroy,
4719 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4720 	.set_config = drm_atomic_helper_set_config,
4721 	.page_flip = drm_atomic_helper_page_flip,
4722 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4723 	.atomic_destroy_state = dm_crtc_destroy_state,
4724 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4725 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4726 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4727 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4728 	.enable_vblank = dm_enable_vblank,
4729 	.disable_vblank = dm_disable_vblank,
4730 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4731 };
4732 
4733 static enum drm_connector_status
4734 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4735 {
4736 	bool connected;
4737 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4738 
4739 	/*
4740 	 * Notes:
4741 	 * 1. This interface is NOT called in context of HPD irq.
4742 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4743 	 * makes it a bad place for *any* MST-related activity.
4744 	 */
4745 
4746 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4747 	    !aconnector->fake_enable)
4748 		connected = (aconnector->dc_sink != NULL);
4749 	else
4750 		connected = (aconnector->base.force == DRM_FORCE_ON);
4751 
4752 	return (connected ? connector_status_connected :
4753 			connector_status_disconnected);
4754 }
4755 
4756 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4757 					    struct drm_connector_state *connector_state,
4758 					    struct drm_property *property,
4759 					    uint64_t val)
4760 {
4761 	struct drm_device *dev = connector->dev;
4762 	struct amdgpu_device *adev = dev->dev_private;
4763 	struct dm_connector_state *dm_old_state =
4764 		to_dm_connector_state(connector->state);
4765 	struct dm_connector_state *dm_new_state =
4766 		to_dm_connector_state(connector_state);
4767 
4768 	int ret = -EINVAL;
4769 
4770 	if (property == dev->mode_config.scaling_mode_property) {
4771 		enum amdgpu_rmx_type rmx_type;
4772 
4773 		switch (val) {
4774 		case DRM_MODE_SCALE_CENTER:
4775 			rmx_type = RMX_CENTER;
4776 			break;
4777 		case DRM_MODE_SCALE_ASPECT:
4778 			rmx_type = RMX_ASPECT;
4779 			break;
4780 		case DRM_MODE_SCALE_FULLSCREEN:
4781 			rmx_type = RMX_FULL;
4782 			break;
4783 		case DRM_MODE_SCALE_NONE:
4784 		default:
4785 			rmx_type = RMX_OFF;
4786 			break;
4787 		}
4788 
4789 		if (dm_old_state->scaling == rmx_type)
4790 			return 0;
4791 
4792 		dm_new_state->scaling = rmx_type;
4793 		ret = 0;
4794 	} else if (property == adev->mode_info.underscan_hborder_property) {
4795 		dm_new_state->underscan_hborder = val;
4796 		ret = 0;
4797 	} else if (property == adev->mode_info.underscan_vborder_property) {
4798 		dm_new_state->underscan_vborder = val;
4799 		ret = 0;
4800 	} else if (property == adev->mode_info.underscan_property) {
4801 		dm_new_state->underscan_enable = val;
4802 		ret = 0;
4803 	} else if (property == adev->mode_info.abm_level_property) {
4804 		dm_new_state->abm_level = val;
4805 		ret = 0;
4806 	}
4807 
4808 	return ret;
4809 }
4810 
4811 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4812 					    const struct drm_connector_state *state,
4813 					    struct drm_property *property,
4814 					    uint64_t *val)
4815 {
4816 	struct drm_device *dev = connector->dev;
4817 	struct amdgpu_device *adev = dev->dev_private;
4818 	struct dm_connector_state *dm_state =
4819 		to_dm_connector_state(state);
4820 	int ret = -EINVAL;
4821 
4822 	if (property == dev->mode_config.scaling_mode_property) {
4823 		switch (dm_state->scaling) {
4824 		case RMX_CENTER:
4825 			*val = DRM_MODE_SCALE_CENTER;
4826 			break;
4827 		case RMX_ASPECT:
4828 			*val = DRM_MODE_SCALE_ASPECT;
4829 			break;
4830 		case RMX_FULL:
4831 			*val = DRM_MODE_SCALE_FULLSCREEN;
4832 			break;
4833 		case RMX_OFF:
4834 		default:
4835 			*val = DRM_MODE_SCALE_NONE;
4836 			break;
4837 		}
4838 		ret = 0;
4839 	} else if (property == adev->mode_info.underscan_hborder_property) {
4840 		*val = dm_state->underscan_hborder;
4841 		ret = 0;
4842 	} else if (property == adev->mode_info.underscan_vborder_property) {
4843 		*val = dm_state->underscan_vborder;
4844 		ret = 0;
4845 	} else if (property == adev->mode_info.underscan_property) {
4846 		*val = dm_state->underscan_enable;
4847 		ret = 0;
4848 	} else if (property == adev->mode_info.abm_level_property) {
4849 		*val = dm_state->abm_level;
4850 		ret = 0;
4851 	}
4852 
4853 	return ret;
4854 }
4855 
4856 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4857 {
4858 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4859 
4860 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4861 }
4862 
4863 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4864 {
4865 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4866 	const struct dc_link *link = aconnector->dc_link;
4867 	struct amdgpu_device *adev = connector->dev->dev_private;
4868 	struct amdgpu_display_manager *dm = &adev->dm;
4869 
4870 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4871 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4872 
4873 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4874 	    link->type != dc_connection_none &&
4875 	    dm->backlight_dev) {
4876 		backlight_device_unregister(dm->backlight_dev);
4877 		dm->backlight_dev = NULL;
4878 	}
4879 #endif
4880 
4881 	if (aconnector->dc_em_sink)
4882 		dc_sink_release(aconnector->dc_em_sink);
4883 	aconnector->dc_em_sink = NULL;
4884 	if (aconnector->dc_sink)
4885 		dc_sink_release(aconnector->dc_sink);
4886 	aconnector->dc_sink = NULL;
4887 
4888 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4889 	drm_connector_unregister(connector);
4890 	drm_connector_cleanup(connector);
4891 	if (aconnector->i2c) {
4892 		i2c_del_adapter(&aconnector->i2c->base);
4893 		kfree(aconnector->i2c);
4894 	}
4895 	kfree(aconnector->dm_dp_aux.aux.name);
4896 
4897 	kfree(connector);
4898 }
4899 
4900 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4901 {
4902 	struct dm_connector_state *state =
4903 		to_dm_connector_state(connector->state);
4904 
4905 	if (connector->state)
4906 		__drm_atomic_helper_connector_destroy_state(connector->state);
4907 
4908 	kfree(state);
4909 
4910 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4911 
4912 	if (state) {
4913 		state->scaling = RMX_OFF;
4914 		state->underscan_enable = false;
4915 		state->underscan_hborder = 0;
4916 		state->underscan_vborder = 0;
4917 		state->base.max_requested_bpc = 8;
4918 		state->vcpi_slots = 0;
4919 		state->pbn = 0;
4920 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4921 			state->abm_level = amdgpu_dm_abm_level;
4922 
4923 		__drm_atomic_helper_connector_reset(connector, &state->base);
4924 	}
4925 }
4926 
4927 struct drm_connector_state *
4928 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4929 {
4930 	struct dm_connector_state *state =
4931 		to_dm_connector_state(connector->state);
4932 
4933 	struct dm_connector_state *new_state =
4934 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4935 
4936 	if (!new_state)
4937 		return NULL;
4938 
4939 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4940 
4941 	new_state->freesync_capable = state->freesync_capable;
4942 	new_state->abm_level = state->abm_level;
4943 	new_state->scaling = state->scaling;
4944 	new_state->underscan_enable = state->underscan_enable;
4945 	new_state->underscan_hborder = state->underscan_hborder;
4946 	new_state->underscan_vborder = state->underscan_vborder;
4947 	new_state->vcpi_slots = state->vcpi_slots;
4948 	new_state->pbn = state->pbn;
4949 	return &new_state->base;
4950 }
4951 
4952 static int
4953 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4954 {
4955 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4956 		to_amdgpu_dm_connector(connector);
4957 	int r;
4958 
4959 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4960 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4961 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4962 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4963 		if (r)
4964 			return r;
4965 	}
4966 
4967 #if defined(CONFIG_DEBUG_FS)
4968 	connector_debugfs_init(amdgpu_dm_connector);
4969 #endif
4970 
4971 	return 0;
4972 }
4973 
4974 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4975 	.reset = amdgpu_dm_connector_funcs_reset,
4976 	.detect = amdgpu_dm_connector_detect,
4977 	.fill_modes = drm_helper_probe_single_connector_modes,
4978 	.destroy = amdgpu_dm_connector_destroy,
4979 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4980 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4981 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4982 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4983 	.late_register = amdgpu_dm_connector_late_register,
4984 	.early_unregister = amdgpu_dm_connector_unregister
4985 };
4986 
4987 static int get_modes(struct drm_connector *connector)
4988 {
4989 	return amdgpu_dm_connector_get_modes(connector);
4990 }
4991 
4992 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4993 {
4994 	struct dc_sink_init_data init_params = {
4995 			.link = aconnector->dc_link,
4996 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4997 	};
4998 	struct edid *edid;
4999 
5000 	if (!aconnector->base.edid_blob_ptr) {
5001 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5002 				aconnector->base.name);
5003 
5004 		aconnector->base.force = DRM_FORCE_OFF;
5005 		aconnector->base.override_edid = false;
5006 		return;
5007 	}
5008 
5009 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5010 
5011 	aconnector->edid = edid;
5012 
5013 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5014 		aconnector->dc_link,
5015 		(uint8_t *)edid,
5016 		(edid->extensions + 1) * EDID_LENGTH,
5017 		&init_params);
5018 
5019 	if (aconnector->base.force == DRM_FORCE_ON) {
5020 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5021 		aconnector->dc_link->local_sink :
5022 		aconnector->dc_em_sink;
5023 		dc_sink_retain(aconnector->dc_sink);
5024 	}
5025 }
5026 
5027 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5028 {
5029 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5030 
5031 	/*
5032 	 * In case of headless boot with force on for DP managed connector
5033 	 * Those settings have to be != 0 to get initial modeset
5034 	 */
5035 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5036 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5037 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5038 	}
5039 
5040 
5041 	aconnector->base.override_edid = true;
5042 	create_eml_sink(aconnector);
5043 }
5044 
5045 static struct dc_stream_state *
5046 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5047 				const struct drm_display_mode *drm_mode,
5048 				const struct dm_connector_state *dm_state,
5049 				const struct dc_stream_state *old_stream)
5050 {
5051 	struct drm_connector *connector = &aconnector->base;
5052 	struct amdgpu_device *adev = connector->dev->dev_private;
5053 	struct dc_stream_state *stream;
5054 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5055 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5056 	enum dc_status dc_result = DC_OK;
5057 
5058 	do {
5059 		stream = create_stream_for_sink(aconnector, drm_mode,
5060 						dm_state, old_stream,
5061 						requested_bpc);
5062 		if (stream == NULL) {
5063 			DRM_ERROR("Failed to create stream for sink!\n");
5064 			break;
5065 		}
5066 
5067 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5068 
5069 		if (dc_result != DC_OK) {
5070 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5071 				      drm_mode->hdisplay,
5072 				      drm_mode->vdisplay,
5073 				      drm_mode->clock,
5074 				      dc_result,
5075 				      dc_status_to_str(dc_result));
5076 
5077 			dc_stream_release(stream);
5078 			stream = NULL;
5079 			requested_bpc -= 2; /* lower bpc to retry validation */
5080 		}
5081 
5082 	} while (stream == NULL && requested_bpc >= 6);
5083 
5084 	return stream;
5085 }
5086 
5087 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5088 				   struct drm_display_mode *mode)
5089 {
5090 	int result = MODE_ERROR;
5091 	struct dc_sink *dc_sink;
5092 	/* TODO: Unhardcode stream count */
5093 	struct dc_stream_state *stream;
5094 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5095 
5096 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5097 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5098 		return result;
5099 
5100 	/*
5101 	 * Only run this the first time mode_valid is called to initilialize
5102 	 * EDID mgmt
5103 	 */
5104 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5105 		!aconnector->dc_em_sink)
5106 		handle_edid_mgmt(aconnector);
5107 
5108 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5109 
5110 	if (dc_sink == NULL) {
5111 		DRM_ERROR("dc_sink is NULL!\n");
5112 		goto fail;
5113 	}
5114 
5115 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5116 	if (stream) {
5117 		dc_stream_release(stream);
5118 		result = MODE_OK;
5119 	}
5120 
5121 fail:
5122 	/* TODO: error handling*/
5123 	return result;
5124 }
5125 
5126 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5127 				struct dc_info_packet *out)
5128 {
5129 	struct hdmi_drm_infoframe frame;
5130 	unsigned char buf[30]; /* 26 + 4 */
5131 	ssize_t len;
5132 	int ret, i;
5133 
5134 	memset(out, 0, sizeof(*out));
5135 
5136 	if (!state->hdr_output_metadata)
5137 		return 0;
5138 
5139 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5140 	if (ret)
5141 		return ret;
5142 
5143 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5144 	if (len < 0)
5145 		return (int)len;
5146 
5147 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5148 	if (len != 30)
5149 		return -EINVAL;
5150 
5151 	/* Prepare the infopacket for DC. */
5152 	switch (state->connector->connector_type) {
5153 	case DRM_MODE_CONNECTOR_HDMIA:
5154 		out->hb0 = 0x87; /* type */
5155 		out->hb1 = 0x01; /* version */
5156 		out->hb2 = 0x1A; /* length */
5157 		out->sb[0] = buf[3]; /* checksum */
5158 		i = 1;
5159 		break;
5160 
5161 	case DRM_MODE_CONNECTOR_DisplayPort:
5162 	case DRM_MODE_CONNECTOR_eDP:
5163 		out->hb0 = 0x00; /* sdp id, zero */
5164 		out->hb1 = 0x87; /* type */
5165 		out->hb2 = 0x1D; /* payload len - 1 */
5166 		out->hb3 = (0x13 << 2); /* sdp version */
5167 		out->sb[0] = 0x01; /* version */
5168 		out->sb[1] = 0x1A; /* length */
5169 		i = 2;
5170 		break;
5171 
5172 	default:
5173 		return -EINVAL;
5174 	}
5175 
5176 	memcpy(&out->sb[i], &buf[4], 26);
5177 	out->valid = true;
5178 
5179 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5180 		       sizeof(out->sb), false);
5181 
5182 	return 0;
5183 }
5184 
5185 static bool
5186 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5187 			  const struct drm_connector_state *new_state)
5188 {
5189 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5190 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5191 
5192 	if (old_blob != new_blob) {
5193 		if (old_blob && new_blob &&
5194 		    old_blob->length == new_blob->length)
5195 			return memcmp(old_blob->data, new_blob->data,
5196 				      old_blob->length);
5197 
5198 		return true;
5199 	}
5200 
5201 	return false;
5202 }
5203 
5204 static int
5205 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5206 				 struct drm_atomic_state *state)
5207 {
5208 	struct drm_connector_state *new_con_state =
5209 		drm_atomic_get_new_connector_state(state, conn);
5210 	struct drm_connector_state *old_con_state =
5211 		drm_atomic_get_old_connector_state(state, conn);
5212 	struct drm_crtc *crtc = new_con_state->crtc;
5213 	struct drm_crtc_state *new_crtc_state;
5214 	int ret;
5215 
5216 	if (!crtc)
5217 		return 0;
5218 
5219 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5220 		struct dc_info_packet hdr_infopacket;
5221 
5222 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5223 		if (ret)
5224 			return ret;
5225 
5226 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5227 		if (IS_ERR(new_crtc_state))
5228 			return PTR_ERR(new_crtc_state);
5229 
5230 		/*
5231 		 * DC considers the stream backends changed if the
5232 		 * static metadata changes. Forcing the modeset also
5233 		 * gives a simple way for userspace to switch from
5234 		 * 8bpc to 10bpc when setting the metadata to enter
5235 		 * or exit HDR.
5236 		 *
5237 		 * Changing the static metadata after it's been
5238 		 * set is permissible, however. So only force a
5239 		 * modeset if we're entering or exiting HDR.
5240 		 */
5241 		new_crtc_state->mode_changed =
5242 			!old_con_state->hdr_output_metadata ||
5243 			!new_con_state->hdr_output_metadata;
5244 	}
5245 
5246 	return 0;
5247 }
5248 
5249 static const struct drm_connector_helper_funcs
5250 amdgpu_dm_connector_helper_funcs = {
5251 	/*
5252 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5253 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5254 	 * are missing after user start lightdm. So we need to renew modes list.
5255 	 * in get_modes call back, not just return the modes count
5256 	 */
5257 	.get_modes = get_modes,
5258 	.mode_valid = amdgpu_dm_connector_mode_valid,
5259 	.atomic_check = amdgpu_dm_connector_atomic_check,
5260 };
5261 
5262 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5263 {
5264 }
5265 
5266 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5267 {
5268 	struct drm_device *dev = new_crtc_state->crtc->dev;
5269 	struct drm_plane *plane;
5270 
5271 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5272 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5273 			return true;
5274 	}
5275 
5276 	return false;
5277 }
5278 
5279 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5280 {
5281 	struct drm_atomic_state *state = new_crtc_state->state;
5282 	struct drm_plane *plane;
5283 	int num_active = 0;
5284 
5285 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5286 		struct drm_plane_state *new_plane_state;
5287 
5288 		/* Cursor planes are "fake". */
5289 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5290 			continue;
5291 
5292 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5293 
5294 		if (!new_plane_state) {
5295 			/*
5296 			 * The plane is enable on the CRTC and hasn't changed
5297 			 * state. This means that it previously passed
5298 			 * validation and is therefore enabled.
5299 			 */
5300 			num_active += 1;
5301 			continue;
5302 		}
5303 
5304 		/* We need a framebuffer to be considered enabled. */
5305 		num_active += (new_plane_state->fb != NULL);
5306 	}
5307 
5308 	return num_active;
5309 }
5310 
5311 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5312 					 struct drm_crtc_state *new_crtc_state)
5313 {
5314 	struct dm_crtc_state *dm_new_crtc_state =
5315 		to_dm_crtc_state(new_crtc_state);
5316 
5317 	dm_new_crtc_state->active_planes = 0;
5318 
5319 	if (!dm_new_crtc_state->stream)
5320 		return;
5321 
5322 	dm_new_crtc_state->active_planes =
5323 		count_crtc_active_planes(new_crtc_state);
5324 }
5325 
5326 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5327 				       struct drm_crtc_state *state)
5328 {
5329 	struct amdgpu_device *adev = crtc->dev->dev_private;
5330 	struct dc *dc = adev->dm.dc;
5331 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5332 	int ret = -EINVAL;
5333 
5334 	dm_update_crtc_active_planes(crtc, state);
5335 
5336 	if (unlikely(!dm_crtc_state->stream &&
5337 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5338 		WARN_ON(1);
5339 		return ret;
5340 	}
5341 
5342 	/* In some use cases, like reset, no stream is attached */
5343 	if (!dm_crtc_state->stream)
5344 		return 0;
5345 
5346 	/*
5347 	 * We want at least one hardware plane enabled to use
5348 	 * the stream with a cursor enabled.
5349 	 */
5350 	if (state->enable && state->active &&
5351 	    does_crtc_have_active_cursor(state) &&
5352 	    dm_crtc_state->active_planes == 0)
5353 		return -EINVAL;
5354 
5355 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5356 		return 0;
5357 
5358 	return ret;
5359 }
5360 
5361 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5362 				      const struct drm_display_mode *mode,
5363 				      struct drm_display_mode *adjusted_mode)
5364 {
5365 	return true;
5366 }
5367 
5368 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5369 	.disable = dm_crtc_helper_disable,
5370 	.atomic_check = dm_crtc_helper_atomic_check,
5371 	.mode_fixup = dm_crtc_helper_mode_fixup,
5372 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5373 };
5374 
5375 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5376 {
5377 
5378 }
5379 
5380 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5381 {
5382 	switch (display_color_depth) {
5383 		case COLOR_DEPTH_666:
5384 			return 6;
5385 		case COLOR_DEPTH_888:
5386 			return 8;
5387 		case COLOR_DEPTH_101010:
5388 			return 10;
5389 		case COLOR_DEPTH_121212:
5390 			return 12;
5391 		case COLOR_DEPTH_141414:
5392 			return 14;
5393 		case COLOR_DEPTH_161616:
5394 			return 16;
5395 		default:
5396 			break;
5397 		}
5398 	return 0;
5399 }
5400 
5401 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5402 					  struct drm_crtc_state *crtc_state,
5403 					  struct drm_connector_state *conn_state)
5404 {
5405 	struct drm_atomic_state *state = crtc_state->state;
5406 	struct drm_connector *connector = conn_state->connector;
5407 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5408 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5409 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5410 	struct drm_dp_mst_topology_mgr *mst_mgr;
5411 	struct drm_dp_mst_port *mst_port;
5412 	enum dc_color_depth color_depth;
5413 	int clock, bpp = 0;
5414 	bool is_y420 = false;
5415 
5416 	if (!aconnector->port || !aconnector->dc_sink)
5417 		return 0;
5418 
5419 	mst_port = aconnector->port;
5420 	mst_mgr = &aconnector->mst_port->mst_mgr;
5421 
5422 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5423 		return 0;
5424 
5425 	if (!state->duplicated) {
5426 		int max_bpc = conn_state->max_requested_bpc;
5427 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5428 				aconnector->force_yuv420_output;
5429 		color_depth = convert_color_depth_from_display_info(connector,
5430 								    is_y420,
5431 								    max_bpc);
5432 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5433 		clock = adjusted_mode->clock;
5434 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5435 	}
5436 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5437 									   mst_mgr,
5438 									   mst_port,
5439 									   dm_new_connector_state->pbn,
5440 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5441 	if (dm_new_connector_state->vcpi_slots < 0) {
5442 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5443 		return dm_new_connector_state->vcpi_slots;
5444 	}
5445 	return 0;
5446 }
5447 
5448 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5449 	.disable = dm_encoder_helper_disable,
5450 	.atomic_check = dm_encoder_helper_atomic_check
5451 };
5452 
5453 #if defined(CONFIG_DRM_AMD_DC_DCN)
5454 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5455 					    struct dc_state *dc_state)
5456 {
5457 	struct dc_stream_state *stream = NULL;
5458 	struct drm_connector *connector;
5459 	struct drm_connector_state *new_con_state, *old_con_state;
5460 	struct amdgpu_dm_connector *aconnector;
5461 	struct dm_connector_state *dm_conn_state;
5462 	int i, j, clock, bpp;
5463 	int vcpi, pbn_div, pbn = 0;
5464 
5465 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5466 
5467 		aconnector = to_amdgpu_dm_connector(connector);
5468 
5469 		if (!aconnector->port)
5470 			continue;
5471 
5472 		if (!new_con_state || !new_con_state->crtc)
5473 			continue;
5474 
5475 		dm_conn_state = to_dm_connector_state(new_con_state);
5476 
5477 		for (j = 0; j < dc_state->stream_count; j++) {
5478 			stream = dc_state->streams[j];
5479 			if (!stream)
5480 				continue;
5481 
5482 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5483 				break;
5484 
5485 			stream = NULL;
5486 		}
5487 
5488 		if (!stream)
5489 			continue;
5490 
5491 		if (stream->timing.flags.DSC != 1) {
5492 			drm_dp_mst_atomic_enable_dsc(state,
5493 						     aconnector->port,
5494 						     dm_conn_state->pbn,
5495 						     0,
5496 						     false);
5497 			continue;
5498 		}
5499 
5500 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5501 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5502 		clock = stream->timing.pix_clk_100hz / 10;
5503 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5504 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5505 						    aconnector->port,
5506 						    pbn, pbn_div,
5507 						    true);
5508 		if (vcpi < 0)
5509 			return vcpi;
5510 
5511 		dm_conn_state->pbn = pbn;
5512 		dm_conn_state->vcpi_slots = vcpi;
5513 	}
5514 	return 0;
5515 }
5516 #endif
5517 
5518 static void dm_drm_plane_reset(struct drm_plane *plane)
5519 {
5520 	struct dm_plane_state *amdgpu_state = NULL;
5521 
5522 	if (plane->state)
5523 		plane->funcs->atomic_destroy_state(plane, plane->state);
5524 
5525 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5526 	WARN_ON(amdgpu_state == NULL);
5527 
5528 	if (amdgpu_state)
5529 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5530 }
5531 
5532 static struct drm_plane_state *
5533 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5534 {
5535 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5536 
5537 	old_dm_plane_state = to_dm_plane_state(plane->state);
5538 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5539 	if (!dm_plane_state)
5540 		return NULL;
5541 
5542 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5543 
5544 	if (old_dm_plane_state->dc_state) {
5545 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5546 		dc_plane_state_retain(dm_plane_state->dc_state);
5547 	}
5548 
5549 	return &dm_plane_state->base;
5550 }
5551 
5552 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5553 				struct drm_plane_state *state)
5554 {
5555 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5556 
5557 	if (dm_plane_state->dc_state)
5558 		dc_plane_state_release(dm_plane_state->dc_state);
5559 
5560 	drm_atomic_helper_plane_destroy_state(plane, state);
5561 }
5562 
5563 static const struct drm_plane_funcs dm_plane_funcs = {
5564 	.update_plane	= drm_atomic_helper_update_plane,
5565 	.disable_plane	= drm_atomic_helper_disable_plane,
5566 	.destroy	= drm_primary_helper_destroy,
5567 	.reset = dm_drm_plane_reset,
5568 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5569 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5570 };
5571 
5572 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5573 				      struct drm_plane_state *new_state)
5574 {
5575 	struct amdgpu_framebuffer *afb;
5576 	struct drm_gem_object *obj;
5577 	struct amdgpu_device *adev;
5578 	struct amdgpu_bo *rbo;
5579 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5580 	struct list_head list;
5581 	struct ttm_validate_buffer tv;
5582 	struct ww_acquire_ctx ticket;
5583 	uint64_t tiling_flags;
5584 	uint32_t domain;
5585 	int r;
5586 	bool tmz_surface = false;
5587 	bool force_disable_dcc = false;
5588 
5589 	dm_plane_state_old = to_dm_plane_state(plane->state);
5590 	dm_plane_state_new = to_dm_plane_state(new_state);
5591 
5592 	if (!new_state->fb) {
5593 		DRM_DEBUG_DRIVER("No FB bound\n");
5594 		return 0;
5595 	}
5596 
5597 	afb = to_amdgpu_framebuffer(new_state->fb);
5598 	obj = new_state->fb->obj[0];
5599 	rbo = gem_to_amdgpu_bo(obj);
5600 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5601 	INIT_LIST_HEAD(&list);
5602 
5603 	tv.bo = &rbo->tbo;
5604 	tv.num_shared = 1;
5605 	list_add(&tv.head, &list);
5606 
5607 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5608 	if (r) {
5609 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5610 		return r;
5611 	}
5612 
5613 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5614 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5615 	else
5616 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5617 
5618 	r = amdgpu_bo_pin(rbo, domain);
5619 	if (unlikely(r != 0)) {
5620 		if (r != -ERESTARTSYS)
5621 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5622 		ttm_eu_backoff_reservation(&ticket, &list);
5623 		return r;
5624 	}
5625 
5626 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5627 	if (unlikely(r != 0)) {
5628 		amdgpu_bo_unpin(rbo);
5629 		ttm_eu_backoff_reservation(&ticket, &list);
5630 		DRM_ERROR("%p bind failed\n", rbo);
5631 		return r;
5632 	}
5633 
5634 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5635 
5636 	tmz_surface = amdgpu_bo_encrypted(rbo);
5637 
5638 	ttm_eu_backoff_reservation(&ticket, &list);
5639 
5640 	afb->address = amdgpu_bo_gpu_offset(rbo);
5641 
5642 	amdgpu_bo_ref(rbo);
5643 
5644 	if (dm_plane_state_new->dc_state &&
5645 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5646 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5647 
5648 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5649 		fill_plane_buffer_attributes(
5650 			adev, afb, plane_state->format, plane_state->rotation,
5651 			tiling_flags, &plane_state->tiling_info,
5652 			&plane_state->plane_size, &plane_state->dcc,
5653 			&plane_state->address, tmz_surface,
5654 			force_disable_dcc);
5655 	}
5656 
5657 	return 0;
5658 }
5659 
5660 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5661 				       struct drm_plane_state *old_state)
5662 {
5663 	struct amdgpu_bo *rbo;
5664 	int r;
5665 
5666 	if (!old_state->fb)
5667 		return;
5668 
5669 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5670 	r = amdgpu_bo_reserve(rbo, false);
5671 	if (unlikely(r)) {
5672 		DRM_ERROR("failed to reserve rbo before unpin\n");
5673 		return;
5674 	}
5675 
5676 	amdgpu_bo_unpin(rbo);
5677 	amdgpu_bo_unreserve(rbo);
5678 	amdgpu_bo_unref(&rbo);
5679 }
5680 
5681 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5682 				       struct drm_crtc_state *new_crtc_state)
5683 {
5684 	int max_downscale = 0;
5685 	int max_upscale = INT_MAX;
5686 
5687 	/* TODO: These should be checked against DC plane caps */
5688 	return drm_atomic_helper_check_plane_state(
5689 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5690 }
5691 
5692 static int dm_plane_atomic_check(struct drm_plane *plane,
5693 				 struct drm_plane_state *state)
5694 {
5695 	struct amdgpu_device *adev = plane->dev->dev_private;
5696 	struct dc *dc = adev->dm.dc;
5697 	struct dm_plane_state *dm_plane_state;
5698 	struct dc_scaling_info scaling_info;
5699 	struct drm_crtc_state *new_crtc_state;
5700 	int ret;
5701 
5702 	dm_plane_state = to_dm_plane_state(state);
5703 
5704 	if (!dm_plane_state->dc_state)
5705 		return 0;
5706 
5707 	new_crtc_state =
5708 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5709 	if (!new_crtc_state)
5710 		return -EINVAL;
5711 
5712 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5713 	if (ret)
5714 		return ret;
5715 
5716 	ret = fill_dc_scaling_info(state, &scaling_info);
5717 	if (ret)
5718 		return ret;
5719 
5720 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5721 		return 0;
5722 
5723 	return -EINVAL;
5724 }
5725 
5726 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5727 				       struct drm_plane_state *new_plane_state)
5728 {
5729 	/* Only support async updates on cursor planes. */
5730 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5731 		return -EINVAL;
5732 
5733 	return 0;
5734 }
5735 
5736 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5737 					 struct drm_plane_state *new_state)
5738 {
5739 	struct drm_plane_state *old_state =
5740 		drm_atomic_get_old_plane_state(new_state->state, plane);
5741 
5742 	swap(plane->state->fb, new_state->fb);
5743 
5744 	plane->state->src_x = new_state->src_x;
5745 	plane->state->src_y = new_state->src_y;
5746 	plane->state->src_w = new_state->src_w;
5747 	plane->state->src_h = new_state->src_h;
5748 	plane->state->crtc_x = new_state->crtc_x;
5749 	plane->state->crtc_y = new_state->crtc_y;
5750 	plane->state->crtc_w = new_state->crtc_w;
5751 	plane->state->crtc_h = new_state->crtc_h;
5752 
5753 	handle_cursor_update(plane, old_state);
5754 }
5755 
5756 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5757 	.prepare_fb = dm_plane_helper_prepare_fb,
5758 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5759 	.atomic_check = dm_plane_atomic_check,
5760 	.atomic_async_check = dm_plane_atomic_async_check,
5761 	.atomic_async_update = dm_plane_atomic_async_update
5762 };
5763 
5764 /*
5765  * TODO: these are currently initialized to rgb formats only.
5766  * For future use cases we should either initialize them dynamically based on
5767  * plane capabilities, or initialize this array to all formats, so internal drm
5768  * check will succeed, and let DC implement proper check
5769  */
5770 static const uint32_t rgb_formats[] = {
5771 	DRM_FORMAT_XRGB8888,
5772 	DRM_FORMAT_ARGB8888,
5773 	DRM_FORMAT_RGBA8888,
5774 	DRM_FORMAT_XRGB2101010,
5775 	DRM_FORMAT_XBGR2101010,
5776 	DRM_FORMAT_ARGB2101010,
5777 	DRM_FORMAT_ABGR2101010,
5778 	DRM_FORMAT_XBGR8888,
5779 	DRM_FORMAT_ABGR8888,
5780 	DRM_FORMAT_RGB565,
5781 };
5782 
5783 static const uint32_t overlay_formats[] = {
5784 	DRM_FORMAT_XRGB8888,
5785 	DRM_FORMAT_ARGB8888,
5786 	DRM_FORMAT_RGBA8888,
5787 	DRM_FORMAT_XBGR8888,
5788 	DRM_FORMAT_ABGR8888,
5789 	DRM_FORMAT_RGB565
5790 };
5791 
5792 static const u32 cursor_formats[] = {
5793 	DRM_FORMAT_ARGB8888
5794 };
5795 
5796 static int get_plane_formats(const struct drm_plane *plane,
5797 			     const struct dc_plane_cap *plane_cap,
5798 			     uint32_t *formats, int max_formats)
5799 {
5800 	int i, num_formats = 0;
5801 
5802 	/*
5803 	 * TODO: Query support for each group of formats directly from
5804 	 * DC plane caps. This will require adding more formats to the
5805 	 * caps list.
5806 	 */
5807 
5808 	switch (plane->type) {
5809 	case DRM_PLANE_TYPE_PRIMARY:
5810 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5811 			if (num_formats >= max_formats)
5812 				break;
5813 
5814 			formats[num_formats++] = rgb_formats[i];
5815 		}
5816 
5817 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5818 			formats[num_formats++] = DRM_FORMAT_NV12;
5819 		if (plane_cap && plane_cap->pixel_format_support.p010)
5820 			formats[num_formats++] = DRM_FORMAT_P010;
5821 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5822 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5823 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5824 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5825 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5826 		}
5827 		break;
5828 
5829 	case DRM_PLANE_TYPE_OVERLAY:
5830 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5831 			if (num_formats >= max_formats)
5832 				break;
5833 
5834 			formats[num_formats++] = overlay_formats[i];
5835 		}
5836 		break;
5837 
5838 	case DRM_PLANE_TYPE_CURSOR:
5839 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5840 			if (num_formats >= max_formats)
5841 				break;
5842 
5843 			formats[num_formats++] = cursor_formats[i];
5844 		}
5845 		break;
5846 	}
5847 
5848 	return num_formats;
5849 }
5850 
5851 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5852 				struct drm_plane *plane,
5853 				unsigned long possible_crtcs,
5854 				const struct dc_plane_cap *plane_cap)
5855 {
5856 	uint32_t formats[32];
5857 	int num_formats;
5858 	int res = -EPERM;
5859 	unsigned int supported_rotations;
5860 
5861 	num_formats = get_plane_formats(plane, plane_cap, formats,
5862 					ARRAY_SIZE(formats));
5863 
5864 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5865 				       &dm_plane_funcs, formats, num_formats,
5866 				       NULL, plane->type, NULL);
5867 	if (res)
5868 		return res;
5869 
5870 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5871 	    plane_cap && plane_cap->per_pixel_alpha) {
5872 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5873 					  BIT(DRM_MODE_BLEND_PREMULTI);
5874 
5875 		drm_plane_create_alpha_property(plane);
5876 		drm_plane_create_blend_mode_property(plane, blend_caps);
5877 	}
5878 
5879 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5880 	    plane_cap &&
5881 	    (plane_cap->pixel_format_support.nv12 ||
5882 	     plane_cap->pixel_format_support.p010)) {
5883 		/* This only affects YUV formats. */
5884 		drm_plane_create_color_properties(
5885 			plane,
5886 			BIT(DRM_COLOR_YCBCR_BT601) |
5887 			BIT(DRM_COLOR_YCBCR_BT709) |
5888 			BIT(DRM_COLOR_YCBCR_BT2020),
5889 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5890 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5891 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5892 	}
5893 
5894 	supported_rotations =
5895 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5896 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5897 
5898 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5899 					   supported_rotations);
5900 
5901 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5902 
5903 	/* Create (reset) the plane state */
5904 	if (plane->funcs->reset)
5905 		plane->funcs->reset(plane);
5906 
5907 	return 0;
5908 }
5909 
5910 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5911 			       struct drm_plane *plane,
5912 			       uint32_t crtc_index)
5913 {
5914 	struct amdgpu_crtc *acrtc = NULL;
5915 	struct drm_plane *cursor_plane;
5916 
5917 	int res = -ENOMEM;
5918 
5919 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5920 	if (!cursor_plane)
5921 		goto fail;
5922 
5923 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5924 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5925 
5926 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5927 	if (!acrtc)
5928 		goto fail;
5929 
5930 	res = drm_crtc_init_with_planes(
5931 			dm->ddev,
5932 			&acrtc->base,
5933 			plane,
5934 			cursor_plane,
5935 			&amdgpu_dm_crtc_funcs, NULL);
5936 
5937 	if (res)
5938 		goto fail;
5939 
5940 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5941 
5942 	/* Create (reset) the plane state */
5943 	if (acrtc->base.funcs->reset)
5944 		acrtc->base.funcs->reset(&acrtc->base);
5945 
5946 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5947 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5948 
5949 	acrtc->crtc_id = crtc_index;
5950 	acrtc->base.enabled = false;
5951 	acrtc->otg_inst = -1;
5952 
5953 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5954 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5955 				   true, MAX_COLOR_LUT_ENTRIES);
5956 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5957 
5958 	return 0;
5959 
5960 fail:
5961 	kfree(acrtc);
5962 	kfree(cursor_plane);
5963 	return res;
5964 }
5965 
5966 
5967 static int to_drm_connector_type(enum signal_type st)
5968 {
5969 	switch (st) {
5970 	case SIGNAL_TYPE_HDMI_TYPE_A:
5971 		return DRM_MODE_CONNECTOR_HDMIA;
5972 	case SIGNAL_TYPE_EDP:
5973 		return DRM_MODE_CONNECTOR_eDP;
5974 	case SIGNAL_TYPE_LVDS:
5975 		return DRM_MODE_CONNECTOR_LVDS;
5976 	case SIGNAL_TYPE_RGB:
5977 		return DRM_MODE_CONNECTOR_VGA;
5978 	case SIGNAL_TYPE_DISPLAY_PORT:
5979 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5980 		return DRM_MODE_CONNECTOR_DisplayPort;
5981 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5982 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5983 		return DRM_MODE_CONNECTOR_DVID;
5984 	case SIGNAL_TYPE_VIRTUAL:
5985 		return DRM_MODE_CONNECTOR_VIRTUAL;
5986 
5987 	default:
5988 		return DRM_MODE_CONNECTOR_Unknown;
5989 	}
5990 }
5991 
5992 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5993 {
5994 	struct drm_encoder *encoder;
5995 
5996 	/* There is only one encoder per connector */
5997 	drm_connector_for_each_possible_encoder(connector, encoder)
5998 		return encoder;
5999 
6000 	return NULL;
6001 }
6002 
6003 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6004 {
6005 	struct drm_encoder *encoder;
6006 	struct amdgpu_encoder *amdgpu_encoder;
6007 
6008 	encoder = amdgpu_dm_connector_to_encoder(connector);
6009 
6010 	if (encoder == NULL)
6011 		return;
6012 
6013 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6014 
6015 	amdgpu_encoder->native_mode.clock = 0;
6016 
6017 	if (!list_empty(&connector->probed_modes)) {
6018 		struct drm_display_mode *preferred_mode = NULL;
6019 
6020 		list_for_each_entry(preferred_mode,
6021 				    &connector->probed_modes,
6022 				    head) {
6023 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6024 				amdgpu_encoder->native_mode = *preferred_mode;
6025 
6026 			break;
6027 		}
6028 
6029 	}
6030 }
6031 
6032 static struct drm_display_mode *
6033 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6034 			     char *name,
6035 			     int hdisplay, int vdisplay)
6036 {
6037 	struct drm_device *dev = encoder->dev;
6038 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6039 	struct drm_display_mode *mode = NULL;
6040 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6041 
6042 	mode = drm_mode_duplicate(dev, native_mode);
6043 
6044 	if (mode == NULL)
6045 		return NULL;
6046 
6047 	mode->hdisplay = hdisplay;
6048 	mode->vdisplay = vdisplay;
6049 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6050 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6051 
6052 	return mode;
6053 
6054 }
6055 
6056 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6057 						 struct drm_connector *connector)
6058 {
6059 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6060 	struct drm_display_mode *mode = NULL;
6061 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6062 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6063 				to_amdgpu_dm_connector(connector);
6064 	int i;
6065 	int n;
6066 	struct mode_size {
6067 		char name[DRM_DISPLAY_MODE_LEN];
6068 		int w;
6069 		int h;
6070 	} common_modes[] = {
6071 		{  "640x480",  640,  480},
6072 		{  "800x600",  800,  600},
6073 		{ "1024x768", 1024,  768},
6074 		{ "1280x720", 1280,  720},
6075 		{ "1280x800", 1280,  800},
6076 		{"1280x1024", 1280, 1024},
6077 		{ "1440x900", 1440,  900},
6078 		{"1680x1050", 1680, 1050},
6079 		{"1600x1200", 1600, 1200},
6080 		{"1920x1080", 1920, 1080},
6081 		{"1920x1200", 1920, 1200}
6082 	};
6083 
6084 	n = ARRAY_SIZE(common_modes);
6085 
6086 	for (i = 0; i < n; i++) {
6087 		struct drm_display_mode *curmode = NULL;
6088 		bool mode_existed = false;
6089 
6090 		if (common_modes[i].w > native_mode->hdisplay ||
6091 		    common_modes[i].h > native_mode->vdisplay ||
6092 		   (common_modes[i].w == native_mode->hdisplay &&
6093 		    common_modes[i].h == native_mode->vdisplay))
6094 			continue;
6095 
6096 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6097 			if (common_modes[i].w == curmode->hdisplay &&
6098 			    common_modes[i].h == curmode->vdisplay) {
6099 				mode_existed = true;
6100 				break;
6101 			}
6102 		}
6103 
6104 		if (mode_existed)
6105 			continue;
6106 
6107 		mode = amdgpu_dm_create_common_mode(encoder,
6108 				common_modes[i].name, common_modes[i].w,
6109 				common_modes[i].h);
6110 		drm_mode_probed_add(connector, mode);
6111 		amdgpu_dm_connector->num_modes++;
6112 	}
6113 }
6114 
6115 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6116 					      struct edid *edid)
6117 {
6118 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6119 			to_amdgpu_dm_connector(connector);
6120 
6121 	if (edid) {
6122 		/* empty probed_modes */
6123 		INIT_LIST_HEAD(&connector->probed_modes);
6124 		amdgpu_dm_connector->num_modes =
6125 				drm_add_edid_modes(connector, edid);
6126 
6127 		/* sorting the probed modes before calling function
6128 		 * amdgpu_dm_get_native_mode() since EDID can have
6129 		 * more than one preferred mode. The modes that are
6130 		 * later in the probed mode list could be of higher
6131 		 * and preferred resolution. For example, 3840x2160
6132 		 * resolution in base EDID preferred timing and 4096x2160
6133 		 * preferred resolution in DID extension block later.
6134 		 */
6135 		drm_mode_sort(&connector->probed_modes);
6136 		amdgpu_dm_get_native_mode(connector);
6137 	} else {
6138 		amdgpu_dm_connector->num_modes = 0;
6139 	}
6140 }
6141 
6142 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6143 {
6144 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6145 			to_amdgpu_dm_connector(connector);
6146 	struct drm_encoder *encoder;
6147 	struct edid *edid = amdgpu_dm_connector->edid;
6148 
6149 	encoder = amdgpu_dm_connector_to_encoder(connector);
6150 
6151 	if (!edid || !drm_edid_is_valid(edid)) {
6152 		amdgpu_dm_connector->num_modes =
6153 				drm_add_modes_noedid(connector, 640, 480);
6154 	} else {
6155 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6156 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6157 	}
6158 	amdgpu_dm_fbc_init(connector);
6159 
6160 	return amdgpu_dm_connector->num_modes;
6161 }
6162 
6163 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6164 				     struct amdgpu_dm_connector *aconnector,
6165 				     int connector_type,
6166 				     struct dc_link *link,
6167 				     int link_index)
6168 {
6169 	struct amdgpu_device *adev = dm->ddev->dev_private;
6170 
6171 	/*
6172 	 * Some of the properties below require access to state, like bpc.
6173 	 * Allocate some default initial connector state with our reset helper.
6174 	 */
6175 	if (aconnector->base.funcs->reset)
6176 		aconnector->base.funcs->reset(&aconnector->base);
6177 
6178 	aconnector->connector_id = link_index;
6179 	aconnector->dc_link = link;
6180 	aconnector->base.interlace_allowed = false;
6181 	aconnector->base.doublescan_allowed = false;
6182 	aconnector->base.stereo_allowed = false;
6183 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6184 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6185 	aconnector->audio_inst = -1;
6186 	mutex_init(&aconnector->hpd_lock);
6187 
6188 	/*
6189 	 * configure support HPD hot plug connector_>polled default value is 0
6190 	 * which means HPD hot plug not supported
6191 	 */
6192 	switch (connector_type) {
6193 	case DRM_MODE_CONNECTOR_HDMIA:
6194 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6195 		aconnector->base.ycbcr_420_allowed =
6196 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6197 		break;
6198 	case DRM_MODE_CONNECTOR_DisplayPort:
6199 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6200 		aconnector->base.ycbcr_420_allowed =
6201 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6202 		break;
6203 	case DRM_MODE_CONNECTOR_DVID:
6204 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6205 		break;
6206 	default:
6207 		break;
6208 	}
6209 
6210 	drm_object_attach_property(&aconnector->base.base,
6211 				dm->ddev->mode_config.scaling_mode_property,
6212 				DRM_MODE_SCALE_NONE);
6213 
6214 	drm_object_attach_property(&aconnector->base.base,
6215 				adev->mode_info.underscan_property,
6216 				UNDERSCAN_OFF);
6217 	drm_object_attach_property(&aconnector->base.base,
6218 				adev->mode_info.underscan_hborder_property,
6219 				0);
6220 	drm_object_attach_property(&aconnector->base.base,
6221 				adev->mode_info.underscan_vborder_property,
6222 				0);
6223 
6224 	if (!aconnector->mst_port)
6225 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6226 
6227 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6228 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6229 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6230 
6231 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6232 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6233 		drm_object_attach_property(&aconnector->base.base,
6234 				adev->mode_info.abm_level_property, 0);
6235 	}
6236 
6237 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6238 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6239 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6240 		drm_object_attach_property(
6241 			&aconnector->base.base,
6242 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6243 
6244 		if (!aconnector->mst_port)
6245 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6246 
6247 #ifdef CONFIG_DRM_AMD_DC_HDCP
6248 		if (adev->dm.hdcp_workqueue)
6249 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6250 #endif
6251 	}
6252 }
6253 
6254 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6255 			      struct i2c_msg *msgs, int num)
6256 {
6257 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6258 	struct ddc_service *ddc_service = i2c->ddc_service;
6259 	struct i2c_command cmd;
6260 	int i;
6261 	int result = -EIO;
6262 
6263 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6264 
6265 	if (!cmd.payloads)
6266 		return result;
6267 
6268 	cmd.number_of_payloads = num;
6269 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6270 	cmd.speed = 100;
6271 
6272 	for (i = 0; i < num; i++) {
6273 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6274 		cmd.payloads[i].address = msgs[i].addr;
6275 		cmd.payloads[i].length = msgs[i].len;
6276 		cmd.payloads[i].data = msgs[i].buf;
6277 	}
6278 
6279 	if (dc_submit_i2c(
6280 			ddc_service->ctx->dc,
6281 			ddc_service->ddc_pin->hw_info.ddc_channel,
6282 			&cmd))
6283 		result = num;
6284 
6285 	kfree(cmd.payloads);
6286 	return result;
6287 }
6288 
6289 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6290 {
6291 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6292 }
6293 
6294 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6295 	.master_xfer = amdgpu_dm_i2c_xfer,
6296 	.functionality = amdgpu_dm_i2c_func,
6297 };
6298 
6299 static struct amdgpu_i2c_adapter *
6300 create_i2c(struct ddc_service *ddc_service,
6301 	   int link_index,
6302 	   int *res)
6303 {
6304 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6305 	struct amdgpu_i2c_adapter *i2c;
6306 
6307 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6308 	if (!i2c)
6309 		return NULL;
6310 	i2c->base.owner = THIS_MODULE;
6311 	i2c->base.class = I2C_CLASS_DDC;
6312 	i2c->base.dev.parent = &adev->pdev->dev;
6313 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6314 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6315 	i2c_set_adapdata(&i2c->base, i2c);
6316 	i2c->ddc_service = ddc_service;
6317 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6318 
6319 	return i2c;
6320 }
6321 
6322 
6323 /*
6324  * Note: this function assumes that dc_link_detect() was called for the
6325  * dc_link which will be represented by this aconnector.
6326  */
6327 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6328 				    struct amdgpu_dm_connector *aconnector,
6329 				    uint32_t link_index,
6330 				    struct amdgpu_encoder *aencoder)
6331 {
6332 	int res = 0;
6333 	int connector_type;
6334 	struct dc *dc = dm->dc;
6335 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6336 	struct amdgpu_i2c_adapter *i2c;
6337 
6338 	link->priv = aconnector;
6339 
6340 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6341 
6342 	i2c = create_i2c(link->ddc, link->link_index, &res);
6343 	if (!i2c) {
6344 		DRM_ERROR("Failed to create i2c adapter data\n");
6345 		return -ENOMEM;
6346 	}
6347 
6348 	aconnector->i2c = i2c;
6349 	res = i2c_add_adapter(&i2c->base);
6350 
6351 	if (res) {
6352 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6353 		goto out_free;
6354 	}
6355 
6356 	connector_type = to_drm_connector_type(link->connector_signal);
6357 
6358 	res = drm_connector_init_with_ddc(
6359 			dm->ddev,
6360 			&aconnector->base,
6361 			&amdgpu_dm_connector_funcs,
6362 			connector_type,
6363 			&i2c->base);
6364 
6365 	if (res) {
6366 		DRM_ERROR("connector_init failed\n");
6367 		aconnector->connector_id = -1;
6368 		goto out_free;
6369 	}
6370 
6371 	drm_connector_helper_add(
6372 			&aconnector->base,
6373 			&amdgpu_dm_connector_helper_funcs);
6374 
6375 	amdgpu_dm_connector_init_helper(
6376 		dm,
6377 		aconnector,
6378 		connector_type,
6379 		link,
6380 		link_index);
6381 
6382 	drm_connector_attach_encoder(
6383 		&aconnector->base, &aencoder->base);
6384 
6385 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6386 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6387 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6388 
6389 out_free:
6390 	if (res) {
6391 		kfree(i2c);
6392 		aconnector->i2c = NULL;
6393 	}
6394 	return res;
6395 }
6396 
6397 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6398 {
6399 	switch (adev->mode_info.num_crtc) {
6400 	case 1:
6401 		return 0x1;
6402 	case 2:
6403 		return 0x3;
6404 	case 3:
6405 		return 0x7;
6406 	case 4:
6407 		return 0xf;
6408 	case 5:
6409 		return 0x1f;
6410 	case 6:
6411 	default:
6412 		return 0x3f;
6413 	}
6414 }
6415 
6416 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6417 				  struct amdgpu_encoder *aencoder,
6418 				  uint32_t link_index)
6419 {
6420 	struct amdgpu_device *adev = dev->dev_private;
6421 
6422 	int res = drm_encoder_init(dev,
6423 				   &aencoder->base,
6424 				   &amdgpu_dm_encoder_funcs,
6425 				   DRM_MODE_ENCODER_TMDS,
6426 				   NULL);
6427 
6428 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6429 
6430 	if (!res)
6431 		aencoder->encoder_id = link_index;
6432 	else
6433 		aencoder->encoder_id = -1;
6434 
6435 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6436 
6437 	return res;
6438 }
6439 
6440 static void manage_dm_interrupts(struct amdgpu_device *adev,
6441 				 struct amdgpu_crtc *acrtc,
6442 				 bool enable)
6443 {
6444 	/*
6445 	 * We have no guarantee that the frontend index maps to the same
6446 	 * backend index - some even map to more than one.
6447 	 *
6448 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6449 	 */
6450 	int irq_type =
6451 		amdgpu_display_crtc_idx_to_irq_type(
6452 			adev,
6453 			acrtc->crtc_id);
6454 
6455 	if (enable) {
6456 		drm_crtc_vblank_on(&acrtc->base);
6457 		amdgpu_irq_get(
6458 			adev,
6459 			&adev->pageflip_irq,
6460 			irq_type);
6461 	} else {
6462 
6463 		amdgpu_irq_put(
6464 			adev,
6465 			&adev->pageflip_irq,
6466 			irq_type);
6467 		drm_crtc_vblank_off(&acrtc->base);
6468 	}
6469 }
6470 
6471 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6472 				      struct amdgpu_crtc *acrtc)
6473 {
6474 	int irq_type =
6475 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6476 
6477 	/**
6478 	 * This reads the current state for the IRQ and force reapplies
6479 	 * the setting to hardware.
6480 	 */
6481 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6482 }
6483 
6484 static bool
6485 is_scaling_state_different(const struct dm_connector_state *dm_state,
6486 			   const struct dm_connector_state *old_dm_state)
6487 {
6488 	if (dm_state->scaling != old_dm_state->scaling)
6489 		return true;
6490 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6491 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6492 			return true;
6493 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6494 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6495 			return true;
6496 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6497 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6498 		return true;
6499 	return false;
6500 }
6501 
6502 #ifdef CONFIG_DRM_AMD_DC_HDCP
6503 static bool is_content_protection_different(struct drm_connector_state *state,
6504 					    const struct drm_connector_state *old_state,
6505 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6506 {
6507 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6508 
6509 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6510 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6511 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6512 		return true;
6513 	}
6514 
6515 	/* CP is being re enabled, ignore this */
6516 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6517 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6518 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6519 		return false;
6520 	}
6521 
6522 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6523 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6524 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6525 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6526 
6527 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6528 	 * hot-plug, headless s3, dpms
6529 	 */
6530 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6531 	    aconnector->dc_sink != NULL)
6532 		return true;
6533 
6534 	if (old_state->content_protection == state->content_protection)
6535 		return false;
6536 
6537 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6538 		return true;
6539 
6540 	return false;
6541 }
6542 
6543 #endif
6544 static void remove_stream(struct amdgpu_device *adev,
6545 			  struct amdgpu_crtc *acrtc,
6546 			  struct dc_stream_state *stream)
6547 {
6548 	/* this is the update mode case */
6549 
6550 	acrtc->otg_inst = -1;
6551 	acrtc->enabled = false;
6552 }
6553 
6554 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6555 			       struct dc_cursor_position *position)
6556 {
6557 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6558 	int x, y;
6559 	int xorigin = 0, yorigin = 0;
6560 
6561 	position->enable = false;
6562 	position->x = 0;
6563 	position->y = 0;
6564 
6565 	if (!crtc || !plane->state->fb)
6566 		return 0;
6567 
6568 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6569 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6570 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6571 			  __func__,
6572 			  plane->state->crtc_w,
6573 			  plane->state->crtc_h);
6574 		return -EINVAL;
6575 	}
6576 
6577 	x = plane->state->crtc_x;
6578 	y = plane->state->crtc_y;
6579 
6580 	if (x <= -amdgpu_crtc->max_cursor_width ||
6581 	    y <= -amdgpu_crtc->max_cursor_height)
6582 		return 0;
6583 
6584 	if (x < 0) {
6585 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6586 		x = 0;
6587 	}
6588 	if (y < 0) {
6589 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6590 		y = 0;
6591 	}
6592 	position->enable = true;
6593 	position->translate_by_source = true;
6594 	position->x = x;
6595 	position->y = y;
6596 	position->x_hotspot = xorigin;
6597 	position->y_hotspot = yorigin;
6598 
6599 	return 0;
6600 }
6601 
6602 static void handle_cursor_update(struct drm_plane *plane,
6603 				 struct drm_plane_state *old_plane_state)
6604 {
6605 	struct amdgpu_device *adev = plane->dev->dev_private;
6606 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6607 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6608 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6609 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6610 	uint64_t address = afb ? afb->address : 0;
6611 	struct dc_cursor_position position;
6612 	struct dc_cursor_attributes attributes;
6613 	int ret;
6614 
6615 	if (!plane->state->fb && !old_plane_state->fb)
6616 		return;
6617 
6618 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6619 			 __func__,
6620 			 amdgpu_crtc->crtc_id,
6621 			 plane->state->crtc_w,
6622 			 plane->state->crtc_h);
6623 
6624 	ret = get_cursor_position(plane, crtc, &position);
6625 	if (ret)
6626 		return;
6627 
6628 	if (!position.enable) {
6629 		/* turn off cursor */
6630 		if (crtc_state && crtc_state->stream) {
6631 			mutex_lock(&adev->dm.dc_lock);
6632 			dc_stream_set_cursor_position(crtc_state->stream,
6633 						      &position);
6634 			mutex_unlock(&adev->dm.dc_lock);
6635 		}
6636 		return;
6637 	}
6638 
6639 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6640 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6641 
6642 	memset(&attributes, 0, sizeof(attributes));
6643 	attributes.address.high_part = upper_32_bits(address);
6644 	attributes.address.low_part  = lower_32_bits(address);
6645 	attributes.width             = plane->state->crtc_w;
6646 	attributes.height            = plane->state->crtc_h;
6647 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6648 	attributes.rotation_angle    = 0;
6649 	attributes.attribute_flags.value = 0;
6650 
6651 	attributes.pitch = attributes.width;
6652 
6653 	if (crtc_state->stream) {
6654 		mutex_lock(&adev->dm.dc_lock);
6655 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6656 							 &attributes))
6657 			DRM_ERROR("DC failed to set cursor attributes\n");
6658 
6659 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6660 						   &position))
6661 			DRM_ERROR("DC failed to set cursor position\n");
6662 		mutex_unlock(&adev->dm.dc_lock);
6663 	}
6664 }
6665 
6666 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6667 {
6668 
6669 	assert_spin_locked(&acrtc->base.dev->event_lock);
6670 	WARN_ON(acrtc->event);
6671 
6672 	acrtc->event = acrtc->base.state->event;
6673 
6674 	/* Set the flip status */
6675 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6676 
6677 	/* Mark this event as consumed */
6678 	acrtc->base.state->event = NULL;
6679 
6680 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6681 						 acrtc->crtc_id);
6682 }
6683 
6684 static void update_freesync_state_on_stream(
6685 	struct amdgpu_display_manager *dm,
6686 	struct dm_crtc_state *new_crtc_state,
6687 	struct dc_stream_state *new_stream,
6688 	struct dc_plane_state *surface,
6689 	u32 flip_timestamp_in_us)
6690 {
6691 	struct mod_vrr_params vrr_params;
6692 	struct dc_info_packet vrr_infopacket = {0};
6693 	struct amdgpu_device *adev = dm->adev;
6694 	unsigned long flags;
6695 
6696 	if (!new_stream)
6697 		return;
6698 
6699 	/*
6700 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6701 	 * For now it's sufficient to just guard against these conditions.
6702 	 */
6703 
6704 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6705 		return;
6706 
6707 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6708 	vrr_params = new_crtc_state->vrr_params;
6709 
6710 	if (surface) {
6711 		mod_freesync_handle_preflip(
6712 			dm->freesync_module,
6713 			surface,
6714 			new_stream,
6715 			flip_timestamp_in_us,
6716 			&vrr_params);
6717 
6718 		if (adev->family < AMDGPU_FAMILY_AI &&
6719 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6720 			mod_freesync_handle_v_update(dm->freesync_module,
6721 						     new_stream, &vrr_params);
6722 
6723 			/* Need to call this before the frame ends. */
6724 			dc_stream_adjust_vmin_vmax(dm->dc,
6725 						   new_crtc_state->stream,
6726 						   &vrr_params.adjust);
6727 		}
6728 	}
6729 
6730 	mod_freesync_build_vrr_infopacket(
6731 		dm->freesync_module,
6732 		new_stream,
6733 		&vrr_params,
6734 		PACKET_TYPE_VRR,
6735 		TRANSFER_FUNC_UNKNOWN,
6736 		&vrr_infopacket);
6737 
6738 	new_crtc_state->freesync_timing_changed |=
6739 		(memcmp(&new_crtc_state->vrr_params.adjust,
6740 			&vrr_params.adjust,
6741 			sizeof(vrr_params.adjust)) != 0);
6742 
6743 	new_crtc_state->freesync_vrr_info_changed |=
6744 		(memcmp(&new_crtc_state->vrr_infopacket,
6745 			&vrr_infopacket,
6746 			sizeof(vrr_infopacket)) != 0);
6747 
6748 	new_crtc_state->vrr_params = vrr_params;
6749 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6750 
6751 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6752 	new_stream->vrr_infopacket = vrr_infopacket;
6753 
6754 	if (new_crtc_state->freesync_vrr_info_changed)
6755 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6756 			      new_crtc_state->base.crtc->base.id,
6757 			      (int)new_crtc_state->base.vrr_enabled,
6758 			      (int)vrr_params.state);
6759 
6760 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6761 }
6762 
6763 static void pre_update_freesync_state_on_stream(
6764 	struct amdgpu_display_manager *dm,
6765 	struct dm_crtc_state *new_crtc_state)
6766 {
6767 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6768 	struct mod_vrr_params vrr_params;
6769 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6770 	struct amdgpu_device *adev = dm->adev;
6771 	unsigned long flags;
6772 
6773 	if (!new_stream)
6774 		return;
6775 
6776 	/*
6777 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6778 	 * For now it's sufficient to just guard against these conditions.
6779 	 */
6780 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6781 		return;
6782 
6783 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6784 	vrr_params = new_crtc_state->vrr_params;
6785 
6786 	if (new_crtc_state->vrr_supported &&
6787 	    config.min_refresh_in_uhz &&
6788 	    config.max_refresh_in_uhz) {
6789 		config.state = new_crtc_state->base.vrr_enabled ?
6790 			VRR_STATE_ACTIVE_VARIABLE :
6791 			VRR_STATE_INACTIVE;
6792 	} else {
6793 		config.state = VRR_STATE_UNSUPPORTED;
6794 	}
6795 
6796 	mod_freesync_build_vrr_params(dm->freesync_module,
6797 				      new_stream,
6798 				      &config, &vrr_params);
6799 
6800 	new_crtc_state->freesync_timing_changed |=
6801 		(memcmp(&new_crtc_state->vrr_params.adjust,
6802 			&vrr_params.adjust,
6803 			sizeof(vrr_params.adjust)) != 0);
6804 
6805 	new_crtc_state->vrr_params = vrr_params;
6806 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6807 }
6808 
6809 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6810 					    struct dm_crtc_state *new_state)
6811 {
6812 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6813 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6814 
6815 	if (!old_vrr_active && new_vrr_active) {
6816 		/* Transition VRR inactive -> active:
6817 		 * While VRR is active, we must not disable vblank irq, as a
6818 		 * reenable after disable would compute bogus vblank/pflip
6819 		 * timestamps if it likely happened inside display front-porch.
6820 		 *
6821 		 * We also need vupdate irq for the actual core vblank handling
6822 		 * at end of vblank.
6823 		 */
6824 		dm_set_vupdate_irq(new_state->base.crtc, true);
6825 		drm_crtc_vblank_get(new_state->base.crtc);
6826 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6827 				 __func__, new_state->base.crtc->base.id);
6828 	} else if (old_vrr_active && !new_vrr_active) {
6829 		/* Transition VRR active -> inactive:
6830 		 * Allow vblank irq disable again for fixed refresh rate.
6831 		 */
6832 		dm_set_vupdate_irq(new_state->base.crtc, false);
6833 		drm_crtc_vblank_put(new_state->base.crtc);
6834 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6835 				 __func__, new_state->base.crtc->base.id);
6836 	}
6837 }
6838 
6839 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6840 {
6841 	struct drm_plane *plane;
6842 	struct drm_plane_state *old_plane_state, *new_plane_state;
6843 	int i;
6844 
6845 	/*
6846 	 * TODO: Make this per-stream so we don't issue redundant updates for
6847 	 * commits with multiple streams.
6848 	 */
6849 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6850 				       new_plane_state, i)
6851 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6852 			handle_cursor_update(plane, old_plane_state);
6853 }
6854 
6855 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6856 				    struct dc_state *dc_state,
6857 				    struct drm_device *dev,
6858 				    struct amdgpu_display_manager *dm,
6859 				    struct drm_crtc *pcrtc,
6860 				    bool wait_for_vblank)
6861 {
6862 	uint32_t i;
6863 	uint64_t timestamp_ns;
6864 	struct drm_plane *plane;
6865 	struct drm_plane_state *old_plane_state, *new_plane_state;
6866 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6867 	struct drm_crtc_state *new_pcrtc_state =
6868 			drm_atomic_get_new_crtc_state(state, pcrtc);
6869 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6870 	struct dm_crtc_state *dm_old_crtc_state =
6871 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6872 	int planes_count = 0, vpos, hpos;
6873 	long r;
6874 	unsigned long flags;
6875 	struct amdgpu_bo *abo;
6876 	uint64_t tiling_flags;
6877 	bool tmz_surface = false;
6878 	uint32_t target_vblank, last_flip_vblank;
6879 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6880 	bool pflip_present = false;
6881 	struct {
6882 		struct dc_surface_update surface_updates[MAX_SURFACES];
6883 		struct dc_plane_info plane_infos[MAX_SURFACES];
6884 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6885 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6886 		struct dc_stream_update stream_update;
6887 	} *bundle;
6888 
6889 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6890 
6891 	if (!bundle) {
6892 		dm_error("Failed to allocate update bundle\n");
6893 		goto cleanup;
6894 	}
6895 
6896 	/*
6897 	 * Disable the cursor first if we're disabling all the planes.
6898 	 * It'll remain on the screen after the planes are re-enabled
6899 	 * if we don't.
6900 	 */
6901 	if (acrtc_state->active_planes == 0)
6902 		amdgpu_dm_commit_cursors(state);
6903 
6904 	/* update planes when needed */
6905 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6906 		struct drm_crtc *crtc = new_plane_state->crtc;
6907 		struct drm_crtc_state *new_crtc_state;
6908 		struct drm_framebuffer *fb = new_plane_state->fb;
6909 		bool plane_needs_flip;
6910 		struct dc_plane_state *dc_plane;
6911 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6912 
6913 		/* Cursor plane is handled after stream updates */
6914 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6915 			continue;
6916 
6917 		if (!fb || !crtc || pcrtc != crtc)
6918 			continue;
6919 
6920 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6921 		if (!new_crtc_state->active)
6922 			continue;
6923 
6924 		dc_plane = dm_new_plane_state->dc_state;
6925 
6926 		bundle->surface_updates[planes_count].surface = dc_plane;
6927 		if (new_pcrtc_state->color_mgmt_changed) {
6928 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6929 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6930 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6931 		}
6932 
6933 		fill_dc_scaling_info(new_plane_state,
6934 				     &bundle->scaling_infos[planes_count]);
6935 
6936 		bundle->surface_updates[planes_count].scaling_info =
6937 			&bundle->scaling_infos[planes_count];
6938 
6939 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6940 
6941 		pflip_present = pflip_present || plane_needs_flip;
6942 
6943 		if (!plane_needs_flip) {
6944 			planes_count += 1;
6945 			continue;
6946 		}
6947 
6948 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6949 
6950 		/*
6951 		 * Wait for all fences on this FB. Do limited wait to avoid
6952 		 * deadlock during GPU reset when this fence will not signal
6953 		 * but we hold reservation lock for the BO.
6954 		 */
6955 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6956 							false,
6957 							msecs_to_jiffies(5000));
6958 		if (unlikely(r <= 0))
6959 			DRM_ERROR("Waiting for fences timed out!");
6960 
6961 		/*
6962 		 * TODO This might fail and hence better not used, wait
6963 		 * explicitly on fences instead
6964 		 * and in general should be called for
6965 		 * blocking commit to as per framework helpers
6966 		 */
6967 		r = amdgpu_bo_reserve(abo, true);
6968 		if (unlikely(r != 0))
6969 			DRM_ERROR("failed to reserve buffer before flip\n");
6970 
6971 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6972 
6973 		tmz_surface = amdgpu_bo_encrypted(abo);
6974 
6975 		amdgpu_bo_unreserve(abo);
6976 
6977 		fill_dc_plane_info_and_addr(
6978 			dm->adev, new_plane_state, tiling_flags,
6979 			&bundle->plane_infos[planes_count],
6980 			&bundle->flip_addrs[planes_count].address,
6981 			tmz_surface,
6982 			false);
6983 
6984 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6985 				 new_plane_state->plane->index,
6986 				 bundle->plane_infos[planes_count].dcc.enable);
6987 
6988 		bundle->surface_updates[planes_count].plane_info =
6989 			&bundle->plane_infos[planes_count];
6990 
6991 		/*
6992 		 * Only allow immediate flips for fast updates that don't
6993 		 * change FB pitch, DCC state, rotation or mirroing.
6994 		 */
6995 		bundle->flip_addrs[planes_count].flip_immediate =
6996 			crtc->state->async_flip &&
6997 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6998 
6999 		timestamp_ns = ktime_get_ns();
7000 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7001 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7002 		bundle->surface_updates[planes_count].surface = dc_plane;
7003 
7004 		if (!bundle->surface_updates[planes_count].surface) {
7005 			DRM_ERROR("No surface for CRTC: id=%d\n",
7006 					acrtc_attach->crtc_id);
7007 			continue;
7008 		}
7009 
7010 		if (plane == pcrtc->primary)
7011 			update_freesync_state_on_stream(
7012 				dm,
7013 				acrtc_state,
7014 				acrtc_state->stream,
7015 				dc_plane,
7016 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7017 
7018 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7019 				 __func__,
7020 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7021 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7022 
7023 		planes_count += 1;
7024 
7025 	}
7026 
7027 	if (pflip_present) {
7028 		if (!vrr_active) {
7029 			/* Use old throttling in non-vrr fixed refresh rate mode
7030 			 * to keep flip scheduling based on target vblank counts
7031 			 * working in a backwards compatible way, e.g., for
7032 			 * clients using the GLX_OML_sync_control extension or
7033 			 * DRI3/Present extension with defined target_msc.
7034 			 */
7035 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7036 		}
7037 		else {
7038 			/* For variable refresh rate mode only:
7039 			 * Get vblank of last completed flip to avoid > 1 vrr
7040 			 * flips per video frame by use of throttling, but allow
7041 			 * flip programming anywhere in the possibly large
7042 			 * variable vrr vblank interval for fine-grained flip
7043 			 * timing control and more opportunity to avoid stutter
7044 			 * on late submission of flips.
7045 			 */
7046 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7047 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7048 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7049 		}
7050 
7051 		target_vblank = last_flip_vblank + wait_for_vblank;
7052 
7053 		/*
7054 		 * Wait until we're out of the vertical blank period before the one
7055 		 * targeted by the flip
7056 		 */
7057 		while ((acrtc_attach->enabled &&
7058 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7059 							    0, &vpos, &hpos, NULL,
7060 							    NULL, &pcrtc->hwmode)
7061 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7062 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7063 			(int)(target_vblank -
7064 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7065 			usleep_range(1000, 1100);
7066 		}
7067 
7068 		/**
7069 		 * Prepare the flip event for the pageflip interrupt to handle.
7070 		 *
7071 		 * This only works in the case where we've already turned on the
7072 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7073 		 * from 0 -> n planes we have to skip a hardware generated event
7074 		 * and rely on sending it from software.
7075 		 */
7076 		if (acrtc_attach->base.state->event &&
7077 		    acrtc_state->active_planes > 0) {
7078 			drm_crtc_vblank_get(pcrtc);
7079 
7080 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7081 
7082 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7083 			prepare_flip_isr(acrtc_attach);
7084 
7085 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7086 		}
7087 
7088 		if (acrtc_state->stream) {
7089 			if (acrtc_state->freesync_vrr_info_changed)
7090 				bundle->stream_update.vrr_infopacket =
7091 					&acrtc_state->stream->vrr_infopacket;
7092 		}
7093 	}
7094 
7095 	/* Update the planes if changed or disable if we don't have any. */
7096 	if ((planes_count || acrtc_state->active_planes == 0) &&
7097 		acrtc_state->stream) {
7098 		bundle->stream_update.stream = acrtc_state->stream;
7099 		if (new_pcrtc_state->mode_changed) {
7100 			bundle->stream_update.src = acrtc_state->stream->src;
7101 			bundle->stream_update.dst = acrtc_state->stream->dst;
7102 		}
7103 
7104 		if (new_pcrtc_state->color_mgmt_changed) {
7105 			/*
7106 			 * TODO: This isn't fully correct since we've actually
7107 			 * already modified the stream in place.
7108 			 */
7109 			bundle->stream_update.gamut_remap =
7110 				&acrtc_state->stream->gamut_remap_matrix;
7111 			bundle->stream_update.output_csc_transform =
7112 				&acrtc_state->stream->csc_color_matrix;
7113 			bundle->stream_update.out_transfer_func =
7114 				acrtc_state->stream->out_transfer_func;
7115 		}
7116 
7117 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7118 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7119 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7120 
7121 		/*
7122 		 * If FreeSync state on the stream has changed then we need to
7123 		 * re-adjust the min/max bounds now that DC doesn't handle this
7124 		 * as part of commit.
7125 		 */
7126 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7127 		    amdgpu_dm_vrr_active(acrtc_state)) {
7128 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7129 			dc_stream_adjust_vmin_vmax(
7130 				dm->dc, acrtc_state->stream,
7131 				&acrtc_state->vrr_params.adjust);
7132 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7133 		}
7134 		mutex_lock(&dm->dc_lock);
7135 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7136 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7137 			amdgpu_dm_psr_disable(acrtc_state->stream);
7138 
7139 		dc_commit_updates_for_stream(dm->dc,
7140 						     bundle->surface_updates,
7141 						     planes_count,
7142 						     acrtc_state->stream,
7143 						     &bundle->stream_update,
7144 						     dc_state);
7145 
7146 		/**
7147 		 * Enable or disable the interrupts on the backend.
7148 		 *
7149 		 * Most pipes are put into power gating when unused.
7150 		 *
7151 		 * When power gating is enabled on a pipe we lose the
7152 		 * interrupt enablement state when power gating is disabled.
7153 		 *
7154 		 * So we need to update the IRQ control state in hardware
7155 		 * whenever the pipe turns on (since it could be previously
7156 		 * power gated) or off (since some pipes can't be power gated
7157 		 * on some ASICs).
7158 		 */
7159 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7160 			dm_update_pflip_irq_state(
7161 				(struct amdgpu_device *)dev->dev_private,
7162 				acrtc_attach);
7163 
7164 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7165 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7166 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7167 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7168 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7169 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7170 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7171 			amdgpu_dm_psr_enable(acrtc_state->stream);
7172 		}
7173 
7174 		mutex_unlock(&dm->dc_lock);
7175 	}
7176 
7177 	/*
7178 	 * Update cursor state *after* programming all the planes.
7179 	 * This avoids redundant programming in the case where we're going
7180 	 * to be disabling a single plane - those pipes are being disabled.
7181 	 */
7182 	if (acrtc_state->active_planes)
7183 		amdgpu_dm_commit_cursors(state);
7184 
7185 cleanup:
7186 	kfree(bundle);
7187 }
7188 
7189 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7190 				   struct drm_atomic_state *state)
7191 {
7192 	struct amdgpu_device *adev = dev->dev_private;
7193 	struct amdgpu_dm_connector *aconnector;
7194 	struct drm_connector *connector;
7195 	struct drm_connector_state *old_con_state, *new_con_state;
7196 	struct drm_crtc_state *new_crtc_state;
7197 	struct dm_crtc_state *new_dm_crtc_state;
7198 	const struct dc_stream_status *status;
7199 	int i, inst;
7200 
7201 	/* Notify device removals. */
7202 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7203 		if (old_con_state->crtc != new_con_state->crtc) {
7204 			/* CRTC changes require notification. */
7205 			goto notify;
7206 		}
7207 
7208 		if (!new_con_state->crtc)
7209 			continue;
7210 
7211 		new_crtc_state = drm_atomic_get_new_crtc_state(
7212 			state, new_con_state->crtc);
7213 
7214 		if (!new_crtc_state)
7215 			continue;
7216 
7217 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7218 			continue;
7219 
7220 	notify:
7221 		aconnector = to_amdgpu_dm_connector(connector);
7222 
7223 		mutex_lock(&adev->dm.audio_lock);
7224 		inst = aconnector->audio_inst;
7225 		aconnector->audio_inst = -1;
7226 		mutex_unlock(&adev->dm.audio_lock);
7227 
7228 		amdgpu_dm_audio_eld_notify(adev, inst);
7229 	}
7230 
7231 	/* Notify audio device additions. */
7232 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7233 		if (!new_con_state->crtc)
7234 			continue;
7235 
7236 		new_crtc_state = drm_atomic_get_new_crtc_state(
7237 			state, new_con_state->crtc);
7238 
7239 		if (!new_crtc_state)
7240 			continue;
7241 
7242 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7243 			continue;
7244 
7245 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7246 		if (!new_dm_crtc_state->stream)
7247 			continue;
7248 
7249 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7250 		if (!status)
7251 			continue;
7252 
7253 		aconnector = to_amdgpu_dm_connector(connector);
7254 
7255 		mutex_lock(&adev->dm.audio_lock);
7256 		inst = status->audio_inst;
7257 		aconnector->audio_inst = inst;
7258 		mutex_unlock(&adev->dm.audio_lock);
7259 
7260 		amdgpu_dm_audio_eld_notify(adev, inst);
7261 	}
7262 }
7263 
7264 /*
7265  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7266  * @crtc_state: the DRM CRTC state
7267  * @stream_state: the DC stream state.
7268  *
7269  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7270  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7271  */
7272 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7273 						struct dc_stream_state *stream_state)
7274 {
7275 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7276 }
7277 
7278 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7279 				   struct drm_atomic_state *state,
7280 				   bool nonblock)
7281 {
7282 	struct drm_crtc *crtc;
7283 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7284 	struct amdgpu_device *adev = dev->dev_private;
7285 	int i;
7286 
7287 	/*
7288 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7289 	 * a modeset, being disabled, or have no active planes.
7290 	 *
7291 	 * It's done in atomic commit rather than commit tail for now since
7292 	 * some of these interrupt handlers access the current CRTC state and
7293 	 * potentially the stream pointer itself.
7294 	 *
7295 	 * Since the atomic state is swapped within atomic commit and not within
7296 	 * commit tail this would leave to new state (that hasn't been committed yet)
7297 	 * being accesssed from within the handlers.
7298 	 *
7299 	 * TODO: Fix this so we can do this in commit tail and not have to block
7300 	 * in atomic check.
7301 	 */
7302 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7303 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7304 
7305 		if (old_crtc_state->active &&
7306 		    (!new_crtc_state->active ||
7307 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7308 			manage_dm_interrupts(adev, acrtc, false);
7309 	}
7310 	/*
7311 	 * Add check here for SoC's that support hardware cursor plane, to
7312 	 * unset legacy_cursor_update
7313 	 */
7314 
7315 	return drm_atomic_helper_commit(dev, state, nonblock);
7316 
7317 	/*TODO Handle EINTR, reenable IRQ*/
7318 }
7319 
7320 /**
7321  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7322  * @state: The atomic state to commit
7323  *
7324  * This will tell DC to commit the constructed DC state from atomic_check,
7325  * programming the hardware. Any failures here implies a hardware failure, since
7326  * atomic check should have filtered anything non-kosher.
7327  */
7328 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7329 {
7330 	struct drm_device *dev = state->dev;
7331 	struct amdgpu_device *adev = dev->dev_private;
7332 	struct amdgpu_display_manager *dm = &adev->dm;
7333 	struct dm_atomic_state *dm_state;
7334 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7335 	uint32_t i, j;
7336 	struct drm_crtc *crtc;
7337 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7338 	unsigned long flags;
7339 	bool wait_for_vblank = true;
7340 	struct drm_connector *connector;
7341 	struct drm_connector_state *old_con_state, *new_con_state;
7342 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7343 	int crtc_disable_count = 0;
7344 
7345 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7346 
7347 	dm_state = dm_atomic_get_new_state(state);
7348 	if (dm_state && dm_state->context) {
7349 		dc_state = dm_state->context;
7350 	} else {
7351 		/* No state changes, retain current state. */
7352 		dc_state_temp = dc_create_state(dm->dc);
7353 		ASSERT(dc_state_temp);
7354 		dc_state = dc_state_temp;
7355 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7356 	}
7357 
7358 	/* update changed items */
7359 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7360 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7361 
7362 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7363 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7364 
7365 		DRM_DEBUG_DRIVER(
7366 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7367 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7368 			"connectors_changed:%d\n",
7369 			acrtc->crtc_id,
7370 			new_crtc_state->enable,
7371 			new_crtc_state->active,
7372 			new_crtc_state->planes_changed,
7373 			new_crtc_state->mode_changed,
7374 			new_crtc_state->active_changed,
7375 			new_crtc_state->connectors_changed);
7376 
7377 		/* Copy all transient state flags into dc state */
7378 		if (dm_new_crtc_state->stream) {
7379 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7380 							    dm_new_crtc_state->stream);
7381 		}
7382 
7383 		/* handles headless hotplug case, updating new_state and
7384 		 * aconnector as needed
7385 		 */
7386 
7387 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7388 
7389 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7390 
7391 			if (!dm_new_crtc_state->stream) {
7392 				/*
7393 				 * this could happen because of issues with
7394 				 * userspace notifications delivery.
7395 				 * In this case userspace tries to set mode on
7396 				 * display which is disconnected in fact.
7397 				 * dc_sink is NULL in this case on aconnector.
7398 				 * We expect reset mode will come soon.
7399 				 *
7400 				 * This can also happen when unplug is done
7401 				 * during resume sequence ended
7402 				 *
7403 				 * In this case, we want to pretend we still
7404 				 * have a sink to keep the pipe running so that
7405 				 * hw state is consistent with the sw state
7406 				 */
7407 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7408 						__func__, acrtc->base.base.id);
7409 				continue;
7410 			}
7411 
7412 			if (dm_old_crtc_state->stream)
7413 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7414 
7415 			pm_runtime_get_noresume(dev->dev);
7416 
7417 			acrtc->enabled = true;
7418 			acrtc->hw_mode = new_crtc_state->mode;
7419 			crtc->hwmode = new_crtc_state->mode;
7420 		} else if (modereset_required(new_crtc_state)) {
7421 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7422 			/* i.e. reset mode */
7423 			if (dm_old_crtc_state->stream) {
7424 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7425 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7426 
7427 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7428 			}
7429 		}
7430 	} /* for_each_crtc_in_state() */
7431 
7432 	if (dc_state) {
7433 		dm_enable_per_frame_crtc_master_sync(dc_state);
7434 		mutex_lock(&dm->dc_lock);
7435 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7436 		mutex_unlock(&dm->dc_lock);
7437 	}
7438 
7439 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7440 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7441 
7442 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7443 
7444 		if (dm_new_crtc_state->stream != NULL) {
7445 			const struct dc_stream_status *status =
7446 					dc_stream_get_status(dm_new_crtc_state->stream);
7447 
7448 			if (!status)
7449 				status = dc_stream_get_status_from_state(dc_state,
7450 									 dm_new_crtc_state->stream);
7451 
7452 			if (!status)
7453 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7454 			else
7455 				acrtc->otg_inst = status->primary_otg_inst;
7456 		}
7457 	}
7458 #ifdef CONFIG_DRM_AMD_DC_HDCP
7459 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7460 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7461 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7462 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7463 
7464 		new_crtc_state = NULL;
7465 
7466 		if (acrtc)
7467 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7468 
7469 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7470 
7471 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7472 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7473 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7474 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7475 			continue;
7476 		}
7477 
7478 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7479 			hdcp_update_display(
7480 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7481 				new_con_state->hdcp_content_type,
7482 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7483 													 : false);
7484 	}
7485 #endif
7486 
7487 	/* Handle connector state changes */
7488 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7489 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7490 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7491 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7492 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7493 		struct dc_stream_update stream_update;
7494 		struct dc_info_packet hdr_packet;
7495 		struct dc_stream_status *status = NULL;
7496 		bool abm_changed, hdr_changed, scaling_changed;
7497 
7498 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7499 		memset(&stream_update, 0, sizeof(stream_update));
7500 
7501 		if (acrtc) {
7502 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7503 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7504 		}
7505 
7506 		/* Skip any modesets/resets */
7507 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7508 			continue;
7509 
7510 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7511 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7512 
7513 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7514 							     dm_old_con_state);
7515 
7516 		abm_changed = dm_new_crtc_state->abm_level !=
7517 			      dm_old_crtc_state->abm_level;
7518 
7519 		hdr_changed =
7520 			is_hdr_metadata_different(old_con_state, new_con_state);
7521 
7522 		if (!scaling_changed && !abm_changed && !hdr_changed)
7523 			continue;
7524 
7525 		stream_update.stream = dm_new_crtc_state->stream;
7526 		if (scaling_changed) {
7527 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7528 					dm_new_con_state, dm_new_crtc_state->stream);
7529 
7530 			stream_update.src = dm_new_crtc_state->stream->src;
7531 			stream_update.dst = dm_new_crtc_state->stream->dst;
7532 		}
7533 
7534 		if (abm_changed) {
7535 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7536 
7537 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7538 		}
7539 
7540 		if (hdr_changed) {
7541 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7542 			stream_update.hdr_static_metadata = &hdr_packet;
7543 		}
7544 
7545 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7546 		WARN_ON(!status);
7547 		WARN_ON(!status->plane_count);
7548 
7549 		/*
7550 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7551 		 * Here we create an empty update on each plane.
7552 		 * To fix this, DC should permit updating only stream properties.
7553 		 */
7554 		for (j = 0; j < status->plane_count; j++)
7555 			dummy_updates[j].surface = status->plane_states[0];
7556 
7557 
7558 		mutex_lock(&dm->dc_lock);
7559 		dc_commit_updates_for_stream(dm->dc,
7560 						     dummy_updates,
7561 						     status->plane_count,
7562 						     dm_new_crtc_state->stream,
7563 						     &stream_update,
7564 						     dc_state);
7565 		mutex_unlock(&dm->dc_lock);
7566 	}
7567 
7568 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7569 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7570 				      new_crtc_state, i) {
7571 		if (old_crtc_state->active && !new_crtc_state->active)
7572 			crtc_disable_count++;
7573 
7574 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7575 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7576 
7577 		/* Update freesync active state. */
7578 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7579 
7580 		/* Handle vrr on->off / off->on transitions */
7581 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7582 						dm_new_crtc_state);
7583 	}
7584 
7585 	/**
7586 	 * Enable interrupts for CRTCs that are newly enabled or went through
7587 	 * a modeset. It was intentionally deferred until after the front end
7588 	 * state was modified to wait until the OTG was on and so the IRQ
7589 	 * handlers didn't access stale or invalid state.
7590 	 */
7591 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7592 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7593 
7594 		if (new_crtc_state->active &&
7595 		    (!old_crtc_state->active ||
7596 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7597 			manage_dm_interrupts(adev, acrtc, true);
7598 #ifdef CONFIG_DEBUG_FS
7599 			/**
7600 			 * Frontend may have changed so reapply the CRC capture
7601 			 * settings for the stream.
7602 			 */
7603 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7604 
7605 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7606 				amdgpu_dm_crtc_configure_crc_source(
7607 					crtc, dm_new_crtc_state,
7608 					dm_new_crtc_state->crc_src);
7609 			}
7610 #endif
7611 		}
7612 	}
7613 
7614 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7615 		if (new_crtc_state->async_flip)
7616 			wait_for_vblank = false;
7617 
7618 	/* update planes when needed per crtc*/
7619 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7620 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7621 
7622 		if (dm_new_crtc_state->stream)
7623 			amdgpu_dm_commit_planes(state, dc_state, dev,
7624 						dm, crtc, wait_for_vblank);
7625 	}
7626 
7627 	/* Update audio instances for each connector. */
7628 	amdgpu_dm_commit_audio(dev, state);
7629 
7630 	/*
7631 	 * send vblank event on all events not handled in flip and
7632 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7633 	 */
7634 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7635 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7636 
7637 		if (new_crtc_state->event)
7638 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7639 
7640 		new_crtc_state->event = NULL;
7641 	}
7642 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7643 
7644 	/* Signal HW programming completion */
7645 	drm_atomic_helper_commit_hw_done(state);
7646 
7647 	if (wait_for_vblank)
7648 		drm_atomic_helper_wait_for_flip_done(dev, state);
7649 
7650 	drm_atomic_helper_cleanup_planes(dev, state);
7651 
7652 	/*
7653 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7654 	 * so we can put the GPU into runtime suspend if we're not driving any
7655 	 * displays anymore
7656 	 */
7657 	for (i = 0; i < crtc_disable_count; i++)
7658 		pm_runtime_put_autosuspend(dev->dev);
7659 	pm_runtime_mark_last_busy(dev->dev);
7660 
7661 	if (dc_state_temp)
7662 		dc_release_state(dc_state_temp);
7663 }
7664 
7665 
7666 static int dm_force_atomic_commit(struct drm_connector *connector)
7667 {
7668 	int ret = 0;
7669 	struct drm_device *ddev = connector->dev;
7670 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7671 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7672 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7673 	struct drm_connector_state *conn_state;
7674 	struct drm_crtc_state *crtc_state;
7675 	struct drm_plane_state *plane_state;
7676 
7677 	if (!state)
7678 		return -ENOMEM;
7679 
7680 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7681 
7682 	/* Construct an atomic state to restore previous display setting */
7683 
7684 	/*
7685 	 * Attach connectors to drm_atomic_state
7686 	 */
7687 	conn_state = drm_atomic_get_connector_state(state, connector);
7688 
7689 	ret = PTR_ERR_OR_ZERO(conn_state);
7690 	if (ret)
7691 		goto err;
7692 
7693 	/* Attach crtc to drm_atomic_state*/
7694 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7695 
7696 	ret = PTR_ERR_OR_ZERO(crtc_state);
7697 	if (ret)
7698 		goto err;
7699 
7700 	/* force a restore */
7701 	crtc_state->mode_changed = true;
7702 
7703 	/* Attach plane to drm_atomic_state */
7704 	plane_state = drm_atomic_get_plane_state(state, plane);
7705 
7706 	ret = PTR_ERR_OR_ZERO(plane_state);
7707 	if (ret)
7708 		goto err;
7709 
7710 
7711 	/* Call commit internally with the state we just constructed */
7712 	ret = drm_atomic_commit(state);
7713 	if (!ret)
7714 		return 0;
7715 
7716 err:
7717 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7718 	drm_atomic_state_put(state);
7719 
7720 	return ret;
7721 }
7722 
7723 /*
7724  * This function handles all cases when set mode does not come upon hotplug.
7725  * This includes when a display is unplugged then plugged back into the
7726  * same port and when running without usermode desktop manager supprot
7727  */
7728 void dm_restore_drm_connector_state(struct drm_device *dev,
7729 				    struct drm_connector *connector)
7730 {
7731 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7732 	struct amdgpu_crtc *disconnected_acrtc;
7733 	struct dm_crtc_state *acrtc_state;
7734 
7735 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7736 		return;
7737 
7738 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7739 	if (!disconnected_acrtc)
7740 		return;
7741 
7742 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7743 	if (!acrtc_state->stream)
7744 		return;
7745 
7746 	/*
7747 	 * If the previous sink is not released and different from the current,
7748 	 * we deduce we are in a state where we can not rely on usermode call
7749 	 * to turn on the display, so we do it here
7750 	 */
7751 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7752 		dm_force_atomic_commit(&aconnector->base);
7753 }
7754 
7755 /*
7756  * Grabs all modesetting locks to serialize against any blocking commits,
7757  * Waits for completion of all non blocking commits.
7758  */
7759 static int do_aquire_global_lock(struct drm_device *dev,
7760 				 struct drm_atomic_state *state)
7761 {
7762 	struct drm_crtc *crtc;
7763 	struct drm_crtc_commit *commit;
7764 	long ret;
7765 
7766 	/*
7767 	 * Adding all modeset locks to aquire_ctx will
7768 	 * ensure that when the framework release it the
7769 	 * extra locks we are locking here will get released to
7770 	 */
7771 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7772 	if (ret)
7773 		return ret;
7774 
7775 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7776 		spin_lock(&crtc->commit_lock);
7777 		commit = list_first_entry_or_null(&crtc->commit_list,
7778 				struct drm_crtc_commit, commit_entry);
7779 		if (commit)
7780 			drm_crtc_commit_get(commit);
7781 		spin_unlock(&crtc->commit_lock);
7782 
7783 		if (!commit)
7784 			continue;
7785 
7786 		/*
7787 		 * Make sure all pending HW programming completed and
7788 		 * page flips done
7789 		 */
7790 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7791 
7792 		if (ret > 0)
7793 			ret = wait_for_completion_interruptible_timeout(
7794 					&commit->flip_done, 10*HZ);
7795 
7796 		if (ret == 0)
7797 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7798 				  "timed out\n", crtc->base.id, crtc->name);
7799 
7800 		drm_crtc_commit_put(commit);
7801 	}
7802 
7803 	return ret < 0 ? ret : 0;
7804 }
7805 
7806 static void get_freesync_config_for_crtc(
7807 	struct dm_crtc_state *new_crtc_state,
7808 	struct dm_connector_state *new_con_state)
7809 {
7810 	struct mod_freesync_config config = {0};
7811 	struct amdgpu_dm_connector *aconnector =
7812 			to_amdgpu_dm_connector(new_con_state->base.connector);
7813 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7814 	int vrefresh = drm_mode_vrefresh(mode);
7815 
7816 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7817 					vrefresh >= aconnector->min_vfreq &&
7818 					vrefresh <= aconnector->max_vfreq;
7819 
7820 	if (new_crtc_state->vrr_supported) {
7821 		new_crtc_state->stream->ignore_msa_timing_param = true;
7822 		config.state = new_crtc_state->base.vrr_enabled ?
7823 				VRR_STATE_ACTIVE_VARIABLE :
7824 				VRR_STATE_INACTIVE;
7825 		config.min_refresh_in_uhz =
7826 				aconnector->min_vfreq * 1000000;
7827 		config.max_refresh_in_uhz =
7828 				aconnector->max_vfreq * 1000000;
7829 		config.vsif_supported = true;
7830 		config.btr = true;
7831 	}
7832 
7833 	new_crtc_state->freesync_config = config;
7834 }
7835 
7836 static void reset_freesync_config_for_crtc(
7837 	struct dm_crtc_state *new_crtc_state)
7838 {
7839 	new_crtc_state->vrr_supported = false;
7840 
7841 	memset(&new_crtc_state->vrr_params, 0,
7842 	       sizeof(new_crtc_state->vrr_params));
7843 	memset(&new_crtc_state->vrr_infopacket, 0,
7844 	       sizeof(new_crtc_state->vrr_infopacket));
7845 }
7846 
7847 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7848 				struct drm_atomic_state *state,
7849 				struct drm_crtc *crtc,
7850 				struct drm_crtc_state *old_crtc_state,
7851 				struct drm_crtc_state *new_crtc_state,
7852 				bool enable,
7853 				bool *lock_and_validation_needed)
7854 {
7855 	struct dm_atomic_state *dm_state = NULL;
7856 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7857 	struct dc_stream_state *new_stream;
7858 	int ret = 0;
7859 
7860 	/*
7861 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7862 	 * update changed items
7863 	 */
7864 	struct amdgpu_crtc *acrtc = NULL;
7865 	struct amdgpu_dm_connector *aconnector = NULL;
7866 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7867 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7868 
7869 	new_stream = NULL;
7870 
7871 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7872 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7873 	acrtc = to_amdgpu_crtc(crtc);
7874 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7875 
7876 	/* TODO This hack should go away */
7877 	if (aconnector && enable) {
7878 		/* Make sure fake sink is created in plug-in scenario */
7879 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7880 							    &aconnector->base);
7881 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7882 							    &aconnector->base);
7883 
7884 		if (IS_ERR(drm_new_conn_state)) {
7885 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7886 			goto fail;
7887 		}
7888 
7889 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7890 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7891 
7892 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7893 			goto skip_modeset;
7894 
7895 		new_stream = create_validate_stream_for_sink(aconnector,
7896 							     &new_crtc_state->mode,
7897 							     dm_new_conn_state,
7898 							     dm_old_crtc_state->stream);
7899 
7900 		/*
7901 		 * we can have no stream on ACTION_SET if a display
7902 		 * was disconnected during S3, in this case it is not an
7903 		 * error, the OS will be updated after detection, and
7904 		 * will do the right thing on next atomic commit
7905 		 */
7906 
7907 		if (!new_stream) {
7908 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7909 					__func__, acrtc->base.base.id);
7910 			ret = -ENOMEM;
7911 			goto fail;
7912 		}
7913 
7914 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7915 
7916 		ret = fill_hdr_info_packet(drm_new_conn_state,
7917 					   &new_stream->hdr_static_metadata);
7918 		if (ret)
7919 			goto fail;
7920 
7921 		/*
7922 		 * If we already removed the old stream from the context
7923 		 * (and set the new stream to NULL) then we can't reuse
7924 		 * the old stream even if the stream and scaling are unchanged.
7925 		 * We'll hit the BUG_ON and black screen.
7926 		 *
7927 		 * TODO: Refactor this function to allow this check to work
7928 		 * in all conditions.
7929 		 */
7930 		if (dm_new_crtc_state->stream &&
7931 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7932 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7933 			new_crtc_state->mode_changed = false;
7934 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7935 					 new_crtc_state->mode_changed);
7936 		}
7937 	}
7938 
7939 	/* mode_changed flag may get updated above, need to check again */
7940 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7941 		goto skip_modeset;
7942 
7943 	DRM_DEBUG_DRIVER(
7944 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7945 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7946 		"connectors_changed:%d\n",
7947 		acrtc->crtc_id,
7948 		new_crtc_state->enable,
7949 		new_crtc_state->active,
7950 		new_crtc_state->planes_changed,
7951 		new_crtc_state->mode_changed,
7952 		new_crtc_state->active_changed,
7953 		new_crtc_state->connectors_changed);
7954 
7955 	/* Remove stream for any changed/disabled CRTC */
7956 	if (!enable) {
7957 
7958 		if (!dm_old_crtc_state->stream)
7959 			goto skip_modeset;
7960 
7961 		ret = dm_atomic_get_state(state, &dm_state);
7962 		if (ret)
7963 			goto fail;
7964 
7965 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7966 				crtc->base.id);
7967 
7968 		/* i.e. reset mode */
7969 		if (dc_remove_stream_from_ctx(
7970 				dm->dc,
7971 				dm_state->context,
7972 				dm_old_crtc_state->stream) != DC_OK) {
7973 			ret = -EINVAL;
7974 			goto fail;
7975 		}
7976 
7977 		dc_stream_release(dm_old_crtc_state->stream);
7978 		dm_new_crtc_state->stream = NULL;
7979 
7980 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7981 
7982 		*lock_and_validation_needed = true;
7983 
7984 	} else {/* Add stream for any updated/enabled CRTC */
7985 		/*
7986 		 * Quick fix to prevent NULL pointer on new_stream when
7987 		 * added MST connectors not found in existing crtc_state in the chained mode
7988 		 * TODO: need to dig out the root cause of that
7989 		 */
7990 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7991 			goto skip_modeset;
7992 
7993 		if (modereset_required(new_crtc_state))
7994 			goto skip_modeset;
7995 
7996 		if (modeset_required(new_crtc_state, new_stream,
7997 				     dm_old_crtc_state->stream)) {
7998 
7999 			WARN_ON(dm_new_crtc_state->stream);
8000 
8001 			ret = dm_atomic_get_state(state, &dm_state);
8002 			if (ret)
8003 				goto fail;
8004 
8005 			dm_new_crtc_state->stream = new_stream;
8006 
8007 			dc_stream_retain(new_stream);
8008 
8009 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8010 						crtc->base.id);
8011 
8012 			if (dc_add_stream_to_ctx(
8013 					dm->dc,
8014 					dm_state->context,
8015 					dm_new_crtc_state->stream) != DC_OK) {
8016 				ret = -EINVAL;
8017 				goto fail;
8018 			}
8019 
8020 			*lock_and_validation_needed = true;
8021 		}
8022 	}
8023 
8024 skip_modeset:
8025 	/* Release extra reference */
8026 	if (new_stream)
8027 		 dc_stream_release(new_stream);
8028 
8029 	/*
8030 	 * We want to do dc stream updates that do not require a
8031 	 * full modeset below.
8032 	 */
8033 	if (!(enable && aconnector && new_crtc_state->active))
8034 		return 0;
8035 	/*
8036 	 * Given above conditions, the dc state cannot be NULL because:
8037 	 * 1. We're in the process of enabling CRTCs (just been added
8038 	 *    to the dc context, or already is on the context)
8039 	 * 2. Has a valid connector attached, and
8040 	 * 3. Is currently active and enabled.
8041 	 * => The dc stream state currently exists.
8042 	 */
8043 	BUG_ON(dm_new_crtc_state->stream == NULL);
8044 
8045 	/* Scaling or underscan settings */
8046 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8047 		update_stream_scaling_settings(
8048 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8049 
8050 	/* ABM settings */
8051 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8052 
8053 	/*
8054 	 * Color management settings. We also update color properties
8055 	 * when a modeset is needed, to ensure it gets reprogrammed.
8056 	 */
8057 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8058 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8059 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8060 		if (ret)
8061 			goto fail;
8062 	}
8063 
8064 	/* Update Freesync settings. */
8065 	get_freesync_config_for_crtc(dm_new_crtc_state,
8066 				     dm_new_conn_state);
8067 
8068 	return ret;
8069 
8070 fail:
8071 	if (new_stream)
8072 		dc_stream_release(new_stream);
8073 	return ret;
8074 }
8075 
8076 static bool should_reset_plane(struct drm_atomic_state *state,
8077 			       struct drm_plane *plane,
8078 			       struct drm_plane_state *old_plane_state,
8079 			       struct drm_plane_state *new_plane_state)
8080 {
8081 	struct drm_plane *other;
8082 	struct drm_plane_state *old_other_state, *new_other_state;
8083 	struct drm_crtc_state *new_crtc_state;
8084 	int i;
8085 
8086 	/*
8087 	 * TODO: Remove this hack once the checks below are sufficient
8088 	 * enough to determine when we need to reset all the planes on
8089 	 * the stream.
8090 	 */
8091 	if (state->allow_modeset)
8092 		return true;
8093 
8094 	/* Exit early if we know that we're adding or removing the plane. */
8095 	if (old_plane_state->crtc != new_plane_state->crtc)
8096 		return true;
8097 
8098 	/* old crtc == new_crtc == NULL, plane not in context. */
8099 	if (!new_plane_state->crtc)
8100 		return false;
8101 
8102 	new_crtc_state =
8103 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8104 
8105 	if (!new_crtc_state)
8106 		return true;
8107 
8108 	/* CRTC Degamma changes currently require us to recreate planes. */
8109 	if (new_crtc_state->color_mgmt_changed)
8110 		return true;
8111 
8112 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8113 		return true;
8114 
8115 	/*
8116 	 * If there are any new primary or overlay planes being added or
8117 	 * removed then the z-order can potentially change. To ensure
8118 	 * correct z-order and pipe acquisition the current DC architecture
8119 	 * requires us to remove and recreate all existing planes.
8120 	 *
8121 	 * TODO: Come up with a more elegant solution for this.
8122 	 */
8123 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8124 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8125 			continue;
8126 
8127 		if (old_other_state->crtc != new_plane_state->crtc &&
8128 		    new_other_state->crtc != new_plane_state->crtc)
8129 			continue;
8130 
8131 		if (old_other_state->crtc != new_other_state->crtc)
8132 			return true;
8133 
8134 		/* TODO: Remove this once we can handle fast format changes. */
8135 		if (old_other_state->fb && new_other_state->fb &&
8136 		    old_other_state->fb->format != new_other_state->fb->format)
8137 			return true;
8138 	}
8139 
8140 	return false;
8141 }
8142 
8143 static int dm_update_plane_state(struct dc *dc,
8144 				 struct drm_atomic_state *state,
8145 				 struct drm_plane *plane,
8146 				 struct drm_plane_state *old_plane_state,
8147 				 struct drm_plane_state *new_plane_state,
8148 				 bool enable,
8149 				 bool *lock_and_validation_needed)
8150 {
8151 
8152 	struct dm_atomic_state *dm_state = NULL;
8153 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8154 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8155 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8156 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8157 	struct amdgpu_crtc *new_acrtc;
8158 	bool needs_reset;
8159 	int ret = 0;
8160 
8161 
8162 	new_plane_crtc = new_plane_state->crtc;
8163 	old_plane_crtc = old_plane_state->crtc;
8164 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8165 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8166 
8167 	/*TODO Implement better atomic check for cursor plane */
8168 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8169 		if (!enable || !new_plane_crtc ||
8170 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8171 			return 0;
8172 
8173 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8174 
8175 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8176 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8177 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8178 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8179 			return -EINVAL;
8180 		}
8181 
8182 		return 0;
8183 	}
8184 
8185 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8186 					 new_plane_state);
8187 
8188 	/* Remove any changed/removed planes */
8189 	if (!enable) {
8190 		if (!needs_reset)
8191 			return 0;
8192 
8193 		if (!old_plane_crtc)
8194 			return 0;
8195 
8196 		old_crtc_state = drm_atomic_get_old_crtc_state(
8197 				state, old_plane_crtc);
8198 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8199 
8200 		if (!dm_old_crtc_state->stream)
8201 			return 0;
8202 
8203 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8204 				plane->base.id, old_plane_crtc->base.id);
8205 
8206 		ret = dm_atomic_get_state(state, &dm_state);
8207 		if (ret)
8208 			return ret;
8209 
8210 		if (!dc_remove_plane_from_context(
8211 				dc,
8212 				dm_old_crtc_state->stream,
8213 				dm_old_plane_state->dc_state,
8214 				dm_state->context)) {
8215 
8216 			ret = EINVAL;
8217 			return ret;
8218 		}
8219 
8220 
8221 		dc_plane_state_release(dm_old_plane_state->dc_state);
8222 		dm_new_plane_state->dc_state = NULL;
8223 
8224 		*lock_and_validation_needed = true;
8225 
8226 	} else { /* Add new planes */
8227 		struct dc_plane_state *dc_new_plane_state;
8228 
8229 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8230 			return 0;
8231 
8232 		if (!new_plane_crtc)
8233 			return 0;
8234 
8235 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8236 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8237 
8238 		if (!dm_new_crtc_state->stream)
8239 			return 0;
8240 
8241 		if (!needs_reset)
8242 			return 0;
8243 
8244 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8245 		if (ret)
8246 			return ret;
8247 
8248 		WARN_ON(dm_new_plane_state->dc_state);
8249 
8250 		dc_new_plane_state = dc_create_plane_state(dc);
8251 		if (!dc_new_plane_state)
8252 			return -ENOMEM;
8253 
8254 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8255 				plane->base.id, new_plane_crtc->base.id);
8256 
8257 		ret = fill_dc_plane_attributes(
8258 			new_plane_crtc->dev->dev_private,
8259 			dc_new_plane_state,
8260 			new_plane_state,
8261 			new_crtc_state);
8262 		if (ret) {
8263 			dc_plane_state_release(dc_new_plane_state);
8264 			return ret;
8265 		}
8266 
8267 		ret = dm_atomic_get_state(state, &dm_state);
8268 		if (ret) {
8269 			dc_plane_state_release(dc_new_plane_state);
8270 			return ret;
8271 		}
8272 
8273 		/*
8274 		 * Any atomic check errors that occur after this will
8275 		 * not need a release. The plane state will be attached
8276 		 * to the stream, and therefore part of the atomic
8277 		 * state. It'll be released when the atomic state is
8278 		 * cleaned.
8279 		 */
8280 		if (!dc_add_plane_to_context(
8281 				dc,
8282 				dm_new_crtc_state->stream,
8283 				dc_new_plane_state,
8284 				dm_state->context)) {
8285 
8286 			dc_plane_state_release(dc_new_plane_state);
8287 			return -EINVAL;
8288 		}
8289 
8290 		dm_new_plane_state->dc_state = dc_new_plane_state;
8291 
8292 		/* Tell DC to do a full surface update every time there
8293 		 * is a plane change. Inefficient, but works for now.
8294 		 */
8295 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8296 
8297 		*lock_and_validation_needed = true;
8298 	}
8299 
8300 
8301 	return ret;
8302 }
8303 
8304 static int
8305 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8306 				    struct drm_atomic_state *state,
8307 				    enum surface_update_type *out_type)
8308 {
8309 	struct dc *dc = dm->dc;
8310 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8311 	int i, j, num_plane, ret = 0;
8312 	struct drm_plane_state *old_plane_state, *new_plane_state;
8313 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8314 	struct drm_crtc *new_plane_crtc;
8315 	struct drm_plane *plane;
8316 
8317 	struct drm_crtc *crtc;
8318 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8319 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8320 	struct dc_stream_status *status = NULL;
8321 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8322 	struct surface_info_bundle {
8323 		struct dc_surface_update surface_updates[MAX_SURFACES];
8324 		struct dc_plane_info plane_infos[MAX_SURFACES];
8325 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8326 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8327 		struct dc_stream_update stream_update;
8328 	} *bundle;
8329 
8330 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8331 
8332 	if (!bundle) {
8333 		DRM_ERROR("Failed to allocate update bundle\n");
8334 		/* Set type to FULL to avoid crashing in DC*/
8335 		update_type = UPDATE_TYPE_FULL;
8336 		goto cleanup;
8337 	}
8338 
8339 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8340 
8341 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8342 
8343 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8344 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8345 		num_plane = 0;
8346 
8347 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8348 			update_type = UPDATE_TYPE_FULL;
8349 			goto cleanup;
8350 		}
8351 
8352 		if (!new_dm_crtc_state->stream)
8353 			continue;
8354 
8355 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8356 			const struct amdgpu_framebuffer *amdgpu_fb =
8357 				to_amdgpu_framebuffer(new_plane_state->fb);
8358 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8359 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8360 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8361 			uint64_t tiling_flags;
8362 			bool tmz_surface = false;
8363 
8364 			new_plane_crtc = new_plane_state->crtc;
8365 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8366 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8367 
8368 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8369 				continue;
8370 
8371 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8372 				update_type = UPDATE_TYPE_FULL;
8373 				goto cleanup;
8374 			}
8375 
8376 			if (crtc != new_plane_crtc)
8377 				continue;
8378 
8379 			bundle->surface_updates[num_plane].surface =
8380 					new_dm_plane_state->dc_state;
8381 
8382 			if (new_crtc_state->mode_changed) {
8383 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8384 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8385 			}
8386 
8387 			if (new_crtc_state->color_mgmt_changed) {
8388 				bundle->surface_updates[num_plane].gamma =
8389 						new_dm_plane_state->dc_state->gamma_correction;
8390 				bundle->surface_updates[num_plane].in_transfer_func =
8391 						new_dm_plane_state->dc_state->in_transfer_func;
8392 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8393 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8394 				bundle->stream_update.gamut_remap =
8395 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8396 				bundle->stream_update.output_csc_transform =
8397 						&new_dm_crtc_state->stream->csc_color_matrix;
8398 				bundle->stream_update.out_transfer_func =
8399 						new_dm_crtc_state->stream->out_transfer_func;
8400 			}
8401 
8402 			ret = fill_dc_scaling_info(new_plane_state,
8403 						   scaling_info);
8404 			if (ret)
8405 				goto cleanup;
8406 
8407 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8408 
8409 			if (amdgpu_fb) {
8410 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8411 				if (ret)
8412 					goto cleanup;
8413 
8414 				ret = fill_dc_plane_info_and_addr(
8415 					dm->adev, new_plane_state, tiling_flags,
8416 					plane_info,
8417 					&flip_addr->address, tmz_surface,
8418 					false);
8419 				if (ret)
8420 					goto cleanup;
8421 
8422 				bundle->surface_updates[num_plane].plane_info = plane_info;
8423 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8424 			}
8425 
8426 			num_plane++;
8427 		}
8428 
8429 		if (num_plane == 0)
8430 			continue;
8431 
8432 		ret = dm_atomic_get_state(state, &dm_state);
8433 		if (ret)
8434 			goto cleanup;
8435 
8436 		old_dm_state = dm_atomic_get_old_state(state);
8437 		if (!old_dm_state) {
8438 			ret = -EINVAL;
8439 			goto cleanup;
8440 		}
8441 
8442 		status = dc_stream_get_status_from_state(old_dm_state->context,
8443 							 new_dm_crtc_state->stream);
8444 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8445 		/*
8446 		 * TODO: DC modifies the surface during this call so we need
8447 		 * to lock here - find a way to do this without locking.
8448 		 */
8449 		mutex_lock(&dm->dc_lock);
8450 		update_type = dc_check_update_surfaces_for_stream(
8451 				dc,	bundle->surface_updates, num_plane,
8452 				&bundle->stream_update, status);
8453 		mutex_unlock(&dm->dc_lock);
8454 
8455 		if (update_type > UPDATE_TYPE_MED) {
8456 			update_type = UPDATE_TYPE_FULL;
8457 			goto cleanup;
8458 		}
8459 	}
8460 
8461 cleanup:
8462 	kfree(bundle);
8463 
8464 	*out_type = update_type;
8465 	return ret;
8466 }
8467 #if defined(CONFIG_DRM_AMD_DC_DCN)
8468 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8469 {
8470 	struct drm_connector *connector;
8471 	struct drm_connector_state *conn_state;
8472 	struct amdgpu_dm_connector *aconnector = NULL;
8473 	int i;
8474 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8475 		if (conn_state->crtc != crtc)
8476 			continue;
8477 
8478 		aconnector = to_amdgpu_dm_connector(connector);
8479 		if (!aconnector->port || !aconnector->mst_port)
8480 			aconnector = NULL;
8481 		else
8482 			break;
8483 	}
8484 
8485 	if (!aconnector)
8486 		return 0;
8487 
8488 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8489 }
8490 #endif
8491 
8492 /**
8493  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8494  * @dev: The DRM device
8495  * @state: The atomic state to commit
8496  *
8497  * Validate that the given atomic state is programmable by DC into hardware.
8498  * This involves constructing a &struct dc_state reflecting the new hardware
8499  * state we wish to commit, then querying DC to see if it is programmable. It's
8500  * important not to modify the existing DC state. Otherwise, atomic_check
8501  * may unexpectedly commit hardware changes.
8502  *
8503  * When validating the DC state, it's important that the right locks are
8504  * acquired. For full updates case which removes/adds/updates streams on one
8505  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8506  * that any such full update commit will wait for completion of any outstanding
8507  * flip using DRMs synchronization events. See
8508  * dm_determine_update_type_for_commit()
8509  *
8510  * Note that DM adds the affected connectors for all CRTCs in state, when that
8511  * might not seem necessary. This is because DC stream creation requires the
8512  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8513  * be possible but non-trivial - a possible TODO item.
8514  *
8515  * Return: -Error code if validation failed.
8516  */
8517 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8518 				  struct drm_atomic_state *state)
8519 {
8520 	struct amdgpu_device *adev = dev->dev_private;
8521 	struct dm_atomic_state *dm_state = NULL;
8522 	struct dc *dc = adev->dm.dc;
8523 	struct drm_connector *connector;
8524 	struct drm_connector_state *old_con_state, *new_con_state;
8525 	struct drm_crtc *crtc;
8526 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8527 	struct drm_plane *plane;
8528 	struct drm_plane_state *old_plane_state, *new_plane_state;
8529 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8530 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8531 	enum dc_status status;
8532 	int ret, i;
8533 
8534 	/*
8535 	 * This bool will be set for true for any modeset/reset
8536 	 * or plane update which implies non fast surface update.
8537 	 */
8538 	bool lock_and_validation_needed = false;
8539 
8540 	ret = drm_atomic_helper_check_modeset(dev, state);
8541 	if (ret)
8542 		goto fail;
8543 
8544 #if defined(CONFIG_DRM_AMD_DC_DCN)
8545 	if (adev->asic_type >= CHIP_NAVI10) {
8546 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8547 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8548 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8549 				if (ret)
8550 					goto fail;
8551 			}
8552 		}
8553 	}
8554 #endif
8555 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8556 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8557 		    !new_crtc_state->color_mgmt_changed &&
8558 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8559 			continue;
8560 
8561 		if (!new_crtc_state->enable)
8562 			continue;
8563 
8564 		ret = drm_atomic_add_affected_connectors(state, crtc);
8565 		if (ret)
8566 			return ret;
8567 
8568 		ret = drm_atomic_add_affected_planes(state, crtc);
8569 		if (ret)
8570 			goto fail;
8571 	}
8572 
8573 	/*
8574 	 * Add all primary and overlay planes on the CRTC to the state
8575 	 * whenever a plane is enabled to maintain correct z-ordering
8576 	 * and to enable fast surface updates.
8577 	 */
8578 	drm_for_each_crtc(crtc, dev) {
8579 		bool modified = false;
8580 
8581 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8582 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8583 				continue;
8584 
8585 			if (new_plane_state->crtc == crtc ||
8586 			    old_plane_state->crtc == crtc) {
8587 				modified = true;
8588 				break;
8589 			}
8590 		}
8591 
8592 		if (!modified)
8593 			continue;
8594 
8595 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8596 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8597 				continue;
8598 
8599 			new_plane_state =
8600 				drm_atomic_get_plane_state(state, plane);
8601 
8602 			if (IS_ERR(new_plane_state)) {
8603 				ret = PTR_ERR(new_plane_state);
8604 				goto fail;
8605 			}
8606 		}
8607 	}
8608 
8609 	/* Remove exiting planes if they are modified */
8610 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8611 		ret = dm_update_plane_state(dc, state, plane,
8612 					    old_plane_state,
8613 					    new_plane_state,
8614 					    false,
8615 					    &lock_and_validation_needed);
8616 		if (ret)
8617 			goto fail;
8618 	}
8619 
8620 	/* Disable all crtcs which require disable */
8621 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8622 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8623 					   old_crtc_state,
8624 					   new_crtc_state,
8625 					   false,
8626 					   &lock_and_validation_needed);
8627 		if (ret)
8628 			goto fail;
8629 	}
8630 
8631 	/* Enable all crtcs which require enable */
8632 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8633 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8634 					   old_crtc_state,
8635 					   new_crtc_state,
8636 					   true,
8637 					   &lock_and_validation_needed);
8638 		if (ret)
8639 			goto fail;
8640 	}
8641 
8642 	/* Add new/modified planes */
8643 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8644 		ret = dm_update_plane_state(dc, state, plane,
8645 					    old_plane_state,
8646 					    new_plane_state,
8647 					    true,
8648 					    &lock_and_validation_needed);
8649 		if (ret)
8650 			goto fail;
8651 	}
8652 
8653 	/* Run this here since we want to validate the streams we created */
8654 	ret = drm_atomic_helper_check_planes(dev, state);
8655 	if (ret)
8656 		goto fail;
8657 
8658 	if (state->legacy_cursor_update) {
8659 		/*
8660 		 * This is a fast cursor update coming from the plane update
8661 		 * helper, check if it can be done asynchronously for better
8662 		 * performance.
8663 		 */
8664 		state->async_update =
8665 			!drm_atomic_helper_async_check(dev, state);
8666 
8667 		/*
8668 		 * Skip the remaining global validation if this is an async
8669 		 * update. Cursor updates can be done without affecting
8670 		 * state or bandwidth calcs and this avoids the performance
8671 		 * penalty of locking the private state object and
8672 		 * allocating a new dc_state.
8673 		 */
8674 		if (state->async_update)
8675 			return 0;
8676 	}
8677 
8678 	/* Check scaling and underscan changes*/
8679 	/* TODO Removed scaling changes validation due to inability to commit
8680 	 * new stream into context w\o causing full reset. Need to
8681 	 * decide how to handle.
8682 	 */
8683 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8684 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8685 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8686 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8687 
8688 		/* Skip any modesets/resets */
8689 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8690 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8691 			continue;
8692 
8693 		/* Skip any thing not scale or underscan changes */
8694 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8695 			continue;
8696 
8697 		overall_update_type = UPDATE_TYPE_FULL;
8698 		lock_and_validation_needed = true;
8699 	}
8700 
8701 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8702 	if (ret)
8703 		goto fail;
8704 
8705 	if (overall_update_type < update_type)
8706 		overall_update_type = update_type;
8707 
8708 	/*
8709 	 * lock_and_validation_needed was an old way to determine if we need to set
8710 	 * the global lock. Leaving it in to check if we broke any corner cases
8711 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8712 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8713 	 */
8714 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8715 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8716 
8717 	if (overall_update_type > UPDATE_TYPE_FAST) {
8718 		ret = dm_atomic_get_state(state, &dm_state);
8719 		if (ret)
8720 			goto fail;
8721 
8722 		ret = do_aquire_global_lock(dev, state);
8723 		if (ret)
8724 			goto fail;
8725 
8726 #if defined(CONFIG_DRM_AMD_DC_DCN)
8727 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8728 			goto fail;
8729 
8730 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8731 		if (ret)
8732 			goto fail;
8733 #endif
8734 
8735 		/*
8736 		 * Perform validation of MST topology in the state:
8737 		 * We need to perform MST atomic check before calling
8738 		 * dc_validate_global_state(), or there is a chance
8739 		 * to get stuck in an infinite loop and hang eventually.
8740 		 */
8741 		ret = drm_dp_mst_atomic_check(state);
8742 		if (ret)
8743 			goto fail;
8744 		status = dc_validate_global_state(dc, dm_state->context, false);
8745 		if (status != DC_OK) {
8746 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8747 				       dc_status_to_str(status), status);
8748 			ret = -EINVAL;
8749 			goto fail;
8750 		}
8751 	} else {
8752 		/*
8753 		 * The commit is a fast update. Fast updates shouldn't change
8754 		 * the DC context, affect global validation, and can have their
8755 		 * commit work done in parallel with other commits not touching
8756 		 * the same resource. If we have a new DC context as part of
8757 		 * the DM atomic state from validation we need to free it and
8758 		 * retain the existing one instead.
8759 		 */
8760 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8761 
8762 		new_dm_state = dm_atomic_get_new_state(state);
8763 		old_dm_state = dm_atomic_get_old_state(state);
8764 
8765 		if (new_dm_state && old_dm_state) {
8766 			if (new_dm_state->context)
8767 				dc_release_state(new_dm_state->context);
8768 
8769 			new_dm_state->context = old_dm_state->context;
8770 
8771 			if (old_dm_state->context)
8772 				dc_retain_state(old_dm_state->context);
8773 		}
8774 	}
8775 
8776 	/* Store the overall update type for use later in atomic check. */
8777 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8778 		struct dm_crtc_state *dm_new_crtc_state =
8779 			to_dm_crtc_state(new_crtc_state);
8780 
8781 		dm_new_crtc_state->update_type = (int)overall_update_type;
8782 	}
8783 
8784 	/* Must be success */
8785 	WARN_ON(ret);
8786 	return ret;
8787 
8788 fail:
8789 	if (ret == -EDEADLK)
8790 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8791 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8792 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8793 	else
8794 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8795 
8796 	return ret;
8797 }
8798 
8799 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8800 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8801 {
8802 	uint8_t dpcd_data;
8803 	bool capable = false;
8804 
8805 	if (amdgpu_dm_connector->dc_link &&
8806 		dm_helpers_dp_read_dpcd(
8807 				NULL,
8808 				amdgpu_dm_connector->dc_link,
8809 				DP_DOWN_STREAM_PORT_COUNT,
8810 				&dpcd_data,
8811 				sizeof(dpcd_data))) {
8812 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8813 	}
8814 
8815 	return capable;
8816 }
8817 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8818 					struct edid *edid)
8819 {
8820 	int i;
8821 	bool edid_check_required;
8822 	struct detailed_timing *timing;
8823 	struct detailed_non_pixel *data;
8824 	struct detailed_data_monitor_range *range;
8825 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8826 			to_amdgpu_dm_connector(connector);
8827 	struct dm_connector_state *dm_con_state = NULL;
8828 
8829 	struct drm_device *dev = connector->dev;
8830 	struct amdgpu_device *adev = dev->dev_private;
8831 	bool freesync_capable = false;
8832 
8833 	if (!connector->state) {
8834 		DRM_ERROR("%s - Connector has no state", __func__);
8835 		goto update;
8836 	}
8837 
8838 	if (!edid) {
8839 		dm_con_state = to_dm_connector_state(connector->state);
8840 
8841 		amdgpu_dm_connector->min_vfreq = 0;
8842 		amdgpu_dm_connector->max_vfreq = 0;
8843 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8844 
8845 		goto update;
8846 	}
8847 
8848 	dm_con_state = to_dm_connector_state(connector->state);
8849 
8850 	edid_check_required = false;
8851 	if (!amdgpu_dm_connector->dc_sink) {
8852 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8853 		goto update;
8854 	}
8855 	if (!adev->dm.freesync_module)
8856 		goto update;
8857 	/*
8858 	 * if edid non zero restrict freesync only for dp and edp
8859 	 */
8860 	if (edid) {
8861 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8862 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8863 			edid_check_required = is_dp_capable_without_timing_msa(
8864 						adev->dm.dc,
8865 						amdgpu_dm_connector);
8866 		}
8867 	}
8868 	if (edid_check_required == true && (edid->version > 1 ||
8869 	   (edid->version == 1 && edid->revision > 1))) {
8870 		for (i = 0; i < 4; i++) {
8871 
8872 			timing	= &edid->detailed_timings[i];
8873 			data	= &timing->data.other_data;
8874 			range	= &data->data.range;
8875 			/*
8876 			 * Check if monitor has continuous frequency mode
8877 			 */
8878 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8879 				continue;
8880 			/*
8881 			 * Check for flag range limits only. If flag == 1 then
8882 			 * no additional timing information provided.
8883 			 * Default GTF, GTF Secondary curve and CVT are not
8884 			 * supported
8885 			 */
8886 			if (range->flags != 1)
8887 				continue;
8888 
8889 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8890 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8891 			amdgpu_dm_connector->pixel_clock_mhz =
8892 				range->pixel_clock_mhz * 10;
8893 			break;
8894 		}
8895 
8896 		if (amdgpu_dm_connector->max_vfreq -
8897 		    amdgpu_dm_connector->min_vfreq > 10) {
8898 
8899 			freesync_capable = true;
8900 		}
8901 	}
8902 
8903 update:
8904 	if (dm_con_state)
8905 		dm_con_state->freesync_capable = freesync_capable;
8906 
8907 	if (connector->vrr_capable_property)
8908 		drm_connector_set_vrr_capable_property(connector,
8909 						       freesync_capable);
8910 }
8911 
8912 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8913 {
8914 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8915 
8916 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8917 		return;
8918 	if (link->type == dc_connection_none)
8919 		return;
8920 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8921 					dpcd_data, sizeof(dpcd_data))) {
8922 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8923 
8924 		if (dpcd_data[0] == 0) {
8925 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8926 			link->psr_settings.psr_feature_enabled = false;
8927 		} else {
8928 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
8929 			link->psr_settings.psr_feature_enabled = true;
8930 		}
8931 
8932 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8933 	}
8934 }
8935 
8936 /*
8937  * amdgpu_dm_link_setup_psr() - configure psr link
8938  * @stream: stream state
8939  *
8940  * Return: true if success
8941  */
8942 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8943 {
8944 	struct dc_link *link = NULL;
8945 	struct psr_config psr_config = {0};
8946 	struct psr_context psr_context = {0};
8947 	bool ret = false;
8948 
8949 	if (stream == NULL)
8950 		return false;
8951 
8952 	link = stream->link;
8953 
8954 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8955 
8956 	if (psr_config.psr_version > 0) {
8957 		psr_config.psr_exit_link_training_required = 0x1;
8958 		psr_config.psr_frame_capture_indication_req = 0;
8959 		psr_config.psr_rfb_setup_time = 0x37;
8960 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8961 		psr_config.allow_smu_optimizations = 0x0;
8962 
8963 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8964 
8965 	}
8966 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
8967 
8968 	return ret;
8969 }
8970 
8971 /*
8972  * amdgpu_dm_psr_enable() - enable psr f/w
8973  * @stream: stream state
8974  *
8975  * Return: true if success
8976  */
8977 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8978 {
8979 	struct dc_link *link = stream->link;
8980 	unsigned int vsync_rate_hz = 0;
8981 	struct dc_static_screen_params params = {0};
8982 	/* Calculate number of static frames before generating interrupt to
8983 	 * enter PSR.
8984 	 */
8985 	// Init fail safe of 2 frames static
8986 	unsigned int num_frames_static = 2;
8987 
8988 	DRM_DEBUG_DRIVER("Enabling psr...\n");
8989 
8990 	vsync_rate_hz = div64_u64(div64_u64((
8991 			stream->timing.pix_clk_100hz * 100),
8992 			stream->timing.v_total),
8993 			stream->timing.h_total);
8994 
8995 	/* Round up
8996 	 * Calculate number of frames such that at least 30 ms of time has
8997 	 * passed.
8998 	 */
8999 	if (vsync_rate_hz != 0) {
9000 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9001 		num_frames_static = (30000 / frame_time_microsec) + 1;
9002 	}
9003 
9004 	params.triggers.cursor_update = true;
9005 	params.triggers.overlay_update = true;
9006 	params.triggers.surface_update = true;
9007 	params.num_frames = num_frames_static;
9008 
9009 	dc_stream_set_static_screen_params(link->ctx->dc,
9010 					   &stream, 1,
9011 					   &params);
9012 
9013 	return dc_link_set_psr_allow_active(link, true, false);
9014 }
9015 
9016 /*
9017  * amdgpu_dm_psr_disable() - disable psr f/w
9018  * @stream:  stream state
9019  *
9020  * Return: true if success
9021  */
9022 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9023 {
9024 
9025 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9026 
9027 	return dc_link_set_psr_allow_active(stream->link, false, true);
9028 }
9029