1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #endif
103 
104 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
105 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
106 
107 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
108 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
109 
110 /* Number of bytes in PSP header for firmware. */
111 #define PSP_HEADER_BYTES 0x100
112 
113 /* Number of bytes in PSP footer for firmware. */
114 #define PSP_FOOTER_BYTES 0x100
115 
116 /**
117  * DOC: overview
118  *
119  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
121  * requests into DC requests, and DC responses into DRM responses.
122  *
123  * The root control structure is &struct amdgpu_display_manager.
124  */
125 
126 /* basic init/fini API */
127 static int amdgpu_dm_init(struct amdgpu_device *adev);
128 static void amdgpu_dm_fini(struct amdgpu_device *adev);
129 
130 /*
131  * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
133  * drm_encoder, drm_mode_config
134  *
135  * Returns 0 on success
136  */
137 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
138 /* removes and deallocates the drm structures, created by the above function */
139 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
140 
141 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
142 				struct drm_plane *plane,
143 				unsigned long possible_crtcs,
144 				const struct dc_plane_cap *plane_cap);
145 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
146 			       struct drm_plane *plane,
147 			       uint32_t link_index);
148 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
149 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
150 				    uint32_t link_index,
151 				    struct amdgpu_encoder *amdgpu_encoder);
152 static int amdgpu_dm_encoder_init(struct drm_device *dev,
153 				  struct amdgpu_encoder *aencoder,
154 				  uint32_t link_index);
155 
156 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
157 
158 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
159 				   struct drm_atomic_state *state,
160 				   bool nonblock);
161 
162 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
163 
164 static int amdgpu_dm_atomic_check(struct drm_device *dev,
165 				  struct drm_atomic_state *state);
166 
167 static void handle_cursor_update(struct drm_plane *plane,
168 				 struct drm_plane_state *old_plane_state);
169 
170 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
171 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
172 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
173 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
174 
175 
176 /*
177  * dm_vblank_get_counter
178  *
179  * @brief
180  * Get counter for number of vertical blanks
181  *
182  * @param
183  * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
185  *
186  * @return
187  * Counter for vertical blanks
188  */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];
	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	if (acrtc_state->stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc_state->stream);
}
208 
209 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
210 				  u32 *vbl, u32 *position)
211 {
212 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
213 
214 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
215 		return -EINVAL;
216 	else {
217 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
218 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
219 						acrtc->base.state);
220 
		if (acrtc_state->stream == NULL) {
222 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
223 				  crtc);
224 			return 0;
225 		}
226 
227 		/*
228 		 * TODO rework base driver to use values directly.
229 		 * for now parse it back into reg-format
230 		 */
231 		dc_stream_get_scanoutpos(acrtc_state->stream,
232 					 &v_blank_start,
233 					 &v_blank_end,
234 					 &h_position,
235 					 &v_position);
236 
237 		*position = v_position | (h_position << 16);
238 		*vbl = v_blank_start | (v_blank_end << 16);
239 	}
240 
241 	return 0;
242 }
243 
244 static bool dm_is_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return true;
248 }
249 
250 static int dm_wait_for_idle(void *handle)
251 {
252 	/* XXX todo */
253 	return 0;
254 }
255 
256 static bool dm_check_soft_reset(void *handle)
257 {
258 	return false;
259 }
260 
261 static int dm_soft_reset(void *handle)
262 {
263 	/* XXX todo */
264 	return 0;
265 }
266 
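/*
 * Look up the amdgpu_crtc whose OTG (output timing generator) instance
 * matches @otg_inst. IRQ handlers recover the OTG instance by subtracting
 * the per-type IRQ source base from the raw IRQ source ID.
 */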
267 static struct amdgpu_crtc *
268 get_crtc_by_otg_inst(struct amdgpu_device *adev,
269 		     int otg_inst)
270 {
271 	struct drm_device *dev = adev->ddev;
272 	struct drm_crtc *crtc;
273 	struct amdgpu_crtc *amdgpu_crtc;
274 
275 	if (otg_inst == -1) {
276 		WARN_ON(1);
277 		return adev->mode_info.crtcs[0];
278 	}
279 
280 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
281 		amdgpu_crtc = to_amdgpu_crtc(crtc);
282 
283 		if (amdgpu_crtc->otg_inst == otg_inst)
284 			return amdgpu_crtc;
285 	}
286 
287 	return NULL;
288 }
289 
290 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
291 {
292 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
293 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
294 }
295 
296 /**
297  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: common IRQ parameters, carrying the amdgpu device and
 *                    the pageflip IRQ source
299  *
300  * Handles the pageflip interrupt by notifying all interested parties
301  * that the pageflip has been completed.
302  */
303 static void dm_pflip_high_irq(void *interrupt_params)
304 {
305 	struct amdgpu_crtc *amdgpu_crtc;
306 	struct common_irq_params *irq_params = interrupt_params;
307 	struct amdgpu_device *adev = irq_params->adev;
308 	unsigned long flags;
309 	struct drm_pending_vblank_event *e;
310 	struct dm_crtc_state *acrtc_state;
311 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
312 	bool vrr_active;
313 
314 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
315 
316 	/* IRQ could occur when in initial stage */
317 	/* TODO work and BO cleanup */
318 	if (amdgpu_crtc == NULL) {
319 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
320 		return;
321 	}
322 
323 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
324 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
327 						 amdgpu_crtc->pflip_status,
328 						 AMDGPU_FLIP_SUBMITTED,
329 						 amdgpu_crtc->crtc_id,
330 						 amdgpu_crtc);
331 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
332 		return;
333 	}
334 
335 	/* page flip completed. */
336 	e = amdgpu_crtc->event;
337 	amdgpu_crtc->event = NULL;
338 
	WARN_ON(!e);
341 
342 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
343 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
344 
345 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
346 	if (!vrr_active ||
347 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
348 				      &v_blank_end, &hpos, &vpos) ||
349 	    (vpos < v_blank_start)) {
350 		/* Update to correct count and vblank timestamp if racing with
351 		 * vblank irq. This also updates to the correct vblank timestamp
352 		 * even in VRR mode, as scanout is past the front-porch atm.
353 		 */
354 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
355 
356 		/* Wake up userspace by sending the pageflip event with proper
357 		 * count and timestamp of vblank of flip completion.
358 		 */
359 		if (e) {
360 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
361 
362 			/* Event sent, so done with vblank for this flip */
363 			drm_crtc_vblank_put(&amdgpu_crtc->base);
364 		}
365 	} else if (e) {
366 		/* VRR active and inside front-porch: vblank count and
367 		 * timestamp for pageflip event will only be up to date after
368 		 * drm_crtc_handle_vblank() has been executed from late vblank
369 		 * irq handler after start of back-porch (vline 0). We queue the
370 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
371 		 * updated timestamp and count, once it runs after us.
372 		 *
373 		 * We need to open-code this instead of using the helper
374 		 * drm_crtc_arm_vblank_event(), as that helper would
375 		 * call drm_crtc_accurate_vblank_count(), which we must
376 		 * not call in VRR mode while we are in front-porch!
377 		 */
378 
379 		/* sequence will be replaced by real count during send-out. */
380 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
381 		e->pipe = amdgpu_crtc->crtc_id;
382 
383 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
384 		e = NULL;
385 	}
386 
387 	/* Keep track of vblank of this flip for flip throttling. We use the
388 	 * cooked hw counter, as that one incremented at start of this vblank
389 	 * of pageflip completion, so last_flip_vblank is the forbidden count
390 	 * for queueing new pageflips if vsync + VRR is enabled.
391 	 */
392 	amdgpu_crtc->last_flip_vblank =
393 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
394 
395 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
396 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
397 
398 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
399 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
400 			 vrr_active, (int) !e);
401 }
402 
403 static void dm_vupdate_high_irq(void *interrupt_params)
404 {
405 	struct common_irq_params *irq_params = interrupt_params;
406 	struct amdgpu_device *adev = irq_params->adev;
407 	struct amdgpu_crtc *acrtc;
408 	struct dm_crtc_state *acrtc_state;
409 	unsigned long flags;
410 
411 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
412 
413 	if (acrtc) {
414 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
415 
416 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
417 			      acrtc->crtc_id,
418 			      amdgpu_dm_vrr_active(acrtc_state));
419 
420 		/* Core vblank handling is done here after end of front-porch in
421 		 * vrr mode, as vblank timestamping will give valid results
422 		 * while now done after front-porch. This will also deliver
423 		 * page-flip completion events that have been queued to us
424 		 * if a pageflip happened inside front-porch.
425 		 */
426 		if (amdgpu_dm_vrr_active(acrtc_state)) {
427 			drm_crtc_handle_vblank(&acrtc->base);
428 
429 			/* BTR processing for pre-DCE12 ASICs */
430 			if (acrtc_state->stream &&
431 			    adev->family < AMDGPU_FAMILY_AI) {
432 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
433 				mod_freesync_handle_v_update(
434 				    adev->dm.freesync_module,
435 				    acrtc_state->stream,
436 				    &acrtc_state->vrr_params);
437 
438 				dc_stream_adjust_vmin_vmax(
439 				    adev->dm.dc,
440 				    acrtc_state->stream,
441 				    &acrtc_state->vrr_params.adjust);
442 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
443 			}
444 		}
445 	}
446 }
447 
448 /**
449  * dm_crtc_high_irq() - Handles CRTC interrupt
450  * @interrupt_params: used for determining the CRTC instance
451  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
453  * event handler.
454  */
455 static void dm_crtc_high_irq(void *interrupt_params)
456 {
457 	struct common_irq_params *irq_params = interrupt_params;
458 	struct amdgpu_device *adev = irq_params->adev;
459 	struct amdgpu_crtc *acrtc;
460 	struct dm_crtc_state *acrtc_state;
461 	unsigned long flags;
462 
463 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
464 	if (!acrtc)
465 		return;
466 
467 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
468 
469 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
470 			 amdgpu_dm_vrr_active(acrtc_state),
471 			 acrtc_state->active_planes);
472 
473 	/**
474 	 * Core vblank handling at start of front-porch is only possible
475 	 * in non-vrr mode, as only there vblank timestamping will give
476 	 * valid results while done in front-porch. Otherwise defer it
477 	 * to dm_vupdate_high_irq after end of front-porch.
478 	 */
479 	if (!amdgpu_dm_vrr_active(acrtc_state))
480 		drm_crtc_handle_vblank(&acrtc->base);
481 
482 	/**
483 	 * Following stuff must happen at start of vblank, for crc
484 	 * computation and below-the-range btr support in vrr mode.
485 	 */
486 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
487 
488 	/* BTR updates need to happen before VUPDATE on Vega and above. */
489 	if (adev->family < AMDGPU_FAMILY_AI)
490 		return;
491 
492 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
493 
494 	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
495 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
496 		mod_freesync_handle_v_update(adev->dm.freesync_module,
497 					     acrtc_state->stream,
498 					     &acrtc_state->vrr_params);
499 
500 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
501 					   &acrtc_state->vrr_params.adjust);
502 	}
503 
504 	/*
	 * If there aren't any active_planes then DCN HUBP may be clock-gated.
506 	 * In that case, pageflip completion interrupts won't fire and pageflip
507 	 * completion events won't get delivered. Prevent this by sending
508 	 * pending pageflip events from here if a flip is still pending.
509 	 *
510 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
511 	 * avoid race conditions between flip programming and completion,
512 	 * which could cause too early flip completion events.
513 	 */
514 	if (adev->family >= AMDGPU_FAMILY_RV &&
515 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
516 	    acrtc_state->active_planes == 0) {
517 		if (acrtc->event) {
518 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
519 			acrtc->event = NULL;
520 			drm_crtc_vblank_put(&acrtc->base);
521 		}
522 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
523 	}
524 
525 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
526 }
527 
528 static int dm_set_clockgating_state(void *handle,
529 		  enum amd_clockgating_state state)
530 {
531 	return 0;
532 }
533 
534 static int dm_set_powergating_state(void *handle,
535 		  enum amd_powergating_state state)
536 {
537 	return 0;
538 }
539 
540 /* Prototypes of private functions */
static int dm_early_init(void *handle);
542 
/* Allocate memory for FBC compressed data */
544 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
545 {
546 	struct drm_device *dev = connector->dev;
547 	struct amdgpu_device *adev = dev->dev_private;
548 	struct dm_comressor_info *compressor = &adev->dm.compressor;
549 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
550 	struct drm_display_mode *mode;
551 	unsigned long max_size = 0;
552 
553 	if (adev->dm.dc->fbc_compressor == NULL)
554 		return;
555 
556 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
557 		return;
558 
559 	if (compressor->bo_ptr)
560 		return;
561 
562 
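	/*
	 * Size the compressor buffer for the largest mode the connector
	 * exposes, at 4 bytes per pixel.
	 */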
563 	list_for_each_entry(mode, &connector->modes, head) {
564 		if (max_size < mode->htotal * mode->vtotal)
565 			max_size = mode->htotal * mode->vtotal;
566 	}
567 
568 	if (max_size) {
569 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
570 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
571 			    &compressor->gpu_addr, &compressor->cpu_addr);
572 
		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}
583 
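/*
 * Fetch the ELD (EDID-Like Data) block, which describes the audio
 * capabilities of the display connected to the given port (audio pin).
 */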
584 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
585 					  int pipe, bool *enabled,
586 					  unsigned char *buf, int max_bytes)
587 {
588 	struct drm_device *dev = dev_get_drvdata(kdev);
589 	struct amdgpu_device *adev = dev->dev_private;
590 	struct drm_connector *connector;
591 	struct drm_connector_list_iter conn_iter;
592 	struct amdgpu_dm_connector *aconnector;
593 	int ret = 0;
594 
595 	*enabled = false;
596 
597 	mutex_lock(&adev->dm.audio_lock);
598 
599 	drm_connector_list_iter_begin(dev, &conn_iter);
600 	drm_for_each_connector_iter(connector, &conn_iter) {
601 		aconnector = to_amdgpu_dm_connector(connector);
602 		if (aconnector->audio_inst != port)
603 			continue;
604 
605 		*enabled = true;
606 		ret = drm_eld_size(connector->eld);
607 		memcpy(buf, connector->eld, min(max_bytes, ret));
608 
609 		break;
610 	}
611 	drm_connector_list_iter_end(&conn_iter);
612 
613 	mutex_unlock(&adev->dm.audio_lock);
614 
615 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
616 
617 	return ret;
618 }
619 
620 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
621 	.get_eld = amdgpu_dm_audio_component_get_eld,
622 };
623 
624 static int amdgpu_dm_audio_component_bind(struct device *kdev,
625 				       struct device *hda_kdev, void *data)
626 {
627 	struct drm_device *dev = dev_get_drvdata(kdev);
628 	struct amdgpu_device *adev = dev->dev_private;
629 	struct drm_audio_component *acomp = data;
630 
631 	acomp->ops = &amdgpu_dm_audio_component_ops;
632 	acomp->dev = kdev;
633 	adev->dm.audio_component = acomp;
634 
635 	return 0;
636 }
637 
638 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
639 					  struct device *hda_kdev, void *data)
640 {
641 	struct drm_device *dev = dev_get_drvdata(kdev);
642 	struct amdgpu_device *adev = dev->dev_private;
643 	struct drm_audio_component *acomp = data;
644 
645 	acomp->ops = NULL;
646 	acomp->dev = NULL;
647 	adev->dm.audio_component = NULL;
648 }
649 
650 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
651 	.bind	= amdgpu_dm_audio_component_bind,
652 	.unbind	= amdgpu_dm_audio_component_unbind,
653 };
654 
655 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
656 {
657 	int i, ret;
658 
659 	if (!amdgpu_audio)
660 		return 0;
661 
662 	adev->mode_info.audio.enabled = true;
663 
664 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
665 
666 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
667 		adev->mode_info.audio.pin[i].channels = -1;
668 		adev->mode_info.audio.pin[i].rate = -1;
669 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
670 		adev->mode_info.audio.pin[i].status_bits = 0;
671 		adev->mode_info.audio.pin[i].category_code = 0;
672 		adev->mode_info.audio.pin[i].connected = false;
673 		adev->mode_info.audio.pin[i].id =
674 			adev->dm.dc->res_pool->audios[i]->inst;
675 		adev->mode_info.audio.pin[i].offset = 0;
676 	}
677 
678 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
679 	if (ret < 0)
680 		return ret;
681 
682 	adev->dm.audio_registered = true;
683 
684 	return 0;
685 }
686 
687 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
688 {
689 	if (!amdgpu_audio)
690 		return;
691 
692 	if (!adev->mode_info.audio.enabled)
693 		return;
694 
695 	if (adev->dm.audio_registered) {
696 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
697 		adev->dm.audio_registered = false;
698 	}
699 
700 	/* TODO: Disable audio? */
701 
702 	adev->mode_info.audio.enabled = false;
703 }
704 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
706 {
707 	struct drm_audio_component *acomp = adev->dm.audio_component;
708 
709 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
710 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
711 
712 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
713 						 pin, -1);
714 	}
715 }
716 
717 static int dm_dmub_hw_init(struct amdgpu_device *adev)
718 {
719 	const struct dmcub_firmware_header_v1_0 *hdr;
720 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
721 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
722 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
723 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
724 	struct abm *abm = adev->dm.dc->res_pool->abm;
725 	struct dmub_srv_hw_params hw_params;
726 	enum dmub_status status;
727 	const unsigned char *fw_inst_const, *fw_bss_data;
728 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
729 	bool has_hw_support;
730 
731 	if (!dmub_srv)
732 		/* DMUB isn't supported on the ASIC. */
733 		return 0;
734 
735 	if (!fb_info) {
736 		DRM_ERROR("No framebuffer info for DMUB service.\n");
737 		return -EINVAL;
738 	}
739 
740 	if (!dmub_fw) {
741 		/* Firmware required for DMUB support. */
742 		DRM_ERROR("No firmware provided for DMUB.\n");
743 		return -EINVAL;
744 	}
745 
746 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
747 	if (status != DMUB_STATUS_OK) {
748 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
749 		return -EINVAL;
750 	}
751 
752 	if (!has_hw_support) {
753 		DRM_INFO("DMUB unsupported on ASIC\n");
754 		return 0;
755 	}
756 
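	/*
	 * The DMCUB binary is wrapped by a PSP header and footer around the
	 * instruction/constant region, followed by the BSS/data region; skip
	 * the PSP wrapper when copying the regions into framebuffer memory.
	 */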
757 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
758 
759 	fw_inst_const = dmub_fw->data +
760 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
761 			PSP_HEADER_BYTES;
762 
763 	fw_bss_data = dmub_fw->data +
764 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
765 		      le32_to_cpu(hdr->inst_const_bytes);
766 
767 	/* Copy firmware and bios info into FB memory. */
768 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
769 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
770 
771 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
772 
773 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
774 	 * amdgpu_ucode_init_single_fw will load dmub firmware
775 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
776 	 * will be done by dm_dmub_hw_init
777 	 */
778 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
779 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
780 				fw_inst_const_size);
781 	}
782 
783 	if (fw_bss_data_size)
784 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
785 		       fw_bss_data, fw_bss_data_size);
786 
787 	/* Copy firmware bios info into FB memory. */
788 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
789 	       adev->bios_size);
790 
791 	/* Reset regions that need to be reset. */
792 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
793 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
794 
795 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
796 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
797 
798 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
799 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
800 
801 	/* Initialize hardware. */
802 	memset(&hw_params, 0, sizeof(hw_params));
803 	hw_params.fb_base = adev->gmc.fb_start;
804 	hw_params.fb_offset = adev->gmc.aper_base;
805 
806 	/* backdoor load firmware and trigger dmub running */
807 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
808 		hw_params.load_inst_const = true;
809 
810 	if (dmcu)
811 		hw_params.psp_version = dmcu->psp_version;
812 
813 	for (i = 0; i < fb_info->num_fb; ++i)
814 		hw_params.fb[i] = &fb_info->fb[i];
815 
816 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
817 	if (status != DMUB_STATUS_OK) {
818 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
819 		return -EINVAL;
820 	}
821 
822 	/* Wait for firmware load to finish. */
823 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
824 	if (status != DMUB_STATUS_OK)
825 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
826 
827 	/* Init DMCU and ABM if available. */
828 	if (dmcu && abm) {
829 		dmcu->funcs->dmcu_init(dmcu);
830 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
831 	}
832 
833 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
834 	if (!adev->dm.dc->ctx->dmub_srv) {
835 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
836 		return -ENOMEM;
837 	}
838 
839 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
840 		 adev->dm.dmcub_fw_version);
841 
842 	return 0;
843 }
844 
845 static int amdgpu_dm_init(struct amdgpu_device *adev)
846 {
847 	struct dc_init_data init_data;
848 #ifdef CONFIG_DRM_AMD_DC_HDCP
849 	struct dc_callback_init init_params;
850 #endif
851 	int r;
852 
853 	adev->dm.ddev = adev->ddev;
854 	adev->dm.adev = adev;
855 
856 	/* Zero all the fields */
857 	memset(&init_data, 0, sizeof(init_data));
858 #ifdef CONFIG_DRM_AMD_DC_HDCP
859 	memset(&init_params, 0, sizeof(init_params));
860 #endif
861 
862 	mutex_init(&adev->dm.dc_lock);
863 	mutex_init(&adev->dm.audio_lock);
864 
	if (amdgpu_dm_irq_init(adev)) {
866 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
867 		goto error;
868 	}
869 
870 	init_data.asic_id.chip_family = adev->family;
871 
872 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
873 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
874 
875 	init_data.asic_id.vram_width = adev->gmc.vram_width;
876 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
877 	init_data.asic_id.atombios_base_address =
878 		adev->mode_info.atom_context->bios;
879 
880 	init_data.driver = adev;
881 
882 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
883 
884 	if (!adev->dm.cgs_device) {
885 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
886 		goto error;
887 	}
888 
889 	init_data.cgs_device = adev->dm.cgs_device;
890 
891 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
892 
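	/* All of the chips in this switch are APUs; flag GPU VM support for them. */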
893 	switch (adev->asic_type) {
894 	case CHIP_CARRIZO:
895 	case CHIP_STONEY:
896 	case CHIP_RAVEN:
897 	case CHIP_RENOIR:
898 		init_data.flags.gpu_vm_support = true;
899 		break;
900 	default:
901 		break;
902 	}
903 
904 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
905 		init_data.flags.fbc_support = true;
906 
907 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
908 		init_data.flags.multi_mon_pp_mclk_switch = true;
909 
910 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
911 		init_data.flags.disable_fractional_pwm = true;
912 
913 	init_data.flags.power_down_display_on_boot = true;
914 
915 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
916 
917 	/* Display Core create. */
918 	adev->dm.dc = dc_create(&init_data);
919 
920 	if (adev->dm.dc) {
921 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
922 	} else {
923 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
924 		goto error;
925 	}
926 
927 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
928 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
929 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
930 	}
931 
	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter =
			!(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
934 
935 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
936 		adev->dm.dc->debug.disable_stutter = true;
937 
938 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
939 		adev->dm.dc->debug.disable_dsc = true;
940 
941 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
942 		adev->dm.dc->debug.disable_clock_gate = true;
943 
944 	r = dm_dmub_hw_init(adev);
945 	if (r) {
946 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
947 		goto error;
948 	}
949 
950 	dc_hardware_init(adev->dm.dc);
951 
952 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
959 
960 	amdgpu_dm_init_color_mod();
961 
962 #ifdef CONFIG_DRM_AMD_DC_HDCP
963 	if (adev->asic_type >= CHIP_RAVEN) {
964 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
965 
966 		if (!adev->dm.hdcp_workqueue)
967 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
968 		else
969 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
970 
971 		dc_init_callbacks(adev->dm.dc, &init_params);
972 	}
973 #endif
974 	if (amdgpu_dm_initialize_drm_device(adev)) {
975 		DRM_ERROR(
976 		"amdgpu: failed to initialize sw for display support.\n");
977 		goto error;
978 	}
979 
980 	/* Update the actual used number of crtc */
981 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
982 
983 	/* create fake encoders for MST */
984 	dm_dp_create_fake_mst_encoders(adev);
985 
986 	/* TODO: Add_display_info? */
987 
988 	/* TODO use dynamic cursor width */
989 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
990 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
991 
	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}
997 
998 	DRM_DEBUG_DRIVER("KMS initialized.\n");
999 
1000 	return 0;
1001 error:
1002 	amdgpu_dm_fini(adev);
1003 
1004 	return -EINVAL;
1005 }
1006 
1007 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1008 {
1009 	int i;
1010 
1011 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1012 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1013 	}
1014 
1015 	amdgpu_dm_audio_fini(adev);
1016 
1017 	amdgpu_dm_destroy_drm_device(&adev->dm);
1018 
1019 #ifdef CONFIG_DRM_AMD_DC_HDCP
1020 	if (adev->dm.hdcp_workqueue) {
1021 		hdcp_destroy(adev->dm.hdcp_workqueue);
1022 		adev->dm.hdcp_workqueue = NULL;
1023 	}
1024 
1025 	if (adev->dm.dc)
1026 		dc_deinit_callbacks(adev->dm.dc);
1027 #endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
1029 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1030 		adev->dm.dc->ctx->dmub_srv = NULL;
1031 	}
1032 
1033 	if (adev->dm.dmub_bo)
1034 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1035 				      &adev->dm.dmub_bo_gpu_addr,
1036 				      &adev->dm.dmub_bo_cpu_addr);
1037 
1038 	/* DC Destroy TODO: Replace destroy DAL */
1039 	if (adev->dm.dc)
1040 		dc_destroy(&adev->dm.dc);
1041 	/*
	 * TODO: pageflip, vblank interrupt
1043 	 *
1044 	 * amdgpu_dm_irq_fini(adev);
1045 	 */
1046 
1047 	if (adev->dm.cgs_device) {
1048 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1049 		adev->dm.cgs_device = NULL;
1050 	}
1051 	if (adev->dm.freesync_module) {
1052 		mod_freesync_destroy(adev->dm.freesync_module);
1053 		adev->dm.freesync_module = NULL;
1054 	}
1055 
1056 	mutex_destroy(&adev->dm.audio_lock);
1057 	mutex_destroy(&adev->dm.dc_lock);
1060 }
1061 
1062 static int load_dmcu_fw(struct amdgpu_device *adev)
1063 {
1064 	const char *fw_name_dmcu = NULL;
1065 	int r;
1066 	const struct dmcu_firmware_header_v1_0 *hdr;
1067 
	switch (adev->asic_type) {
1069 	case CHIP_BONAIRE:
1070 	case CHIP_HAWAII:
1071 	case CHIP_KAVERI:
1072 	case CHIP_KABINI:
1073 	case CHIP_MULLINS:
1074 	case CHIP_TONGA:
1075 	case CHIP_FIJI:
1076 	case CHIP_CARRIZO:
1077 	case CHIP_STONEY:
1078 	case CHIP_POLARIS11:
1079 	case CHIP_POLARIS10:
1080 	case CHIP_POLARIS12:
1081 	case CHIP_VEGAM:
1082 	case CHIP_VEGA10:
1083 	case CHIP_VEGA12:
1084 	case CHIP_VEGA20:
1085 	case CHIP_NAVI10:
1086 	case CHIP_NAVI14:
1087 	case CHIP_RENOIR:
1088 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1089 	case CHIP_SIENNA_CICHLID:
1090 	case CHIP_NAVY_FLOUNDER:
1091 #endif
1092 		return 0;
1093 	case CHIP_NAVI12:
1094 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1095 		break;
1096 	case CHIP_RAVEN:
1097 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1098 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1099 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1100 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1101 		else
1102 			return 0;
1103 		break;
1104 	default:
1105 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1106 		return -EINVAL;
1107 	}
1108 
1109 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1110 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1111 		return 0;
1112 	}
1113 
1114 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1115 	if (r == -ENOENT) {
1116 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1117 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1118 		adev->dm.fw_dmcu = NULL;
1119 		return 0;
1120 	}
1121 	if (r) {
1122 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1123 			fw_name_dmcu);
1124 		return r;
1125 	}
1126 
1127 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1128 	if (r) {
1129 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1130 			fw_name_dmcu);
1131 		release_firmware(adev->dm.fw_dmcu);
1132 		adev->dm.fw_dmcu = NULL;
1133 		return r;
1134 	}
1135 
1136 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1137 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1138 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1139 	adev->firmware.fw_size +=
1140 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1141 
1142 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1143 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1144 	adev->firmware.fw_size +=
1145 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1146 
1147 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1148 
1149 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1150 
1151 	return 0;
1152 }
1153 
1154 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1155 {
1156 	struct amdgpu_device *adev = ctx;
1157 
1158 	return dm_read_reg(adev->dm.dc->ctx, address);
1159 }
1160 
1161 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1162 				     uint32_t value)
1163 {
1164 	struct amdgpu_device *adev = ctx;
1165 
1166 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1167 }
1168 
1169 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1170 {
1171 	struct dmub_srv_create_params create_params;
1172 	struct dmub_srv_region_params region_params;
1173 	struct dmub_srv_region_info region_info;
1174 	struct dmub_srv_fb_params fb_params;
1175 	struct dmub_srv_fb_info *fb_info;
1176 	struct dmub_srv *dmub_srv;
1177 	const struct dmcub_firmware_header_v1_0 *hdr;
1178 	const char *fw_name_dmub;
1179 	enum dmub_asic dmub_asic;
1180 	enum dmub_status status;
1181 	int r;
1182 
1183 	switch (adev->asic_type) {
1184 	case CHIP_RENOIR:
1185 		dmub_asic = DMUB_ASIC_DCN21;
1186 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1187 		break;
1188 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
1189 	case CHIP_SIENNA_CICHLID:
1190 		dmub_asic = DMUB_ASIC_DCN30;
1191 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1192 		break;
1193 	case CHIP_NAVY_FLOUNDER:
1194 		dmub_asic = DMUB_ASIC_DCN30;
1195 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1196 		break;
1197 #endif
1198 
1199 	default:
1200 		/* ASIC doesn't support DMUB. */
1201 		return 0;
1202 	}
1203 
1204 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1205 	if (r) {
1206 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1207 		return 0;
1208 	}
1209 
1210 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1211 	if (r) {
1212 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1213 		return 0;
1214 	}
1215 
	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version before it is first reported below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}
1231 
1232 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1233 	dmub_srv = adev->dm.dmub_srv;
1234 
1235 	if (!dmub_srv) {
1236 		DRM_ERROR("Failed to allocate DMUB service!\n");
1237 		return -ENOMEM;
1238 	}
1239 
1240 	memset(&create_params, 0, sizeof(create_params));
1241 	create_params.user_ctx = adev;
1242 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1243 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1244 	create_params.asic = dmub_asic;
1245 
1246 	/* Create the DMUB service. */
1247 	status = dmub_srv_create(dmub_srv, &create_params);
1248 	if (status != DMUB_STATUS_OK) {
1249 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1250 		return -EINVAL;
1251 	}
1252 
1253 	/* Calculate the size of all the regions for the DMUB service. */
1254 	memset(&region_params, 0, sizeof(region_params));
1255 
1256 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1257 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1258 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1259 	region_params.vbios_size = adev->bios_size;
1260 	region_params.fw_bss_data = region_params.bss_data_size ?
1261 		adev->dm.dmub_fw->data +
1262 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1263 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1264 	region_params.fw_inst_const =
1265 		adev->dm.dmub_fw->data +
1266 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1267 		PSP_HEADER_BYTES;
1268 
1269 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1270 					   &region_info);
1271 
1272 	if (status != DMUB_STATUS_OK) {
1273 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1274 		return -EINVAL;
1275 	}
1276 
1277 	/*
1278 	 * Allocate a framebuffer based on the total size of all the regions.
1279 	 * TODO: Move this into GART.
1280 	 */
1281 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1282 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1283 				    &adev->dm.dmub_bo_gpu_addr,
1284 				    &adev->dm.dmub_bo_cpu_addr);
1285 	if (r)
1286 		return r;
1287 
1288 	/* Rebase the regions on the framebuffer address. */
1289 	memset(&fb_params, 0, sizeof(fb_params));
1290 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1291 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1292 	fb_params.region_info = &region_info;
1293 
1294 	adev->dm.dmub_fb_info =
1295 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1296 	fb_info = adev->dm.dmub_fb_info;
1297 
1298 	if (!fb_info) {
1299 		DRM_ERROR(
1300 			"Failed to allocate framebuffer info for DMUB service!\n");
1301 		return -ENOMEM;
1302 	}
1303 
1304 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1305 	if (status != DMUB_STATUS_OK) {
1306 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1307 		return -EINVAL;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static int dm_sw_init(void *handle)
1314 {
1315 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1316 	int r;
1317 
1318 	r = dm_dmub_sw_init(adev);
1319 	if (r)
1320 		return r;
1321 
1322 	return load_dmcu_fw(adev);
1323 }
1324 
1325 static int dm_sw_fini(void *handle)
1326 {
1327 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1328 
1329 	kfree(adev->dm.dmub_fb_info);
1330 	adev->dm.dmub_fb_info = NULL;
1331 
1332 	if (adev->dm.dmub_srv) {
1333 		dmub_srv_destroy(adev->dm.dmub_srv);
1334 		adev->dm.dmub_srv = NULL;
1335 	}
1336 
1337 	release_firmware(adev->dm.dmub_fw);
1338 	adev->dm.dmub_fw = NULL;
1339 
1340 	release_firmware(adev->dm.fw_dmcu);
1341 	adev->dm.fw_dmcu = NULL;
1342 
1343 	return 0;
1344 }
1345 
1346 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1347 {
1348 	struct amdgpu_dm_connector *aconnector;
1349 	struct drm_connector *connector;
1350 	struct drm_connector_list_iter iter;
1351 	int ret = 0;
1352 
1353 	drm_connector_list_iter_begin(dev, &iter);
1354 	drm_for_each_connector_iter(connector, &iter) {
1355 		aconnector = to_amdgpu_dm_connector(connector);
1356 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1357 		    aconnector->mst_mgr.aux) {
1358 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1359 					 aconnector,
1360 					 aconnector->base.base.id);
1361 
1362 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1363 			if (ret < 0) {
1364 				DRM_ERROR("DM_MST: Failed to start MST\n");
1365 				aconnector->dc_link->type =
1366 					dc_connection_single;
1367 				break;
1368 			}
1369 		}
1370 	}
1371 	drm_connector_list_iter_end(&iter);
1372 
1373 	return ret;
1374 }
1375 
1376 static int dm_late_init(void *handle)
1377 {
1378 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1379 
1380 	struct dmcu_iram_parameters params;
1381 	unsigned int linear_lut[16];
1382 	int i;
1383 	struct dmcu *dmcu = NULL;
1384 	bool ret = true;
1385 
1386 	if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
1387 		return detect_mst_link_for_all_connectors(adev->ddev);
1388 
1389 	dmcu = adev->dm.dc->res_pool->dmcu;
1390 
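	/* Build a linear (identity) 16-point backlight transfer curve. */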
1391 	for (i = 0; i < 16; i++)
1392 		linear_lut[i] = 0xFFFF * i / 15;
1393 
1394 	params.set = 0;
1395 	params.backlight_ramping_start = 0xCCCC;
1396 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1397 	params.backlight_lut_array_size = 16;
1398 	params.backlight_lut_array = linear_lut;
1399 
	/* Min backlight level after ABM reduction; don't allow below 1%.
1401 	 * 0xFFFF x 0.01 = 0x28F
1402 	 */
1403 	params.min_abm_backlight = 0x28F;
1404 
1405 	/* In the case where abm is implemented on dmcub,
1406 	 * dmcu object will be null.
1407 	 * ABM 2.4 and up are implemented on dmcub.
1408 	 */
1409 	if (dmcu)
1410 		ret = dmcu_load_iram(dmcu, params);
1411 	else if (adev->dm.dc->ctx->dmub_srv)
1412 		ret = dmub_init_abm_config(adev->dm.dc->res_pool->abm, params);
1413 
1414 	if (!ret)
1415 		return -EINVAL;
1416 
1417 	return detect_mst_link_for_all_connectors(adev->ddev);
1418 }
1419 
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1421 {
1422 	struct amdgpu_dm_connector *aconnector;
1423 	struct drm_connector *connector;
1424 	struct drm_connector_list_iter iter;
1425 	struct drm_dp_mst_topology_mgr *mgr;
1426 	int ret;
1427 	bool need_hotplug = false;
1428 
1429 	drm_connector_list_iter_begin(dev, &iter);
1430 	drm_for_each_connector_iter(connector, &iter) {
1431 		aconnector = to_amdgpu_dm_connector(connector);
1432 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433 		    aconnector->mst_port)
1434 			continue;
1435 
1436 		mgr = &aconnector->mst_mgr;
1437 
1438 		if (suspend) {
1439 			drm_dp_mst_topology_mgr_suspend(mgr);
1440 		} else {
1441 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1442 			if (ret < 0) {
1443 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444 				need_hotplug = true;
1445 			}
1446 		}
1447 	}
1448 	drm_connector_list_iter_end(&iter);
1449 
1450 	if (need_hotplug)
1451 		drm_kms_helper_hotplug_event(dev);
1452 }
1453 
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1455 {
1456 	struct smu_context *smu = &adev->smu;
1457 	int ret = 0;
1458 
1459 	if (!is_support_sw_smu(adev))
1460 		return 0;
1461 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows dc implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed;
	 * they should be passed to smu during boot up and on resume from s3.
	 * boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
1493 	case CHIP_NAVI10:
1494 	case CHIP_NAVI14:
1495 	case CHIP_NAVI12:
1496 		break;
1497 	default:
1498 		return 0;
1499 	}
1500 
1501 	ret = smu_write_watermarks_table(smu);
1502 	if (ret) {
1503 		DRM_ERROR("Failed to update WMTABLE!\n");
1504 		return ret;
1505 	}
1506 
1507 	return 0;
1508 }
1509 
1510 /**
1511  * dm_hw_init() - Initialize DC device
1512  * @handle: The base driver device containing the amdgpu_dm device.
1513  *
1514  * Initialize the &struct amdgpu_display_manager device. This involves calling
1515  * the initializers of each DM component, then populating the struct with them.
1516  *
1517  * Although the function implies hardware initialization, both hardware and
1518  * software are initialized here. Splitting them out to their relevant init
1519  * hooks is a future TODO item.
1520  *
1521  * Some notable things that are initialized here:
1522  *
1523  * - Display Core, both software and hardware
1524  * - DC modules that we need (freesync and color management)
1525  * - DRM software states
1526  * - Interrupt sources and handlers
1527  * - Vblank support
1528  * - Debug FS entries, if enabled
1529  */
1530 static int dm_hw_init(void *handle)
1531 {
1532 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1533 	/* Create DAL display manager */
1534 	amdgpu_dm_init(adev);
1535 	amdgpu_dm_hpd_init(adev);
1536 
1537 	return 0;
1538 }
1539 
1540 /**
1541  * dm_hw_fini() - Teardown DC device
1542  * @handle: The base driver device containing the amdgpu_dm device.
1543  *
1544  * Teardown components within &struct amdgpu_display_manager that require
1545  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1546  * were loaded. Also flush IRQ workqueues and disable them.
1547  */
1548 static int dm_hw_fini(void *handle)
1549 {
1550 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1551 
1552 	amdgpu_dm_hpd_fini(adev);
1553 
1554 	amdgpu_dm_irq_fini(adev);
1555 	amdgpu_dm_fini(adev);
1556 	return 0;
1557 }
1558 
1559 
1560 static int dm_enable_vblank(struct drm_crtc *crtc);
1561 static void dm_disable_vblank(struct drm_crtc *crtc);
1562 
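/*
 * Enable or disable the pageflip and vblank interrupts for every stream in
 * @state that has active planes; used to quiesce interrupts before a GPU
 * reset and to restore them afterwards.
 */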
1563 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1564 				 struct dc_state *state, bool enable)
1565 {
1566 	enum dc_irq_source irq_source;
1567 	struct amdgpu_crtc *acrtc;
1568 	int rc = -EBUSY;
1569 	int i = 0;
1570 
1571 	for (i = 0; i < state->stream_count; i++) {
1572 		acrtc = get_crtc_by_otg_inst(
1573 				adev, state->stream_status[i].primary_otg_inst);
1574 
1575 		if (acrtc && state->stream_status[i].plane_count != 0) {
1576 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1577 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1578 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1579 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1580 			if (rc)
1581 				DRM_WARN("Failed to %s pflip interrupts\n",
1582 					 enable ? "enable" : "disable");
1583 
1584 			if (enable) {
1585 				rc = dm_enable_vblank(&acrtc->base);
1586 				if (rc)
1587 					DRM_WARN("Failed to enable vblank interrupts\n");
1588 			} else {
1589 				dm_disable_vblank(&acrtc->base);
1590 			}
1591 
1592 		}
1593 	}
1594 
1595 }
1596 
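/*
 * Commit a copy of the current DC state with every stream (and its planes)
 * removed, blanking all displays; called on the suspend path for GPU reset.
 */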
1597 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1598 {
1599 	struct dc_state *context = NULL;
1600 	enum dc_status res = DC_ERROR_UNEXPECTED;
1601 	int i;
1602 	struct dc_stream_state *del_streams[MAX_PIPES];
1603 	int del_streams_count = 0;
1604 
1605 	memset(del_streams, 0, sizeof(del_streams));
1606 
1607 	context = dc_create_state(dc);
1608 	if (context == NULL)
1609 		goto context_alloc_fail;
1610 
1611 	dc_resource_state_copy_construct_current(dc, context);
1612 
1613 	/* First remove from context all streams */
1614 	for (i = 0; i < context->stream_count; i++) {
1615 		struct dc_stream_state *stream = context->streams[i];
1616 
1617 		del_streams[del_streams_count++] = stream;
1618 	}
1619 
1620 	/* Remove all planes for removed streams and then remove the streams */
1621 	for (i = 0; i < del_streams_count; i++) {
1622 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1623 			res = DC_FAIL_DETACH_SURFACES;
1624 			goto fail;
1625 		}
1626 
1627 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1628 		if (res != DC_OK)
1629 			goto fail;
1630 	}
1631 
1632 
1633 	res = dc_validate_global_state(dc, context, false);
1634 
1635 	if (res != DC_OK) {
1636 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1637 		goto fail;
1638 	}
1639 
1640 	res = dc_commit_state(dc, context);
1641 
1642 fail:
1643 	dc_release_state(context);
1644 
1645 context_alloc_fail:
1646 	return res;
1647 }
1648 
1649 static int dm_suspend(void *handle)
1650 {
1651 	struct amdgpu_device *adev = handle;
1652 	struct amdgpu_display_manager *dm = &adev->dm;
1653 	int ret = 0;
1654 
1655 	if (adev->in_gpu_reset) {
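		/*
		 * Hold dc_lock across the whole GPU reset; the matching
		 * unlock happens in dm_resume() after the cached state has
		 * been committed back.
		 */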
1656 		mutex_lock(&dm->dc_lock);
1657 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1658 
1659 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1660 
1661 		amdgpu_dm_commit_zero_streams(dm->dc);
1662 
1663 		amdgpu_dm_irq_suspend(adev);
1664 
1665 		return ret;
1666 	}
1667 
1668 	WARN_ON(adev->dm.cached_state);
1669 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1670 
1671 	s3_handle_mst(adev->ddev, true);
1672 
1673 	amdgpu_dm_irq_suspend(adev);
1674 
1675 
1676 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1677 
1678 	return 0;
1679 }
1680 
1681 static struct amdgpu_dm_connector *
1682 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1683 					     struct drm_crtc *crtc)
1684 {
1685 	uint32_t i;
1686 	struct drm_connector_state *new_con_state;
1687 	struct drm_connector *connector;
1688 	struct drm_crtc *crtc_from_state;
1689 
1690 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1691 		crtc_from_state = new_con_state->crtc;
1692 
1693 		if (crtc_from_state == crtc)
1694 			return to_amdgpu_dm_connector(connector);
1695 	}
1696 
1697 	return NULL;
1698 }
1699 
1700 static void emulated_link_detect(struct dc_link *link)
1701 {
1702 	struct dc_sink_init_data sink_init_data = { 0 };
1703 	struct display_sink_capability sink_caps = { 0 };
1704 	enum dc_edid_status edid_status;
1705 	struct dc_context *dc_ctx = link->ctx;
1706 	struct dc_sink *sink = NULL;
1707 	struct dc_sink *prev_sink = NULL;
1708 
1709 	link->type = dc_connection_none;
1710 	prev_sink = link->local_sink;
1711 
1712 	if (prev_sink != NULL)
1713 		dc_sink_retain(prev_sink);
1714 
1715 	switch (link->connector_signal) {
1716 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1717 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1718 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1719 		break;
1720 	}
1721 
1722 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1723 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1724 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1725 		break;
1726 	}
1727 
1728 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1729 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1730 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1731 		break;
1732 	}
1733 
1734 	case SIGNAL_TYPE_LVDS: {
1735 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1736 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1737 		break;
1738 	}
1739 
1740 	case SIGNAL_TYPE_EDP: {
1741 		sink_caps.transaction_type =
1742 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1743 		sink_caps.signal = SIGNAL_TYPE_EDP;
1744 		break;
1745 	}
1746 
1747 	case SIGNAL_TYPE_DISPLAY_PORT: {
1748 		sink_caps.transaction_type =
1749 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
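		/*
		 * Report the emulated DP sink as a virtual signal; the link
		 * is not physically trained in this path.
		 */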
1750 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1751 		break;
1752 	}
1753 
1754 	default:
1755 		DC_ERROR("Invalid connector type! signal:%d\n",
1756 			link->connector_signal);
1757 		return;
1758 	}
1759 
1760 	sink_init_data.link = link;
1761 	sink_init_data.sink_signal = sink_caps.signal;
1762 
1763 	sink = dc_sink_create(&sink_init_data);
1764 	if (!sink) {
1765 		DC_ERROR("Failed to create sink!\n");
1766 		return;
1767 	}
1768 
1769 	/* dc_sink_create returns a new reference */
1770 	link->local_sink = sink;
1771 
1772 	edid_status = dm_helpers_read_local_edid(
1773 			link->ctx,
1774 			link,
1775 			sink);
1776 
1777 	if (edid_status != EDID_OK)
1778 		DC_ERROR("Failed to read EDID");
1779 
1780 }
1781 
1782 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1783 				     struct amdgpu_display_manager *dm)
1784 {
1785 	struct {
1786 		struct dc_surface_update surface_updates[MAX_SURFACES];
1787 		struct dc_plane_info plane_infos[MAX_SURFACES];
1788 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1789 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1790 		struct dc_stream_update stream_update;
	} *bundle;
1792 	int k, m;
1793 
1794 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1795 
1796 	if (!bundle) {
1797 		dm_error("Failed to allocate update bundle\n");
1798 		goto cleanup;
1799 	}
1800 
1801 	for (k = 0; k < dc_state->stream_count; k++) {
1802 		bundle->stream_update.stream = dc_state->streams[k];
1803 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
1807 			bundle->surface_updates[m].surface->force_full_update =
1808 				true;
1809 		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
1814 	}
1815 
1816 cleanup:
	kfree(bundle);
1820 }
1821 
1822 static int dm_resume(void *handle)
1823 {
1824 	struct amdgpu_device *adev = handle;
1825 	struct drm_device *ddev = adev->ddev;
1826 	struct amdgpu_display_manager *dm = &adev->dm;
1827 	struct amdgpu_dm_connector *aconnector;
1828 	struct drm_connector *connector;
1829 	struct drm_connector_list_iter iter;
1830 	struct drm_crtc *crtc;
1831 	struct drm_crtc_state *new_crtc_state;
1832 	struct dm_crtc_state *dm_new_crtc_state;
1833 	struct drm_plane *plane;
1834 	struct drm_plane_state *new_plane_state;
1835 	struct dm_plane_state *dm_new_plane_state;
1836 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1837 	enum dc_connection_type new_connection_type = dc_connection_none;
1838 	struct dc_state *dc_state;
1839 	int i, r, j;
1840 
1841 	if (adev->in_gpu_reset) {
1842 		dc_state = dm->cached_dc_state;
1843 
1844 		r = dm_dmub_hw_init(adev);
1845 		if (r)
1846 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1847 
1848 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1849 		dc_resume(dm->dc);
1850 
1851 		amdgpu_dm_irq_resume_early(adev);
1852 
1853 		for (i = 0; i < dc_state->stream_count; i++) {
1854 			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
1858 			}
1859 		}
1860 
1861 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1862 
1863 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1864 
1865 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1866 
1867 		dc_release_state(dm->cached_dc_state);
1868 		dm->cached_dc_state = NULL;
1869 
1870 		amdgpu_dm_irq_resume_late(adev);
1871 
1872 		mutex_unlock(&dm->dc_lock);
1873 
1874 		return 0;
1875 	}
1876 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1877 	dc_release_state(dm_state->context);
1878 	dm_state->context = dc_create_state(dm->dc);
1879 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1880 	dc_resource_state_construct(dm->dc, dm_state->context);
1881 
1882 	/* Before powering on DC we need to re-initialize DMUB. */
1883 	r = dm_dmub_hw_init(adev);
1884 	if (r)
1885 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1886 
1887 	/* power on hardware */
1888 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1889 
1890 	/* program HPD filter */
1891 	dc_resume(dm->dc);
1892 
1893 	/*
1894 	 * early enable HPD Rx IRQ, should be done before set mode as short
1895 	 * pulse interrupts are used for MST
1896 	 */
1897 	amdgpu_dm_irq_resume_early(adev);
1898 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
1900 	s3_handle_mst(ddev, false);
1901 
	/* Do detection */
1903 	drm_connector_list_iter_begin(ddev, &iter);
1904 	drm_for_each_connector_iter(connector, &iter) {
1905 		aconnector = to_amdgpu_dm_connector(connector);
1906 
1907 		/*
1908 		 * this is the case when traversing through already created
1909 		 * MST connectors, should be skipped
1910 		 */
1911 		if (aconnector->mst_port)
1912 			continue;
1913 
1914 		mutex_lock(&aconnector->hpd_lock);
1915 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1916 			DRM_ERROR("KMS: Failed to detect connector\n");
1917 
1918 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1919 			emulated_link_detect(aconnector->dc_link);
1920 		else
1921 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1922 
1923 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1924 			aconnector->fake_enable = false;
1925 
1926 		if (aconnector->dc_sink)
1927 			dc_sink_release(aconnector->dc_sink);
1928 		aconnector->dc_sink = NULL;
1929 		amdgpu_dm_update_connector_after_detect(aconnector);
1930 		mutex_unlock(&aconnector->hpd_lock);
1931 	}
1932 	drm_connector_list_iter_end(&iter);
1933 
1934 	/* Force mode set in atomic commit */
1935 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1936 		new_crtc_state->active_changed = true;
1937 
1938 	/*
1939 	 * atomic_check is expected to create the dc states. We need to release
1940 	 * them here, since they were duplicated as part of the suspend
1941 	 * procedure.
1942 	 */
1943 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1944 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1945 		if (dm_new_crtc_state->stream) {
1946 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1947 			dc_stream_release(dm_new_crtc_state->stream);
1948 			dm_new_crtc_state->stream = NULL;
1949 		}
1950 	}
1951 
1952 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1953 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1954 		if (dm_new_plane_state->dc_state) {
1955 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1956 			dc_plane_state_release(dm_new_plane_state->dc_state);
1957 			dm_new_plane_state->dc_state = NULL;
1958 		}
1959 	}
1960 
1961 	drm_atomic_helper_resume(ddev, dm->cached_state);
1962 
1963 	dm->cached_state = NULL;
1964 
1965 	amdgpu_dm_irq_resume_late(adev);
1966 
1967 	amdgpu_dm_smu_write_watermarks_table(adev);
1968 
1969 	return 0;
1970 }
1971 
1972 /**
1973  * DOC: DM Lifecycle
1974  *
1975  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1976  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1977  * the base driver's device list to be initialized and torn down accordingly.
1978  *
1979  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1980  */
1981 
1982 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1983 	.name = "dm",
1984 	.early_init = dm_early_init,
1985 	.late_init = dm_late_init,
1986 	.sw_init = dm_sw_init,
1987 	.sw_fini = dm_sw_fini,
1988 	.hw_init = dm_hw_init,
1989 	.hw_fini = dm_hw_fini,
1990 	.suspend = dm_suspend,
1991 	.resume = dm_resume,
1992 	.is_idle = dm_is_idle,
1993 	.wait_for_idle = dm_wait_for_idle,
1994 	.check_soft_reset = dm_check_soft_reset,
1995 	.soft_reset = dm_soft_reset,
1996 	.set_clockgating_state = dm_set_clockgating_state,
1997 	.set_powergating_state = dm_set_powergating_state,
1998 };
1999 
const struct amdgpu_ip_block_version dm_ip_block = {
2002 	.type = AMD_IP_BLOCK_TYPE_DCE,
2003 	.major = 1,
2004 	.minor = 0,
2005 	.rev = 0,
2006 	.funcs = &amdgpu_dm_funcs,
2007 };
2008 
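/*
 * A minimal sketch of how the base driver consumes this table (the usual
 * amdgpu IP-block registration flow):
 *
 *	amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which dm_early_init(), dm_sw_init(), dm_hw_init() and friends are
 * invoked at the appropriate points in the base driver's init sequence.
 */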
2009 
2010 /**
2011  * DOC: atomic
2012  *
2013  * *WIP*
2014  */
2015 
2016 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2017 	.fb_create = amdgpu_display_user_framebuffer_create,
2018 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2019 	.atomic_check = amdgpu_dm_atomic_check,
2020 	.atomic_commit = amdgpu_dm_atomic_commit,
2021 };
2022 
2023 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2024 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2025 };
2026 
2027 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2028 {
2029 	u32 max_cll, min_cll, max, min, q, r;
2030 	struct amdgpu_dm_backlight_caps *caps;
2031 	struct amdgpu_display_manager *dm;
2032 	struct drm_connector *conn_base;
2033 	struct amdgpu_device *adev;
2034 	struct dc_link *link = NULL;
2035 	static const u8 pre_computed_values[] = {
2036 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2037 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2038 
2039 	if (!aconnector || !aconnector->dc_link)
2040 		return;
2041 
2042 	link = aconnector->dc_link;
2043 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2044 		return;
2045 
2046 	conn_base = &aconnector->base;
2047 	adev = conn_base->dev->dev_private;
2048 	dm = &adev->dm;
2049 	caps = &dm->backlight_caps;
2050 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2051 	caps->aux_support = false;
2052 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2053 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2054 
2055 	if (caps->ext_caps->bits.oled == 1 ||
2056 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2057 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2058 		caps->aux_support = true;
2059 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need floating-point
	 * precision; to avoid this complexity level, we take advantage of the
	 * fact that CV is divided by a constant. From Euclid's division
	 * algorithm, we know that CV can be written as: CV = 32*q + r. Next,
	 * we replace CV in the Luminance expression and get
	 * 50*(2**q)*(2**(r/32)), hence we just need to pre-compute the value
	 * of r/32. The values were pre-computed with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results are stored in pre_computed_values.
	 */
2075 	q = max_cll >> 5;
2076 	r = max_cll % 32;
2077 	max = (1 << q) * pre_computed_values[r];
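
	/*
	 * Worked example with a hypothetical max_cll of 65: q = 65 >> 5 = 2,
	 * r = 65 % 32 = 1, so max = (1 << 2) * pre_computed_values[1] =
	 * 4 * 51 = 204, matching round(50 * 2**(65/32.0)) = 204 nits.
	 */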
2078 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2082 
2083 	caps->aux_max_input_signal = max;
2084 	caps->aux_min_input_signal = min;
2085 }
2086 
2087 void amdgpu_dm_update_connector_after_detect(
2088 		struct amdgpu_dm_connector *aconnector)
2089 {
2090 	struct drm_connector *connector = &aconnector->base;
2091 	struct drm_device *dev = connector->dev;
2092 	struct dc_sink *sink;
2093 
2094 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
		return;

	sink = aconnector->dc_link->local_sink;
2100 	if (sink)
2101 		dc_sink_retain(sink);
2102 
2103 	/*
2104 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2105 	 * the connector sink is set to either fake or physical sink depends on link status.
2106 	 * Skip if already done during boot.
2107 	 */
2108 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2109 			&& aconnector->dc_em_sink) {
2110 
2111 		/*
2112 		 * For S3 resume with headless use eml_sink to fake stream
2113 		 * because on resume connector->sink is set to NULL
2114 		 */
2115 		mutex_lock(&dev->mode_config.mutex);
2116 
2117 		if (sink) {
2118 			if (aconnector->dc_sink) {
2119 				amdgpu_dm_update_freesync_caps(connector, NULL);
2120 				/*
2121 				 * retain and release below are used to
2122 				 * bump up refcount for sink because the link doesn't point
2123 				 * to it anymore after disconnect, so on next crtc to connector
2124 				 * reshuffle by UMD we will get into unwanted dc_sink release
2125 				 */
2126 				dc_sink_release(aconnector->dc_sink);
2127 			}
2128 			aconnector->dc_sink = sink;
2129 			dc_sink_retain(aconnector->dc_sink);
2130 			amdgpu_dm_update_freesync_caps(connector,
2131 					aconnector->edid);
2132 		} else {
2133 			amdgpu_dm_update_freesync_caps(connector, NULL);
2134 			if (!aconnector->dc_sink) {
2135 				aconnector->dc_sink = aconnector->dc_em_sink;
2136 				dc_sink_retain(aconnector->dc_sink);
2137 			}
2138 		}
2139 
2140 		mutex_unlock(&dev->mode_config.mutex);
2141 
2142 		if (sink)
2143 			dc_sink_release(sink);
2144 		return;
2145 	}
2146 
2147 	/*
2148 	 * TODO: temporary guard to look for proper fix
2149 	 * if this sink is MST sink, we should not do anything
2150 	 */
2151 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2152 		dc_sink_release(sink);
2153 		return;
2154 	}
2155 
2156 	if (aconnector->dc_sink == sink) {
2157 		/*
2158 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2159 		 * Do nothing!!
2160 		 */
2161 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2162 				aconnector->connector_id);
2163 		if (sink)
2164 			dc_sink_release(sink);
2165 		return;
2166 	}
2167 
2168 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2169 		aconnector->connector_id, aconnector->dc_sink, sink);
2170 
2171 	mutex_lock(&dev->mode_config.mutex);
2172 
2173 	/*
2174 	 * 1. Update status of the drm connector
2175 	 * 2. Send an event and let userspace tell us what to do
2176 	 */
2177 	if (sink) {
2178 		/*
2179 		 * TODO: check if we still need the S3 mode update workaround.
2180 		 * If yes, put it here.
2181 		 */
2182 		if (aconnector->dc_sink)
2183 			amdgpu_dm_update_freesync_caps(connector, NULL);
2184 
2185 		aconnector->dc_sink = sink;
2186 		dc_sink_retain(aconnector->dc_sink);
2187 		if (sink->dc_edid.length == 0) {
2188 			aconnector->edid = NULL;
2189 			if (aconnector->dc_link->aux_mode) {
2190 				drm_dp_cec_unset_edid(
2191 					&aconnector->dm_dp_aux.aux);
2192 			}
2193 		} else {
2194 			aconnector->edid =
2195 				(struct edid *)sink->dc_edid.raw_edid;
2196 
2197 			drm_connector_update_edid_property(connector,
2198 							   aconnector->edid);
2199 
2200 			if (aconnector->dc_link->aux_mode)
2201 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2202 						    aconnector->edid);
2203 		}
2204 
2205 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2206 		update_connector_ext_caps(aconnector);
2207 	} else {
2208 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2209 		amdgpu_dm_update_freesync_caps(connector, NULL);
2210 		drm_connector_update_edid_property(connector, NULL);
2211 		aconnector->num_modes = 0;
2212 		dc_sink_release(aconnector->dc_sink);
2213 		aconnector->dc_sink = NULL;
2214 		aconnector->edid = NULL;
2215 #ifdef CONFIG_DRM_AMD_DC_HDCP
2216 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2217 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2218 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2219 #endif
2220 	}
2221 
2222 	mutex_unlock(&dev->mode_config.mutex);
2223 
2224 	if (sink)
2225 		dc_sink_release(sink);
2226 }
2227 
2228 static void handle_hpd_irq(void *param)
2229 {
2230 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2231 	struct drm_connector *connector = &aconnector->base;
2232 	struct drm_device *dev = connector->dev;
2233 	enum dc_connection_type new_connection_type = dc_connection_none;
2234 #ifdef CONFIG_DRM_AMD_DC_HDCP
2235 	struct amdgpu_device *adev = dev->dev_private;
2236 #endif
2237 
2238 	/*
2239 	 * In case of failure or MST no need to update connector status or notify the OS
2240 	 * since (for MST case) MST does this in its own context.
2241 	 */
2242 	mutex_lock(&aconnector->hpd_lock);
2243 
2244 #ifdef CONFIG_DRM_AMD_DC_HDCP
2245 	if (adev->dm.hdcp_workqueue)
2246 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2247 #endif
2248 	if (aconnector->fake_enable)
2249 		aconnector->fake_enable = false;
2250 
2251 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2252 		DRM_ERROR("KMS: Failed to detect connector\n");
2253 
2254 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
2259 		dm_restore_drm_connector_state(dev, connector);
2260 		drm_modeset_unlock_all(dev);
2261 
2262 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2263 			drm_kms_helper_hotplug_event(dev);
2264 
2265 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
2270 		dm_restore_drm_connector_state(dev, connector);
2271 		drm_modeset_unlock_all(dev);
2272 
2273 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2274 			drm_kms_helper_hotplug_event(dev);
2275 	}
2276 	mutex_unlock(&aconnector->hpd_lock);
2277 
2278 }
2279 
2280 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2281 {
2282 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2283 	uint8_t dret;
2284 	bool new_irq_handled = false;
2285 	int dpcd_addr;
2286 	int dpcd_bytes_to_read;
2287 
2288 	const int max_process_count = 30;
2289 	int process_count = 0;
2290 
2291 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2292 
2293 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2294 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2295 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2296 		dpcd_addr = DP_SINK_COUNT;
2297 	} else {
2298 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2299 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2300 		dpcd_addr = DP_SINK_COUNT_ESI;
2301 	}
2302 
2303 	dret = drm_dp_dpcd_read(
2304 		&aconnector->dm_dp_aux.aux,
2305 		dpcd_addr,
2306 		esi,
2307 		dpcd_bytes_to_read);
2308 
2309 	while (dret == dpcd_bytes_to_read &&
2310 		process_count < max_process_count) {
2311 		uint8_t retry;
2312 		dret = 0;
2313 
2314 		process_count++;
2315 
2316 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2317 		/* handle HPD short pulse irq */
2318 		if (aconnector->mst_mgr.mst_state)
2319 			drm_dp_mst_hpd_irq(
2320 				&aconnector->mst_mgr,
2321 				esi,
2322 				&new_irq_handled);
2323 
2324 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2326 			const int ack_dpcd_bytes_to_write =
2327 				dpcd_bytes_to_read - 1;
2328 
2329 			for (retry = 0; retry < 3; retry++) {
2330 				uint8_t wret;
2331 
2332 				wret = drm_dp_dpcd_write(
2333 					&aconnector->dm_dp_aux.aux,
2334 					dpcd_addr + 1,
2335 					&esi[1],
2336 					ack_dpcd_bytes_to_write);
2337 				if (wret == ack_dpcd_bytes_to_write)
2338 					break;
2339 			}
2340 
2341 			/* check if there is new irq to be handled */
2342 			dret = drm_dp_dpcd_read(
2343 				&aconnector->dm_dp_aux.aux,
2344 				dpcd_addr,
2345 				esi,
2346 				dpcd_bytes_to_read);
2347 
2348 			new_irq_handled = false;
2349 		} else {
2350 			break;
2351 		}
2352 	}
2353 
2354 	if (process_count == max_process_count)
2355 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2356 }
2357 
2358 static void handle_hpd_rx_irq(void *param)
2359 {
2360 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2361 	struct drm_connector *connector = &aconnector->base;
2362 	struct drm_device *dev = connector->dev;
2363 	struct dc_link *dc_link = aconnector->dc_link;
2364 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2365 	enum dc_connection_type new_connection_type = dc_connection_none;
2366 #ifdef CONFIG_DRM_AMD_DC_HDCP
2367 	union hpd_irq_data hpd_irq_data;
2368 	struct amdgpu_device *adev = dev->dev_private;
2369 
2370 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2371 #endif
2372 
2373 	/*
2374 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2375 	 * conflict, after implement i2c helper, this mutex should be
2376 	 * retired.
2377 	 */
2378 	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
2383 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2384 #else
2385 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2386 #endif
2387 			!is_mst_root_connector) {
2388 		/* Downstream Port status changed. */
2389 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2390 			DRM_ERROR("KMS: Failed to detect connector\n");
2391 
2392 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2393 			emulated_link_detect(dc_link);
2394 
2395 			if (aconnector->fake_enable)
2396 				aconnector->fake_enable = false;
2397 
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2402 			dm_restore_drm_connector_state(dev, connector);
2403 			drm_modeset_unlock_all(dev);
2404 
2405 			drm_kms_helper_hotplug_event(dev);
2406 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2407 
2408 			if (aconnector->fake_enable)
2409 				aconnector->fake_enable = false;
2410 
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
2415 			dm_restore_drm_connector_state(dev, connector);
2416 			drm_modeset_unlock_all(dev);
2417 
2418 			drm_kms_helper_hotplug_event(dev);
2419 		}
2420 	}
2421 #ifdef CONFIG_DRM_AMD_DC_HDCP
2422 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2423 		if (adev->dm.hdcp_workqueue)
2424 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2425 	}
2426 #endif
2427 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2428 	    (dc_link->type == dc_connection_mst_branch))
2429 		dm_handle_hpd_rx_irq(aconnector);
2430 
2431 	if (dc_link->type != dc_connection_mst_branch) {
2432 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2433 		mutex_unlock(&aconnector->hpd_lock);
2434 	}
2435 }
2436 
2437 static void register_hpd_handlers(struct amdgpu_device *adev)
2438 {
2439 	struct drm_device *dev = adev->ddev;
2440 	struct drm_connector *connector;
2441 	struct amdgpu_dm_connector *aconnector;
2442 	const struct dc_link *dc_link;
2443 	struct dc_interrupt_params int_params = {0};
2444 
2445 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2446 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2447 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
2451 		aconnector = to_amdgpu_dm_connector(connector);
2452 		dc_link = aconnector->dc_link;
2453 
2454 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2455 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2456 			int_params.irq_source = dc_link->irq_source_hpd;
2457 
2458 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2459 					handle_hpd_irq,
2460 					(void *) aconnector);
2461 		}
2462 
2463 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2464 
2465 			/* Also register for DP short pulse (hpd_rx). */
2466 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2468 
2469 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2470 					handle_hpd_rx_irq,
2471 					(void *) aconnector);
2472 		}
2473 	}
2474 }
2475 
2476 /* Register IRQ sources and initialize IRQ callbacks */
2477 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2478 {
2479 	struct dc *dc = adev->dm.dc;
2480 	struct common_irq_params *c_irq_params;
2481 	struct dc_interrupt_params int_params = {0};
2482 	int r;
2483 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2485 
2486 	if (adev->asic_type >= CHIP_VEGA10)
2487 		client_id = SOC15_IH_CLIENTID_DCE;
2488 
2489 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2490 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2491 
2492 	/*
2493 	 * Actions of amdgpu_irq_add_id():
2494 	 * 1. Register a set() function with base driver.
2495 	 *    Base driver will call set() function to enable/disable an
2496 	 *    interrupt in DC hardware.
2497 	 * 2. Register amdgpu_dm_irq_handler().
2498 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2499 	 *    coming from DC hardware.
2500 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2501 	 *    for acknowledging and handling. */
2502 
2503 	/* Use VBLANK interrupt */
2504 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2505 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2506 		if (r) {
2507 			DRM_ERROR("Failed to add crtc irq id!\n");
2508 			return r;
2509 		}
2510 
2511 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2512 		int_params.irq_source =
2513 			dc_interrupt_to_irq_source(dc, i, 0);
2514 
2515 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2516 
2517 		c_irq_params->adev = adev;
2518 		c_irq_params->irq_src = int_params.irq_source;
2519 
2520 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2521 				dm_crtc_high_irq, c_irq_params);
2522 	}
2523 
2524 	/* Use VUPDATE interrupt */
2525 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2526 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2527 		if (r) {
2528 			DRM_ERROR("Failed to add vupdate irq id!\n");
2529 			return r;
2530 		}
2531 
2532 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2533 		int_params.irq_source =
2534 			dc_interrupt_to_irq_source(dc, i, 0);
2535 
2536 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2537 
2538 		c_irq_params->adev = adev;
2539 		c_irq_params->irq_src = int_params.irq_source;
2540 
2541 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2542 				dm_vupdate_high_irq, c_irq_params);
2543 	}
2544 
2545 	/* Use GRPH_PFLIP interrupt */
2546 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2547 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2548 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2549 		if (r) {
2550 			DRM_ERROR("Failed to add page flip irq id!\n");
2551 			return r;
2552 		}
2553 
2554 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2555 		int_params.irq_source =
2556 			dc_interrupt_to_irq_source(dc, i, 0);
2557 
2558 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2559 
2560 		c_irq_params->adev = adev;
2561 		c_irq_params->irq_src = int_params.irq_source;
2562 
2563 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2564 				dm_pflip_high_irq, c_irq_params);
2565 
2566 	}
2567 
2568 	/* HPD */
2569 	r = amdgpu_irq_add_id(adev, client_id,
2570 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2571 	if (r) {
2572 		DRM_ERROR("Failed to add hpd irq id!\n");
2573 		return r;
2574 	}
2575 
2576 	register_hpd_handlers(adev);
2577 
2578 	return 0;
2579 }
2580 
2581 #if defined(CONFIG_DRM_AMD_DC_DCN)
2582 /* Register IRQ sources and initialize IRQ callbacks */
2583 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2584 {
2585 	struct dc *dc = adev->dm.dc;
2586 	struct common_irq_params *c_irq_params;
2587 	struct dc_interrupt_params int_params = {0};
2588 	int r;
2589 	int i;
2590 
2591 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2592 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2593 
2594 	/*
2595 	 * Actions of amdgpu_irq_add_id():
2596 	 * 1. Register a set() function with base driver.
2597 	 *    Base driver will call set() function to enable/disable an
2598 	 *    interrupt in DC hardware.
2599 	 * 2. Register amdgpu_dm_irq_handler().
2600 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2601 	 *    coming from DC hardware.
2602 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2603 	 *    for acknowledging and handling.
2604 	 */
2605 
2606 	/* Use VSTARTUP interrupt */
2607 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2608 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2609 			i++) {
2610 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2611 
2612 		if (r) {
2613 			DRM_ERROR("Failed to add crtc irq id!\n");
2614 			return r;
2615 		}
2616 
2617 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2618 		int_params.irq_source =
2619 			dc_interrupt_to_irq_source(dc, i, 0);
2620 
2621 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2622 
2623 		c_irq_params->adev = adev;
2624 		c_irq_params->irq_src = int_params.irq_source;
2625 
2626 		amdgpu_dm_irq_register_interrupt(
2627 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2628 	}
2629 
2630 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2631 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2632 	 * to trigger at end of each vblank, regardless of state of the lock,
2633 	 * matching DCE behaviour.
2634 	 */
2635 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2636 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2637 	     i++) {
2638 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2639 
2640 		if (r) {
2641 			DRM_ERROR("Failed to add vupdate irq id!\n");
2642 			return r;
2643 		}
2644 
2645 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2646 		int_params.irq_source =
2647 			dc_interrupt_to_irq_source(dc, i, 0);
2648 
2649 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2650 
2651 		c_irq_params->adev = adev;
2652 		c_irq_params->irq_src = int_params.irq_source;
2653 
2654 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2655 				dm_vupdate_high_irq, c_irq_params);
2656 	}
2657 
2658 	/* Use GRPH_PFLIP interrupt */
2659 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2660 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2661 			i++) {
2662 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2663 		if (r) {
2664 			DRM_ERROR("Failed to add page flip irq id!\n");
2665 			return r;
2666 		}
2667 
2668 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2669 		int_params.irq_source =
2670 			dc_interrupt_to_irq_source(dc, i, 0);
2671 
2672 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2673 
2674 		c_irq_params->adev = adev;
2675 		c_irq_params->irq_src = int_params.irq_source;
2676 
2677 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2678 				dm_pflip_high_irq, c_irq_params);
2679 
2680 	}
2681 
2682 	/* HPD */
2683 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2684 			&adev->hpd_irq);
2685 	if (r) {
2686 		DRM_ERROR("Failed to add hpd irq id!\n");
2687 		return r;
2688 	}
2689 
2690 	register_hpd_handlers(adev);
2691 
2692 	return 0;
2693 }
2694 #endif
2695 
2696 /*
2697  * Acquires the lock for the atomic state object and returns
2698  * the new atomic state.
2699  *
2700  * This should only be called during atomic check.
2701  */
2702 static int dm_atomic_get_state(struct drm_atomic_state *state,
2703 			       struct dm_atomic_state **dm_state)
2704 {
2705 	struct drm_device *dev = state->dev;
2706 	struct amdgpu_device *adev = dev->dev_private;
2707 	struct amdgpu_display_manager *dm = &adev->dm;
2708 	struct drm_private_state *priv_state;
2709 
2710 	if (*dm_state)
2711 		return 0;
2712 
2713 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2714 	if (IS_ERR(priv_state))
2715 		return PTR_ERR(priv_state);
2716 
2717 	*dm_state = to_dm_atomic_state(priv_state);
2718 
2719 	return 0;
2720 }
2721 
2722 static struct dm_atomic_state *
2723 dm_atomic_get_new_state(struct drm_atomic_state *state)
2724 {
2725 	struct drm_device *dev = state->dev;
2726 	struct amdgpu_device *adev = dev->dev_private;
2727 	struct amdgpu_display_manager *dm = &adev->dm;
2728 	struct drm_private_obj *obj;
2729 	struct drm_private_state *new_obj_state;
2730 	int i;
2731 
2732 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2733 		if (obj->funcs == dm->atomic_obj.funcs)
2734 			return to_dm_atomic_state(new_obj_state);
2735 	}
2736 
2737 	return NULL;
2738 }
2739 
2740 static struct dm_atomic_state *
2741 dm_atomic_get_old_state(struct drm_atomic_state *state)
2742 {
2743 	struct drm_device *dev = state->dev;
2744 	struct amdgpu_device *adev = dev->dev_private;
2745 	struct amdgpu_display_manager *dm = &adev->dm;
2746 	struct drm_private_obj *obj;
2747 	struct drm_private_state *old_obj_state;
2748 	int i;
2749 
2750 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2751 		if (obj->funcs == dm->atomic_obj.funcs)
2752 			return to_dm_atomic_state(old_obj_state);
2753 	}
2754 
2755 	return NULL;
2756 }
2757 
2758 static struct drm_private_state *
2759 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2760 {
2761 	struct dm_atomic_state *old_state, *new_state;
2762 
2763 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2764 	if (!new_state)
2765 		return NULL;
2766 
2767 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2768 
2769 	old_state = to_dm_atomic_state(obj->state);
2770 
2771 	if (old_state && old_state->context)
2772 		new_state->context = dc_copy_state(old_state->context);
2773 
2774 	if (!new_state->context) {
2775 		kfree(new_state);
2776 		return NULL;
2777 	}
2778 
2779 	return &new_state->base;
2780 }
2781 
2782 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2783 				    struct drm_private_state *state)
2784 {
2785 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2786 
2787 	if (dm_state && dm_state->context)
2788 		dc_release_state(dm_state->context);
2789 
2790 	kfree(dm_state);
2791 }
2792 
2793 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2794 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2795 	.atomic_destroy_state = dm_atomic_destroy_state,
2796 };
2797 
2798 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2799 {
2800 	struct dm_atomic_state *state;
2801 	int r;
2802 
2803 	adev->mode_info.mode_config_initialized = true;
2804 
2805 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2806 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2807 
2808 	adev->ddev->mode_config.max_width = 16384;
2809 	adev->ddev->mode_config.max_height = 16384;
2810 
2811 	adev->ddev->mode_config.preferred_depth = 24;
2812 	adev->ddev->mode_config.prefer_shadow = 1;
2813 	/* indicates support for immediate flip */
2814 	adev->ddev->mode_config.async_page_flip = true;
2815 
2816 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2817 
2818 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2819 	if (!state)
2820 		return -ENOMEM;
2821 
2822 	state->context = dc_create_state(adev->dm.dc);
2823 	if (!state->context) {
2824 		kfree(state);
2825 		return -ENOMEM;
2826 	}
2827 
2828 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2829 
2830 	drm_atomic_private_obj_init(adev->ddev,
2831 				    &adev->dm.atomic_obj,
2832 				    &state->base,
2833 				    &dm_atomic_state_funcs);
2834 
2835 	r = amdgpu_display_modeset_create_props(adev);
2836 	if (r)
2837 		return r;
2838 
2839 	r = amdgpu_dm_audio_init(adev);
2840 	if (r)
2841 		return r;
2842 
2843 	return 0;
2844 }
2845 
2846 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2847 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2848 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2849 
2850 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2851 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2852 
2853 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2854 {
2855 #if defined(CONFIG_ACPI)
2856 	struct amdgpu_dm_backlight_caps caps;
2857 
2858 	if (dm->backlight_caps.caps_valid)
2859 		return;
2860 
2861 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2862 	if (caps.caps_valid) {
2863 		dm->backlight_caps.caps_valid = true;
2864 		if (caps.aux_support)
2865 			return;
2866 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2867 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2868 	} else {
2869 		dm->backlight_caps.min_input_signal =
2870 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2871 		dm->backlight_caps.max_input_signal =
2872 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2873 	}
2874 #else
2875 	if (dm->backlight_caps.aux_support)
2876 		return;
2877 
2878 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2879 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2880 #endif
2881 }
2882 
2883 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2884 {
2885 	bool rc;
2886 
2887 	if (!link)
2888 		return 1;
2889 
2890 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2891 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2892 
2893 	return rc ? 0 : 1;
2894 }
2895 
2896 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2897 			      const uint32_t user_brightness)
2898 {
2899 	u32 min, max, conversion_pace;
2900 	u32 brightness = user_brightness;
2901 
2902 	if (!caps)
2903 		goto out;
2904 
2905 	if (!caps->aux_support) {
2906 		max = caps->max_input_signal;
2907 		min = caps->min_input_signal;
2908 		/*
2909 		 * The brightness input is in the range 0-255
2910 		 * It needs to be rescaled to be between the
2911 		 * requested min and max input signal
2912 		 * It also needs to be scaled up by 0x101 to
2913 		 * match the DC interface which has a range of
2914 		 * 0 to 0xffff
2915 		 */
2916 		conversion_pace = 0x101;
2917 		brightness =
2918 			user_brightness
2919 			* conversion_pace
2920 			* (max - min)
2921 			/ AMDGPU_MAX_BL_LEVEL
2922 			+ min * conversion_pace;
2923 	} else {
2924 		/* TODO
2925 		 * We are doing a linear interpolation here, which is OK but
2926 		 * does not provide the optimal result. We probably want
2927 		 * something close to the Perceptual Quantizer (PQ) curve.
2928 		 */
2929 		max = caps->aux_max_input_signal;
2930 		min = caps->aux_min_input_signal;
2931 
2932 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2933 			       + user_brightness * max;
		/* Multiply the value by 1000 since we use millinits */
2935 		brightness *= 1000;
2936 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2937 	}
2938 
2939 out:
2940 	return brightness;
2941 }
2942 
2943 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2944 {
2945 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2946 	struct amdgpu_dm_backlight_caps caps;
2947 	struct dc_link *link = NULL;
2948 	u32 brightness;
2949 	bool rc;
2950 
2951 	amdgpu_dm_update_backlight_caps(dm);
2952 	caps = dm->backlight_caps;
2953 
2954 	link = (struct dc_link *)dm->backlight_link;
2955 
2956 	brightness = convert_brightness(&caps, bd->props.brightness);
	/* Change brightness based on AUX property */
2958 	if (caps.aux_support)
2959 		return set_backlight_via_aux(link, brightness);
2960 
2961 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2962 
2963 	return rc ? 0 : 1;
2964 }
2965 
2966 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2967 {
2968 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2969 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2970 
2971 	if (ret == DC_ERROR_UNEXPECTED)
2972 		return bd->props.brightness;
2973 	return ret;
2974 }
2975 
2976 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2977 	.options = BL_CORE_SUSPENDRESUME,
2978 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2979 	.update_status	= amdgpu_dm_backlight_update_status,
2980 };
2981 
2982 static void
2983 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2984 {
2985 	char bl_name[16];
2986 	struct backlight_properties props = { 0 };
2987 
2988 	amdgpu_dm_update_backlight_caps(dm);
2989 
2990 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2991 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2992 	props.type = BACKLIGHT_RAW;
2993 
2994 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2995 			dm->adev->ddev->primary->index);
2996 
2997 	dm->backlight_dev = backlight_device_register(bl_name,
2998 			dm->adev->ddev->dev,
2999 			dm,
3000 			&amdgpu_dm_backlight_ops,
3001 			&props);
3002 
3003 	if (IS_ERR(dm->backlight_dev))
3004 		DRM_ERROR("DM: Backlight registration failed!\n");
3005 	else
3006 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3007 }
3008 
3009 #endif
3010 
3011 static int initialize_plane(struct amdgpu_display_manager *dm,
3012 			    struct amdgpu_mode_info *mode_info, int plane_id,
3013 			    enum drm_plane_type plane_type,
3014 			    const struct dc_plane_cap *plane_cap)
3015 {
3016 	struct drm_plane *plane;
3017 	unsigned long possible_crtcs;
3018 	int ret = 0;
3019 
3020 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3021 	if (!plane) {
3022 		DRM_ERROR("KMS: Failed to allocate plane\n");
3023 		return -ENOMEM;
3024 	}
3025 	plane->type = plane_type;
3026 
3027 	/*
3028 	 * HACK: IGT tests expect that the primary plane for a CRTC
3029 	 * can only have one possible CRTC. Only expose support for
3030 	 * any CRTC if they're not going to be used as a primary plane
3031 	 * for a CRTC - like overlay or underlay planes.
3032 	 */
3033 	possible_crtcs = 1 << plane_id;
3034 	if (plane_id >= dm->dc->caps.max_streams)
3035 		possible_crtcs = 0xff;
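
	/*
	 * Example: on a 4-stream ASIC, plane_id 2 keeps the 1:1 mapping
	 * (possible_crtcs = 0x4), while an overlay plane with plane_id 4
	 * falls through to possible_crtcs = 0xff and may attach to any CRTC.
	 */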
3036 
3037 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3038 
3039 	if (ret) {
3040 		DRM_ERROR("KMS: Failed to initialize plane\n");
3041 		kfree(plane);
3042 		return ret;
3043 	}
3044 
3045 	if (mode_info)
3046 		mode_info->planes[plane_id] = plane;
3047 
3048 	return ret;
3049 }
3050 
3051 
3052 static void register_backlight_device(struct amdgpu_display_manager *dm,
3053 				      struct dc_link *link)
3054 {
3055 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3056 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3057 
3058 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3059 	    link->type != dc_connection_none) {
3060 		/*
3061 		 * Event if registration failed, we should continue with
3062 		 * DM initialization because not having a backlight control
3063 		 * is better then a black screen.
3064 		 */
3065 		amdgpu_dm_register_backlight_device(dm);
3066 
3067 		if (dm->backlight_dev)
3068 			dm->backlight_link = link;
3069 	}
3070 #endif
3071 }
3072 
3073 
3074 /*
3075  * In this architecture, the association
3076  * connector -> encoder -> crtc
3077  * id not really requried. The crtc and connector will hold the
3078  * display_index as an abstraction to use with DAL component
3079  *
3080  * Returns 0 on success
3081  */
3082 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3083 {
3084 	struct amdgpu_display_manager *dm = &adev->dm;
3085 	int32_t i;
3086 	struct amdgpu_dm_connector *aconnector = NULL;
3087 	struct amdgpu_encoder *aencoder = NULL;
3088 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3089 	uint32_t link_cnt;
3090 	int32_t primary_planes;
3091 	enum dc_connection_type new_connection_type = dc_connection_none;
3092 	const struct dc_plane_cap *plane;
3093 
3094 	link_cnt = dm->dc->caps.max_links;
3095 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3096 		DRM_ERROR("DM: Failed to initialize mode config\n");
3097 		return -EINVAL;
3098 	}
3099 
3100 	/* There is one primary plane per CRTC */
3101 	primary_planes = dm->dc->caps.max_streams;
3102 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3103 
3104 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3106 	 * Order is reversed to match iteration order in atomic check.
3107 	 */
3108 	for (i = (primary_planes - 1); i >= 0; i--) {
3109 		plane = &dm->dc->caps.planes[i];
3110 
3111 		if (initialize_plane(dm, mode_info, i,
3112 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3113 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3114 			goto fail;
3115 		}
3116 	}
3117 
3118 	/*
3119 	 * Initialize overlay planes, index starting after primary planes.
3120 	 * These planes have a higher DRM index than the primary planes since
3121 	 * they should be considered as having a higher z-order.
3122 	 * Order is reversed to match iteration order in atomic check.
3123 	 *
3124 	 * Only support DCN for now, and only expose one so we don't encourage
3125 	 * userspace to use up all the pipes.
3126 	 */
3127 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3128 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3129 
3130 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3131 			continue;
3132 
3133 		if (!plane->blends_with_above || !plane->blends_with_below)
3134 			continue;
3135 
3136 		if (!plane->pixel_format_support.argb8888)
3137 			continue;
3138 
3139 		if (initialize_plane(dm, NULL, primary_planes + i,
3140 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3141 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3142 			goto fail;
3143 		}
3144 
3145 		/* Only create one overlay plane. */
3146 		break;
3147 	}
3148 
3149 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3150 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3151 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3152 			goto fail;
3153 		}
3154 
3155 	dm->display_indexes_num = dm->dc->caps.max_streams;
3156 
3157 	/* loops over all connectors on the board */
3158 	for (i = 0; i < link_cnt; i++) {
3159 		struct dc_link *link = NULL;
3160 
3161 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3162 			DRM_ERROR(
3163 				"KMS: Cannot support more than %d display indexes\n",
3164 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3165 			continue;
3166 		}
3167 
3168 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3169 		if (!aconnector)
3170 			goto fail;
3171 
3172 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3173 		if (!aencoder)
3174 			goto fail;
3175 
3176 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3177 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3178 			goto fail;
3179 		}
3180 
3181 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3182 			DRM_ERROR("KMS: Failed to initialize connector\n");
3183 			goto fail;
3184 		}
3185 
3186 		link = dc_get_link_at_index(dm->dc, i);
3187 
3188 		if (!dc_link_detect_sink(link, &new_connection_type))
3189 			DRM_ERROR("KMS: Failed to detect connector\n");
3190 
3191 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3192 			emulated_link_detect(link);
3193 			amdgpu_dm_update_connector_after_detect(aconnector);
3194 
3195 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3196 			amdgpu_dm_update_connector_after_detect(aconnector);
3197 			register_backlight_device(dm, link);
3198 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3199 				amdgpu_dm_set_psr_caps(link);
3200 		}
3201 
3202 
3203 	}
3204 
3205 	/* Software is initialized. Now we can register interrupt handlers. */
3206 	switch (adev->asic_type) {
3207 	case CHIP_BONAIRE:
3208 	case CHIP_HAWAII:
3209 	case CHIP_KAVERI:
3210 	case CHIP_KABINI:
3211 	case CHIP_MULLINS:
3212 	case CHIP_TONGA:
3213 	case CHIP_FIJI:
3214 	case CHIP_CARRIZO:
3215 	case CHIP_STONEY:
3216 	case CHIP_POLARIS11:
3217 	case CHIP_POLARIS10:
3218 	case CHIP_POLARIS12:
3219 	case CHIP_VEGAM:
3220 	case CHIP_VEGA10:
3221 	case CHIP_VEGA12:
3222 	case CHIP_VEGA20:
3223 		if (dce110_register_irq_handlers(dm->adev)) {
3224 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3225 			goto fail;
3226 		}
3227 		break;
3228 #if defined(CONFIG_DRM_AMD_DC_DCN)
3229 	case CHIP_RAVEN:
3230 	case CHIP_NAVI12:
3231 	case CHIP_NAVI10:
3232 	case CHIP_NAVI14:
3233 	case CHIP_RENOIR:
3234 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3235 	case CHIP_SIENNA_CICHLID:
3236 	case CHIP_NAVY_FLOUNDER:
3237 #endif
3238 		if (dcn10_register_irq_handlers(dm->adev)) {
3239 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3240 			goto fail;
3241 		}
3242 		break;
3243 #endif
3244 	default:
3245 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3246 		goto fail;
3247 	}
3248 
3249 	/* No userspace support. */
3250 	dm->dc->debug.disable_tri_buf = true;
3251 
3252 	return 0;
3253 fail:
3254 	kfree(aencoder);
3255 	kfree(aconnector);
3256 
3257 	return -EINVAL;
3258 }
3259 
3260 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3261 {
3262 	drm_mode_config_cleanup(dm->ddev);
3263 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3265 }
3266 
3267 /******************************************************************************
3268  * amdgpu_display_funcs functions
3269  *****************************************************************************/
3270 
3271 /*
3272  * dm_bandwidth_update - program display watermarks
3273  *
3274  * @adev: amdgpu_device pointer
3275  *
3276  * Calculate and program the display watermarks and line buffer allocation.
3277  */
3278 static void dm_bandwidth_update(struct amdgpu_device *adev)
3279 {
3280 	/* TODO: implement later */
3281 }
3282 
3283 static const struct amdgpu_display_funcs dm_display_funcs = {
3284 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3285 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3286 	.backlight_set_level = NULL, /* never called for DC */
3287 	.backlight_get_level = NULL, /* never called for DC */
3288 	.hpd_sense = NULL,/* called unconditionally */
3289 	.hpd_set_polarity = NULL, /* called unconditionally */
3290 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3291 	.page_flip_get_scanoutpos =
3292 		dm_crtc_get_scanoutpos,/* called unconditionally */
3293 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3294 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3295 };
3296 
3297 #if defined(CONFIG_DEBUG_KERNEL_DC)
3298 
3299 static ssize_t s3_debug_store(struct device *device,
3300 			      struct device_attribute *attr,
3301 			      const char *buf,
3302 			      size_t count)
3303 {
3304 	int ret;
3305 	int s3_state;
3306 	struct drm_device *drm_dev = dev_get_drvdata(device);
3307 	struct amdgpu_device *adev = drm_dev->dev_private;
3308 
3309 	ret = kstrtoint(buf, 0, &s3_state);
3310 
3311 	if (ret == 0) {
3312 		if (s3_state) {
3313 			dm_resume(adev);
3314 			drm_kms_helper_hotplug_event(adev->ddev);
3315 		} else
3316 			dm_suspend(adev);
3317 	}
3318 
3319 	return ret == 0 ? count : 0;
3320 }
3321 
3322 DEVICE_ATTR_WO(s3_debug);
3323 
3324 #endif
3325 
3326 static int dm_early_init(void *handle)
3327 {
3328 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3329 
3330 	switch (adev->asic_type) {
3331 	case CHIP_BONAIRE:
3332 	case CHIP_HAWAII:
3333 		adev->mode_info.num_crtc = 6;
3334 		adev->mode_info.num_hpd = 6;
3335 		adev->mode_info.num_dig = 6;
3336 		break;
3337 	case CHIP_KAVERI:
3338 		adev->mode_info.num_crtc = 4;
3339 		adev->mode_info.num_hpd = 6;
3340 		adev->mode_info.num_dig = 7;
3341 		break;
3342 	case CHIP_KABINI:
3343 	case CHIP_MULLINS:
3344 		adev->mode_info.num_crtc = 2;
3345 		adev->mode_info.num_hpd = 6;
3346 		adev->mode_info.num_dig = 6;
3347 		break;
3348 	case CHIP_FIJI:
3349 	case CHIP_TONGA:
3350 		adev->mode_info.num_crtc = 6;
3351 		adev->mode_info.num_hpd = 6;
3352 		adev->mode_info.num_dig = 7;
3353 		break;
3354 	case CHIP_CARRIZO:
3355 		adev->mode_info.num_crtc = 3;
3356 		adev->mode_info.num_hpd = 6;
3357 		adev->mode_info.num_dig = 9;
3358 		break;
3359 	case CHIP_STONEY:
3360 		adev->mode_info.num_crtc = 2;
3361 		adev->mode_info.num_hpd = 6;
3362 		adev->mode_info.num_dig = 9;
3363 		break;
3364 	case CHIP_POLARIS11:
3365 	case CHIP_POLARIS12:
3366 		adev->mode_info.num_crtc = 5;
3367 		adev->mode_info.num_hpd = 5;
3368 		adev->mode_info.num_dig = 5;
3369 		break;
3370 	case CHIP_POLARIS10:
3371 	case CHIP_VEGAM:
3372 		adev->mode_info.num_crtc = 6;
3373 		adev->mode_info.num_hpd = 6;
3374 		adev->mode_info.num_dig = 6;
3375 		break;
3376 	case CHIP_VEGA10:
3377 	case CHIP_VEGA12:
3378 	case CHIP_VEGA20:
3379 		adev->mode_info.num_crtc = 6;
3380 		adev->mode_info.num_hpd = 6;
3381 		adev->mode_info.num_dig = 6;
3382 		break;
3383 #if defined(CONFIG_DRM_AMD_DC_DCN)
3384 	case CHIP_RAVEN:
3385 		adev->mode_info.num_crtc = 4;
3386 		adev->mode_info.num_hpd = 4;
3387 		adev->mode_info.num_dig = 4;
3388 		break;
3389 #endif
3390 	case CHIP_NAVI10:
3391 	case CHIP_NAVI12:
3392 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3393 	case CHIP_SIENNA_CICHLID:
3394 	case CHIP_NAVY_FLOUNDER:
3395 #endif
3396 		adev->mode_info.num_crtc = 6;
3397 		adev->mode_info.num_hpd = 6;
3398 		adev->mode_info.num_dig = 6;
3399 		break;
3400 	case CHIP_NAVI14:
3401 		adev->mode_info.num_crtc = 5;
3402 		adev->mode_info.num_hpd = 5;
3403 		adev->mode_info.num_dig = 5;
3404 		break;
3405 	case CHIP_RENOIR:
3406 		adev->mode_info.num_crtc = 4;
3407 		adev->mode_info.num_hpd = 4;
3408 		adev->mode_info.num_dig = 4;
3409 		break;
3410 	default:
3411 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3412 		return -EINVAL;
3413 	}
3414 
3415 	amdgpu_dm_set_irq_funcs(adev);
3416 
3417 	if (adev->mode_info.funcs == NULL)
3418 		adev->mode_info.funcs = &dm_display_funcs;
3419 
3420 	/*
3421 	 * Note: Do NOT change adev->audio_endpt_rreg and
3422 	 * adev->audio_endpt_wreg because they are initialised in
3423 	 * amdgpu_device_init()
3424 	 */
3425 #if defined(CONFIG_DEBUG_KERNEL_DC)
3426 	device_create_file(
3427 		adev->ddev->dev,
3428 		&dev_attr_s3_debug);
3429 #endif
3430 
3431 	return 0;
3432 }
3433 
3434 static bool modeset_required(struct drm_crtc_state *crtc_state,
3435 			     struct dc_stream_state *new_stream,
3436 			     struct dc_stream_state *old_stream)
3437 {
3438 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3439 		return false;
3440 
3441 	if (!crtc_state->enable)
3442 		return false;
3443 
3444 	return crtc_state->active;
3445 }
3446 
3447 static bool modereset_required(struct drm_crtc_state *crtc_state)
3448 {
3449 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3450 		return false;
3451 
3452 	return !crtc_state->enable || !crtc_state->active;
3453 }
3454 
3455 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3456 {
3457 	drm_encoder_cleanup(encoder);
3458 	kfree(encoder);
3459 }
3460 
3461 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3462 	.destroy = amdgpu_dm_encoder_destroy,
3463 };
3464 
3465 
3466 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3467 				struct dc_scaling_info *scaling_info)
3468 {
3469 	int scale_w, scale_h;
3470 
3471 	memset(scaling_info, 0, sizeof(*scaling_info));
3472 
	/* Source is in fixed-point 16.16, but we ignore the fractional part for now. */
3474 	scaling_info->src_rect.x = state->src_x >> 16;
3475 	scaling_info->src_rect.y = state->src_y >> 16;
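
	/*
	 * e.g. a 16.16 src_x of 0x18000 (1.5) truncates to 1 here; the
	 * fractional phase is dropped rather than rounded.
	 */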
3476 
3477 	scaling_info->src_rect.width = state->src_w >> 16;
3478 	if (scaling_info->src_rect.width == 0)
3479 		return -EINVAL;
3480 
3481 	scaling_info->src_rect.height = state->src_h >> 16;
3482 	if (scaling_info->src_rect.height == 0)
3483 		return -EINVAL;
3484 
3485 	scaling_info->dst_rect.x = state->crtc_x;
3486 	scaling_info->dst_rect.y = state->crtc_y;
3487 
3488 	if (state->crtc_w == 0)
3489 		return -EINVAL;
3490 
3491 	scaling_info->dst_rect.width = state->crtc_w;
3492 
3493 	if (state->crtc_h == 0)
3494 		return -EINVAL;
3495 
3496 	scaling_info->dst_rect.height = state->crtc_h;
3497 
3498 	/* DRM doesn't specify clipping on destination output. */
3499 	scaling_info->clip_rect = scaling_info->dst_rect;
3500 
3501 	/* TODO: Validate scaling per-format with DC plane caps */
3502 	scale_w = scaling_info->dst_rect.width * 1000 /
3503 		  scaling_info->src_rect.width;
3504 
3505 	if (scale_w < 250 || scale_w > 16000)
3506 		return -EINVAL;
3507 
3508 	scale_h = scaling_info->dst_rect.height * 1000 /
3509 		  scaling_info->src_rect.height;
3510 
3511 	if (scale_h < 250 || scale_h > 16000)
3512 		return -EINVAL;
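
	/*
	 * The scale factors above are in units of dst/src * 1000, so the
	 * 250..16000 window rejects anything scaled down by more than 4x or
	 * up by more than 16x before it ever reaches DC.
	 */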
3513 
3514 	/*
3515 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3516 	 * assume reasonable defaults based on the format.
3517 	 */
3518 
3519 	return 0;
3520 }
3521 
3522 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3523 		       uint64_t *tiling_flags, bool *tmz_surface)
3524 {
3525 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3526 	int r = amdgpu_bo_reserve(rbo, false);
3527 
3528 	if (unlikely(r)) {
3529 		/* Don't show error message when returning -ERESTARTSYS */
3530 		if (r != -ERESTARTSYS)
3531 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3532 		return r;
3533 	}
3534 
3535 	if (tiling_flags)
3536 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3537 
3538 	if (tmz_surface)
3539 		*tmz_surface = amdgpu_bo_encrypted(rbo);
3540 
3541 	amdgpu_bo_unreserve(rbo);
3542 
3543 	return r;
3544 }
3545 
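/*
 * The DCC metadata offset in the tiling flags is stored in units of 256
 * bytes: an offset field of 0x40, say, places the metadata
 * 0x40 * 256 = 16 KiB past the start of the buffer, while a zero offset
 * means the surface carries no DCC metadata at all.
 */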
3546 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3547 {
3548 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3549 
3550 	return offset ? (address + offset * 256) : 0;
3551 }
3552 
3553 static int
3554 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3555 			  const struct amdgpu_framebuffer *afb,
3556 			  const enum surface_pixel_format format,
3557 			  const enum dc_rotation_angle rotation,
3558 			  const struct plane_size *plane_size,
3559 			  const union dc_tiling_info *tiling_info,
3560 			  const uint64_t info,
3561 			  struct dc_plane_dcc_param *dcc,
3562 			  struct dc_plane_address *address,
3563 			  bool force_disable_dcc)
3564 {
3565 	struct dc *dc = adev->dm.dc;
3566 	struct dc_dcc_surface_param input;
3567 	struct dc_surface_dcc_cap output;
3568 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3569 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3570 	uint64_t dcc_address;
3571 
3572 	memset(&input, 0, sizeof(input));
3573 	memset(&output, 0, sizeof(output));
3574 
3575 	if (force_disable_dcc)
3576 		return 0;
3577 
3578 	if (!offset)
3579 		return 0;
3580 
3581 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3582 		return 0;
3583 
3584 	if (!dc->cap_funcs.get_dcc_compression_cap)
3585 		return -EINVAL;
3586 
3587 	input.format = format;
3588 	input.surface_size.width = plane_size->surface_size.width;
3589 	input.surface_size.height = plane_size->surface_size.height;
3590 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3591 
3592 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3593 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3594 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3595 		input.scan = SCAN_DIRECTION_VERTICAL;
3596 
3597 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3598 		return -EINVAL;
3599 
3600 	if (!output.capable)
3601 		return -EINVAL;
3602 
3603 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3604 		return -EINVAL;
3605 
3606 	dcc->enable = 1;
3607 	dcc->meta_pitch =
3608 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3609 	dcc->independent_64b_blks = i64b;
3610 
3611 	dcc_address = get_dcc_address(afb->address, info);
3612 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3613 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3614 
3615 	return 0;
3616 }
3617 
3618 static int
3619 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3620 			     const struct amdgpu_framebuffer *afb,
3621 			     const enum surface_pixel_format format,
3622 			     const enum dc_rotation_angle rotation,
3623 			     const uint64_t tiling_flags,
3624 			     union dc_tiling_info *tiling_info,
3625 			     struct plane_size *plane_size,
3626 			     struct dc_plane_dcc_param *dcc,
3627 			     struct dc_plane_address *address,
3628 			     bool tmz_surface,
3629 			     bool force_disable_dcc)
3630 {
3631 	const struct drm_framebuffer *fb = &afb->base;
3632 	int ret;
3633 
3634 	memset(tiling_info, 0, sizeof(*tiling_info));
3635 	memset(plane_size, 0, sizeof(*plane_size));
3636 	memset(dcc, 0, sizeof(*dcc));
3637 	memset(address, 0, sizeof(*address));
3638 
3639 	address->tmz_surface = tmz_surface;
3640 
3641 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3642 		plane_size->surface_size.x = 0;
3643 		plane_size->surface_size.y = 0;
3644 		plane_size->surface_size.width = fb->width;
3645 		plane_size->surface_size.height = fb->height;
3646 		plane_size->surface_pitch =
3647 			fb->pitches[0] / fb->format->cpp[0];
3648 
3649 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3650 		address->grph.addr.low_part = lower_32_bits(afb->address);
3651 		address->grph.addr.high_part = upper_32_bits(afb->address);
3652 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3653 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3654 
3655 		plane_size->surface_size.x = 0;
3656 		plane_size->surface_size.y = 0;
3657 		plane_size->surface_size.width = fb->width;
3658 		plane_size->surface_size.height = fb->height;
3659 		plane_size->surface_pitch =
3660 			fb->pitches[0] / fb->format->cpp[0];
3661 
3662 		plane_size->chroma_size.x = 0;
3663 		plane_size->chroma_size.y = 0;
3664 		/* TODO: set these based on surface format */
3665 		plane_size->chroma_size.width = fb->width / 2;
3666 		plane_size->chroma_size.height = fb->height / 2;
3667 
3668 		plane_size->chroma_pitch =
3669 			fb->pitches[1] / fb->format->cpp[1];
3670 
3671 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3672 		address->video_progressive.luma_addr.low_part =
3673 			lower_32_bits(afb->address);
3674 		address->video_progressive.luma_addr.high_part =
3675 			upper_32_bits(afb->address);
3676 		address->video_progressive.chroma_addr.low_part =
3677 			lower_32_bits(chroma_addr);
3678 		address->video_progressive.chroma_addr.high_part =
3679 			upper_32_bits(chroma_addr);
3680 	}
3681 
3682 	/* Fill GFX8 params */
3683 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3684 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3685 
3686 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3687 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3688 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3689 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3690 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3691 
3692 		/* XXX fix me for VI */
3693 		tiling_info->gfx8.num_banks = num_banks;
3694 		tiling_info->gfx8.array_mode =
3695 				DC_ARRAY_2D_TILED_THIN1;
3696 		tiling_info->gfx8.tile_split = tile_split;
3697 		tiling_info->gfx8.bank_width = bankw;
3698 		tiling_info->gfx8.bank_height = bankh;
3699 		tiling_info->gfx8.tile_aspect = mtaspect;
3700 		tiling_info->gfx8.tile_mode =
3701 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3702 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3703 			== DC_ARRAY_1D_TILED_THIN1) {
3704 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3705 	}
3706 
3707 	tiling_info->gfx8.pipe_config =
3708 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3709 
3710 	if (adev->asic_type == CHIP_VEGA10 ||
3711 	    adev->asic_type == CHIP_VEGA12 ||
3712 	    adev->asic_type == CHIP_VEGA20 ||
3713 	    adev->asic_type == CHIP_NAVI10 ||
3714 	    adev->asic_type == CHIP_NAVI14 ||
3715 	    adev->asic_type == CHIP_NAVI12 ||
3716 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3717 		adev->asic_type == CHIP_SIENNA_CICHLID ||
3718 		adev->asic_type == CHIP_NAVY_FLOUNDER ||
3719 #endif
3720 	    adev->asic_type == CHIP_RENOIR ||
3721 	    adev->asic_type == CHIP_RAVEN) {
3722 		/* Fill GFX9 params */
3723 		tiling_info->gfx9.num_pipes =
3724 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3725 		tiling_info->gfx9.num_banks =
3726 			adev->gfx.config.gb_addr_config_fields.num_banks;
3727 		tiling_info->gfx9.pipe_interleave =
3728 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3729 		tiling_info->gfx9.num_shader_engines =
3730 			adev->gfx.config.gb_addr_config_fields.num_se;
3731 		tiling_info->gfx9.max_compressed_frags =
3732 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3733 		tiling_info->gfx9.num_rb_per_se =
3734 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3735 		tiling_info->gfx9.swizzle =
3736 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3737 		tiling_info->gfx9.shaderEnable = 1;
3738 
3739 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
3740 		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3741 		    adev->asic_type == CHIP_NAVY_FLOUNDER)
3742 			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3743 #endif
3744 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3745 						plane_size, tiling_info,
3746 						tiling_flags, dcc, address,
3747 						force_disable_dcc);
3748 		if (ret)
3749 			return ret;
3750 	}
3751 
3752 	return 0;
3753 }
3754 
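/*
 * Blending example with illustrative values: an overlay plane in
 * DRM_MODE_BLEND_PREMULTI mode with an ARGB8888 framebuffer gets
 * per_pixel_alpha = true, and a 16-bit plane alpha of 0x8000 is reduced
 * to the 8-bit global_alpha_value 0x80 (roughly 50%) by the shift below.
 */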
3755 static void
3756 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3757 			       bool *per_pixel_alpha, bool *global_alpha,
3758 			       int *global_alpha_value)
3759 {
3760 	*per_pixel_alpha = false;
3761 	*global_alpha = false;
3762 	*global_alpha_value = 0xff;
3763 
3764 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3765 		return;
3766 
3767 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3768 		static const uint32_t alpha_formats[] = {
3769 			DRM_FORMAT_ARGB8888,
3770 			DRM_FORMAT_RGBA8888,
3771 			DRM_FORMAT_ABGR8888,
3772 		};
3773 		uint32_t format = plane_state->fb->format->format;
3774 		unsigned int i;
3775 
3776 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3777 			if (format == alpha_formats[i]) {
3778 				*per_pixel_alpha = true;
3779 				break;
3780 			}
3781 		}
3782 	}
3783 
3784 	if (plane_state->alpha < 0xffff) {
3785 		*global_alpha = true;
3786 		*global_alpha_value = plane_state->alpha >> 8;
3787 	}
3788 }
3789 
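/*
 * Example mapping per the switch below: an NV12 plane with
 * DRM_COLOR_YCBCR_BT709 encoding and limited range resolves to
 * COLOR_SPACE_YCBCR709_LIMITED, while RGB formats always stay
 * COLOR_SPACE_SRGB regardless of the DRM color properties.
 */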
3790 static int
3791 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3792 			    const enum surface_pixel_format format,
3793 			    enum dc_color_space *color_space)
3794 {
3795 	bool full_range;
3796 
3797 	*color_space = COLOR_SPACE_SRGB;
3798 
3799 	/* DRM color properties only affect non-RGB formats. */
3800 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3801 		return 0;
3802 
3803 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3804 
3805 	switch (plane_state->color_encoding) {
3806 	case DRM_COLOR_YCBCR_BT601:
3807 		if (full_range)
3808 			*color_space = COLOR_SPACE_YCBCR601;
3809 		else
3810 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3811 		break;
3812 
3813 	case DRM_COLOR_YCBCR_BT709:
3814 		if (full_range)
3815 			*color_space = COLOR_SPACE_YCBCR709;
3816 		else
3817 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3818 		break;
3819 
3820 	case DRM_COLOR_YCBCR_BT2020:
3821 		if (full_range)
3822 			*color_space = COLOR_SPACE_2020_YCBCR;
3823 		else
3824 			return -EINVAL;
3825 		break;
3826 
3827 	default:
3828 		return -EINVAL;
3829 	}
3830 
3831 	return 0;
3832 }
3833 
3834 static int
3835 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3836 			    const struct drm_plane_state *plane_state,
3837 			    const uint64_t tiling_flags,
3838 			    struct dc_plane_info *plane_info,
3839 			    struct dc_plane_address *address,
3840 			    bool tmz_surface,
3841 			    bool force_disable_dcc)
3842 {
3843 	const struct drm_framebuffer *fb = plane_state->fb;
3844 	const struct amdgpu_framebuffer *afb =
3845 		to_amdgpu_framebuffer(plane_state->fb);
3846 	struct drm_format_name_buf format_name;
3847 	int ret;
3848 
3849 	memset(plane_info, 0, sizeof(*plane_info));
3850 
3851 	switch (fb->format->format) {
3852 	case DRM_FORMAT_C8:
3853 		plane_info->format =
3854 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3855 		break;
3856 	case DRM_FORMAT_RGB565:
3857 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3858 		break;
3859 	case DRM_FORMAT_XRGB8888:
3860 	case DRM_FORMAT_ARGB8888:
3861 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3862 		break;
3863 	case DRM_FORMAT_XRGB2101010:
3864 	case DRM_FORMAT_ARGB2101010:
3865 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3866 		break;
3867 	case DRM_FORMAT_XBGR2101010:
3868 	case DRM_FORMAT_ABGR2101010:
3869 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3870 		break;
3871 	case DRM_FORMAT_XBGR8888:
3872 	case DRM_FORMAT_ABGR8888:
3873 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3874 		break;
3875 	case DRM_FORMAT_NV21:
3876 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3877 		break;
3878 	case DRM_FORMAT_NV12:
3879 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3880 		break;
3881 	case DRM_FORMAT_P010:
3882 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3883 		break;
3884 	case DRM_FORMAT_XRGB16161616F:
3885 	case DRM_FORMAT_ARGB16161616F:
3886 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3887 		break;
3888 	case DRM_FORMAT_XBGR16161616F:
3889 	case DRM_FORMAT_ABGR16161616F:
3890 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
3891 		break;
3892 	default:
3893 		DRM_ERROR(
3894 			"Unsupported screen format %s\n",
3895 			drm_get_format_name(fb->format->format, &format_name));
3896 		return -EINVAL;
3897 	}
3898 
3899 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3900 	case DRM_MODE_ROTATE_0:
3901 		plane_info->rotation = ROTATION_ANGLE_0;
3902 		break;
3903 	case DRM_MODE_ROTATE_90:
3904 		plane_info->rotation = ROTATION_ANGLE_90;
3905 		break;
3906 	case DRM_MODE_ROTATE_180:
3907 		plane_info->rotation = ROTATION_ANGLE_180;
3908 		break;
3909 	case DRM_MODE_ROTATE_270:
3910 		plane_info->rotation = ROTATION_ANGLE_270;
3911 		break;
3912 	default:
3913 		plane_info->rotation = ROTATION_ANGLE_0;
3914 		break;
3915 	}
3916 
3917 	plane_info->visible = true;
3918 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3919 
3920 	plane_info->layer_index = 0;
3921 
3922 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3923 					  &plane_info->color_space);
3924 	if (ret)
3925 		return ret;
3926 
3927 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3928 					   plane_info->rotation, tiling_flags,
3929 					   &plane_info->tiling_info,
3930 					   &plane_info->plane_size,
3931 					   &plane_info->dcc, address, tmz_surface,
3932 					   force_disable_dcc);
3933 	if (ret)
3934 		return ret;
3935 
3936 	fill_blending_from_plane_state(
3937 		plane_state, &plane_info->per_pixel_alpha,
3938 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3939 
3940 	return 0;
3941 }
3942 
3943 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3944 				    struct dc_plane_state *dc_plane_state,
3945 				    struct drm_plane_state *plane_state,
3946 				    struct drm_crtc_state *crtc_state)
3947 {
3948 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3949 	const struct amdgpu_framebuffer *amdgpu_fb =
3950 		to_amdgpu_framebuffer(plane_state->fb);
3951 	struct dc_scaling_info scaling_info;
3952 	struct dc_plane_info plane_info;
3953 	uint64_t tiling_flags;
3954 	int ret;
3955 	bool tmz_surface = false;
3956 	bool force_disable_dcc = false;
3957 
3958 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3959 	if (ret)
3960 		return ret;
3961 
3962 	dc_plane_state->src_rect = scaling_info.src_rect;
3963 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3964 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3965 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3966 
3967 	ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3968 	if (ret)
3969 		return ret;
3970 
3971 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3972 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3973 					  &plane_info,
3974 					  &dc_plane_state->address,
3975 					  tmz_surface,
3976 					  force_disable_dcc);
3977 	if (ret)
3978 		return ret;
3979 
3980 	dc_plane_state->format = plane_info.format;
3981 	dc_plane_state->color_space = plane_info.color_space;
3983 	dc_plane_state->plane_size = plane_info.plane_size;
3984 	dc_plane_state->rotation = plane_info.rotation;
3985 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3986 	dc_plane_state->stereo_format = plane_info.stereo_format;
3987 	dc_plane_state->tiling_info = plane_info.tiling_info;
3988 	dc_plane_state->visible = plane_info.visible;
3989 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3990 	dc_plane_state->global_alpha = plane_info.global_alpha;
3991 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3992 	dc_plane_state->dcc = plane_info.dcc;
3993 	dc_plane_state->layer_index = plane_info.layer_index; /* Always returns 0 */
3994 
3995 	/*
3996 	 * Always set input transfer function, since plane state is refreshed
3997 	 * every time.
3998 	 */
3999 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4000 	if (ret)
4001 		return ret;
4002 
4003 	return 0;
4004 }
4005 
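/*
 * RMX_ASPECT example with illustrative numbers: a 1280x1024 mode on a
 * 1920x1080 panel has 1280 * 1080 < 1024 * 1920, so the width shrinks to
 * 1280 * 1080 / 1024 = 1350 and the image is centered as 1350x1080 with
 * dst.x = (1920 - 1350) / 2 = 285.
 */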
4006 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4007 					   const struct dm_connector_state *dm_state,
4008 					   struct dc_stream_state *stream)
4009 {
4010 	enum amdgpu_rmx_type rmx_type;
4011 
4012 	struct rect src = { 0 }; /* viewport in composition space */
4013 	struct rect dst = { 0 }; /* stream addressable area */
4014 
4015 	/* no mode; nothing to be done */
4016 	if (!mode)
4017 		return;
4018 
4019 	/* Full screen scaling by default */
4020 	src.width = mode->hdisplay;
4021 	src.height = mode->vdisplay;
4022 	dst.width = stream->timing.h_addressable;
4023 	dst.height = stream->timing.v_addressable;
4024 
4025 	if (dm_state) {
4026 		rmx_type = dm_state->scaling;
4027 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4028 			if (src.width * dst.height <
4029 					src.height * dst.width) {
4030 				/* height needs less upscaling/more downscaling */
4031 				dst.width = src.width *
4032 						dst.height / src.height;
4033 			} else {
4034 				/* width needs less upscaling/more downscaling */
4035 				dst.height = src.height *
4036 						dst.width / src.width;
4037 			}
4038 		} else if (rmx_type == RMX_CENTER) {
4039 			dst = src;
4040 		}
4041 
4042 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4043 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4044 
4045 		if (dm_state->underscan_enable) {
4046 			dst.x += dm_state->underscan_hborder / 2;
4047 			dst.y += dm_state->underscan_vborder / 2;
4048 			dst.width -= dm_state->underscan_hborder;
4049 			dst.height -= dm_state->underscan_vborder;
4050 		}
4051 	}
4052 
4053 	stream->src = src;
4054 	stream->dst = dst;
4055 
4056 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4057 			dst.x, dst.y, dst.width, dst.height);
4058 
4059 }
4060 
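/*
 * Example: a sink advertising 12 bpc with the connector's
 * max_requested_bpc property set to 10 resolves to min(12, 10) = 10,
 * which is already even and maps to COLOR_DEPTH_101010 below.
 */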
4061 static enum dc_color_depth
4062 convert_color_depth_from_display_info(const struct drm_connector *connector,
4063 				      bool is_y420, int requested_bpc)
4064 {
4065 	uint8_t bpc;
4066 
4067 	if (is_y420) {
4068 		bpc = 8;
4069 
4070 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4071 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4072 			bpc = 16;
4073 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4074 			bpc = 12;
4075 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4076 			bpc = 10;
4077 	} else {
4078 		bpc = (uint8_t)connector->display_info.bpc;
4079 		/* Assume 8 bpc by default if no bpc is specified. */
4080 		bpc = bpc ? bpc : 8;
4081 	}
4082 
4083 	if (requested_bpc > 0) {
4084 		/*
4085 		 * Cap display bpc based on the user requested value.
4086 		 *
4087 		 * The value for state->max_bpc may not be correctly updated
4088 		 * depending on when the connector gets added to the state
4089 		 * or if this was called outside of atomic check, so it
4090 		 * can't be used directly.
4091 		 */
4092 		bpc = min_t(u8, bpc, requested_bpc);
4093 
4094 		/* Round down to the nearest even number. */
4095 		bpc = bpc - (bpc & 1);
4096 	}
4097 
4098 	switch (bpc) {
4099 	case 0:
4100 		/*
4101 		 * Temporary workaround: DRM doesn't parse color depth for
4102 		 * EDID revisions before 1.4.
4103 		 * TODO: Fix EDID parsing.
4104 		 */
4105 		return COLOR_DEPTH_888;
4106 	case 6:
4107 		return COLOR_DEPTH_666;
4108 	case 8:
4109 		return COLOR_DEPTH_888;
4110 	case 10:
4111 		return COLOR_DEPTH_101010;
4112 	case 12:
4113 		return COLOR_DEPTH_121212;
4114 	case 14:
4115 		return COLOR_DEPTH_141414;
4116 	case 16:
4117 		return COLOR_DEPTH_161616;
4118 	default:
4119 		return COLOR_DEPTH_UNDEFINED;
4120 	}
4121 }
4122 
4123 static enum dc_aspect_ratio
4124 get_aspect_ratio(const struct drm_display_mode *mode_in)
4125 {
4126 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4127 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4128 }
4129 
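/*
 * E.g. 480p YCbCr (27.00 MHz, pix_clk_100hz = 270000) falls below the
 * threshold used below and gets YCbCr601, while 720p (74.25 MHz,
 * pix_clk_100hz = 742500) gets YCbCr709.
 */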
4130 static enum dc_color_space
4131 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4132 {
4133 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4134 
4135 	switch (dc_crtc_timing->pixel_encoding)	{
4136 	case PIXEL_ENCODING_YCBCR422:
4137 	case PIXEL_ENCODING_YCBCR444:
4138 	case PIXEL_ENCODING_YCBCR420:
4139 	{
4140 		/*
4141 		 * 27.03 MHz is the separation point between HDTV and SDTV
4142 		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
4143 		 * respectively.
4144 		 */
4145 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4146 			if (dc_crtc_timing->flags.Y_ONLY)
4147 				color_space =
4148 					COLOR_SPACE_YCBCR709_LIMITED;
4149 			else
4150 				color_space = COLOR_SPACE_YCBCR709;
4151 		} else {
4152 			if (dc_crtc_timing->flags.Y_ONLY)
4153 				color_space =
4154 					COLOR_SPACE_YCBCR601_LIMITED;
4155 			else
4156 				color_space = COLOR_SPACE_YCBCR601;
4157 		}
4158 
4159 	}
4160 	break;
4161 	case PIXEL_ENCODING_RGB:
4162 		color_space = COLOR_SPACE_SRGB;
4163 		break;
4164 
4165 	default:
4166 		WARN_ON(1);
4167 		break;
4168 	}
4169 
4170 	return color_space;
4171 }
4172 
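/*
 * Worked example, assuming a sink whose max_tmds_clock is 600000 kHz:
 * 4k@60 RGB has pix_clk_100hz = 5940000, i.e. 594000 kHz normalized.
 * 12 bpc would need 594000 * 36 / 24 = 891000 kHz and 10 bpc would need
 * 742500 kHz, both over the limit, so the loop settles on 8 bpc.
 */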
4173 static bool adjust_colour_depth_from_display_info(
4174 	struct dc_crtc_timing *timing_out,
4175 	const struct drm_display_info *info)
4176 {
4177 	enum dc_color_depth depth = timing_out->display_color_depth;
4178 	int normalized_clk;
4179 	do {
4180 		normalized_clk = timing_out->pix_clk_100hz / 10;
4181 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4182 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4183 			normalized_clk /= 2;
4184 		/* Adjust the pixel clock per the HDMI spec based on colour depth. */
4185 		switch (depth) {
4186 		case COLOR_DEPTH_888:
4187 			break;
4188 		case COLOR_DEPTH_101010:
4189 			normalized_clk = (normalized_clk * 30) / 24;
4190 			break;
4191 		case COLOR_DEPTH_121212:
4192 			normalized_clk = (normalized_clk * 36) / 24;
4193 			break;
4194 		case COLOR_DEPTH_161616:
4195 			normalized_clk = (normalized_clk * 48) / 24;
4196 			break;
4197 		default:
4198 			/* The above depths are the only ones valid for HDMI. */
4199 			return false;
4200 		}
4201 		if (normalized_clk <= info->max_tmds_clock) {
4202 			timing_out->display_color_depth = depth;
4203 			return true;
4204 		}
4205 	} while (--depth > COLOR_DEPTH_666);
4206 	return false;
4207 }
4208 
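/*
 * The DC timing fields are derived from the DRM mode geometry below;
 * e.g. for CEA 1080p60 (hdisplay 1920, hsync_start 2008, hsync_end 2052,
 * htotal 2200) the horizontal front porch is 2008 - 1920 = 88 pixels and
 * the sync width is 2052 - 2008 = 44 pixels.
 */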
4209 static void fill_stream_properties_from_drm_display_mode(
4210 	struct dc_stream_state *stream,
4211 	const struct drm_display_mode *mode_in,
4212 	const struct drm_connector *connector,
4213 	const struct drm_connector_state *connector_state,
4214 	const struct dc_stream_state *old_stream,
4215 	int requested_bpc)
4216 {
4217 	struct dc_crtc_timing *timing_out = &stream->timing;
4218 	const struct drm_display_info *info = &connector->display_info;
4219 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4220 	struct hdmi_vendor_infoframe hv_frame;
4221 	struct hdmi_avi_infoframe avi_frame;
4222 
4223 	memset(&hv_frame, 0, sizeof(hv_frame));
4224 	memset(&avi_frame, 0, sizeof(avi_frame));
4225 
4226 	timing_out->h_border_left = 0;
4227 	timing_out->h_border_right = 0;
4228 	timing_out->v_border_top = 0;
4229 	timing_out->v_border_bottom = 0;
4230 	/* TODO: un-hardcode */
4231 	if (drm_mode_is_420_only(info, mode_in)
4232 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4233 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4234 	else if (drm_mode_is_420_also(info, mode_in)
4235 			&& aconnector->force_yuv420_output)
4236 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4237 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4238 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4239 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4240 	else
4241 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4242 
4243 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4244 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4245 		connector,
4246 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4247 		requested_bpc);
4248 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4249 	timing_out->hdmi_vic = 0;
4250 
4251 	if (old_stream) {
4252 		timing_out->vic = old_stream->timing.vic;
4253 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4254 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4255 	} else {
4256 		timing_out->vic = drm_match_cea_mode(mode_in);
4257 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4258 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4259 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4260 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4261 	}
4262 
4263 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4264 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4265 		timing_out->vic = avi_frame.video_code;
4266 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4267 		timing_out->hdmi_vic = hv_frame.vic;
4268 	}
4269 
4270 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4271 	timing_out->h_total = mode_in->crtc_htotal;
4272 	timing_out->h_sync_width =
4273 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4274 	timing_out->h_front_porch =
4275 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4276 	timing_out->v_total = mode_in->crtc_vtotal;
4277 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4278 	timing_out->v_front_porch =
4279 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4280 	timing_out->v_sync_width =
4281 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4282 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4283 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4284 
4285 	stream->output_color_space = get_output_color_space(timing_out);
4286 
4287 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4288 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4289 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4290 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4291 		    drm_mode_is_420_also(info, mode_in) &&
4292 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4293 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4294 			adjust_colour_depth_from_display_info(timing_out, info);
4295 		}
4296 	}
4297 }
4298 
4299 static void fill_audio_info(struct audio_info *audio_info,
4300 			    const struct drm_connector *drm_connector,
4301 			    const struct dc_sink *dc_sink)
4302 {
4303 	int i = 0;
4304 	int cea_revision = 0;
4305 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4306 
4307 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4308 	audio_info->product_id = edid_caps->product_id;
4309 
4310 	cea_revision = drm_connector->display_info.cea_rev;
4311 
4312 	strscpy(audio_info->display_name,
4313 		edid_caps->display_name,
4314 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4315 
4316 	if (cea_revision >= 3) {
4317 		audio_info->mode_count = edid_caps->audio_mode_count;
4318 
4319 		for (i = 0; i < audio_info->mode_count; ++i) {
4320 			audio_info->modes[i].format_code =
4321 					(enum audio_format_code)
4322 					(edid_caps->audio_modes[i].format_code);
4323 			audio_info->modes[i].channel_count =
4324 					edid_caps->audio_modes[i].channel_count;
4325 			audio_info->modes[i].sample_rates.all =
4326 					edid_caps->audio_modes[i].sample_rate;
4327 			audio_info->modes[i].sample_size =
4328 					edid_caps->audio_modes[i].sample_size;
4329 		}
4330 	}
4331 
4332 	audio_info->flags.all = edid_caps->speaker_flags;
4333 
4334 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4335 	if (drm_connector->latency_present[0]) {
4336 		audio_info->video_latency = drm_connector->video_latency[0];
4337 		audio_info->audio_latency = drm_connector->audio_latency[0];
4338 	}
4339 
4340 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4341 
4342 }
4343 
4344 static void
4345 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4346 				      struct drm_display_mode *dst_mode)
4347 {
4348 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4349 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4350 	dst_mode->crtc_clock = src_mode->crtc_clock;
4351 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4352 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4353 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4354 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4355 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4356 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4357 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4358 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4359 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4360 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4361 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4362 }
4363 
4364 static void
4365 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4366 					const struct drm_display_mode *native_mode,
4367 					bool scale_enabled)
4368 {
4369 	if (scale_enabled) {
4370 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4371 	} else if (native_mode->clock == drm_mode->clock &&
4372 			native_mode->htotal == drm_mode->htotal &&
4373 			native_mode->vtotal == drm_mode->vtotal) {
4374 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4375 	} else {
4376 		/* no scaling and no amdgpu-inserted mode; nothing to patch */
4377 	}
4378 }
4379 
4380 static struct dc_sink *
4381 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4382 {
4383 	struct dc_sink_init_data sink_init_data = { 0 };
4384 	struct dc_sink *sink = NULL;
4385 	sink_init_data.link = aconnector->dc_link;
4386 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4387 
4388 	sink = dc_sink_create(&sink_init_data);
4389 	if (!sink) {
4390 		DRM_ERROR("Failed to create sink!\n");
4391 		return NULL;
4392 	}
4393 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4394 
4395 	return sink;
4396 }
4397 
4398 static void set_multisync_trigger_params(
4399 		struct dc_stream_state *stream)
4400 {
4401 	if (stream->triggered_crtc_reset.enabled) {
4402 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4403 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4404 	}
4405 }
4406 
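/*
 * The refresh rate below is pix_clk / (h_total * v_total); e.g. a 1080p
 * stream at 148.5 MHz with a 2200x1125 total raster yields
 * 1485000 * 100 / (2200 * 1125) = 60 Hz. The stream with the highest
 * rate becomes the reset trigger source for all the others.
 */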
4407 static void set_master_stream(struct dc_stream_state *stream_set[],
4408 			      int stream_count)
4409 {
4410 	int j, highest_rfr = 0, master_stream = 0;
4411 
4412 	for (j = 0;  j < stream_count; j++) {
4413 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4414 			int refresh_rate = 0;
4415 
4416 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4417 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4418 			if (refresh_rate > highest_rfr) {
4419 				highest_rfr = refresh_rate;
4420 				master_stream = j;
4421 			}
4422 		}
4423 	}
4424 	for (j = 0;  j < stream_count; j++) {
4425 		if (stream_set[j])
4426 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4427 	}
4428 }
4429 
4430 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4431 {
4432 	int i = 0;
4433 
4434 	if (context->stream_count < 2)
4435 		return;
4436 	for (i = 0; i < context->stream_count ; i++) {
4437 		if (!context->streams[i])
4438 			continue;
4439 		/*
4440 		 * TODO: add a function to read AMD VSDB bits and set
4441 		 * crtc_sync_master.multi_sync_enabled flag
4442 		 * For now it's set to false
4443 		 */
4444 		set_multisync_trigger_params(context->streams[i]);
4445 	}
4446 	set_master_stream(context->streams, context->stream_count);
4447 }
4448 
4449 static struct dc_stream_state *
4450 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4451 		       const struct drm_display_mode *drm_mode,
4452 		       const struct dm_connector_state *dm_state,
4453 		       const struct dc_stream_state *old_stream,
4454 		       int requested_bpc)
4455 {
4456 	struct drm_display_mode *preferred_mode = NULL;
4457 	struct drm_connector *drm_connector;
4458 	const struct drm_connector_state *con_state =
4459 		dm_state ? &dm_state->base : NULL;
4460 	struct dc_stream_state *stream = NULL;
4461 	struct drm_display_mode mode = *drm_mode;
4462 	bool native_mode_found = false;
4463 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4464 	int mode_refresh;
4465 	int preferred_refresh = 0;
4466 #if defined(CONFIG_DRM_AMD_DC_DCN)
4467 	struct dsc_dec_dpcd_caps dsc_caps;
4468 #endif
4469 	uint32_t link_bandwidth_kbps;
4470 
4471 	struct dc_sink *sink = NULL;
4472 	if (aconnector == NULL) {
4473 		DRM_ERROR("aconnector is NULL!\n");
4474 		return stream;
4475 	}
4476 
4477 	drm_connector = &aconnector->base;
4478 
4479 	if (!aconnector->dc_sink) {
4480 		sink = create_fake_sink(aconnector);
4481 		if (!sink)
4482 			return stream;
4483 	} else {
4484 		sink = aconnector->dc_sink;
4485 		dc_sink_retain(sink);
4486 	}
4487 
4488 	stream = dc_create_stream_for_sink(sink);
4489 
4490 	if (stream == NULL) {
4491 		DRM_ERROR("Failed to create stream for sink!\n");
4492 		goto finish;
4493 	}
4494 
4495 	stream->dm_stream_context = aconnector;
4496 
4497 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4498 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4499 
4500 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4501 		/* Search for preferred mode */
4502 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4503 			native_mode_found = true;
4504 			break;
4505 		}
4506 	}
4507 	if (!native_mode_found)
4508 		preferred_mode = list_first_entry_or_null(
4509 				&aconnector->base.modes,
4510 				struct drm_display_mode,
4511 				head);
4512 
4513 	mode_refresh = drm_mode_vrefresh(&mode);
4514 
4515 	if (preferred_mode == NULL) {
4516 		/*
4517 		 * This may not be an error; the use case is when we have no
4518 		 * usermode calls to reset and set the mode upon hotplug. In
4519 		 * that case we call set mode ourselves to restore the previous
4520 		 * mode, and the mode list may not be filled in time.
4521 		 */
4522 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4523 	} else {
4524 		decide_crtc_timing_for_drm_display_mode(
4525 				&mode, preferred_mode, scale);
4527 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4528 	}
4529 
4530 	if (!dm_state)
4531 		drm_mode_set_crtcinfo(&mode, 0);
4532 
4533 	/*
4534 	 * If scaling is enabled and the refresh rate didn't change,
4535 	 * we copy the vic and polarities of the old timings.
4536 	 */
4537 	if (!scale || mode_refresh != preferred_refresh)
4538 		fill_stream_properties_from_drm_display_mode(stream,
4539 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
4540 	else
4541 		fill_stream_properties_from_drm_display_mode(stream,
4542 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
4543 
4544 	stream->timing.flags.DSC = 0;
4545 
4546 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4547 #if defined(CONFIG_DRM_AMD_DC_DCN)
4548 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4549 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4550 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
4551 				      &dsc_caps);
4552 #endif
4553 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4554 							     dc_link_get_link_cap(aconnector->dc_link));
4555 
4556 #if defined(CONFIG_DRM_AMD_DC_DCN)
4557 		if (dsc_caps.is_dsc_supported)
4558 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4559 						  &dsc_caps,
4560 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4561 						  link_bandwidth_kbps,
4562 						  &stream->timing,
4563 						  &stream->timing.dsc_cfg))
4564 				stream->timing.flags.DSC = 1;
4565 #endif
4566 	}
4567 
4568 	update_stream_scaling_settings(&mode, dm_state, stream);
4569 
4570 	fill_audio_info(
4571 		&stream->audio_info,
4572 		drm_connector,
4573 		sink);
4574 
4575 	update_stream_signal(stream, sink);
4576 
4577 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4578 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4579 	if (stream->link->psr_settings.psr_feature_enabled) {
4580 		/*
4581 		 * Decide whether the stream supports VSC SDP colorimetry
4582 		 * before building the VSC info packet.
4583 		 */
4584 		stream->use_vsc_sdp_for_colorimetry = false;
4585 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4586 			stream->use_vsc_sdp_for_colorimetry =
4587 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4588 		} else {
4589 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
4590 				stream->use_vsc_sdp_for_colorimetry = true;
4591 		}
4592 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4593 	}
4594 finish:
4595 	dc_sink_release(sink);
4596 
4597 	return stream;
4598 }
4599 
4600 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4601 {
4602 	drm_crtc_cleanup(crtc);
4603 	kfree(crtc);
4604 }
4605 
4606 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4607 				  struct drm_crtc_state *state)
4608 {
4609 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4610 
4611 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
4612 	if (cur->stream)
4613 		dc_stream_release(cur->stream);
4614 
4615 
4616 	__drm_atomic_helper_crtc_destroy_state(state);
4617 
4618 
4619 	kfree(state);
4620 }
4621 
4622 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4623 {
4624 	struct dm_crtc_state *state;
4625 
4626 	if (crtc->state)
4627 		dm_crtc_destroy_state(crtc, crtc->state);
4628 
4629 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4630 	if (WARN_ON(!state))
4631 		return;
4632 
4633 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
4634 }
4635 
4636 static struct drm_crtc_state *
4637 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4638 {
4639 	struct dm_crtc_state *state, *cur;
4640 
4641 	if (WARN_ON(!crtc->state))
4642 		return NULL;
4643 
4644 	cur = to_dm_crtc_state(crtc->state);
4645 
4646 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4647 	if (!state)
4648 		return NULL;
4649 
4650 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4651 
4652 	if (cur->stream) {
4653 		state->stream = cur->stream;
4654 		dc_stream_retain(state->stream);
4655 	}
4656 
4657 	state->active_planes = cur->active_planes;
4658 	state->vrr_params = cur->vrr_params;
4659 	state->vrr_infopacket = cur->vrr_infopacket;
4660 	state->abm_level = cur->abm_level;
4661 	state->vrr_supported = cur->vrr_supported;
4662 	state->freesync_config = cur->freesync_config;
4663 	state->crc_src = cur->crc_src;
4664 	state->cm_has_degamma = cur->cm_has_degamma;
4665 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4666 
4667 	/* TODO: Duplicate dc_stream once the stream object is flattened */
4668 
4669 	return &state->base;
4670 }
4671 
4672 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4673 {
4674 	enum dc_irq_source irq_source;
4675 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4676 	struct amdgpu_device *adev = crtc->dev->dev_private;
4677 	int rc;
4678 
4679 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4680 
4681 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4682 
4683 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4684 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4685 	return rc;
4686 }
4687 
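/*
 * VUPDATE is only useful while vblank is on and the CRTC runs in a
 * variable-refresh mode, so dm_set_vblank() below toggles it together
 * with the vblank interrupt accordingly.
 */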
4688 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4689 {
4690 	enum dc_irq_source irq_source;
4691 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4692 	struct amdgpu_device *adev = crtc->dev->dev_private;
4693 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4694 	int rc = 0;
4695 
4696 	if (enable) {
4697 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4698 		if (amdgpu_dm_vrr_active(acrtc_state))
4699 			rc = dm_set_vupdate_irq(crtc, true);
4700 	} else {
4701 		/* vblank irq off -> vupdate irq off */
4702 		rc = dm_set_vupdate_irq(crtc, false);
4703 	}
4704 
4705 	if (rc)
4706 		return rc;
4707 
4708 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4709 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4710 }
4711 
4712 static int dm_enable_vblank(struct drm_crtc *crtc)
4713 {
4714 	return dm_set_vblank(crtc, true);
4715 }
4716 
4717 static void dm_disable_vblank(struct drm_crtc *crtc)
4718 {
4719 	dm_set_vblank(crtc, false);
4720 }
4721 
4722 /* Only the options currently available in the driver are implemented */
4723 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4724 	.reset = dm_crtc_reset_state,
4725 	.destroy = amdgpu_dm_crtc_destroy,
4726 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4727 	.set_config = drm_atomic_helper_set_config,
4728 	.page_flip = drm_atomic_helper_page_flip,
4729 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4730 	.atomic_destroy_state = dm_crtc_destroy_state,
4731 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4732 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4733 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4734 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4735 	.enable_vblank = dm_enable_vblank,
4736 	.disable_vblank = dm_disable_vblank,
4737 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4738 };
4739 
4740 static enum drm_connector_status
4741 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4742 {
4743 	bool connected;
4744 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4745 
4746 	/*
4747 	 * Notes:
4748 	 * 1. This interface is NOT called in context of HPD irq.
4749 	 * 2. This interface *is called* in the context of a user-mode ioctl,
4750 	 * which makes it a bad place for *any* MST-related activity.
4751 	 */
4752 
4753 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4754 	    !aconnector->fake_enable)
4755 		connected = (aconnector->dc_sink != NULL);
4756 	else
4757 		connected = (aconnector->base.force == DRM_FORCE_ON);
4758 
4759 	return (connected ? connector_status_connected :
4760 			connector_status_disconnected);
4761 }
4762 
4763 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4764 					    struct drm_connector_state *connector_state,
4765 					    struct drm_property *property,
4766 					    uint64_t val)
4767 {
4768 	struct drm_device *dev = connector->dev;
4769 	struct amdgpu_device *adev = dev->dev_private;
4770 	struct dm_connector_state *dm_old_state =
4771 		to_dm_connector_state(connector->state);
4772 	struct dm_connector_state *dm_new_state =
4773 		to_dm_connector_state(connector_state);
4774 
4775 	int ret = -EINVAL;
4776 
4777 	if (property == dev->mode_config.scaling_mode_property) {
4778 		enum amdgpu_rmx_type rmx_type;
4779 
4780 		switch (val) {
4781 		case DRM_MODE_SCALE_CENTER:
4782 			rmx_type = RMX_CENTER;
4783 			break;
4784 		case DRM_MODE_SCALE_ASPECT:
4785 			rmx_type = RMX_ASPECT;
4786 			break;
4787 		case DRM_MODE_SCALE_FULLSCREEN:
4788 			rmx_type = RMX_FULL;
4789 			break;
4790 		case DRM_MODE_SCALE_NONE:
4791 		default:
4792 			rmx_type = RMX_OFF;
4793 			break;
4794 		}
4795 
4796 		if (dm_old_state->scaling == rmx_type)
4797 			return 0;
4798 
4799 		dm_new_state->scaling = rmx_type;
4800 		ret = 0;
4801 	} else if (property == adev->mode_info.underscan_hborder_property) {
4802 		dm_new_state->underscan_hborder = val;
4803 		ret = 0;
4804 	} else if (property == adev->mode_info.underscan_vborder_property) {
4805 		dm_new_state->underscan_vborder = val;
4806 		ret = 0;
4807 	} else if (property == adev->mode_info.underscan_property) {
4808 		dm_new_state->underscan_enable = val;
4809 		ret = 0;
4810 	} else if (property == adev->mode_info.abm_level_property) {
4811 		dm_new_state->abm_level = val;
4812 		ret = 0;
4813 	}
4814 
4815 	return ret;
4816 }
4817 
4818 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4819 					    const struct drm_connector_state *state,
4820 					    struct drm_property *property,
4821 					    uint64_t *val)
4822 {
4823 	struct drm_device *dev = connector->dev;
4824 	struct amdgpu_device *adev = dev->dev_private;
4825 	struct dm_connector_state *dm_state =
4826 		to_dm_connector_state(state);
4827 	int ret = -EINVAL;
4828 
4829 	if (property == dev->mode_config.scaling_mode_property) {
4830 		switch (dm_state->scaling) {
4831 		case RMX_CENTER:
4832 			*val = DRM_MODE_SCALE_CENTER;
4833 			break;
4834 		case RMX_ASPECT:
4835 			*val = DRM_MODE_SCALE_ASPECT;
4836 			break;
4837 		case RMX_FULL:
4838 			*val = DRM_MODE_SCALE_FULLSCREEN;
4839 			break;
4840 		case RMX_OFF:
4841 		default:
4842 			*val = DRM_MODE_SCALE_NONE;
4843 			break;
4844 		}
4845 		ret = 0;
4846 	} else if (property == adev->mode_info.underscan_hborder_property) {
4847 		*val = dm_state->underscan_hborder;
4848 		ret = 0;
4849 	} else if (property == adev->mode_info.underscan_vborder_property) {
4850 		*val = dm_state->underscan_vborder;
4851 		ret = 0;
4852 	} else if (property == adev->mode_info.underscan_property) {
4853 		*val = dm_state->underscan_enable;
4854 		ret = 0;
4855 	} else if (property == adev->mode_info.abm_level_property) {
4856 		*val = dm_state->abm_level;
4857 		ret = 0;
4858 	}
4859 
4860 	return ret;
4861 }
4862 
4863 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4864 {
4865 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4866 
4867 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4868 }
4869 
4870 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4871 {
4872 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4873 	const struct dc_link *link = aconnector->dc_link;
4874 	struct amdgpu_device *adev = connector->dev->dev_private;
4875 	struct amdgpu_display_manager *dm = &adev->dm;
4876 
4877 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4878 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4879 
4880 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4881 	    link->type != dc_connection_none &&
4882 	    dm->backlight_dev) {
4883 		backlight_device_unregister(dm->backlight_dev);
4884 		dm->backlight_dev = NULL;
4885 	}
4886 #endif
4887 
4888 	if (aconnector->dc_em_sink)
4889 		dc_sink_release(aconnector->dc_em_sink);
4890 	aconnector->dc_em_sink = NULL;
4891 	if (aconnector->dc_sink)
4892 		dc_sink_release(aconnector->dc_sink);
4893 	aconnector->dc_sink = NULL;
4894 
4895 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4896 	drm_connector_unregister(connector);
4897 	drm_connector_cleanup(connector);
4898 	if (aconnector->i2c) {
4899 		i2c_del_adapter(&aconnector->i2c->base);
4900 		kfree(aconnector->i2c);
4901 	}
4902 	kfree(aconnector->dm_dp_aux.aux.name);
4903 
4904 	kfree(connector);
4905 }
4906 
4907 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4908 {
4909 	struct dm_connector_state *state =
4910 		to_dm_connector_state(connector->state);
4911 
4912 	if (connector->state)
4913 		__drm_atomic_helper_connector_destroy_state(connector->state);
4914 
4915 	kfree(state);
4916 
4917 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4918 
4919 	if (state) {
4920 		state->scaling = RMX_OFF;
4921 		state->underscan_enable = false;
4922 		state->underscan_hborder = 0;
4923 		state->underscan_vborder = 0;
4924 		state->base.max_requested_bpc = 8;
4925 		state->vcpi_slots = 0;
4926 		state->pbn = 0;
4927 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4928 			state->abm_level = amdgpu_dm_abm_level;
4929 
4930 		__drm_atomic_helper_connector_reset(connector, &state->base);
4931 	}
4932 }
4933 
4934 struct drm_connector_state *
4935 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4936 {
4937 	struct dm_connector_state *state =
4938 		to_dm_connector_state(connector->state);
4939 
4940 	struct dm_connector_state *new_state =
4941 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4942 
4943 	if (!new_state)
4944 		return NULL;
4945 
4946 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4947 
4948 	new_state->freesync_capable = state->freesync_capable;
4949 	new_state->abm_level = state->abm_level;
4950 	new_state->scaling = state->scaling;
4951 	new_state->underscan_enable = state->underscan_enable;
4952 	new_state->underscan_hborder = state->underscan_hborder;
4953 	new_state->underscan_vborder = state->underscan_vborder;
4954 	new_state->vcpi_slots = state->vcpi_slots;
4955 	new_state->pbn = state->pbn;
4956 	return &new_state->base;
4957 }
4958 
4959 static int
4960 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4961 {
4962 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4963 		to_amdgpu_dm_connector(connector);
4964 	int r;
4965 
4966 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4967 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4968 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4969 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4970 		if (r)
4971 			return r;
4972 	}
4973 
4974 #if defined(CONFIG_DEBUG_FS)
4975 	connector_debugfs_init(amdgpu_dm_connector);
4976 #endif
4977 
4978 	return 0;
4979 }
4980 
4981 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4982 	.reset = amdgpu_dm_connector_funcs_reset,
4983 	.detect = amdgpu_dm_connector_detect,
4984 	.fill_modes = drm_helper_probe_single_connector_modes,
4985 	.destroy = amdgpu_dm_connector_destroy,
4986 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4987 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4988 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4989 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4990 	.late_register = amdgpu_dm_connector_late_register,
4991 	.early_unregister = amdgpu_dm_connector_unregister
4992 };
4993 
4994 static int get_modes(struct drm_connector *connector)
4995 {
4996 	return amdgpu_dm_connector_get_modes(connector);
4997 }
4998 
4999 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5000 {
5001 	struct dc_sink_init_data init_params = {
5002 			.link = aconnector->dc_link,
5003 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5004 	};
5005 	struct edid *edid;
5006 
5007 	if (!aconnector->base.edid_blob_ptr) {
5008 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5009 				aconnector->base.name);
5010 
5011 		aconnector->base.force = DRM_FORCE_OFF;
5012 		aconnector->base.override_edid = false;
5013 		return;
5014 	}
5015 
5016 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5017 
5018 	aconnector->edid = edid;
5019 
5020 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5021 		aconnector->dc_link,
5022 		(uint8_t *)edid,
5023 		(edid->extensions + 1) * EDID_LENGTH,
5024 		&init_params);
5025 
5026 	if (aconnector->base.force == DRM_FORCE_ON) {
5027 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5028 		aconnector->dc_link->local_sink :
5029 		aconnector->dc_em_sink;
5030 		dc_sink_retain(aconnector->dc_sink);
5031 	}
5032 }
5033 
5034 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5035 {
5036 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5037 
5038 	/*
5039 	 * In case of a headless boot with force-on for a DP managed connector,
5040 	 * those settings have to be != 0 to get an initial modeset.
5041 	 */
5042 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5043 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5044 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5045 	}
5046 
5047 
5048 	aconnector->base.override_edid = true;
5049 	create_eml_sink(aconnector);
5050 }
5051 
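/*
 * Stream validation retries at progressively lower colour depth: with
 * max_requested_bpc = 10 the loop below tries 10, then 8, then 6 bpc
 * before giving up, so a mode that only fits the link at 8 bpc still
 * lights up.
 */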
5052 static struct dc_stream_state *
5053 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5054 				const struct drm_display_mode *drm_mode,
5055 				const struct dm_connector_state *dm_state,
5056 				const struct dc_stream_state *old_stream)
5057 {
5058 	struct drm_connector *connector = &aconnector->base;
5059 	struct amdgpu_device *adev = connector->dev->dev_private;
5060 	struct dc_stream_state *stream;
5061 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5062 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5063 	enum dc_status dc_result = DC_OK;
5064 
5065 	do {
5066 		stream = create_stream_for_sink(aconnector, drm_mode,
5067 						dm_state, old_stream,
5068 						requested_bpc);
5069 		if (stream == NULL) {
5070 			DRM_ERROR("Failed to create stream for sink!\n");
5071 			break;
5072 		}
5073 
5074 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5075 
5076 		if (dc_result != DC_OK) {
5077 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5078 				      drm_mode->hdisplay,
5079 				      drm_mode->vdisplay,
5080 				      drm_mode->clock,
5081 				      dc_result,
5082 				      dc_status_to_str(dc_result));
5083 
5084 			dc_stream_release(stream);
5085 			stream = NULL;
5086 			requested_bpc -= 2; /* lower bpc to retry validation */
5087 		}
5088 
5089 	} while (stream == NULL && requested_bpc >= 6);
5090 
5091 	return stream;
5092 }
5093 
5094 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5095 				   struct drm_display_mode *mode)
5096 {
5097 	int result = MODE_ERROR;
5098 	struct dc_sink *dc_sink;
5099 	/* TODO: Unhardcode stream count */
5100 	struct dc_stream_state *stream;
5101 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5102 
5103 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5104 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5105 		return result;
5106 
5107 	/*
5108 	 * Only run this the first time mode_valid is called to initialize
5109 	 * EDID mgmt
5110 	 */
5111 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5112 		!aconnector->dc_em_sink)
5113 		handle_edid_mgmt(aconnector);
5114 
5115 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5116 
5117 	if (dc_sink == NULL) {
5118 		DRM_ERROR("dc_sink is NULL!\n");
5119 		goto fail;
5120 	}
5121 
5122 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5123 	if (stream) {
5124 		dc_stream_release(stream);
5125 		result = MODE_OK;
5126 	}
5127 
5128 fail:
5129 	/* TODO: error handling */
5130 	return result;
5131 }
5132 
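/*
 * The packed HDMI DRM (Dynamic Range and Mastering) infoframe is a
 * 4-byte header plus 26 bytes of static metadata; the header is rebuilt
 * per transport below, since DP carries the same payload inside an SDP
 * rather than an HDMI infoframe.
 */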
5133 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5134 				struct dc_info_packet *out)
5135 {
5136 	struct hdmi_drm_infoframe frame;
5137 	unsigned char buf[30]; /* 26 + 4 */
5138 	ssize_t len;
5139 	int ret, i;
5140 
5141 	memset(out, 0, sizeof(*out));
5142 
5143 	if (!state->hdr_output_metadata)
5144 		return 0;
5145 
5146 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5147 	if (ret)
5148 		return ret;
5149 
5150 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5151 	if (len < 0)
5152 		return (int)len;
5153 
5154 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5155 	if (len != 30)
5156 		return -EINVAL;
5157 
5158 	/* Prepare the infopacket for DC. */
5159 	switch (state->connector->connector_type) {
5160 	case DRM_MODE_CONNECTOR_HDMIA:
5161 		out->hb0 = 0x87; /* type */
5162 		out->hb1 = 0x01; /* version */
5163 		out->hb2 = 0x1A; /* length */
5164 		out->sb[0] = buf[3]; /* checksum */
5165 		i = 1;
5166 		break;
5167 
5168 	case DRM_MODE_CONNECTOR_DisplayPort:
5169 	case DRM_MODE_CONNECTOR_eDP:
5170 		out->hb0 = 0x00; /* sdp id, zero */
5171 		out->hb1 = 0x87; /* type */
5172 		out->hb2 = 0x1D; /* payload len - 1 */
5173 		out->hb3 = (0x13 << 2); /* sdp version */
5174 		out->sb[0] = 0x01; /* version */
5175 		out->sb[1] = 0x1A; /* length */
5176 		i = 2;
5177 		break;
5178 
5179 	default:
5180 		return -EINVAL;
5181 	}
5182 
5183 	memcpy(&out->sb[i], &buf[4], 26);
5184 	out->valid = true;
5185 
5186 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5187 		       sizeof(out->sb), false);
5188 
5189 	return 0;
5190 }
5191 
5192 static bool
5193 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5194 			  const struct drm_connector_state *new_state)
5195 {
5196 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5197 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5198 
5199 	if (old_blob != new_blob) {
5200 		if (old_blob && new_blob &&
5201 		    old_blob->length == new_blob->length)
5202 			return memcmp(old_blob->data, new_blob->data,
5203 				      old_blob->length);
5204 
5205 		return true;
5206 	}
5207 
5208 	return false;
5209 }
5210 
5211 static int
5212 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5213 				 struct drm_atomic_state *state)
5214 {
5215 	struct drm_connector_state *new_con_state =
5216 		drm_atomic_get_new_connector_state(state, conn);
5217 	struct drm_connector_state *old_con_state =
5218 		drm_atomic_get_old_connector_state(state, conn);
5219 	struct drm_crtc *crtc = new_con_state->crtc;
5220 	struct drm_crtc_state *new_crtc_state;
5221 	int ret;
5222 
5223 	if (!crtc)
5224 		return 0;
5225 
5226 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5227 		struct dc_info_packet hdr_infopacket;
5228 
5229 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5230 		if (ret)
5231 			return ret;
5232 
5233 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5234 		if (IS_ERR(new_crtc_state))
5235 			return PTR_ERR(new_crtc_state);
5236 
5237 		/*
5238 		 * DC considers the stream backends changed if the
5239 		 * static metadata changes. Forcing the modeset also
5240 		 * gives a simple way for userspace to switch from
5241 		 * 8bpc to 10bpc when setting the metadata to enter
5242 		 * or exit HDR.
5243 		 *
5244 		 * Changing the static metadata after it's been
5245 		 * set is permissible, however. So only force a
5246 		 * modeset if we're entering or exiting HDR.
5247 		 */
5248 		new_crtc_state->mode_changed =
5249 			!old_con_state->hdr_output_metadata ||
5250 			!new_con_state->hdr_output_metadata;
5251 	}
5252 
5253 	return 0;
5254 }
5255 
5256 static const struct drm_connector_helper_funcs
5257 amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * are missing after the user starts lightdm. So we need to renew the
	 * modes list in the get_modes callback, not just return the modes count.
	 */
5264 	.get_modes = get_modes,
5265 	.mode_valid = amdgpu_dm_connector_mode_valid,
5266 	.atomic_check = amdgpu_dm_connector_atomic_check,
5267 };
5268 
5269 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5270 {
5271 }
5272 
5273 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5274 {
5275 	struct drm_device *dev = new_crtc_state->crtc->dev;
5276 	struct drm_plane *plane;
5277 
5278 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5279 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5280 			return true;
5281 	}
5282 
5283 	return false;
5284 }
5285 
5286 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5287 {
5288 	struct drm_atomic_state *state = new_crtc_state->state;
5289 	struct drm_plane *plane;
5290 	int num_active = 0;
5291 
5292 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5293 		struct drm_plane_state *new_plane_state;
5294 
5295 		/* Cursor planes are "fake". */
5296 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5297 			continue;
5298 
5299 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5300 
5301 		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
5307 			num_active += 1;
5308 			continue;
5309 		}
5310 
5311 		/* We need a framebuffer to be considered enabled. */
5312 		num_active += (new_plane_state->fb != NULL);
5313 	}
5314 
5315 	return num_active;
5316 }
5317 
5318 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5319 					 struct drm_crtc_state *new_crtc_state)
5320 {
5321 	struct dm_crtc_state *dm_new_crtc_state =
5322 		to_dm_crtc_state(new_crtc_state);
5323 
5324 	dm_new_crtc_state->active_planes = 0;
5325 
5326 	if (!dm_new_crtc_state->stream)
5327 		return;
5328 
5329 	dm_new_crtc_state->active_planes =
5330 		count_crtc_active_planes(new_crtc_state);
5331 }
5332 
5333 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5334 				       struct drm_crtc_state *state)
5335 {
5336 	struct amdgpu_device *adev = crtc->dev->dev_private;
5337 	struct dc *dc = adev->dm.dc;
5338 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5339 	int ret = -EINVAL;
5340 
5341 	dm_update_crtc_active_planes(crtc, state);
5342 
5343 	if (unlikely(!dm_crtc_state->stream &&
5344 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5345 		WARN_ON(1);
5346 		return ret;
5347 	}
5348 
5349 	/* In some use cases, like reset, no stream is attached */
5350 	if (!dm_crtc_state->stream)
5351 		return 0;
5352 
5353 	/*
5354 	 * We want at least one hardware plane enabled to use
5355 	 * the stream with a cursor enabled.
5356 	 */
5357 	if (state->enable && state->active &&
5358 	    does_crtc_have_active_cursor(state) &&
5359 	    dm_crtc_state->active_planes == 0)
5360 		return -EINVAL;
5361 
5362 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5363 		return 0;
5364 
5365 	return ret;
5366 }
5367 
5368 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5369 				      const struct drm_display_mode *mode,
5370 				      struct drm_display_mode *adjusted_mode)
5371 {
5372 	return true;
5373 }
5374 
5375 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5376 	.disable = dm_crtc_helper_disable,
5377 	.atomic_check = dm_crtc_helper_atomic_check,
5378 	.mode_fixup = dm_crtc_helper_mode_fixup,
5379 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5380 };
5381 
5382 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
5386 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
5407 
5408 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5409 					  struct drm_crtc_state *crtc_state,
5410 					  struct drm_connector_state *conn_state)
5411 {
5412 	struct drm_atomic_state *state = crtc_state->state;
5413 	struct drm_connector *connector = conn_state->connector;
5414 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5415 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5416 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5417 	struct drm_dp_mst_topology_mgr *mst_mgr;
5418 	struct drm_dp_mst_port *mst_port;
5419 	enum dc_color_depth color_depth;
5420 	int clock, bpp = 0;
5421 	bool is_y420 = false;
5422 
5423 	if (!aconnector->port || !aconnector->dc_sink)
5424 		return 0;
5425 
5426 	mst_port = aconnector->port;
5427 	mst_mgr = &aconnector->mst_port->mst_mgr;
5428 
5429 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5430 		return 0;
5431 
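	/*
	 * Compute the mode's PBN (Payload Bandwidth Number). As a rough
	 * sketch of what drm_dp_calc_pbn_mode() does:
	 * PBN ~= clock_kHz * bpp / 8 * 64 / 54 * 1.006, i.e. the mode's
	 * byte rate expressed in MST timeslot units with a small margin.
	 */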
5432 	if (!state->duplicated) {
5433 		int max_bpc = conn_state->max_requested_bpc;
5434 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5435 				aconnector->force_yuv420_output;
5436 		color_depth = convert_color_depth_from_display_info(connector,
5437 								    is_y420,
5438 								    max_bpc);
5439 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5440 		clock = adjusted_mode->clock;
5441 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5442 	}
5443 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5444 									   mst_mgr,
5445 									   mst_port,
5446 									   dm_new_connector_state->pbn,
5447 									   dm_mst_get_pbn_divider(aconnector->dc_link));
5448 	if (dm_new_connector_state->vcpi_slots < 0) {
5449 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5450 		return dm_new_connector_state->vcpi_slots;
5451 	}
5452 	return 0;
5453 }
5454 
5455 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5456 	.disable = dm_encoder_helper_disable,
5457 	.atomic_check = dm_encoder_helper_atomic_check
5458 };
5459 
5460 #if defined(CONFIG_DRM_AMD_DC_DCN)
5461 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5462 					    struct dc_state *dc_state)
5463 {
5464 	struct dc_stream_state *stream = NULL;
5465 	struct drm_connector *connector;
5466 	struct drm_connector_state *new_con_state, *old_con_state;
5467 	struct amdgpu_dm_connector *aconnector;
5468 	struct dm_connector_state *dm_conn_state;
5469 	int i, j, clock, bpp;
5470 	int vcpi, pbn_div, pbn = 0;
5471 
5472 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5473 
5474 		aconnector = to_amdgpu_dm_connector(connector);
5475 
5476 		if (!aconnector->port)
5477 			continue;
5478 
5479 		if (!new_con_state || !new_con_state->crtc)
5480 			continue;
5481 
5482 		dm_conn_state = to_dm_connector_state(new_con_state);
5483 
5484 		for (j = 0; j < dc_state->stream_count; j++) {
5485 			stream = dc_state->streams[j];
5486 			if (!stream)
5487 				continue;
5488 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5490 				break;
5491 
5492 			stream = NULL;
5493 		}
5494 
5495 		if (!stream)
5496 			continue;
5497 
5498 		if (stream->timing.flags.DSC != 1) {
5499 			drm_dp_mst_atomic_enable_dsc(state,
5500 						     aconnector->port,
5501 						     dm_conn_state->pbn,
5502 						     0,
5503 						     false);
5504 			continue;
5505 		}
5506 
5507 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5508 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5509 		clock = stream->timing.pix_clk_100hz / 10;
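		/*
		 * dsc_cfg.bits_per_pixel is in units of 1/16 of a bit per
		 * pixel; the 'true' DSC flag below tells the PBN helper to
		 * account for that scaling.
		 */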
5510 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5511 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5512 						    aconnector->port,
5513 						    pbn, pbn_div,
5514 						    true);
5515 		if (vcpi < 0)
5516 			return vcpi;
5517 
5518 		dm_conn_state->pbn = pbn;
5519 		dm_conn_state->vcpi_slots = vcpi;
5520 	}
5521 	return 0;
5522 }
5523 #endif
5524 
5525 static void dm_drm_plane_reset(struct drm_plane *plane)
5526 {
5527 	struct dm_plane_state *amdgpu_state = NULL;
5528 
5529 	if (plane->state)
5530 		plane->funcs->atomic_destroy_state(plane, plane->state);
5531 
5532 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5533 	WARN_ON(amdgpu_state == NULL);
5534 
5535 	if (amdgpu_state)
5536 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5537 }
5538 
5539 static struct drm_plane_state *
5540 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5541 {
5542 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5543 
5544 	old_dm_plane_state = to_dm_plane_state(plane->state);
5545 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5546 	if (!dm_plane_state)
5547 		return NULL;
5548 
5549 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5550 
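	/*
	 * The DC plane state is reference counted: share it with the
	 * duplicated state and take an extra reference here, which is
	 * dropped in dm_drm_plane_destroy_state() via
	 * dc_plane_state_release().
	 */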
5551 	if (old_dm_plane_state->dc_state) {
5552 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5553 		dc_plane_state_retain(dm_plane_state->dc_state);
5554 	}
5555 
5556 	return &dm_plane_state->base;
5557 }
5558 
5559 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
5560 				struct drm_plane_state *state)
5561 {
5562 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5563 
5564 	if (dm_plane_state->dc_state)
5565 		dc_plane_state_release(dm_plane_state->dc_state);
5566 
5567 	drm_atomic_helper_plane_destroy_state(plane, state);
5568 }
5569 
5570 static const struct drm_plane_funcs dm_plane_funcs = {
5571 	.update_plane	= drm_atomic_helper_update_plane,
5572 	.disable_plane	= drm_atomic_helper_disable_plane,
5573 	.destroy	= drm_primary_helper_destroy,
5574 	.reset = dm_drm_plane_reset,
5575 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5576 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5577 };
5578 
5579 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5580 				      struct drm_plane_state *new_state)
5581 {
5582 	struct amdgpu_framebuffer *afb;
5583 	struct drm_gem_object *obj;
5584 	struct amdgpu_device *adev;
5585 	struct amdgpu_bo *rbo;
5586 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5587 	struct list_head list;
5588 	struct ttm_validate_buffer tv;
5589 	struct ww_acquire_ctx ticket;
5590 	uint64_t tiling_flags;
5591 	uint32_t domain;
5592 	int r;
5593 	bool tmz_surface = false;
5594 	bool force_disable_dcc = false;
5595 
5596 	dm_plane_state_old = to_dm_plane_state(plane->state);
5597 	dm_plane_state_new = to_dm_plane_state(new_state);
5598 
5599 	if (!new_state->fb) {
5600 		DRM_DEBUG_DRIVER("No FB bound\n");
5601 		return 0;
5602 	}
5603 
5604 	afb = to_amdgpu_framebuffer(new_state->fb);
5605 	obj = new_state->fb->obj[0];
5606 	rbo = gem_to_amdgpu_bo(obj);
5607 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5608 	INIT_LIST_HEAD(&list);
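	/*
	 * Reserve the BO through a TTM validation list so the ww_mutex
	 * acquire context can resolve potential deadlocks with other
	 * reservations.
	 */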
5609 
5610 	tv.bo = &rbo->tbo;
5611 	tv.num_shared = 1;
5612 	list_add(&tv.head, &list);
5613 
5614 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5615 	if (r) {
5616 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5617 		return r;
5618 	}
5619 
5620 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5621 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5622 	else
5623 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5624 
5625 	r = amdgpu_bo_pin(rbo, domain);
5626 	if (unlikely(r != 0)) {
5627 		if (r != -ERESTARTSYS)
5628 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5629 		ttm_eu_backoff_reservation(&ticket, &list);
5630 		return r;
5631 	}
5632 
5633 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5634 	if (unlikely(r != 0)) {
5635 		amdgpu_bo_unpin(rbo);
5636 		ttm_eu_backoff_reservation(&ticket, &list);
5637 		DRM_ERROR("%p bind failed\n", rbo);
5638 		return r;
5639 	}
5640 
5641 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5642 
5643 	tmz_surface = amdgpu_bo_encrypted(rbo);
5644 
5645 	ttm_eu_backoff_reservation(&ticket, &list);
5646 
5647 	afb->address = amdgpu_bo_gpu_offset(rbo);
5648 
5649 	amdgpu_bo_ref(rbo);
5650 
5651 	if (dm_plane_state_new->dc_state &&
5652 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5653 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5654 
5655 		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5656 		fill_plane_buffer_attributes(
5657 			adev, afb, plane_state->format, plane_state->rotation,
5658 			tiling_flags, &plane_state->tiling_info,
5659 			&plane_state->plane_size, &plane_state->dcc,
5660 			&plane_state->address, tmz_surface,
5661 			force_disable_dcc);
5662 	}
5663 
5664 	return 0;
5665 }
5666 
5667 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5668 				       struct drm_plane_state *old_state)
5669 {
5670 	struct amdgpu_bo *rbo;
5671 	int r;
5672 
5673 	if (!old_state->fb)
5674 		return;
5675 
5676 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5677 	r = amdgpu_bo_reserve(rbo, false);
5678 	if (unlikely(r)) {
5679 		DRM_ERROR("failed to reserve rbo before unpin\n");
5680 		return;
5681 	}
5682 
5683 	amdgpu_bo_unpin(rbo);
5684 	amdgpu_bo_unreserve(rbo);
5685 	amdgpu_bo_unref(&rbo);
5686 }
5687 
5688 static int dm_plane_helper_check_state(struct drm_plane_state *state,
5689 				       struct drm_crtc_state *new_crtc_state)
5690 {
5691 	int max_downscale = 0;
5692 	int max_upscale = INT_MAX;
5693 
5694 	/* TODO: These should be checked against DC plane caps */
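	/*
	 * The limits passed below are 16.16 fixed-point scale factors; 0 and
	 * INT_MAX effectively leave plane scaling unconstrained.
	 */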
5695 	return drm_atomic_helper_check_plane_state(
5696 		state, new_crtc_state, max_downscale, max_upscale, true, true);
5697 }
5698 
5699 static int dm_plane_atomic_check(struct drm_plane *plane,
5700 				 struct drm_plane_state *state)
5701 {
5702 	struct amdgpu_device *adev = plane->dev->dev_private;
5703 	struct dc *dc = adev->dm.dc;
5704 	struct dm_plane_state *dm_plane_state;
5705 	struct dc_scaling_info scaling_info;
5706 	struct drm_crtc_state *new_crtc_state;
5707 	int ret;
5708 
5709 	dm_plane_state = to_dm_plane_state(state);
5710 
5711 	if (!dm_plane_state->dc_state)
5712 		return 0;
5713 
5714 	new_crtc_state =
5715 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
5716 	if (!new_crtc_state)
5717 		return -EINVAL;
5718 
5719 	ret = dm_plane_helper_check_state(state, new_crtc_state);
5720 	if (ret)
5721 		return ret;
5722 
5723 	ret = fill_dc_scaling_info(state, &scaling_info);
5724 	if (ret)
5725 		return ret;
5726 
5727 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5728 		return 0;
5729 
5730 	return -EINVAL;
5731 }
5732 
5733 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5734 				       struct drm_plane_state *new_plane_state)
5735 {
5736 	/* Only support async updates on cursor planes. */
5737 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5738 		return -EINVAL;
5739 
5740 	return 0;
5741 }
5742 
5743 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5744 					 struct drm_plane_state *new_state)
5745 {
5746 	struct drm_plane_state *old_state =
5747 		drm_atomic_get_old_plane_state(new_state->state, plane);
5748 
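	/*
	 * Async updates bypass the normal atomic state swap, so copy the new
	 * coordinates into the current state by hand before programming the
	 * cursor.
	 */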
5749 	swap(plane->state->fb, new_state->fb);
5750 
5751 	plane->state->src_x = new_state->src_x;
5752 	plane->state->src_y = new_state->src_y;
5753 	plane->state->src_w = new_state->src_w;
5754 	plane->state->src_h = new_state->src_h;
5755 	plane->state->crtc_x = new_state->crtc_x;
5756 	plane->state->crtc_y = new_state->crtc_y;
5757 	plane->state->crtc_w = new_state->crtc_w;
5758 	plane->state->crtc_h = new_state->crtc_h;
5759 
5760 	handle_cursor_update(plane, old_state);
5761 }
5762 
5763 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5764 	.prepare_fb = dm_plane_helper_prepare_fb,
5765 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5766 	.atomic_check = dm_plane_atomic_check,
5767 	.atomic_async_check = dm_plane_atomic_async_check,
5768 	.atomic_async_update = dm_plane_atomic_async_update
5769 };
5770 
5771 /*
5772  * TODO: these are currently initialized to rgb formats only.
5773  * For future use cases we should either initialize them dynamically based on
5774  * plane capabilities, or initialize this array to all formats, so internal drm
5775  * check will succeed, and let DC implement proper check
5776  */
5777 static const uint32_t rgb_formats[] = {
5778 	DRM_FORMAT_XRGB8888,
5779 	DRM_FORMAT_ARGB8888,
5780 	DRM_FORMAT_RGBA8888,
5781 	DRM_FORMAT_XRGB2101010,
5782 	DRM_FORMAT_XBGR2101010,
5783 	DRM_FORMAT_ARGB2101010,
5784 	DRM_FORMAT_ABGR2101010,
5785 	DRM_FORMAT_XBGR8888,
5786 	DRM_FORMAT_ABGR8888,
5787 	DRM_FORMAT_RGB565,
5788 };
5789 
5790 static const uint32_t overlay_formats[] = {
5791 	DRM_FORMAT_XRGB8888,
5792 	DRM_FORMAT_ARGB8888,
5793 	DRM_FORMAT_RGBA8888,
5794 	DRM_FORMAT_XBGR8888,
5795 	DRM_FORMAT_ABGR8888,
5796 	DRM_FORMAT_RGB565
5797 };
5798 
5799 static const u32 cursor_formats[] = {
5800 	DRM_FORMAT_ARGB8888
5801 };
5802 
5803 static int get_plane_formats(const struct drm_plane *plane,
5804 			     const struct dc_plane_cap *plane_cap,
5805 			     uint32_t *formats, int max_formats)
5806 {
5807 	int i, num_formats = 0;
5808 
5809 	/*
5810 	 * TODO: Query support for each group of formats directly from
5811 	 * DC plane caps. This will require adding more formats to the
5812 	 * caps list.
5813 	 */
5814 
5815 	switch (plane->type) {
5816 	case DRM_PLANE_TYPE_PRIMARY:
5817 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5818 			if (num_formats >= max_formats)
5819 				break;
5820 
5821 			formats[num_formats++] = rgb_formats[i];
5822 		}
5823 
5824 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5825 			formats[num_formats++] = DRM_FORMAT_NV12;
5826 		if (plane_cap && plane_cap->pixel_format_support.p010)
5827 			formats[num_formats++] = DRM_FORMAT_P010;
5828 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
5829 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5830 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5831 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
5832 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
5833 		}
5834 		break;
5835 
5836 	case DRM_PLANE_TYPE_OVERLAY:
5837 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5838 			if (num_formats >= max_formats)
5839 				break;
5840 
5841 			formats[num_formats++] = overlay_formats[i];
5842 		}
5843 		break;
5844 
5845 	case DRM_PLANE_TYPE_CURSOR:
5846 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5847 			if (num_formats >= max_formats)
5848 				break;
5849 
5850 			formats[num_formats++] = cursor_formats[i];
5851 		}
5852 		break;
5853 	}
5854 
5855 	return num_formats;
5856 }
5857 
5858 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5859 				struct drm_plane *plane,
5860 				unsigned long possible_crtcs,
5861 				const struct dc_plane_cap *plane_cap)
5862 {
5863 	uint32_t formats[32];
5864 	int num_formats;
5865 	int res = -EPERM;
5866 	unsigned int supported_rotations;
5867 
5868 	num_formats = get_plane_formats(plane, plane_cap, formats,
5869 					ARRAY_SIZE(formats));
5870 
5871 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5872 				       &dm_plane_funcs, formats, num_formats,
5873 				       NULL, plane->type, NULL);
5874 	if (res)
5875 		return res;
5876 
5877 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5878 	    plane_cap && plane_cap->per_pixel_alpha) {
5879 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5880 					  BIT(DRM_MODE_BLEND_PREMULTI);
5881 
5882 		drm_plane_create_alpha_property(plane);
5883 		drm_plane_create_blend_mode_property(plane, blend_caps);
5884 	}
5885 
5886 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5887 	    plane_cap &&
5888 	    (plane_cap->pixel_format_support.nv12 ||
5889 	     plane_cap->pixel_format_support.p010)) {
5890 		/* This only affects YUV formats. */
5891 		drm_plane_create_color_properties(
5892 			plane,
5893 			BIT(DRM_COLOR_YCBCR_BT601) |
5894 			BIT(DRM_COLOR_YCBCR_BT709) |
5895 			BIT(DRM_COLOR_YCBCR_BT2020),
5896 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5897 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5898 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5899 	}
5900 
5901 	supported_rotations =
5902 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
5903 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
5904 
5905 	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
5906 					   supported_rotations);
5907 
5908 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5909 
5910 	/* Create (reset) the plane state */
5911 	if (plane->funcs->reset)
5912 		plane->funcs->reset(plane);
5913 
5914 	return 0;
5915 }
5916 
5917 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5918 			       struct drm_plane *plane,
5919 			       uint32_t crtc_index)
5920 {
5921 	struct amdgpu_crtc *acrtc = NULL;
5922 	struct drm_plane *cursor_plane;
5923 
5924 	int res = -ENOMEM;
5925 
5926 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5927 	if (!cursor_plane)
5928 		goto fail;
5929 
5930 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
5932 
5933 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5934 	if (!acrtc)
5935 		goto fail;
5936 
5937 	res = drm_crtc_init_with_planes(
5938 			dm->ddev,
5939 			&acrtc->base,
5940 			plane,
5941 			cursor_plane,
5942 			&amdgpu_dm_crtc_funcs, NULL);
5943 
5944 	if (res)
5945 		goto fail;
5946 
5947 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5948 
	/* Create (reset) the crtc state */
5950 	if (acrtc->base.funcs->reset)
5951 		acrtc->base.funcs->reset(&acrtc->base);
5952 
5953 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5954 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5955 
5956 	acrtc->crtc_id = crtc_index;
5957 	acrtc->base.enabled = false;
5958 	acrtc->otg_inst = -1;
5959 
5960 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5961 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5962 				   true, MAX_COLOR_LUT_ENTRIES);
5963 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5964 
5965 	return 0;
5966 
5967 fail:
5968 	kfree(acrtc);
5969 	kfree(cursor_plane);
5970 	return res;
5971 }
5972 
5973 
5974 static int to_drm_connector_type(enum signal_type st)
5975 {
5976 	switch (st) {
5977 	case SIGNAL_TYPE_HDMI_TYPE_A:
5978 		return DRM_MODE_CONNECTOR_HDMIA;
5979 	case SIGNAL_TYPE_EDP:
5980 		return DRM_MODE_CONNECTOR_eDP;
5981 	case SIGNAL_TYPE_LVDS:
5982 		return DRM_MODE_CONNECTOR_LVDS;
5983 	case SIGNAL_TYPE_RGB:
5984 		return DRM_MODE_CONNECTOR_VGA;
5985 	case SIGNAL_TYPE_DISPLAY_PORT:
5986 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5987 		return DRM_MODE_CONNECTOR_DisplayPort;
5988 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5989 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5990 		return DRM_MODE_CONNECTOR_DVID;
5991 	case SIGNAL_TYPE_VIRTUAL:
5992 		return DRM_MODE_CONNECTOR_VIRTUAL;
5993 
5994 	default:
5995 		return DRM_MODE_CONNECTOR_Unknown;
5996 	}
5997 }
5998 
5999 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6000 {
6001 	struct drm_encoder *encoder;
6002 
6003 	/* There is only one encoder per connector */
6004 	drm_connector_for_each_possible_encoder(connector, encoder)
6005 		return encoder;
6006 
6007 	return NULL;
6008 }
6009 
6010 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6011 {
6012 	struct drm_encoder *encoder;
6013 	struct amdgpu_encoder *amdgpu_encoder;
6014 
6015 	encoder = amdgpu_dm_connector_to_encoder(connector);
6016 
6017 	if (encoder == NULL)
6018 		return;
6019 
6020 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6021 
6022 	amdgpu_encoder->native_mode.clock = 0;
6023 
6024 	if (!list_empty(&connector->probed_modes)) {
6025 		struct drm_display_mode *preferred_mode = NULL;
6026 
6027 		list_for_each_entry(preferred_mode,
6028 				    &connector->probed_modes,
6029 				    head) {
6030 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6031 				amdgpu_encoder->native_mode = *preferred_mode;
6032 
6033 			break;
6034 		}
6035 
6036 	}
6037 }
6038 
6039 static struct drm_display_mode *
6040 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6041 			     char *name,
6042 			     int hdisplay, int vdisplay)
6043 {
6044 	struct drm_device *dev = encoder->dev;
6045 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6046 	struct drm_display_mode *mode = NULL;
6047 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6048 
6049 	mode = drm_mode_duplicate(dev, native_mode);
6050 
6051 	if (mode == NULL)
6052 		return NULL;
6053 
6054 	mode->hdisplay = hdisplay;
6055 	mode->vdisplay = vdisplay;
6056 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6057 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6058 
	return mode;
}
6062 
6063 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6064 						 struct drm_connector *connector)
6065 {
6066 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6067 	struct drm_display_mode *mode = NULL;
6068 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6069 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6070 				to_amdgpu_dm_connector(connector);
6071 	int i;
6072 	int n;
6073 	struct mode_size {
6074 		char name[DRM_DISPLAY_MODE_LEN];
6075 		int w;
6076 		int h;
6077 	} common_modes[] = {
6078 		{  "640x480",  640,  480},
6079 		{  "800x600",  800,  600},
6080 		{ "1024x768", 1024,  768},
6081 		{ "1280x720", 1280,  720},
6082 		{ "1280x800", 1280,  800},
6083 		{"1280x1024", 1280, 1024},
6084 		{ "1440x900", 1440,  900},
6085 		{"1680x1050", 1680, 1050},
6086 		{"1600x1200", 1600, 1200},
6087 		{"1920x1080", 1920, 1080},
6088 		{"1920x1200", 1920, 1200}
6089 	};
6090 
6091 	n = ARRAY_SIZE(common_modes);
6092 
6093 	for (i = 0; i < n; i++) {
6094 		struct drm_display_mode *curmode = NULL;
6095 		bool mode_existed = false;
6096 
6097 		if (common_modes[i].w > native_mode->hdisplay ||
6098 		    common_modes[i].h > native_mode->vdisplay ||
6099 		   (common_modes[i].w == native_mode->hdisplay &&
6100 		    common_modes[i].h == native_mode->vdisplay))
6101 			continue;
6102 
6103 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6104 			if (common_modes[i].w == curmode->hdisplay &&
6105 			    common_modes[i].h == curmode->vdisplay) {
6106 				mode_existed = true;
6107 				break;
6108 			}
6109 		}
6110 
6111 		if (mode_existed)
6112 			continue;
6113 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
6118 		amdgpu_dm_connector->num_modes++;
6119 	}
6120 }
6121 
6122 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6123 					      struct edid *edid)
6124 {
6125 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6126 			to_amdgpu_dm_connector(connector);
6127 
6128 	if (edid) {
6129 		/* empty probed_modes */
6130 		INIT_LIST_HEAD(&connector->probed_modes);
6131 		amdgpu_dm_connector->num_modes =
6132 				drm_add_edid_modes(connector, edid);
6133 
		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode, and modes later in the probed
		 * list may be of a higher, preferred resolution. For
		 * example, 3840x2160 in the base EDID preferred timing and
		 * 4096x2160 preferred in a DID extension block later.
		 */
6142 		drm_mode_sort(&connector->probed_modes);
6143 		amdgpu_dm_get_native_mode(connector);
6144 	} else {
6145 		amdgpu_dm_connector->num_modes = 0;
6146 	}
6147 }
6148 
6149 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6150 {
6151 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6152 			to_amdgpu_dm_connector(connector);
6153 	struct drm_encoder *encoder;
6154 	struct edid *edid = amdgpu_dm_connector->edid;
6155 
6156 	encoder = amdgpu_dm_connector_to_encoder(connector);
6157 
6158 	if (!edid || !drm_edid_is_valid(edid)) {
6159 		amdgpu_dm_connector->num_modes =
6160 				drm_add_modes_noedid(connector, 640, 480);
6161 	} else {
6162 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6163 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6164 	}
6165 	amdgpu_dm_fbc_init(connector);
6166 
6167 	return amdgpu_dm_connector->num_modes;
6168 }
6169 
6170 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6171 				     struct amdgpu_dm_connector *aconnector,
6172 				     int connector_type,
6173 				     struct dc_link *link,
6174 				     int link_index)
6175 {
6176 	struct amdgpu_device *adev = dm->ddev->dev_private;
6177 
6178 	/*
6179 	 * Some of the properties below require access to state, like bpc.
6180 	 * Allocate some default initial connector state with our reset helper.
6181 	 */
6182 	if (aconnector->base.funcs->reset)
6183 		aconnector->base.funcs->reset(&aconnector->base);
6184 
6185 	aconnector->connector_id = link_index;
6186 	aconnector->dc_link = link;
6187 	aconnector->base.interlace_allowed = false;
6188 	aconnector->base.doublescan_allowed = false;
6189 	aconnector->base.stereo_allowed = false;
6190 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6191 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6192 	aconnector->audio_inst = -1;
6193 	mutex_init(&aconnector->hpd_lock);
6194 
	/*
	 * Configure HPD hot-plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
6199 	switch (connector_type) {
6200 	case DRM_MODE_CONNECTOR_HDMIA:
6201 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6202 		aconnector->base.ycbcr_420_allowed =
6203 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6204 		break;
6205 	case DRM_MODE_CONNECTOR_DisplayPort:
6206 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6207 		aconnector->base.ycbcr_420_allowed =
6208 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
6209 		break;
6210 	case DRM_MODE_CONNECTOR_DVID:
6211 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6212 		break;
6213 	default:
6214 		break;
6215 	}
6216 
6217 	drm_object_attach_property(&aconnector->base.base,
6218 				dm->ddev->mode_config.scaling_mode_property,
6219 				DRM_MODE_SCALE_NONE);
6220 
6221 	drm_object_attach_property(&aconnector->base.base,
6222 				adev->mode_info.underscan_property,
6223 				UNDERSCAN_OFF);
6224 	drm_object_attach_property(&aconnector->base.base,
6225 				adev->mode_info.underscan_hborder_property,
6226 				0);
6227 	drm_object_attach_property(&aconnector->base.base,
6228 				adev->mode_info.underscan_vborder_property,
6229 				0);
6230 
6231 	if (!aconnector->mst_port)
6232 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6233 
6234 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6235 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6236 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6237 
6238 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6239 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6240 		drm_object_attach_property(&aconnector->base.base,
6241 				adev->mode_info.abm_level_property, 0);
6242 	}
6243 
6244 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6245 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6246 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6247 		drm_object_attach_property(
6248 			&aconnector->base.base,
6249 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6250 
6251 		if (!aconnector->mst_port)
6252 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6253 
6254 #ifdef CONFIG_DRM_AMD_DC_HDCP
6255 		if (adev->dm.hdcp_workqueue)
6256 			drm_connector_attach_content_protection_property(&aconnector->base, true);
6257 #endif
6258 	}
6259 }
6260 
6261 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6262 			      struct i2c_msg *msgs, int num)
6263 {
6264 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6265 	struct ddc_service *ddc_service = i2c->ddc_service;
6266 	struct i2c_command cmd;
6267 	int i;
6268 	int result = -EIO;
6269 
6270 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6271 
6272 	if (!cmd.payloads)
6273 		return result;
6274 
6275 	cmd.number_of_payloads = num;
6276 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6277 	cmd.speed = 100;
6278 
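	/*
	 * Translate each struct i2c_msg 1:1 into a DC i2c_payload; the
	 * I2C_M_RD flag selects the transfer direction.
	 */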
6279 	for (i = 0; i < num; i++) {
6280 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6281 		cmd.payloads[i].address = msgs[i].addr;
6282 		cmd.payloads[i].length = msgs[i].len;
6283 		cmd.payloads[i].data = msgs[i].buf;
6284 	}
6285 
6286 	if (dc_submit_i2c(
6287 			ddc_service->ctx->dc,
6288 			ddc_service->ddc_pin->hw_info.ddc_channel,
6289 			&cmd))
6290 		result = num;
6291 
6292 	kfree(cmd.payloads);
6293 	return result;
6294 }
6295 
6296 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6297 {
6298 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6299 }
6300 
6301 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6302 	.master_xfer = amdgpu_dm_i2c_xfer,
6303 	.functionality = amdgpu_dm_i2c_func,
6304 };
6305 
6306 static struct amdgpu_i2c_adapter *
6307 create_i2c(struct ddc_service *ddc_service,
6308 	   int link_index,
6309 	   int *res)
6310 {
6311 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6312 	struct amdgpu_i2c_adapter *i2c;
6313 
6314 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6315 	if (!i2c)
6316 		return NULL;
6317 	i2c->base.owner = THIS_MODULE;
6318 	i2c->base.class = I2C_CLASS_DDC;
6319 	i2c->base.dev.parent = &adev->pdev->dev;
6320 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6321 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6322 	i2c_set_adapdata(&i2c->base, i2c);
6323 	i2c->ddc_service = ddc_service;
6324 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6325 
6326 	return i2c;
6327 }
6328 
6329 
6330 /*
6331  * Note: this function assumes that dc_link_detect() was called for the
6332  * dc_link which will be represented by this aconnector.
6333  */
6334 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6335 				    struct amdgpu_dm_connector *aconnector,
6336 				    uint32_t link_index,
6337 				    struct amdgpu_encoder *aencoder)
6338 {
6339 	int res = 0;
6340 	int connector_type;
6341 	struct dc *dc = dm->dc;
6342 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6343 	struct amdgpu_i2c_adapter *i2c;
6344 
6345 	link->priv = aconnector;
6346 
6347 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6348 
6349 	i2c = create_i2c(link->ddc, link->link_index, &res);
6350 	if (!i2c) {
6351 		DRM_ERROR("Failed to create i2c adapter data\n");
6352 		return -ENOMEM;
6353 	}
6354 
6355 	aconnector->i2c = i2c;
6356 	res = i2c_add_adapter(&i2c->base);
6357 
6358 	if (res) {
6359 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6360 		goto out_free;
6361 	}
6362 
6363 	connector_type = to_drm_connector_type(link->connector_signal);
6364 
6365 	res = drm_connector_init_with_ddc(
6366 			dm->ddev,
6367 			&aconnector->base,
6368 			&amdgpu_dm_connector_funcs,
6369 			connector_type,
6370 			&i2c->base);
6371 
6372 	if (res) {
6373 		DRM_ERROR("connector_init failed\n");
6374 		aconnector->connector_id = -1;
6375 		goto out_free;
6376 	}
6377 
6378 	drm_connector_helper_add(
6379 			&aconnector->base,
6380 			&amdgpu_dm_connector_helper_funcs);
6381 
6382 	amdgpu_dm_connector_init_helper(
6383 		dm,
6384 		aconnector,
6385 		connector_type,
6386 		link,
6387 		link_index);
6388 
6389 	drm_connector_attach_encoder(
6390 		&aconnector->base, &aencoder->base);
6391 
6392 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6393 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6394 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6395 
6396 out_free:
6397 	if (res) {
6398 		kfree(i2c);
6399 		aconnector->i2c = NULL;
6400 	}
6401 	return res;
6402 }
6403 
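/*
 * Build the possible_crtcs bitmask for encoders: one bit per CRTC, e.g. a
 * 4-CRTC ASIC yields 0xf, capped at 6 CRTCs (0x3f).
 */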
6404 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6405 {
6406 	switch (adev->mode_info.num_crtc) {
6407 	case 1:
6408 		return 0x1;
6409 	case 2:
6410 		return 0x3;
6411 	case 3:
6412 		return 0x7;
6413 	case 4:
6414 		return 0xf;
6415 	case 5:
6416 		return 0x1f;
6417 	case 6:
6418 	default:
6419 		return 0x3f;
6420 	}
6421 }
6422 
6423 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6424 				  struct amdgpu_encoder *aencoder,
6425 				  uint32_t link_index)
6426 {
6427 	struct amdgpu_device *adev = dev->dev_private;
6428 
6429 	int res = drm_encoder_init(dev,
6430 				   &aencoder->base,
6431 				   &amdgpu_dm_encoder_funcs,
6432 				   DRM_MODE_ENCODER_TMDS,
6433 				   NULL);
6434 
6435 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6436 
6437 	if (!res)
6438 		aencoder->encoder_id = link_index;
6439 	else
6440 		aencoder->encoder_id = -1;
6441 
6442 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6443 
6444 	return res;
6445 }
6446 
6447 static void manage_dm_interrupts(struct amdgpu_device *adev,
6448 				 struct amdgpu_crtc *acrtc,
6449 				 bool enable)
6450 {
6451 	/*
6452 	 * We have no guarantee that the frontend index maps to the same
6453 	 * backend index - some even map to more than one.
6454 	 *
6455 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6456 	 */
6457 	int irq_type =
6458 		amdgpu_display_crtc_idx_to_irq_type(
6459 			adev,
6460 			acrtc->crtc_id);
6461 
6462 	if (enable) {
6463 		drm_crtc_vblank_on(&acrtc->base);
6464 		amdgpu_irq_get(
6465 			adev,
6466 			&adev->pageflip_irq,
6467 			irq_type);
6468 	} else {
6469 
6470 		amdgpu_irq_put(
6471 			adev,
6472 			&adev->pageflip_irq,
6473 			irq_type);
6474 		drm_crtc_vblank_off(&acrtc->base);
6475 	}
6476 }
6477 
6478 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6479 				      struct amdgpu_crtc *acrtc)
6480 {
6481 	int irq_type =
6482 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6483 
	/*
	 * This reads the current state for the IRQ and forcibly reapplies
	 * the setting to hardware.
	 */
6488 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6489 }
6490 
6491 static bool
6492 is_scaling_state_different(const struct dm_connector_state *dm_state,
6493 			   const struct dm_connector_state *old_dm_state)
6494 {
6495 	if (dm_state->scaling != old_dm_state->scaling)
6496 		return true;
6497 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6498 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6499 			return true;
6500 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6501 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6502 			return true;
6503 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6504 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6505 		return true;
6506 	return false;
6507 }
6508 
6509 #ifdef CONFIG_DRM_AMD_DC_HDCP
6510 static bool is_content_protection_different(struct drm_connector_state *state,
6511 					    const struct drm_connector_state *old_state,
6512 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6513 {
6514 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6515 
6516 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6517 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6518 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6519 		return true;
6520 	}
6521 
	/* CP is being re-enabled; ignore this. */
6523 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6524 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6525 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6526 		return false;
6527 	}
6528 
	/*
	 * S3 resume case: the old state will always be 0 (UNDESIRED) and
	 * the restored state will be ENABLED.
	 */
6530 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6531 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6532 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6533 
	/*
	 * Check if something is connected and enabled; otherwise we would
	 * start HDCP with nothing connected/enabled (hot-plug, headless S3,
	 * DPMS).
	 */
6537 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6538 	    aconnector->dc_sink != NULL)
6539 		return true;
6540 
6541 	if (old_state->content_protection == state->content_protection)
6542 		return false;
6543 
6544 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6545 		return true;
6546 
6547 	return false;
6548 }
6549 
6550 #endif
6551 static void remove_stream(struct amdgpu_device *adev,
6552 			  struct amdgpu_crtc *acrtc,
6553 			  struct dc_stream_state *stream)
6554 {
6555 	/* this is the update mode case */
6556 
6557 	acrtc->otg_inst = -1;
6558 	acrtc->enabled = false;
6559 }
6560 
6561 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6562 			       struct dc_cursor_position *position)
6563 {
6564 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6565 	int x, y;
6566 	int xorigin = 0, yorigin = 0;
6567 
6568 	position->enable = false;
6569 	position->x = 0;
6570 	position->y = 0;
6571 
6572 	if (!crtc || !plane->state->fb)
6573 		return 0;
6574 
6575 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6576 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6577 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6578 			  __func__,
6579 			  plane->state->crtc_w,
6580 			  plane->state->crtc_h);
6581 		return -EINVAL;
6582 	}
6583 
6584 	x = plane->state->crtc_x;
6585 	y = plane->state->crtc_y;
6586 
6587 	if (x <= -amdgpu_crtc->max_cursor_width ||
6588 	    y <= -amdgpu_crtc->max_cursor_height)
6589 		return 0;
6590 
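	/*
	 * Clamp negative positions to zero and compensate through the
	 * hotspot; e.g. a cursor at x = -10 becomes x = 0 with
	 * x_hotspot = 10 so the visible portion still lines up.
	 */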
6591 	if (x < 0) {
6592 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6593 		x = 0;
6594 	}
6595 	if (y < 0) {
6596 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6597 		y = 0;
6598 	}
6599 	position->enable = true;
6600 	position->translate_by_source = true;
6601 	position->x = x;
6602 	position->y = y;
6603 	position->x_hotspot = xorigin;
6604 	position->y_hotspot = yorigin;
6605 
6606 	return 0;
6607 }
6608 
6609 static void handle_cursor_update(struct drm_plane *plane,
6610 				 struct drm_plane_state *old_plane_state)
6611 {
6612 	struct amdgpu_device *adev = plane->dev->dev_private;
6613 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6614 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6615 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6616 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6617 	uint64_t address = afb ? afb->address : 0;
6618 	struct dc_cursor_position position;
6619 	struct dc_cursor_attributes attributes;
6620 	int ret;
6621 
6622 	if (!plane->state->fb && !old_plane_state->fb)
6623 		return;
6624 
6625 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6626 			 __func__,
6627 			 amdgpu_crtc->crtc_id,
6628 			 plane->state->crtc_w,
6629 			 plane->state->crtc_h);
6630 
6631 	ret = get_cursor_position(plane, crtc, &position);
6632 	if (ret)
6633 		return;
6634 
6635 	if (!position.enable) {
6636 		/* turn off cursor */
6637 		if (crtc_state && crtc_state->stream) {
6638 			mutex_lock(&adev->dm.dc_lock);
6639 			dc_stream_set_cursor_position(crtc_state->stream,
6640 						      &position);
6641 			mutex_unlock(&adev->dm.dc_lock);
6642 		}
6643 		return;
6644 	}
6645 
6646 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6647 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6648 
6649 	memset(&attributes, 0, sizeof(attributes));
6650 	attributes.address.high_part = upper_32_bits(address);
6651 	attributes.address.low_part  = lower_32_bits(address);
6652 	attributes.width             = plane->state->crtc_w;
6653 	attributes.height            = plane->state->crtc_h;
6654 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6655 	attributes.rotation_angle    = 0;
6656 	attributes.attribute_flags.value = 0;
6657 
6658 	attributes.pitch = attributes.width;
6659 
6660 	if (crtc_state->stream) {
6661 		mutex_lock(&adev->dm.dc_lock);
6662 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6663 							 &attributes))
6664 			DRM_ERROR("DC failed to set cursor attributes\n");
6665 
6666 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6667 						   &position))
6668 			DRM_ERROR("DC failed to set cursor position\n");
6669 		mutex_unlock(&adev->dm.dc_lock);
6670 	}
6671 }
6672 
6673 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
6677 	WARN_ON(acrtc->event);
6678 
6679 	acrtc->event = acrtc->base.state->event;
6680 
6681 	/* Set the flip status */
6682 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6683 
6684 	/* Mark this event as consumed */
6685 	acrtc->base.state->event = NULL;
6686 
6687 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6688 						 acrtc->crtc_id);
6689 }
6690 
6691 static void update_freesync_state_on_stream(
6692 	struct amdgpu_display_manager *dm,
6693 	struct dm_crtc_state *new_crtc_state,
6694 	struct dc_stream_state *new_stream,
6695 	struct dc_plane_state *surface,
6696 	u32 flip_timestamp_in_us)
6697 {
6698 	struct mod_vrr_params vrr_params;
6699 	struct dc_info_packet vrr_infopacket = {0};
6700 	struct amdgpu_device *adev = dm->adev;
6701 	unsigned long flags;
6702 
6703 	if (!new_stream)
6704 		return;
6705 
6706 	/*
6707 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6708 	 * For now it's sufficient to just guard against these conditions.
6709 	 */
6710 
6711 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6712 		return;
6713 
6714 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6715 	vrr_params = new_crtc_state->vrr_params;
6716 
6717 	if (surface) {
6718 		mod_freesync_handle_preflip(
6719 			dm->freesync_module,
6720 			surface,
6721 			new_stream,
6722 			flip_timestamp_in_us,
6723 			&vrr_params);
6724 
6725 		if (adev->family < AMDGPU_FAMILY_AI &&
6726 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6727 			mod_freesync_handle_v_update(dm->freesync_module,
6728 						     new_stream, &vrr_params);
6729 
6730 			/* Need to call this before the frame ends. */
6731 			dc_stream_adjust_vmin_vmax(dm->dc,
6732 						   new_crtc_state->stream,
6733 						   &vrr_params.adjust);
6734 		}
6735 	}
6736 
6737 	mod_freesync_build_vrr_infopacket(
6738 		dm->freesync_module,
6739 		new_stream,
6740 		&vrr_params,
6741 		PACKET_TYPE_VRR,
6742 		TRANSFER_FUNC_UNKNOWN,
6743 		&vrr_infopacket);
6744 
6745 	new_crtc_state->freesync_timing_changed |=
6746 		(memcmp(&new_crtc_state->vrr_params.adjust,
6747 			&vrr_params.adjust,
6748 			sizeof(vrr_params.adjust)) != 0);
6749 
6750 	new_crtc_state->freesync_vrr_info_changed |=
6751 		(memcmp(&new_crtc_state->vrr_infopacket,
6752 			&vrr_infopacket,
6753 			sizeof(vrr_infopacket)) != 0);
6754 
6755 	new_crtc_state->vrr_params = vrr_params;
6756 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6757 
6758 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6759 	new_stream->vrr_infopacket = vrr_infopacket;
6760 
6761 	if (new_crtc_state->freesync_vrr_info_changed)
6762 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6763 			      new_crtc_state->base.crtc->base.id,
6764 			      (int)new_crtc_state->base.vrr_enabled,
6765 			      (int)vrr_params.state);
6766 
6767 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6768 }
6769 
6770 static void pre_update_freesync_state_on_stream(
6771 	struct amdgpu_display_manager *dm,
6772 	struct dm_crtc_state *new_crtc_state)
6773 {
6774 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6775 	struct mod_vrr_params vrr_params;
6776 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6777 	struct amdgpu_device *adev = dm->adev;
6778 	unsigned long flags;
6779 
6780 	if (!new_stream)
6781 		return;
6782 
6783 	/*
6784 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6785 	 * For now it's sufficient to just guard against these conditions.
6786 	 */
6787 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6788 		return;
6789 
6790 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6791 	vrr_params = new_crtc_state->vrr_params;
6792 
6793 	if (new_crtc_state->vrr_supported &&
6794 	    config.min_refresh_in_uhz &&
6795 	    config.max_refresh_in_uhz) {
6796 		config.state = new_crtc_state->base.vrr_enabled ?
6797 			VRR_STATE_ACTIVE_VARIABLE :
6798 			VRR_STATE_INACTIVE;
6799 	} else {
6800 		config.state = VRR_STATE_UNSUPPORTED;
6801 	}
6802 
6803 	mod_freesync_build_vrr_params(dm->freesync_module,
6804 				      new_stream,
6805 				      &config, &vrr_params);
6806 
6807 	new_crtc_state->freesync_timing_changed |=
6808 		(memcmp(&new_crtc_state->vrr_params.adjust,
6809 			&vrr_params.adjust,
6810 			sizeof(vrr_params.adjust)) != 0);
6811 
6812 	new_crtc_state->vrr_params = vrr_params;
6813 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6814 }
6815 
6816 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6817 					    struct dm_crtc_state *new_state)
6818 {
6819 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6820 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6821 
	if (!old_vrr_active && new_vrr_active) {
		/*
		 * Transition VRR inactive -> active:
		 * While VRR is active, we must not disable vblank irq, as a
		 * reenable after disable would compute bogus vblank/pflip
		 * timestamps if it happened inside the display front porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at end of vblank.
		 */
6831 		dm_set_vupdate_irq(new_state->base.crtc, true);
6832 		drm_crtc_vblank_get(new_state->base.crtc);
6833 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6834 				 __func__, new_state->base.crtc->base.id);
6835 	} else if (old_vrr_active && !new_vrr_active) {
6836 		/* Transition VRR active -> inactive:
6837 		 * Allow vblank irq disable again for fixed refresh rate.
6838 		 */
6839 		dm_set_vupdate_irq(new_state->base.crtc, false);
6840 		drm_crtc_vblank_put(new_state->base.crtc);
6841 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6842 				 __func__, new_state->base.crtc->base.id);
6843 	}
6844 }
6845 
6846 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6847 {
6848 	struct drm_plane *plane;
6849 	struct drm_plane_state *old_plane_state, *new_plane_state;
6850 	int i;
6851 
6852 	/*
6853 	 * TODO: Make this per-stream so we don't issue redundant updates for
6854 	 * commits with multiple streams.
6855 	 */
6856 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6857 				       new_plane_state, i)
6858 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6859 			handle_cursor_update(plane, old_plane_state);
6860 }
6861 
6862 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6863 				    struct dc_state *dc_state,
6864 				    struct drm_device *dev,
6865 				    struct amdgpu_display_manager *dm,
6866 				    struct drm_crtc *pcrtc,
6867 				    bool wait_for_vblank)
6868 {
6869 	uint32_t i;
6870 	uint64_t timestamp_ns;
6871 	struct drm_plane *plane;
6872 	struct drm_plane_state *old_plane_state, *new_plane_state;
6873 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6874 	struct drm_crtc_state *new_pcrtc_state =
6875 			drm_atomic_get_new_crtc_state(state, pcrtc);
6876 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6877 	struct dm_crtc_state *dm_old_crtc_state =
6878 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6879 	int planes_count = 0, vpos, hpos;
6880 	long r;
6881 	unsigned long flags;
6882 	struct amdgpu_bo *abo;
6883 	uint64_t tiling_flags;
6884 	bool tmz_surface = false;
6885 	uint32_t target_vblank, last_flip_vblank;
6886 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6887 	bool pflip_present = false;
6888 	struct {
6889 		struct dc_surface_update surface_updates[MAX_SURFACES];
6890 		struct dc_plane_info plane_infos[MAX_SURFACES];
6891 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6892 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6893 		struct dc_stream_update stream_update;
6894 	} *bundle;
6895 
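	/*
	 * The update bundle embeds several MAX_SURFACES-sized arrays and is
	 * too large for the stack, so allocate it on the heap.
	 */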
6896 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6897 
6898 	if (!bundle) {
6899 		dm_error("Failed to allocate update bundle\n");
6900 		goto cleanup;
6901 	}
6902 
6903 	/*
6904 	 * Disable the cursor first if we're disabling all the planes.
6905 	 * It'll remain on the screen after the planes are re-enabled
6906 	 * if we don't.
6907 	 */
6908 	if (acrtc_state->active_planes == 0)
6909 		amdgpu_dm_commit_cursors(state);
6910 
6911 	/* update planes when needed */
6912 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6913 		struct drm_crtc *crtc = new_plane_state->crtc;
6914 		struct drm_crtc_state *new_crtc_state;
6915 		struct drm_framebuffer *fb = new_plane_state->fb;
6916 		bool plane_needs_flip;
6917 		struct dc_plane_state *dc_plane;
6918 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6919 
6920 		/* Cursor plane is handled after stream updates */
6921 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6922 			continue;
6923 
6924 		if (!fb || !crtc || pcrtc != crtc)
6925 			continue;
6926 
6927 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6928 		if (!new_crtc_state->active)
6929 			continue;
6930 
6931 		dc_plane = dm_new_plane_state->dc_state;
6932 
6933 		bundle->surface_updates[planes_count].surface = dc_plane;
6934 		if (new_pcrtc_state->color_mgmt_changed) {
6935 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6936 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6937 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6938 		}
6939 
6940 		fill_dc_scaling_info(new_plane_state,
6941 				     &bundle->scaling_infos[planes_count]);
6942 
6943 		bundle->surface_updates[planes_count].scaling_info =
6944 			&bundle->scaling_infos[planes_count];
6945 
6946 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6947 
6948 		pflip_present = pflip_present || plane_needs_flip;
6949 
6950 		if (!plane_needs_flip) {
6951 			planes_count += 1;
6952 			continue;
6953 		}
6954 
6955 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6956 
6957 		/*
6958 		 * Wait for all fences on this FB. Do limited wait to avoid
6959 		 * deadlock during GPU reset when this fence will not signal
6960 		 * but we hold reservation lock for the BO.
6961 		 */
6962 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6963 							false,
6964 							msecs_to_jiffies(5000));
6965 		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
6967 
		/*
		 * TODO: This might fail and hence better not be used; wait
		 * explicitly on fences instead, and in general this should be
		 * called from a blocking commit, as per the framework helpers.
		 */
6974 		r = amdgpu_bo_reserve(abo, true);
6975 		if (unlikely(r != 0))
6976 			DRM_ERROR("failed to reserve buffer before flip\n");
6977 
6978 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6979 
6980 		tmz_surface = amdgpu_bo_encrypted(abo);
6981 
6982 		amdgpu_bo_unreserve(abo);
6983 
6984 		fill_dc_plane_info_and_addr(
6985 			dm->adev, new_plane_state, tiling_flags,
6986 			&bundle->plane_infos[planes_count],
6987 			&bundle->flip_addrs[planes_count].address,
6988 			tmz_surface,
6989 			false);
6990 
6991 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6992 				 new_plane_state->plane->index,
6993 				 bundle->plane_infos[planes_count].dcc.enable);
6994 
6995 		bundle->surface_updates[planes_count].plane_info =
6996 			&bundle->plane_infos[planes_count];
6997 
		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
		 */
7002 		bundle->flip_addrs[planes_count].flip_immediate =
7003 			crtc->state->async_flip &&
7004 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7005 
7006 		timestamp_ns = ktime_get_ns();
7007 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7008 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7009 		bundle->surface_updates[planes_count].surface = dc_plane;
7010 
7011 		if (!bundle->surface_updates[planes_count].surface) {
7012 			DRM_ERROR("No surface for CRTC: id=%d\n",
7013 					acrtc_attach->crtc_id);
7014 			continue;
7015 		}
7016 
7017 		if (plane == pcrtc->primary)
7018 			update_freesync_state_on_stream(
7019 				dm,
7020 				acrtc_state,
7021 				acrtc_state->stream,
7022 				dc_plane,
7023 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7024 
7025 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7026 				 __func__,
7027 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7028 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7029 
7030 		planes_count += 1;
7031 
7032 	}
7033 
7034 	if (pflip_present) {
7035 		if (!vrr_active) {
7036 			/* Use old throttling in non-vrr fixed refresh rate mode
7037 			 * to keep flip scheduling based on target vblank counts
7038 			 * working in a backwards compatible way, e.g., for
7039 			 * clients using the GLX_OML_sync_control extension or
7040 			 * DRI3/Present extension with defined target_msc.
7041 			 */
7042 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
7045 			/* For variable refresh rate mode only:
7046 			 * Get vblank of last completed flip to avoid > 1 vrr
7047 			 * flips per video frame by use of throttling, but allow
7048 			 * flip programming anywhere in the possibly large
7049 			 * variable vrr vblank interval for fine-grained flip
7050 			 * timing control and more opportunity to avoid stutter
7051 			 * on late submission of flips.
7052 			 */
7053 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7054 			last_flip_vblank = acrtc_attach->last_flip_vblank;
7055 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7056 		}
7057 
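		/*
		 * wait_for_vblank is a bool used as 0/1 here: the flip either
		 * targets the vblank of the last completed flip or the one
		 * right after it.
		 */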
7058 		target_vblank = last_flip_vblank + wait_for_vblank;
7059 
7060 		/*
7061 		 * Wait until we're out of the vertical blank period before the one
7062 		 * targeted by the flip
7063 		 */
7064 		while ((acrtc_attach->enabled &&
7065 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7066 							    0, &vpos, &hpos, NULL,
7067 							    NULL, &pcrtc->hwmode)
7068 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7069 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7070 			(int)(target_vblank -
7071 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7072 			usleep_range(1000, 1100);
7073 		}
7074 
		/*
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
7083 		if (acrtc_attach->base.state->event &&
7084 		    acrtc_state->active_planes > 0) {
7085 			drm_crtc_vblank_get(pcrtc);
7086 
7087 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7088 
7089 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7090 			prepare_flip_isr(acrtc_attach);
7091 
7092 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7093 		}
7094 
7095 		if (acrtc_state->stream) {
7096 			if (acrtc_state->freesync_vrr_info_changed)
7097 				bundle->stream_update.vrr_infopacket =
7098 					&acrtc_state->stream->vrr_infopacket;
7099 		}
7100 	}
7101 
7102 	/* Update the planes if changed or disable if we don't have any. */
7103 	if ((planes_count || acrtc_state->active_planes == 0) &&
7104 		acrtc_state->stream) {
7105 		bundle->stream_update.stream = acrtc_state->stream;
7106 		if (new_pcrtc_state->mode_changed) {
7107 			bundle->stream_update.src = acrtc_state->stream->src;
7108 			bundle->stream_update.dst = acrtc_state->stream->dst;
7109 		}
7110 
7111 		if (new_pcrtc_state->color_mgmt_changed) {
7112 			/*
7113 			 * TODO: This isn't fully correct since we've actually
7114 			 * already modified the stream in place.
7115 			 */
7116 			bundle->stream_update.gamut_remap =
7117 				&acrtc_state->stream->gamut_remap_matrix;
7118 			bundle->stream_update.output_csc_transform =
7119 				&acrtc_state->stream->csc_color_matrix;
7120 			bundle->stream_update.out_transfer_func =
7121 				acrtc_state->stream->out_transfer_func;
7122 		}
7123 
7124 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7125 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7126 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7127 
7128 		/*
7129 		 * If FreeSync state on the stream has changed then we need to
7130 		 * re-adjust the min/max bounds now that DC doesn't handle this
7131 		 * as part of commit.
7132 		 */
7133 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7134 		    amdgpu_dm_vrr_active(acrtc_state)) {
7135 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7136 			dc_stream_adjust_vmin_vmax(
7137 				dm->dc, acrtc_state->stream,
7138 				&acrtc_state->vrr_params.adjust);
7139 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7140 		}
7141 		mutex_lock(&dm->dc_lock);
7142 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7143 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7144 			amdgpu_dm_psr_disable(acrtc_state->stream);
7145 
7146 		dc_commit_updates_for_stream(dm->dc,
7147 						     bundle->surface_updates,
7148 						     planes_count,
7149 						     acrtc_state->stream,
7150 						     &bundle->stream_update,
7151 						     dc_state);
7152 
		/*
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * While a pipe is power gated we lose its interrupt
		 * enablement state, so it is not preserved once the gating
		 * is lifted.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
7166 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7167 			dm_update_pflip_irq_state(
7168 				(struct amdgpu_device *)dev->dev_private,
7169 				acrtc_attach);
7170 
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
				!acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_enable(acrtc_state->stream);
7180 
7181 		mutex_unlock(&dm->dc_lock);
7182 	}
7183 
	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're about
	 * to disable a single plane - the affected pipes are being torn
	 * down anyway.
	 */
7189 	if (acrtc_state->active_planes)
7190 		amdgpu_dm_commit_cursors(state);
7191 
7192 cleanup:
7193 	kfree(bundle);
7194 }
7195 
7196 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7197 				   struct drm_atomic_state *state)
7198 {
7199 	struct amdgpu_device *adev = dev->dev_private;
7200 	struct amdgpu_dm_connector *aconnector;
7201 	struct drm_connector *connector;
7202 	struct drm_connector_state *old_con_state, *new_con_state;
7203 	struct drm_crtc_state *new_crtc_state;
7204 	struct dm_crtc_state *new_dm_crtc_state;
7205 	const struct dc_stream_status *status;
7206 	int i, inst;
7207 
7208 	/* Notify device removals. */
7209 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7210 		if (old_con_state->crtc != new_con_state->crtc) {
7211 			/* CRTC changes require notification. */
7212 			goto notify;
7213 		}
7214 
7215 		if (!new_con_state->crtc)
7216 			continue;
7217 
7218 		new_crtc_state = drm_atomic_get_new_crtc_state(
7219 			state, new_con_state->crtc);
7220 
7221 		if (!new_crtc_state)
7222 			continue;
7223 
7224 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7225 			continue;
7226 
7227 	notify:
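		/*
		 * Reached directly or via the goto above: this connector lost
		 * or changed its CRTC, so drop its audio instance and tell
		 * the audio component the device went away.
		 */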
7228 		aconnector = to_amdgpu_dm_connector(connector);
7229 
7230 		mutex_lock(&adev->dm.audio_lock);
7231 		inst = aconnector->audio_inst;
7232 		aconnector->audio_inst = -1;
7233 		mutex_unlock(&adev->dm.audio_lock);
7234 
7235 		amdgpu_dm_audio_eld_notify(adev, inst);
7236 	}
7237 
7238 	/* Notify audio device additions. */
7239 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7240 		if (!new_con_state->crtc)
7241 			continue;
7242 
7243 		new_crtc_state = drm_atomic_get_new_crtc_state(
7244 			state, new_con_state->crtc);
7245 
7246 		if (!new_crtc_state)
7247 			continue;
7248 
7249 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7250 			continue;
7251 
7252 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7253 		if (!new_dm_crtc_state->stream)
7254 			continue;
7255 
7256 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7257 		if (!status)
7258 			continue;
7259 
7260 		aconnector = to_amdgpu_dm_connector(connector);
7261 
7262 		mutex_lock(&adev->dm.audio_lock);
7263 		inst = status->audio_inst;
7264 		aconnector->audio_inst = inst;
7265 		mutex_unlock(&adev->dm.audio_lock);
7266 
7267 		amdgpu_dm_audio_eld_notify(adev, inst);
7268 	}
7269 }
7270 
/**
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
7279 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7280 						struct dc_stream_state *stream_state)
7281 {
7282 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7283 }
7284 
7285 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7286 				   struct drm_atomic_state *state,
7287 				   bool nonblock)
7288 {
7289 	struct drm_crtc *crtc;
7290 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7291 	struct amdgpu_device *adev = dev->dev_private;
7292 	int i;
7293 
	/*
	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
	 * a modeset, being disabled, or have no active planes.
	 *
	 * It's done in atomic commit rather than commit tail for now since
	 * some of these interrupt handlers access the current CRTC state and
	 * potentially the stream pointer itself.
	 *
	 * Since the atomic state is swapped within atomic commit and not within
	 * commit tail, this would lead to the new state (that hasn't been
	 * committed yet) being accessed from within the handlers.
	 *
	 * TODO: Fix this so we can do this in commit tail and not have to block
	 * in atomic check.
	 */
7309 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7310 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7311 
7312 		if (old_crtc_state->active &&
7313 		    (!new_crtc_state->active ||
7314 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7315 			manage_dm_interrupts(adev, acrtc, false);
7316 	}
	/*
	 * Add a check here for SoCs that support a hardware cursor plane, to
	 * unset legacy_cursor_update.
	 */
7321 
7322 	return drm_atomic_helper_commit(dev, state, nonblock);
7323 
	/* TODO: Handle EINTR, re-enable IRQ */
7325 }
7326 
/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
7335 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7336 {
7337 	struct drm_device *dev = state->dev;
7338 	struct amdgpu_device *adev = dev->dev_private;
7339 	struct amdgpu_display_manager *dm = &adev->dm;
7340 	struct dm_atomic_state *dm_state;
7341 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7342 	uint32_t i, j;
7343 	struct drm_crtc *crtc;
7344 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7345 	unsigned long flags;
7346 	bool wait_for_vblank = true;
7347 	struct drm_connector *connector;
7348 	struct drm_connector_state *old_con_state, *new_con_state;
7349 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7350 	int crtc_disable_count = 0;
7351 
7352 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7353 
7354 	dm_state = dm_atomic_get_new_state(state);
7355 	if (dm_state && dm_state->context) {
7356 		dc_state = dm_state->context;
7357 	} else {
7358 		/* No state changes, retain current state. */
7359 		dc_state_temp = dc_create_state(dm->dc);
7360 		ASSERT(dc_state_temp);
7361 		dc_state = dc_state_temp;
7362 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7363 	}
7364 
7365 	/* update changed items */
7366 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7367 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7368 
7369 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7370 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7371 
7372 		DRM_DEBUG_DRIVER(
7373 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7375 			"connectors_changed:%d\n",
7376 			acrtc->crtc_id,
7377 			new_crtc_state->enable,
7378 			new_crtc_state->active,
7379 			new_crtc_state->planes_changed,
7380 			new_crtc_state->mode_changed,
7381 			new_crtc_state->active_changed,
7382 			new_crtc_state->connectors_changed);
7383 
7384 		/* Copy all transient state flags into dc state */
7385 		if (dm_new_crtc_state->stream) {
7386 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7387 							    dm_new_crtc_state->stream);
7388 		}
7389 
		/*
		 * Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
7394 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7395 
7396 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7397 
7398 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In that case userspace tries to set a mode
				 * on a display which is in fact disconnected,
				 * and dc_sink is NULL on the aconnector.
				 * We expect a reset mode to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * while the resume sequence is still finishing.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
7414 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7415 						__func__, acrtc->base.base.id);
7416 				continue;
7417 			}
7418 
7419 			if (dm_old_crtc_state->stream)
7420 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7421 
7422 			pm_runtime_get_noresume(dev->dev);
7423 
7424 			acrtc->enabled = true;
7425 			acrtc->hw_mode = new_crtc_state->mode;
7426 			crtc->hwmode = new_crtc_state->mode;
7427 		} else if (modereset_required(new_crtc_state)) {
7428 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7429 			/* i.e. reset mode */
7430 			if (dm_old_crtc_state->stream) {
7431 				if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7432 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7433 
7434 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7435 			}
7436 		}
7437 	} /* for_each_crtc_in_state() */
7438 
7439 	if (dc_state) {
7440 		dm_enable_per_frame_crtc_master_sync(dc_state);
7441 		mutex_lock(&dm->dc_lock);
7442 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7443 		mutex_unlock(&dm->dc_lock);
7444 	}
7445 
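	/*
	 * Cache the OTG (output timing generator) instance backing each
	 * CRTC; it is what the pageflip/vblank interrupt handlers use to
	 * find their way back to the right CRTC.
	 */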
7446 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7447 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7448 
7449 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7450 
7451 		if (dm_new_crtc_state->stream != NULL) {
7452 			const struct dc_stream_status *status =
7453 					dc_stream_get_status(dm_new_crtc_state->stream);
7454 
7455 			if (!status)
7456 				status = dc_stream_get_status_from_state(dc_state,
7457 									 dm_new_crtc_state->stream);
7458 
7459 			if (!status)
7460 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7461 			else
7462 				acrtc->otg_inst = status->primary_otg_inst;
7463 		}
7464 	}
7465 #ifdef CONFIG_DRM_AMD_DC_HDCP
7466 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7467 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7468 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7469 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7470 
7471 		new_crtc_state = NULL;
7472 
7473 		if (acrtc)
7474 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7475 
7476 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7477 
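		/*
		 * If the stream is going away while content protection is
		 * still ENABLED, reset HDCP on the link and fall back to
		 * DESIRED so protection is renegotiated on the next enable.
		 */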
7478 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7479 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7480 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7481 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7482 			continue;
7483 		}
7484 
7485 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7486 			hdcp_update_display(
7487 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7488 				new_con_state->hdcp_content_type,
7489 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7490 													 : false);
7491 	}
7492 #endif
7493 
7494 	/* Handle connector state changes */
7495 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7496 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7497 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7498 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7499 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7500 		struct dc_stream_update stream_update;
7501 		struct dc_info_packet hdr_packet;
7502 		struct dc_stream_status *status = NULL;
7503 		bool abm_changed, hdr_changed, scaling_changed;
7504 
7505 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7506 		memset(&stream_update, 0, sizeof(stream_update));
7507 
7508 		if (acrtc) {
7509 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7510 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7511 		}
7512 
7513 		/* Skip any modesets/resets */
7514 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7515 			continue;
7516 
7517 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7518 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7519 
7520 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7521 							     dm_old_con_state);
7522 
7523 		abm_changed = dm_new_crtc_state->abm_level !=
7524 			      dm_old_crtc_state->abm_level;
7525 
7526 		hdr_changed =
7527 			is_hdr_metadata_different(old_con_state, new_con_state);
7528 
7529 		if (!scaling_changed && !abm_changed && !hdr_changed)
7530 			continue;
7531 
7532 		stream_update.stream = dm_new_crtc_state->stream;
7533 		if (scaling_changed) {
7534 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7535 					dm_new_con_state, dm_new_crtc_state->stream);
7536 
7537 			stream_update.src = dm_new_crtc_state->stream->src;
7538 			stream_update.dst = dm_new_crtc_state->stream->dst;
7539 		}
7540 
7541 		if (abm_changed) {
7542 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7543 
7544 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7545 		}
7546 
7547 		if (hdr_changed) {
7548 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7549 			stream_update.hdr_static_metadata = &hdr_packet;
7550 		}
7551 
7552 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7553 		WARN_ON(!status);
7554 		WARN_ON(!status->plane_count);
7555 
7556 		/*
7557 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7558 		 * Here we create an empty update on each plane.
7559 		 * To fix this, DC should permit updating only stream properties.
7560 		 */
7561 		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

7565 		mutex_lock(&dm->dc_lock);
7566 		dc_commit_updates_for_stream(dm->dc,
7567 						     dummy_updates,
7568 						     status->plane_count,
7569 						     dm_new_crtc_state->stream,
7570 						     &stream_update,
7571 						     dc_state);
7572 		mutex_unlock(&dm->dc_lock);
7573 	}
7574 
7575 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7576 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7577 				      new_crtc_state, i) {
7578 		if (old_crtc_state->active && !new_crtc_state->active)
7579 			crtc_disable_count++;
7580 
7581 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7582 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7583 
7584 		/* Update freesync active state. */
7585 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7586 
7587 		/* Handle vrr on->off / off->on transitions */
7588 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7589 						dm_new_crtc_state);
7590 	}
7591 
	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. It was intentionally deferred until after the front end
	 * state was modified to wait until the OTG was on and so the IRQ
	 * handlers didn't access stale or invalid state.
	 */
7598 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7599 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7600 
7601 		if (new_crtc_state->active &&
7602 		    (!old_crtc_state->active ||
7603 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7604 			manage_dm_interrupts(adev, acrtc, true);
7605 #ifdef CONFIG_DEBUG_FS
			/*
			 * Frontend may have changed, so reapply the CRC
			 * capture settings for the stream.
			 */
7610 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7611 
7612 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7613 				amdgpu_dm_crtc_configure_crc_source(
7614 					crtc, dm_new_crtc_state,
7615 					dm_new_crtc_state->crc_src);
7616 			}
7617 #endif
7618 		}
7619 	}
7620 
7621 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7622 		if (new_crtc_state->async_flip)
7623 			wait_for_vblank = false;
7624 
	/* Update planes when needed, per CRTC. */
7626 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7627 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7628 
7629 		if (dm_new_crtc_state->stream)
7630 			amdgpu_dm_commit_planes(state, dc_state, dev,
7631 						dm, crtc, wait_for_vblank);
7632 	}
7633 
7634 	/* Update audio instances for each connector. */
7635 	amdgpu_dm_commit_audio(dev, state);
7636 
	/*
	 * Send a vblank event for any CRTC whose event was not handled in
	 * the flip path, and mark the event consumed for
	 * drm_atomic_helper_commit_hw_done().
	 */
7641 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7642 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7643 
7644 		if (new_crtc_state->event)
7645 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7646 
7647 		new_crtc_state->event = NULL;
7648 	}
7649 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7650 
7651 	/* Signal HW programming completion */
7652 	drm_atomic_helper_commit_hw_done(state);
7653 
7654 	if (wait_for_vblank)
7655 		drm_atomic_helper_wait_for_flip_done(dev, state);
7656 
7657 	drm_atomic_helper_cleanup_planes(dev, state);
7658 
7659 	/*
7660 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7661 	 * so we can put the GPU into runtime suspend if we're not driving any
7662 	 * displays anymore
7663 	 */
7664 	for (i = 0; i < crtc_disable_count; i++)
7665 		pm_runtime_put_autosuspend(dev->dev);
7666 	pm_runtime_mark_last_busy(dev->dev);
7667 
7668 	if (dc_state_temp)
7669 		dc_release_state(dc_state_temp);
7670 }
7671 
7672 
7673 static int dm_force_atomic_commit(struct drm_connector *connector)
7674 {
7675 	int ret = 0;
7676 	struct drm_device *ddev = connector->dev;
7677 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7678 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7679 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7680 	struct drm_connector_state *conn_state;
7681 	struct drm_crtc_state *crtc_state;
7682 	struct drm_plane_state *plane_state;
7683 
7684 	if (!state)
7685 		return -ENOMEM;
7686 
7687 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7688 
7689 	/* Construct an atomic state to restore previous display setting */
7690 
7691 	/*
7692 	 * Attach connectors to drm_atomic_state
7693 	 */
7694 	conn_state = drm_atomic_get_connector_state(state, connector);
7695 
7696 	ret = PTR_ERR_OR_ZERO(conn_state);
7697 	if (ret)
7698 		goto err;
7699 
7700 	/* Attach crtc to drm_atomic_state*/
7701 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7702 
7703 	ret = PTR_ERR_OR_ZERO(crtc_state);
7704 	if (ret)
7705 		goto err;
7706 
7707 	/* force a restore */
7708 	crtc_state->mode_changed = true;
7709 
7710 	/* Attach plane to drm_atomic_state */
7711 	plane_state = drm_atomic_get_plane_state(state, plane);
7712 
7713 	ret = PTR_ERR_OR_ZERO(plane_state);
7714 	if (ret)
7715 		goto err;
7716 
7718 	/* Call commit internally with the state we just constructed */
7719 	ret = drm_atomic_commit(state);
7720 	if (!ret)
7721 		return 0;
7722 
7723 err:
7724 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7725 	drm_atomic_state_put(state);
7726 
7727 	return ret;
7728 }
7729 
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
7735 void dm_restore_drm_connector_state(struct drm_device *dev,
7736 				    struct drm_connector *connector)
7737 {
7738 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7739 	struct amdgpu_crtc *disconnected_acrtc;
7740 	struct dm_crtc_state *acrtc_state;
7741 
7742 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7743 		return;
7744 
7745 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7746 	if (!disconnected_acrtc)
7747 		return;
7748 
7749 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7750 	if (!acrtc_state->stream)
7751 		return;
7752 
	/*
	 * If the previous sink is not released and different from the current
	 * one, we deduce we are in a state where we cannot rely on a usermode
	 * call to turn on the display, so we do it here.
	 */
7758 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7759 		dm_force_atomic_commit(&aconnector->base);
7760 }
7761 
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
7766 static int do_aquire_global_lock(struct drm_device *dev,
7767 				 struct drm_atomic_state *state)
7768 {
7769 	struct drm_crtc *crtc;
7770 	struct drm_crtc_commit *commit;
7771 	long ret;
7772 
	/*
	 * Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are taking here will get released too.
	 */
7778 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7779 	if (ret)
7780 		return ret;
7781 
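	/*
	 * Pin the most recent commit on each CRTC (the first entry in
	 * commit_list) under commit_lock so it cannot be freed while we
	 * wait on its hw_done and flip_done completions.
	 */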
7782 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7783 		spin_lock(&crtc->commit_lock);
7784 		commit = list_first_entry_or_null(&crtc->commit_list,
7785 				struct drm_crtc_commit, commit_entry);
7786 		if (commit)
7787 			drm_crtc_commit_get(commit);
7788 		spin_unlock(&crtc->commit_lock);
7789 
7790 		if (!commit)
7791 			continue;
7792 
		/*
		 * Make sure all pending HW programming is completed and
		 * all page flips are done.
		 */
7797 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7798 
7799 		if (ret > 0)
7800 			ret = wait_for_completion_interruptible_timeout(
7801 					&commit->flip_done, 10*HZ);
7802 
7803 		if (ret == 0)
7804 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7805 				  "timed out\n", crtc->base.id, crtc->name);
7806 
7807 		drm_crtc_commit_put(commit);
7808 	}
7809 
7810 	return ret < 0 ? ret : 0;
7811 }
7812 
7813 static void get_freesync_config_for_crtc(
7814 	struct dm_crtc_state *new_crtc_state,
7815 	struct dm_connector_state *new_con_state)
7816 {
7817 	struct mod_freesync_config config = {0};
7818 	struct amdgpu_dm_connector *aconnector =
7819 			to_amdgpu_dm_connector(new_con_state->base.connector);
7820 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7821 	int vrefresh = drm_mode_vrefresh(mode);
7822 
7823 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7824 					vrefresh >= aconnector->min_vfreq &&
7825 					vrefresh <= aconnector->max_vfreq;
7826 
7827 	if (new_crtc_state->vrr_supported) {
7828 		new_crtc_state->stream->ignore_msa_timing_param = true;
7829 		config.state = new_crtc_state->base.vrr_enabled ?
7830 				VRR_STATE_ACTIVE_VARIABLE :
7831 				VRR_STATE_INACTIVE;
7832 		config.min_refresh_in_uhz =
7833 				aconnector->min_vfreq * 1000000;
7834 		config.max_refresh_in_uhz =
7835 				aconnector->max_vfreq * 1000000;
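		/*
		 * min/max_vfreq are in Hz while DC expects micro-Hz,
		 * e.g. a 48-144 Hz range becomes 48000000-144000000 uHz.
		 */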
7836 		config.vsif_supported = true;
7837 		config.btr = true;
7838 	}
7839 
7840 	new_crtc_state->freesync_config = config;
7841 }
7842 
7843 static void reset_freesync_config_for_crtc(
7844 	struct dm_crtc_state *new_crtc_state)
7845 {
7846 	new_crtc_state->vrr_supported = false;
7847 
7848 	memset(&new_crtc_state->vrr_params, 0,
7849 	       sizeof(new_crtc_state->vrr_params));
7850 	memset(&new_crtc_state->vrr_infopacket, 0,
7851 	       sizeof(new_crtc_state->vrr_infopacket));
7852 }
7853 
7854 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7855 				struct drm_atomic_state *state,
7856 				struct drm_crtc *crtc,
7857 				struct drm_crtc_state *old_crtc_state,
7858 				struct drm_crtc_state *new_crtc_state,
7859 				bool enable,
7860 				bool *lock_and_validation_needed)
7861 {
7862 	struct dm_atomic_state *dm_state = NULL;
7863 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7864 	struct dc_stream_state *new_stream;
7865 	int ret = 0;
7866 
7867 	/*
7868 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7869 	 * update changed items
7870 	 */
7871 	struct amdgpu_crtc *acrtc = NULL;
7872 	struct amdgpu_dm_connector *aconnector = NULL;
7873 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7874 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7875 
7876 	new_stream = NULL;
7877 
7878 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7879 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7880 	acrtc = to_amdgpu_crtc(crtc);
7881 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7882 
7883 	/* TODO This hack should go away */
7884 	if (aconnector && enable) {
7885 		/* Make sure fake sink is created in plug-in scenario */
7886 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7887 							    &aconnector->base);
7888 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7889 							    &aconnector->base);
7890 
7891 		if (IS_ERR(drm_new_conn_state)) {
7892 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7893 			goto fail;
7894 		}
7895 
7896 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7897 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7898 
7899 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7900 			goto skip_modeset;
7901 
7902 		new_stream = create_validate_stream_for_sink(aconnector,
7903 							     &new_crtc_state->mode,
7904 							     dm_new_conn_state,
7905 							     dm_old_crtc_state->stream);
7906 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
7913 
7914 		if (!new_stream) {
7915 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7916 					__func__, acrtc->base.base.id);
7917 			ret = -ENOMEM;
7918 			goto fail;
7919 		}
7920 
7921 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7922 
7923 		ret = fill_hdr_info_packet(drm_new_conn_state,
7924 					   &new_stream->hdr_static_metadata);
7925 		if (ret)
7926 			goto fail;
7927 
		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'd hit the BUG_ON and get a black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
7937 		if (dm_new_crtc_state->stream &&
7938 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7939 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7940 			new_crtc_state->mode_changed = false;
7941 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7942 					 new_crtc_state->mode_changed);
7943 		}
7944 	}
7945 
7946 	/* mode_changed flag may get updated above, need to check again */
7947 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7948 		goto skip_modeset;
7949 
7950 	DRM_DEBUG_DRIVER(
7951 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7953 		"connectors_changed:%d\n",
7954 		acrtc->crtc_id,
7955 		new_crtc_state->enable,
7956 		new_crtc_state->active,
7957 		new_crtc_state->planes_changed,
7958 		new_crtc_state->mode_changed,
7959 		new_crtc_state->active_changed,
7960 		new_crtc_state->connectors_changed);
7961 
7962 	/* Remove stream for any changed/disabled CRTC */
7963 	if (!enable) {
7964 
7965 		if (!dm_old_crtc_state->stream)
7966 			goto skip_modeset;
7967 
7968 		ret = dm_atomic_get_state(state, &dm_state);
7969 		if (ret)
7970 			goto fail;
7971 
7972 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7973 				crtc->base.id);
7974 
7975 		/* i.e. reset mode */
7976 		if (dc_remove_stream_from_ctx(
7977 				dm->dc,
7978 				dm_state->context,
7979 				dm_old_crtc_state->stream) != DC_OK) {
7980 			ret = -EINVAL;
7981 			goto fail;
7982 		}
7983 
7984 		dc_stream_release(dm_old_crtc_state->stream);
7985 		dm_new_crtc_state->stream = NULL;
7986 
7987 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7988 
7989 		*lock_and_validation_needed = true;
7990 
7991 	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on
		 * new_stream when added MST connectors are not found in the
		 * existing crtc_state in chained mode.
		 * TODO: need to dig out the root cause of this.
		 */
7997 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7998 			goto skip_modeset;
7999 
8000 		if (modereset_required(new_crtc_state))
8001 			goto skip_modeset;
8002 
8003 		if (modeset_required(new_crtc_state, new_stream,
8004 				     dm_old_crtc_state->stream)) {
8005 
8006 			WARN_ON(dm_new_crtc_state->stream);
8007 
8008 			ret = dm_atomic_get_state(state, &dm_state);
8009 			if (ret)
8010 				goto fail;
8011 
8012 			dm_new_crtc_state->stream = new_stream;
8013 
8014 			dc_stream_retain(new_stream);
8015 
8016 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8017 						crtc->base.id);
8018 
8019 			if (dc_add_stream_to_ctx(
8020 					dm->dc,
8021 					dm_state->context,
8022 					dm_new_crtc_state->stream) != DC_OK) {
8023 				ret = -EINVAL;
8024 				goto fail;
8025 			}
8026 
8027 			*lock_and_validation_needed = true;
8028 		}
8029 	}
8030 
8031 skip_modeset:
8032 	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);
8035 
8036 	/*
8037 	 * We want to do dc stream updates that do not require a
8038 	 * full modeset below.
8039 	 */
8040 	if (!(enable && aconnector && new_crtc_state->enable &&
8041 	      new_crtc_state->active))
8042 		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already in the context),
	 * 2. It has a valid connector attached, and
	 * 3. It is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
8051 	BUG_ON(dm_new_crtc_state->stream == NULL);
8052 
8053 	/* Scaling or underscan settings */
8054 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8055 		update_stream_scaling_settings(
8056 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8057 
8058 	/* ABM settings */
8059 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8060 
8061 	/*
8062 	 * Color management settings. We also update color properties
8063 	 * when a modeset is needed, to ensure it gets reprogrammed.
8064 	 */
8065 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8066 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8067 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8068 		if (ret)
8069 			goto fail;
8070 	}
8071 
8072 	/* Update Freesync settings. */
8073 	get_freesync_config_for_crtc(dm_new_crtc_state,
8074 				     dm_new_conn_state);
8075 
8076 	return ret;
8077 
8078 fail:
8079 	if (new_stream)
8080 		dc_stream_release(new_stream);
8081 	return ret;
8082 }
8083 
8084 static bool should_reset_plane(struct drm_atomic_state *state,
8085 			       struct drm_plane *plane,
8086 			       struct drm_plane_state *old_plane_state,
8087 			       struct drm_plane_state *new_plane_state)
8088 {
8089 	struct drm_plane *other;
8090 	struct drm_plane_state *old_other_state, *new_other_state;
8091 	struct drm_crtc_state *new_crtc_state;
8092 	int i;
8093 
	/*
	 * TODO: Remove this hack once the checks below are sufficient
	 * to determine when we need to reset all the planes on
	 * the stream.
	 */
8099 	if (state->allow_modeset)
8100 		return true;
8101 
8102 	/* Exit early if we know that we're adding or removing the plane. */
8103 	if (old_plane_state->crtc != new_plane_state->crtc)
8104 		return true;
8105 
8106 	/* old crtc == new_crtc == NULL, plane not in context. */
8107 	if (!new_plane_state->crtc)
8108 		return false;
8109 
8110 	new_crtc_state =
8111 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8112 
8113 	if (!new_crtc_state)
8114 		return true;
8115 
8116 	/* CRTC Degamma changes currently require us to recreate planes. */
8117 	if (new_crtc_state->color_mgmt_changed)
8118 		return true;
8119 
8120 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8121 		return true;
8122 
8123 	/*
8124 	 * If there are any new primary or overlay planes being added or
8125 	 * removed then the z-order can potentially change. To ensure
8126 	 * correct z-order and pipe acquisition the current DC architecture
8127 	 * requires us to remove and recreate all existing planes.
8128 	 *
8129 	 * TODO: Come up with a more elegant solution for this.
8130 	 */
8131 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8132 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8133 			continue;
8134 
8135 		if (old_other_state->crtc != new_plane_state->crtc &&
8136 		    new_other_state->crtc != new_plane_state->crtc)
8137 			continue;
8138 
8139 		if (old_other_state->crtc != new_other_state->crtc)
8140 			return true;
8141 
8142 		/* TODO: Remove this once we can handle fast format changes. */
8143 		if (old_other_state->fb && new_other_state->fb &&
8144 		    old_other_state->fb->format != new_other_state->fb->format)
8145 			return true;
8146 	}
8147 
8148 	return false;
8149 }
8150 
8151 static int dm_update_plane_state(struct dc *dc,
8152 				 struct drm_atomic_state *state,
8153 				 struct drm_plane *plane,
8154 				 struct drm_plane_state *old_plane_state,
8155 				 struct drm_plane_state *new_plane_state,
8156 				 bool enable,
8157 				 bool *lock_and_validation_needed)
8158 {
8160 	struct dm_atomic_state *dm_state = NULL;
8161 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8162 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8163 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8164 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8165 	struct amdgpu_crtc *new_acrtc;
8166 	bool needs_reset;
8167 	int ret = 0;
8168 
8170 	new_plane_crtc = new_plane_state->crtc;
8171 	old_plane_crtc = old_plane_state->crtc;
8172 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8173 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8174 
8175 	/*TODO Implement better atomic check for cursor plane */
8176 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8177 		if (!enable || !new_plane_crtc ||
8178 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8179 			return 0;
8180 
8181 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8182 
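		/*
		 * Reject cursor sizes the hardware cursor plane can't handle;
		 * max_cursor_width/height are derived from the DC caps for
		 * this ASIC.
		 */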
8183 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8184 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8185 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8186 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8187 			return -EINVAL;
8188 		}
8189 
8190 		return 0;
8191 	}
8192 
8193 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8194 					 new_plane_state);
8195 
8196 	/* Remove any changed/removed planes */
8197 	if (!enable) {
8198 		if (!needs_reset)
8199 			return 0;
8200 
8201 		if (!old_plane_crtc)
8202 			return 0;
8203 
8204 		old_crtc_state = drm_atomic_get_old_crtc_state(
8205 				state, old_plane_crtc);
8206 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8207 
8208 		if (!dm_old_crtc_state->stream)
8209 			return 0;
8210 
8211 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8212 				plane->base.id, old_plane_crtc->base.id);
8213 
8214 		ret = dm_atomic_get_state(state, &dm_state);
8215 		if (ret)
8216 			return ret;
8217 
8218 		if (!dc_remove_plane_from_context(
8219 				dc,
8220 				dm_old_crtc_state->stream,
8221 				dm_old_plane_state->dc_state,
8222 				dm_state->context)) {
8223 
			ret = -EINVAL;
8225 			return ret;
8226 		}
8227 
8229 		dc_plane_state_release(dm_old_plane_state->dc_state);
8230 		dm_new_plane_state->dc_state = NULL;
8231 
8232 		*lock_and_validation_needed = true;
8233 
8234 	} else { /* Add new planes */
8235 		struct dc_plane_state *dc_new_plane_state;
8236 
8237 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8238 			return 0;
8239 
8240 		if (!new_plane_crtc)
8241 			return 0;
8242 
8243 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8244 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8245 
8246 		if (!dm_new_crtc_state->stream)
8247 			return 0;
8248 
8249 		if (!needs_reset)
8250 			return 0;
8251 
8252 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8253 		if (ret)
8254 			return ret;
8255 
8256 		WARN_ON(dm_new_plane_state->dc_state);
8257 
8258 		dc_new_plane_state = dc_create_plane_state(dc);
8259 		if (!dc_new_plane_state)
8260 			return -ENOMEM;
8261 
8262 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8263 				plane->base.id, new_plane_crtc->base.id);
8264 
8265 		ret = fill_dc_plane_attributes(
8266 			new_plane_crtc->dev->dev_private,
8267 			dc_new_plane_state,
8268 			new_plane_state,
8269 			new_crtc_state);
8270 		if (ret) {
8271 			dc_plane_state_release(dc_new_plane_state);
8272 			return ret;
8273 		}
8274 
8275 		ret = dm_atomic_get_state(state, &dm_state);
8276 		if (ret) {
8277 			dc_plane_state_release(dc_new_plane_state);
8278 			return ret;
8279 		}
8280 
8281 		/*
8282 		 * Any atomic check errors that occur after this will
8283 		 * not need a release. The plane state will be attached
8284 		 * to the stream, and therefore part of the atomic
8285 		 * state. It'll be released when the atomic state is
8286 		 * cleaned.
8287 		 */
8288 		if (!dc_add_plane_to_context(
8289 				dc,
8290 				dm_new_crtc_state->stream,
8291 				dc_new_plane_state,
8292 				dm_state->context)) {
8293 
8294 			dc_plane_state_release(dc_new_plane_state);
8295 			return -EINVAL;
8296 		}
8297 
8298 		dm_new_plane_state->dc_state = dc_new_plane_state;
8299 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
8303 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8304 
8305 		*lock_and_validation_needed = true;
8306 	}
8307 
8309 	return ret;
8310 }
8311 
8312 static int
8313 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8314 				    struct drm_atomic_state *state,
8315 				    enum surface_update_type *out_type)
8316 {
8317 	struct dc *dc = dm->dc;
8318 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8319 	int i, j, num_plane, ret = 0;
8320 	struct drm_plane_state *old_plane_state, *new_plane_state;
8321 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8322 	struct drm_crtc *new_plane_crtc;
8323 	struct drm_plane *plane;
8324 
8325 	struct drm_crtc *crtc;
8326 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8327 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8328 	struct dc_stream_status *status = NULL;
8329 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8330 	struct surface_info_bundle {
8331 		struct dc_surface_update surface_updates[MAX_SURFACES];
8332 		struct dc_plane_info plane_infos[MAX_SURFACES];
8333 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8334 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8335 		struct dc_stream_update stream_update;
8336 	} *bundle;
8337 
8338 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8339 
8340 	if (!bundle) {
8341 		DRM_ERROR("Failed to allocate update bundle\n");
		/* Set type to FULL to avoid crashing in DC */
8343 		update_type = UPDATE_TYPE_FULL;
8344 		goto cleanup;
8345 	}
8346 
8347 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8348 
8349 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8350 
8351 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8352 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8353 		num_plane = 0;
8354 
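		/*
		 * A changed stream pointer means this CRTC was reset or went
		 * through a modeset, which always requires a full update.
		 */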
8355 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8356 			update_type = UPDATE_TYPE_FULL;
8357 			goto cleanup;
8358 		}
8359 
8360 		if (!new_dm_crtc_state->stream)
8361 			continue;
8362 
8363 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8364 			const struct amdgpu_framebuffer *amdgpu_fb =
8365 				to_amdgpu_framebuffer(new_plane_state->fb);
8366 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8367 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8368 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8369 			uint64_t tiling_flags;
8370 			bool tmz_surface = false;
8371 
8372 			new_plane_crtc = new_plane_state->crtc;
8373 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8374 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8375 
8376 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8377 				continue;
8378 
8379 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8380 				update_type = UPDATE_TYPE_FULL;
8381 				goto cleanup;
8382 			}
8383 
8384 			if (crtc != new_plane_crtc)
8385 				continue;
8386 
8387 			bundle->surface_updates[num_plane].surface =
8388 					new_dm_plane_state->dc_state;
8389 
8390 			if (new_crtc_state->mode_changed) {
8391 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8392 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8393 			}
8394 
8395 			if (new_crtc_state->color_mgmt_changed) {
8396 				bundle->surface_updates[num_plane].gamma =
8397 						new_dm_plane_state->dc_state->gamma_correction;
8398 				bundle->surface_updates[num_plane].in_transfer_func =
8399 						new_dm_plane_state->dc_state->in_transfer_func;
8400 				bundle->surface_updates[num_plane].gamut_remap_matrix =
8401 						&new_dm_plane_state->dc_state->gamut_remap_matrix;
8402 				bundle->stream_update.gamut_remap =
8403 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8404 				bundle->stream_update.output_csc_transform =
8405 						&new_dm_crtc_state->stream->csc_color_matrix;
8406 				bundle->stream_update.out_transfer_func =
8407 						new_dm_crtc_state->stream->out_transfer_func;
8408 			}
8409 
8410 			ret = fill_dc_scaling_info(new_plane_state,
8411 						   scaling_info);
8412 			if (ret)
8413 				goto cleanup;
8414 
8415 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8416 
8417 			if (amdgpu_fb) {
8418 				ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8419 				if (ret)
8420 					goto cleanup;
8421 
8422 				ret = fill_dc_plane_info_and_addr(
8423 					dm->adev, new_plane_state, tiling_flags,
8424 					plane_info,
8425 					&flip_addr->address, tmz_surface,
8426 					false);
8427 				if (ret)
8428 					goto cleanup;
8429 
8430 				bundle->surface_updates[num_plane].plane_info = plane_info;
8431 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8432 			}
8433 
8434 			num_plane++;
8435 		}
8436 
8437 		if (num_plane == 0)
8438 			continue;
8439 
8440 		ret = dm_atomic_get_state(state, &dm_state);
8441 		if (ret)
8442 			goto cleanup;
8443 
8444 		old_dm_state = dm_atomic_get_old_state(state);
8445 		if (!old_dm_state) {
8446 			ret = -EINVAL;
8447 			goto cleanup;
8448 		}
8449 
8450 		status = dc_stream_get_status_from_state(old_dm_state->context,
8451 							 new_dm_crtc_state->stream);
8452 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8453 		/*
8454 		 * TODO: DC modifies the surface during this call so we need
8455 		 * to lock here - find a way to do this without locking.
8456 		 */
8457 		mutex_lock(&dm->dc_lock);
8458 		update_type = dc_check_update_surfaces_for_stream(
				dc, bundle->surface_updates, num_plane,
8460 				&bundle->stream_update, status);
8461 		mutex_unlock(&dm->dc_lock);
8462 
8463 		if (update_type > UPDATE_TYPE_MED) {
8464 			update_type = UPDATE_TYPE_FULL;
8465 			goto cleanup;
8466 		}
8467 	}
8468 
8469 cleanup:
8470 	kfree(bundle);
8471 
8472 	*out_type = update_type;
8473 	return ret;
8474 }
8475 #if defined(CONFIG_DRM_AMD_DC_DCN)
8476 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8477 {
8478 	struct drm_connector *connector;
8479 	struct drm_connector_state *conn_state;
8480 	struct amdgpu_dm_connector *aconnector = NULL;
8481 	int i;
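
	/*
	 * Find an MST connector driven by this CRTC. DSC bandwidth is shared
	 * across an MST topology, so every CRTC on the same topology has to
	 * be pulled into the state for recomputation.
	 */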
8482 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8483 		if (conn_state->crtc != crtc)
8484 			continue;
8485 
8486 		aconnector = to_amdgpu_dm_connector(connector);
8487 		if (!aconnector->port || !aconnector->mst_port)
8488 			aconnector = NULL;
8489 		else
8490 			break;
8491 	}
8492 
8493 	if (!aconnector)
8494 		return 0;
8495 
8496 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8497 }
8498 #endif
8499 
8500 /**
8501  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8502  * @dev: The DRM device
8503  * @state: The atomic state to commit
8504  *
8505  * Validate that the given atomic state is programmable by DC into hardware.
8506  * This involves constructing a &struct dc_state reflecting the new hardware
8507  * state we wish to commit, then querying DC to see if it is programmable. It's
8508  * important not to modify the existing DC state. Otherwise, atomic_check
8509  * may unexpectedly commit hardware changes.
8510  *
8511  * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes/adds/updates streams on
 * one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
8517  *
8518  * Note that DM adds the affected connectors for all CRTCs in state, when that
8519  * might not seem necessary. This is because DC stream creation requires the
8520  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8521  * be possible but non-trivial - a possible TODO item.
8522  *
 * Return: 0 on success, or a negative error code if validation failed.
8524  */
8525 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8526 				  struct drm_atomic_state *state)
8527 {
8528 	struct amdgpu_device *adev = dev->dev_private;
8529 	struct dm_atomic_state *dm_state = NULL;
8530 	struct dc *dc = adev->dm.dc;
8531 	struct drm_connector *connector;
8532 	struct drm_connector_state *old_con_state, *new_con_state;
8533 	struct drm_crtc *crtc;
8534 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8535 	struct drm_plane *plane;
8536 	struct drm_plane_state *old_plane_state, *new_plane_state;
8537 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8538 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8539 	enum dc_status status;
8540 	int ret, i;
8541 
8542 	/*
8543 	 * This bool will be set for true for any modeset/reset
8544 	 * or plane update which implies non fast surface update.
8545 	 */
8546 	bool lock_and_validation_needed = false;
8547 
8548 	ret = drm_atomic_helper_check_modeset(dev, state);
8549 	if (ret)
8550 		goto fail;
8551 
8552 	/* Check connector changes */
8553 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8554 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8555 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8556 
8557 		/* Skip connectors that are disabled or part of modeset already. */
8558 		if (!old_con_state->crtc && !new_con_state->crtc)
8559 			continue;
8560 
8561 		if (!new_con_state->crtc)
8562 			continue;
8563 
8564 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8565 		if (IS_ERR(new_crtc_state)) {
8566 			ret = PTR_ERR(new_crtc_state);
8567 			goto fail;
8568 		}
8569 
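		/*
		 * A change in the ABM (Adaptive Backlight Management) level
		 * must trigger a modeset on the CRTC so the new level gets
		 * programmed.
		 */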
8570 		if (dm_old_con_state->abm_level !=
8571 		    dm_new_con_state->abm_level)
8572 			new_crtc_state->connectors_changed = true;
8573 	}
8574 
8575 #if defined(CONFIG_DRM_AMD_DC_DCN)
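	/*
	 * A modeset on one CRTC may change the DSC configuration of other
	 * streams on the same MST topology, so pull their CRTCs into the
	 * atomic state as well.
	 */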
8576 	if (adev->asic_type >= CHIP_NAVI10) {
8577 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8578 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8579 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8580 				if (ret)
8581 					goto fail;
8582 			}
8583 		}
8584 	}
8585 #endif
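	/*
	 * Pull the connectors and planes into the state for every CRTC that
	 * needs a modeset or has pending color management or VRR changes.
	 */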
8586 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8587 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8588 		    !new_crtc_state->color_mgmt_changed &&
8589 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8590 			continue;
8591 
8592 		if (!new_crtc_state->enable)
8593 			continue;
8594 
		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			goto fail;
8598 
8599 		ret = drm_atomic_add_affected_planes(state, crtc);
8600 		if (ret)
8601 			goto fail;
8602 	}
8603 
8604 	/*
8605 	 * Add all primary and overlay planes on the CRTC to the state
8606 	 * whenever a plane is enabled to maintain correct z-ordering
8607 	 * and to enable fast surface updates.
8608 	 */
8609 	drm_for_each_crtc(crtc, dev) {
8610 		bool modified = false;
8611 
8612 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8613 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8614 				continue;
8615 
8616 			if (new_plane_state->crtc == crtc ||
8617 			    old_plane_state->crtc == crtc) {
8618 				modified = true;
8619 				break;
8620 			}
8621 		}
8622 
8623 		if (!modified)
8624 			continue;
8625 
8626 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8627 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8628 				continue;
8629 
8630 			new_plane_state =
8631 				drm_atomic_get_plane_state(state, plane);
8632 
8633 			if (IS_ERR(new_plane_state)) {
8634 				ret = PTR_ERR(new_plane_state);
8635 				goto fail;
8636 			}
8637 		}
8638 	}
8639 
	/* Remove existing planes if they are modified */
8641 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8642 		ret = dm_update_plane_state(dc, state, plane,
8643 					    old_plane_state,
8644 					    new_plane_state,
8645 					    false,
8646 					    &lock_and_validation_needed);
8647 		if (ret)
8648 			goto fail;
8649 	}
8650 
8651 	/* Disable all crtcs which require disable */
8652 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8653 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8654 					   old_crtc_state,
8655 					   new_crtc_state,
8656 					   false,
8657 					   &lock_and_validation_needed);
8658 		if (ret)
8659 			goto fail;
8660 	}
8661 
8662 	/* Enable all crtcs which require enable */
8663 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8664 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8665 					   old_crtc_state,
8666 					   new_crtc_state,
8667 					   true,
8668 					   &lock_and_validation_needed);
8669 		if (ret)
8670 			goto fail;
8671 	}
8672 
8673 	/* Add new/modified planes */
8674 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8675 		ret = dm_update_plane_state(dc, state, plane,
8676 					    old_plane_state,
8677 					    new_plane_state,
8678 					    true,
8679 					    &lock_and_validation_needed);
8680 		if (ret)
8681 			goto fail;
8682 	}
8683 
8684 	/* Run this here since we want to validate the streams we created */
8685 	ret = drm_atomic_helper_check_planes(dev, state);
8686 	if (ret)
8687 		goto fail;
8688 
8689 	if (state->legacy_cursor_update) {
8690 		/*
8691 		 * This is a fast cursor update coming from the plane update
8692 		 * helper, check if it can be done asynchronously for better
8693 		 * performance.
8694 		 */
8695 		state->async_update =
8696 			!drm_atomic_helper_async_check(dev, state);
8697 
8698 		/*
8699 		 * Skip the remaining global validation if this is an async
8700 		 * update. Cursor updates can be done without affecting
8701 		 * state or bandwidth calcs and this avoids the performance
8702 		 * penalty of locking the private state object and
8703 		 * allocating a new dc_state.
8704 		 */
8705 		if (state->async_update)
8706 			return 0;
8707 	}
8708 
	/* Check scaling and underscan changes */
8710 	/* TODO Removed scaling changes validation due to inability to commit
8711 	 * new stream into context w\o causing full reset. Need to
8712 	 * decide how to handle.
8713 	 */
8714 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8715 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8716 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8717 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8718 
8719 		/* Skip any modesets/resets */
8720 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8721 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8722 			continue;
8723 
		/* Skip anything that is not a scaling or underscan change */
8725 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8726 			continue;
8727 
8728 		overall_update_type = UPDATE_TYPE_FULL;
8729 		lock_and_validation_needed = true;
8730 	}
8731 
8732 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8733 	if (ret)
8734 		goto fail;
8735 
8736 	if (overall_update_type < update_type)
8737 		overall_update_type = update_type;
8738 
8739 	/*
8740 	 * lock_and_validation_needed was an old way to determine if we need to set
8741 	 * the global lock. Leaving it in to check if we broke any corner cases
8742 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8743 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8744 	 */
8745 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8747 
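	/*
	 * Anything beyond a fast update requires taking the global lock and
	 * validating the new DC context before commit.
	 */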
8748 	if (overall_update_type > UPDATE_TYPE_FAST) {
8749 		ret = dm_atomic_get_state(state, &dm_state);
8750 		if (ret)
8751 			goto fail;
8752 
8753 		ret = do_aquire_global_lock(dev, state);
8754 		if (ret)
8755 			goto fail;
8756 
#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
		if (ret)
			goto fail;
#endif
8765 
8766 		/*
8767 		 * Perform validation of MST topology in the state:
8768 		 * We need to perform MST atomic check before calling
8769 		 * dc_validate_global_state(), or there is a chance
8770 		 * to get stuck in an infinite loop and hang eventually.
8771 		 */
8772 		ret = drm_dp_mst_atomic_check(state);
8773 		if (ret)
8774 			goto fail;
8775 		status = dc_validate_global_state(dc, dm_state->context, false);
8776 		if (status != DC_OK) {
8777 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8778 				       dc_status_to_str(status), status);
8779 			ret = -EINVAL;
8780 			goto fail;
8781 		}
8782 	} else {
8783 		/*
8784 		 * The commit is a fast update. Fast updates shouldn't change
8785 		 * the DC context, affect global validation, and can have their
8786 		 * commit work done in parallel with other commits not touching
8787 		 * the same resource. If we have a new DC context as part of
8788 		 * the DM atomic state from validation we need to free it and
8789 		 * retain the existing one instead.
8790 		 *
8791 		 * Furthermore, since the DM atomic state only contains the DC
8792 		 * context and can safely be annulled, we can free the state
8793 		 * and clear the associated private object now to free
8794 		 * some memory and avoid a possible use-after-free later.
8795 		 */
8796 
8797 		for (i = 0; i < state->num_private_objs; i++) {
8798 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8799 
8800 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8801 				int j = state->num_private_objs-1;
8802 
8803 				dm_atomic_destroy_state(obj,
8804 						state->private_objs[i].state);
8805 
8806 				/* If i is not at the end of the array then the
8807 				 * last element needs to be moved to where i was
8808 				 * before the array can safely be truncated.
8809 				 */
8810 				if (i != j)
8811 					state->private_objs[i] =
8812 						state->private_objs[j];
8813 
8814 				state->private_objs[j].ptr = NULL;
8815 				state->private_objs[j].state = NULL;
8816 				state->private_objs[j].old_state = NULL;
8817 				state->private_objs[j].new_state = NULL;
8818 
8819 				state->num_private_objs = j;
8820 				break;
8821 			}
8822 		}
8823 	}
8824 
8825 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8827 		struct dm_crtc_state *dm_new_crtc_state =
8828 			to_dm_crtc_state(new_crtc_state);
8829 
8830 		dm_new_crtc_state->update_type = (int)overall_update_type;
8831 	}
8832 
	/* Must be success at this point */
8834 	WARN_ON(ret);
8835 	return ret;
8836 
8837 fail:
8838 	if (ret == -EDEADLK)
8839 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8840 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8841 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8842 	else
8843 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8844 
8845 	return ret;
8846 }
8847 
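/*
 * Check the sink's DPCD for the "ignore MSA timing parameters" capability,
 * which a DP sink must expose to support FreeSync/VRR.
 */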
8848 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8849 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8850 {
8851 	uint8_t dpcd_data;
8852 	bool capable = false;
8853 
8854 	if (amdgpu_dm_connector->dc_link &&
8855 		dm_helpers_dp_read_dpcd(
8856 				NULL,
8857 				amdgpu_dm_connector->dc_link,
8858 				DP_DOWN_STREAM_PORT_COUNT,
8859 				&dpcd_data,
8860 				sizeof(dpcd_data))) {
		capable = dpcd_data & DP_MSA_TIMING_PAR_IGNORED;
8862 	}
8863 
8864 	return capable;
8865 }

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8867 					struct edid *edid)
8868 {
8869 	int i;
8870 	bool edid_check_required;
8871 	struct detailed_timing *timing;
8872 	struct detailed_non_pixel *data;
8873 	struct detailed_data_monitor_range *range;
8874 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8875 			to_amdgpu_dm_connector(connector);
8876 	struct dm_connector_state *dm_con_state = NULL;
8877 
8878 	struct drm_device *dev = connector->dev;
8879 	struct amdgpu_device *adev = dev->dev_private;
8880 	bool freesync_capable = false;
8881 
8882 	if (!connector->state) {
8883 		DRM_ERROR("%s - Connector has no state", __func__);
8884 		goto update;
8885 	}
8886 
8887 	if (!edid) {
8888 		dm_con_state = to_dm_connector_state(connector->state);
8889 
8890 		amdgpu_dm_connector->min_vfreq = 0;
8891 		amdgpu_dm_connector->max_vfreq = 0;
8892 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8893 
8894 		goto update;
8895 	}
8896 
8897 	dm_con_state = to_dm_connector_state(connector->state);
8898 
8899 	edid_check_required = false;
8900 	if (!amdgpu_dm_connector->dc_sink) {
8901 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8902 		goto update;
8903 	}
8904 	if (!adev->dm.freesync_module)
8905 		goto update;
8906 	/*
8907 	 * if edid non zero restrict freesync only for dp and edp
8908 	 */
8909 	if (edid) {
8910 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8911 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8912 			edid_check_required = is_dp_capable_without_timing_msa(
8913 						adev->dm.dc,
8914 						amdgpu_dm_connector);
8915 		}
8916 	}
	if (edid_check_required && (edid->version > 1 ||
	   (edid->version == 1 && edid->revision > 1))) {
8919 		for (i = 0; i < 4; i++) {
8920 
8921 			timing	= &edid->detailed_timings[i];
8922 			data	= &timing->data.other_data;
8923 			range	= &data->data.range;
8924 			/*
8925 			 * Check if monitor has continuous frequency mode
8926 			 */
8927 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8928 				continue;
8929 			/*
8930 			 * Check for flag range limits only. If flag == 1 then
8931 			 * no additional timing information provided.
8932 			 * Default GTF, GTF Secondary curve and CVT are not
8933 			 * supported
8934 			 */
8935 			if (range->flags != 1)
8936 				continue;
8937 
8938 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8939 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8940 			amdgpu_dm_connector->pixel_clock_mhz =
8941 				range->pixel_clock_mhz * 10;
8942 			break;
8943 		}
8944 
8945 		if (amdgpu_dm_connector->max_vfreq -
8946 		    amdgpu_dm_connector->min_vfreq > 10) {
8947 
8948 			freesync_capable = true;
8949 		}
8950 	}
8951 
8952 update:
8953 	if (dm_con_state)
8954 		dm_con_state->freesync_capable = freesync_capable;
8955 
8956 	if (connector->vrr_capable_property)
8957 		drm_connector_set_vrr_capable_property(connector,
8958 						       freesync_capable);
8959 }
8960 
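/*
 * amdgpu_dm_set_psr_caps() - read the eDP sink's PSR capability from DPCD
 * @link: link to the eDP sink
 *
 * Caches the sink's PSR version on the link and records whether the PSR
 * feature can be enabled.
 */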
8961 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8962 {
8963 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8964 
8965 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8966 		return;
8967 	if (link->type == dc_connection_none)
8968 		return;
8969 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8970 					dpcd_data, sizeof(dpcd_data))) {
8971 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8972 
8973 		if (dpcd_data[0] == 0) {
8974 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8975 			link->psr_settings.psr_feature_enabled = false;
8976 		} else {
8977 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
8978 			link->psr_settings.psr_feature_enabled = true;
8979 		}
8980 
8981 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
8982 	}
8983 }
8984 
8985 /*
8986  * amdgpu_dm_link_setup_psr() - configure psr link
8987  * @stream: stream state
8988  *
8989  * Return: true if success
8990  */
8991 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8992 {
8993 	struct dc_link *link = NULL;
8994 	struct psr_config psr_config = {0};
8995 	struct psr_context psr_context = {0};
8996 	bool ret = false;
8997 
8998 	if (stream == NULL)
8999 		return false;
9000 
9001 	link = stream->link;
9002 
9003 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9004 
9005 	if (psr_config.psr_version > 0) {
9006 		psr_config.psr_exit_link_training_required = 0x1;
9007 		psr_config.psr_frame_capture_indication_req = 0;
9008 		psr_config.psr_rfb_setup_time = 0x37;
9009 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9010 		psr_config.allow_smu_optimizations = 0x0;
9011 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9016 
9017 	return ret;
9018 }
9019 
9020 /*
9021  * amdgpu_dm_psr_enable() - enable psr f/w
9022  * @stream: stream state
9023  *
 * Return: true on success
9025  */
9026 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9027 {
9028 	struct dc_link *link = stream->link;
9029 	unsigned int vsync_rate_hz = 0;
9030 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize with a fail-safe default
	 * of 2 static frames.
	 */
	unsigned int num_frames_static = 2;
9036 
9037 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9038 
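	/* Refresh rate in Hz = pixel clock / (h_total * v_total) */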
9039 	vsync_rate_hz = div64_u64(div64_u64((
9040 			stream->timing.pix_clk_100hz * 100),
9041 			stream->timing.v_total),
9042 			stream->timing.h_total);
9043 
	/*
	 * Round up: calculate the number of frames such that at least
	 * 30 ms of time has passed.
	 */
	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
	}
9051 	}
9052 
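	/*
	 * Treat cursor, overlay and surface updates as screen activity when
	 * counting static frames.
	 */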
9053 	params.triggers.cursor_update = true;
9054 	params.triggers.overlay_update = true;
9055 	params.triggers.surface_update = true;
9056 	params.num_frames = num_frames_static;
9057 
9058 	dc_stream_set_static_screen_params(link->ctx->dc,
9059 					   &stream, 1,
9060 					   &params);
9061 
9062 	return dc_link_set_psr_allow_active(link, true, false);
9063 }
9064 
9065 /*
9066  * amdgpu_dm_psr_disable() - disable psr f/w
9067  * @stream:  stream state
9068  *
 * Return: true on success
9070  */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
	DRM_DEBUG_DRIVER("Disabling psr...\n");
9075 
9076 	return dc_link_set_psr_allow_active(stream->link, false, true);
9077 }
9078