1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 
98 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100 
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103 
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106 
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109 
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
119 
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123 
124 /*
125  * initializes drm_device display related structures, based on the information
126  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134 
135 static void
136 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
137 
138 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
139 				struct drm_plane *plane,
140 				unsigned long possible_crtcs,
141 				const struct dc_plane_cap *plane_cap);
142 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
143 			       struct drm_plane *plane,
144 			       uint32_t link_index);
145 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
146 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
147 				    uint32_t link_index,
148 				    struct amdgpu_encoder *amdgpu_encoder);
149 static int amdgpu_dm_encoder_init(struct drm_device *dev,
150 				  struct amdgpu_encoder *aencoder,
151 				  uint32_t link_index);
152 
153 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
154 
155 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
156 				   struct drm_atomic_state *state,
157 				   bool nonblock);
158 
159 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
160 
161 static int amdgpu_dm_atomic_check(struct drm_device *dev,
162 				  struct drm_atomic_state *state);
163 
164 static void handle_cursor_update(struct drm_plane *plane,
165 				 struct drm_plane_state *old_plane_state);
166 
167 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
168 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
169 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
170 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
171 
172 
173 /*
174  * dm_vblank_get_counter
175  *
176  * @brief
177  * Get counter for number of vertical blanks
178  *
179  * @param
180  * struct amdgpu_device *adev - [in] desired amdgpu device
181  * int disp_idx - [in] which CRTC to get the counter from
182  *
183  * @return
184  * Counter for vertical blanks
185  */
186 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
187 {
188 	if (crtc >= adev->mode_info.num_crtc)
189 		return 0;
190 	else {
191 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
192 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
193 				acrtc->base.state);
194 
195 
196 		if (acrtc_state->stream == NULL) {
197 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
198 				  crtc);
199 			return 0;
200 		}
201 
202 		return dc_stream_get_vblank_counter(acrtc_state->stream);
203 	}
204 }
205 
206 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
207 				  u32 *vbl, u32 *position)
208 {
209 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
210 
211 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
212 		return -EINVAL;
213 	else {
214 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
215 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
216 						acrtc->base.state);
217 
218 		if (acrtc_state->stream ==  NULL) {
219 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
220 				  crtc);
221 			return 0;
222 		}
223 
224 		/*
225 		 * TODO rework base driver to use values directly.
226 		 * for now parse it back into reg-format
227 		 */
228 		dc_stream_get_scanoutpos(acrtc_state->stream,
229 					 &v_blank_start,
230 					 &v_blank_end,
231 					 &h_position,
232 					 &v_position);
233 
234 		*position = v_position | (h_position << 16);
235 		*vbl = v_blank_start | (v_blank_end << 16);
236 	}
237 
238 	return 0;
239 }
240 
241 static bool dm_is_idle(void *handle)
242 {
243 	/* XXX todo */
244 	return true;
245 }
246 
247 static int dm_wait_for_idle(void *handle)
248 {
249 	/* XXX todo */
250 	return 0;
251 }
252 
253 static bool dm_check_soft_reset(void *handle)
254 {
255 	return false;
256 }
257 
258 static int dm_soft_reset(void *handle)
259 {
260 	/* XXX todo */
261 	return 0;
262 }
263 
264 static struct amdgpu_crtc *
265 get_crtc_by_otg_inst(struct amdgpu_device *adev,
266 		     int otg_inst)
267 {
268 	struct drm_device *dev = adev->ddev;
269 	struct drm_crtc *crtc;
270 	struct amdgpu_crtc *amdgpu_crtc;
271 
272 	if (otg_inst == -1) {
273 		WARN_ON(1);
274 		return adev->mode_info.crtcs[0];
275 	}
276 
277 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
278 		amdgpu_crtc = to_amdgpu_crtc(crtc);
279 
280 		if (amdgpu_crtc->otg_inst == otg_inst)
281 			return amdgpu_crtc;
282 	}
283 
284 	return NULL;
285 }
286 
287 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
288 {
289 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
290 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
291 }
292 
293 /**
294  * dm_pflip_high_irq() - Handle pageflip interrupt
295  * @interrupt_params: ignored
296  *
297  * Handles the pageflip interrupt by notifying all interested parties
298  * that the pageflip has been completed.
299  */
300 static void dm_pflip_high_irq(void *interrupt_params)
301 {
302 	struct amdgpu_crtc *amdgpu_crtc;
303 	struct common_irq_params *irq_params = interrupt_params;
304 	struct amdgpu_device *adev = irq_params->adev;
305 	unsigned long flags;
306 	struct drm_pending_vblank_event *e;
307 	struct dm_crtc_state *acrtc_state;
308 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
309 	bool vrr_active;
310 
311 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
312 
313 	/* IRQ could occur when in initial stage */
314 	/* TODO work and BO cleanup */
315 	if (amdgpu_crtc == NULL) {
316 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
317 		return;
318 	}
319 
320 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
321 
322 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
323 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
324 						 amdgpu_crtc->pflip_status,
325 						 AMDGPU_FLIP_SUBMITTED,
326 						 amdgpu_crtc->crtc_id,
327 						 amdgpu_crtc);
328 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
329 		return;
330 	}
331 
332 	/* page flip completed. */
333 	e = amdgpu_crtc->event;
334 	amdgpu_crtc->event = NULL;
335 
336 	if (!e)
337 		WARN_ON(1);
338 
339 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
340 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
341 
342 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
343 	if (!vrr_active ||
344 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
345 				      &v_blank_end, &hpos, &vpos) ||
346 	    (vpos < v_blank_start)) {
347 		/* Update to correct count and vblank timestamp if racing with
348 		 * vblank irq. This also updates to the correct vblank timestamp
349 		 * even in VRR mode, as scanout is past the front-porch atm.
350 		 */
351 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
352 
353 		/* Wake up userspace by sending the pageflip event with proper
354 		 * count and timestamp of vblank of flip completion.
355 		 */
356 		if (e) {
357 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
358 
359 			/* Event sent, so done with vblank for this flip */
360 			drm_crtc_vblank_put(&amdgpu_crtc->base);
361 		}
362 	} else if (e) {
363 		/* VRR active and inside front-porch: vblank count and
364 		 * timestamp for pageflip event will only be up to date after
365 		 * drm_crtc_handle_vblank() has been executed from late vblank
366 		 * irq handler after start of back-porch (vline 0). We queue the
367 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
368 		 * updated timestamp and count, once it runs after us.
369 		 *
370 		 * We need to open-code this instead of using the helper
371 		 * drm_crtc_arm_vblank_event(), as that helper would
372 		 * call drm_crtc_accurate_vblank_count(), which we must
373 		 * not call in VRR mode while we are in front-porch!
374 		 */
375 
376 		/* sequence will be replaced by real count during send-out. */
377 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
378 		e->pipe = amdgpu_crtc->crtc_id;
379 
380 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
381 		e = NULL;
382 	}
383 
384 	/* Keep track of vblank of this flip for flip throttling. We use the
385 	 * cooked hw counter, as that one incremented at start of this vblank
386 	 * of pageflip completion, so last_flip_vblank is the forbidden count
387 	 * for queueing new pageflips if vsync + VRR is enabled.
388 	 */
389 	amdgpu_crtc->last_flip_vblank =
390 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
391 
392 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
393 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
394 
395 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
396 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
397 			 vrr_active, (int) !e);
398 }
399 
400 static void dm_vupdate_high_irq(void *interrupt_params)
401 {
402 	struct common_irq_params *irq_params = interrupt_params;
403 	struct amdgpu_device *adev = irq_params->adev;
404 	struct amdgpu_crtc *acrtc;
405 	struct dm_crtc_state *acrtc_state;
406 	unsigned long flags;
407 
408 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
409 
410 	if (acrtc) {
411 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
412 
413 		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
414 				 amdgpu_dm_vrr_active(acrtc_state));
415 
416 		/* Core vblank handling is done here after end of front-porch in
417 		 * vrr mode, as vblank timestamping will give valid results
418 		 * while now done after front-porch. This will also deliver
419 		 * page-flip completion events that have been queued to us
420 		 * if a pageflip happened inside front-porch.
421 		 */
422 		if (amdgpu_dm_vrr_active(acrtc_state)) {
423 			drm_crtc_handle_vblank(&acrtc->base);
424 
425 			/* BTR processing for pre-DCE12 ASICs */
426 			if (acrtc_state->stream &&
427 			    adev->family < AMDGPU_FAMILY_AI) {
428 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
429 				mod_freesync_handle_v_update(
430 				    adev->dm.freesync_module,
431 				    acrtc_state->stream,
432 				    &acrtc_state->vrr_params);
433 
434 				dc_stream_adjust_vmin_vmax(
435 				    adev->dm.dc,
436 				    acrtc_state->stream,
437 				    &acrtc_state->vrr_params.adjust);
438 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
439 			}
440 		}
441 	}
442 }
443 
444 /**
445  * dm_crtc_high_irq() - Handles CRTC interrupt
446  * @interrupt_params: ignored
447  *
448  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
449  * event handler.
450  */
451 static void dm_crtc_high_irq(void *interrupt_params)
452 {
453 	struct common_irq_params *irq_params = interrupt_params;
454 	struct amdgpu_device *adev = irq_params->adev;
455 	struct amdgpu_crtc *acrtc;
456 	struct dm_crtc_state *acrtc_state;
457 	unsigned long flags;
458 
459 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
460 
461 	if (acrtc) {
462 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
463 
464 		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
465 				 amdgpu_dm_vrr_active(acrtc_state));
466 
467 		/* Core vblank handling at start of front-porch is only possible
468 		 * in non-vrr mode, as only there vblank timestamping will give
469 		 * valid results while done in front-porch. Otherwise defer it
470 		 * to dm_vupdate_high_irq after end of front-porch.
471 		 */
472 		if (!amdgpu_dm_vrr_active(acrtc_state))
473 			drm_crtc_handle_vblank(&acrtc->base);
474 
475 		/* Following stuff must happen at start of vblank, for crc
476 		 * computation and below-the-range btr support in vrr mode.
477 		 */
478 		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
479 
480 		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
481 		    acrtc_state->vrr_params.supported &&
482 		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
483 			spin_lock_irqsave(&adev->ddev->event_lock, flags);
484 			mod_freesync_handle_v_update(
485 				adev->dm.freesync_module,
486 				acrtc_state->stream,
487 				&acrtc_state->vrr_params);
488 
489 			dc_stream_adjust_vmin_vmax(
490 				adev->dm.dc,
491 				acrtc_state->stream,
492 				&acrtc_state->vrr_params.adjust);
493 			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
494 		}
495 	}
496 }
497 
498 #if defined(CONFIG_DRM_AMD_DC_DCN)
499 /**
500  * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
501  * @interrupt params - interrupt parameters
502  *
503  * Notify DRM's vblank event handler at VSTARTUP
504  *
505  * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
506  * * We are close enough to VUPDATE - the point of no return for hw
507  * * We are in the fixed portion of variable front porch when vrr is enabled
508  * * We are before VUPDATE, where double-buffered vrr registers are swapped
509  *
510  * It is therefore the correct place to signal vblank, send user flip events,
511  * and update VRR.
512  */
513 static void dm_dcn_crtc_high_irq(void *interrupt_params)
514 {
515 	struct common_irq_params *irq_params = interrupt_params;
516 	struct amdgpu_device *adev = irq_params->adev;
517 	struct amdgpu_crtc *acrtc;
518 	struct dm_crtc_state *acrtc_state;
519 	unsigned long flags;
520 
521 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
522 
523 	if (!acrtc)
524 		return;
525 
526 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
527 
528 	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
529 				amdgpu_dm_vrr_active(acrtc_state));
530 
531 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532 	drm_crtc_handle_vblank(&acrtc->base);
533 
534 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
535 
536 	if (acrtc_state->vrr_params.supported &&
537 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
538 		mod_freesync_handle_v_update(
539 		adev->dm.freesync_module,
540 		acrtc_state->stream,
541 		&acrtc_state->vrr_params);
542 
543 		dc_stream_adjust_vmin_vmax(
544 			adev->dm.dc,
545 			acrtc_state->stream,
546 			&acrtc_state->vrr_params.adjust);
547 	}
548 
549 	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
550 		if (acrtc->event) {
551 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
552 			acrtc->event = NULL;
553 			drm_crtc_vblank_put(&acrtc->base);
554 		}
555 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
556 	}
557 
558 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
559 }
560 #endif
561 
562 static int dm_set_clockgating_state(void *handle,
563 		  enum amd_clockgating_state state)
564 {
565 	return 0;
566 }
567 
568 static int dm_set_powergating_state(void *handle,
569 		  enum amd_powergating_state state)
570 {
571 	return 0;
572 }
573 
574 /* Prototypes of private functions */
575 static int dm_early_init(void* handle);
576 
577 /* Allocate memory for FBC compressed data  */
578 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
579 {
580 	struct drm_device *dev = connector->dev;
581 	struct amdgpu_device *adev = dev->dev_private;
582 	struct dm_comressor_info *compressor = &adev->dm.compressor;
583 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
584 	struct drm_display_mode *mode;
585 	unsigned long max_size = 0;
586 
587 	if (adev->dm.dc->fbc_compressor == NULL)
588 		return;
589 
590 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
591 		return;
592 
593 	if (compressor->bo_ptr)
594 		return;
595 
596 
597 	list_for_each_entry(mode, &connector->modes, head) {
598 		if (max_size < mode->htotal * mode->vtotal)
599 			max_size = mode->htotal * mode->vtotal;
600 	}
601 
602 	if (max_size) {
603 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
604 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
605 			    &compressor->gpu_addr, &compressor->cpu_addr);
606 
607 		if (r)
608 			DRM_ERROR("DM: Failed to initialize FBC\n");
609 		else {
610 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
611 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
612 		}
613 
614 	}
615 
616 }
617 
618 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
619 					  int pipe, bool *enabled,
620 					  unsigned char *buf, int max_bytes)
621 {
622 	struct drm_device *dev = dev_get_drvdata(kdev);
623 	struct amdgpu_device *adev = dev->dev_private;
624 	struct drm_connector *connector;
625 	struct drm_connector_list_iter conn_iter;
626 	struct amdgpu_dm_connector *aconnector;
627 	int ret = 0;
628 
629 	*enabled = false;
630 
631 	mutex_lock(&adev->dm.audio_lock);
632 
633 	drm_connector_list_iter_begin(dev, &conn_iter);
634 	drm_for_each_connector_iter(connector, &conn_iter) {
635 		aconnector = to_amdgpu_dm_connector(connector);
636 		if (aconnector->audio_inst != port)
637 			continue;
638 
639 		*enabled = true;
640 		ret = drm_eld_size(connector->eld);
641 		memcpy(buf, connector->eld, min(max_bytes, ret));
642 
643 		break;
644 	}
645 	drm_connector_list_iter_end(&conn_iter);
646 
647 	mutex_unlock(&adev->dm.audio_lock);
648 
649 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
650 
651 	return ret;
652 }
653 
654 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
655 	.get_eld = amdgpu_dm_audio_component_get_eld,
656 };
657 
658 static int amdgpu_dm_audio_component_bind(struct device *kdev,
659 				       struct device *hda_kdev, void *data)
660 {
661 	struct drm_device *dev = dev_get_drvdata(kdev);
662 	struct amdgpu_device *adev = dev->dev_private;
663 	struct drm_audio_component *acomp = data;
664 
665 	acomp->ops = &amdgpu_dm_audio_component_ops;
666 	acomp->dev = kdev;
667 	adev->dm.audio_component = acomp;
668 
669 	return 0;
670 }
671 
672 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
673 					  struct device *hda_kdev, void *data)
674 {
675 	struct drm_device *dev = dev_get_drvdata(kdev);
676 	struct amdgpu_device *adev = dev->dev_private;
677 	struct drm_audio_component *acomp = data;
678 
679 	acomp->ops = NULL;
680 	acomp->dev = NULL;
681 	adev->dm.audio_component = NULL;
682 }
683 
684 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
685 	.bind	= amdgpu_dm_audio_component_bind,
686 	.unbind	= amdgpu_dm_audio_component_unbind,
687 };
688 
689 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
690 {
691 	int i, ret;
692 
693 	if (!amdgpu_audio)
694 		return 0;
695 
696 	adev->mode_info.audio.enabled = true;
697 
698 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
699 
700 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
701 		adev->mode_info.audio.pin[i].channels = -1;
702 		adev->mode_info.audio.pin[i].rate = -1;
703 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
704 		adev->mode_info.audio.pin[i].status_bits = 0;
705 		adev->mode_info.audio.pin[i].category_code = 0;
706 		adev->mode_info.audio.pin[i].connected = false;
707 		adev->mode_info.audio.pin[i].id =
708 			adev->dm.dc->res_pool->audios[i]->inst;
709 		adev->mode_info.audio.pin[i].offset = 0;
710 	}
711 
712 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
713 	if (ret < 0)
714 		return ret;
715 
716 	adev->dm.audio_registered = true;
717 
718 	return 0;
719 }
720 
721 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
722 {
723 	if (!amdgpu_audio)
724 		return;
725 
726 	if (!adev->mode_info.audio.enabled)
727 		return;
728 
729 	if (adev->dm.audio_registered) {
730 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
731 		adev->dm.audio_registered = false;
732 	}
733 
734 	/* TODO: Disable audio? */
735 
736 	adev->mode_info.audio.enabled = false;
737 }
738 
739 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
740 {
741 	struct drm_audio_component *acomp = adev->dm.audio_component;
742 
743 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
744 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
745 
746 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
747 						 pin, -1);
748 	}
749 }
750 
751 static int dm_dmub_hw_init(struct amdgpu_device *adev)
752 {
753 	const struct dmcub_firmware_header_v1_0 *hdr;
754 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
755 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
756 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
757 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
758 	struct abm *abm = adev->dm.dc->res_pool->abm;
759 	struct dmub_srv_hw_params hw_params;
760 	enum dmub_status status;
761 	const unsigned char *fw_inst_const, *fw_bss_data;
762 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
763 	bool has_hw_support;
764 
765 	if (!dmub_srv)
766 		/* DMUB isn't supported on the ASIC. */
767 		return 0;
768 
769 	if (!fb_info) {
770 		DRM_ERROR("No framebuffer info for DMUB service.\n");
771 		return -EINVAL;
772 	}
773 
774 	if (!dmub_fw) {
775 		/* Firmware required for DMUB support. */
776 		DRM_ERROR("No firmware provided for DMUB.\n");
777 		return -EINVAL;
778 	}
779 
780 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
781 	if (status != DMUB_STATUS_OK) {
782 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
783 		return -EINVAL;
784 	}
785 
786 	if (!has_hw_support) {
787 		DRM_INFO("DMUB unsupported on ASIC\n");
788 		return 0;
789 	}
790 
791 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
792 
793 	fw_inst_const = dmub_fw->data +
794 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
795 			PSP_HEADER_BYTES;
796 
797 	fw_bss_data = dmub_fw->data +
798 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
799 		      le32_to_cpu(hdr->inst_const_bytes);
800 
801 	/* Copy firmware and bios info into FB memory. */
802 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
803 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
804 
805 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
806 
807 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
808 	 * amdgpu_ucode_init_single_fw will load dmub firmware
809 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
810 	 * will be done by dm_dmub_hw_init
811 	 */
812 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
813 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
814 				fw_inst_const_size);
815 	}
816 
817 	memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
818 	       fw_bss_data_size);
819 
820 	/* Copy firmware bios info into FB memory. */
821 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
822 	       adev->bios_size);
823 
824 	/* Reset regions that need to be reset. */
825 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
826 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
827 
828 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
829 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
830 
831 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
832 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
833 
834 	/* Initialize hardware. */
835 	memset(&hw_params, 0, sizeof(hw_params));
836 	hw_params.fb_base = adev->gmc.fb_start;
837 	hw_params.fb_offset = adev->gmc.aper_base;
838 
839 	/* backdoor load firmware and trigger dmub running */
840 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
841 		hw_params.load_inst_const = true;
842 
843 	if (dmcu)
844 		hw_params.psp_version = dmcu->psp_version;
845 
846 	for (i = 0; i < fb_info->num_fb; ++i)
847 		hw_params.fb[i] = &fb_info->fb[i];
848 
849 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
850 	if (status != DMUB_STATUS_OK) {
851 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
852 		return -EINVAL;
853 	}
854 
855 	/* Wait for firmware load to finish. */
856 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
857 	if (status != DMUB_STATUS_OK)
858 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
859 
860 	/* Init DMCU and ABM if available. */
861 	if (dmcu && abm) {
862 		dmcu->funcs->dmcu_init(dmcu);
863 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
864 	}
865 
866 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
867 	if (!adev->dm.dc->ctx->dmub_srv) {
868 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
869 		return -ENOMEM;
870 	}
871 
872 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
873 		 adev->dm.dmcub_fw_version);
874 
875 	return 0;
876 }
877 
878 static int amdgpu_dm_init(struct amdgpu_device *adev)
879 {
880 	struct dc_init_data init_data;
881 #ifdef CONFIG_DRM_AMD_DC_HDCP
882 	struct dc_callback_init init_params;
883 #endif
884 	int r;
885 
886 	adev->dm.ddev = adev->ddev;
887 	adev->dm.adev = adev;
888 
889 	/* Zero all the fields */
890 	memset(&init_data, 0, sizeof(init_data));
891 #ifdef CONFIG_DRM_AMD_DC_HDCP
892 	memset(&init_params, 0, sizeof(init_params));
893 #endif
894 
895 	mutex_init(&adev->dm.dc_lock);
896 	mutex_init(&adev->dm.audio_lock);
897 
898 	if(amdgpu_dm_irq_init(adev)) {
899 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
900 		goto error;
901 	}
902 
903 	init_data.asic_id.chip_family = adev->family;
904 
905 	init_data.asic_id.pci_revision_id = adev->rev_id;
906 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
907 
908 	init_data.asic_id.vram_width = adev->gmc.vram_width;
909 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
910 	init_data.asic_id.atombios_base_address =
911 		adev->mode_info.atom_context->bios;
912 
913 	init_data.driver = adev;
914 
915 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
916 
917 	if (!adev->dm.cgs_device) {
918 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
919 		goto error;
920 	}
921 
922 	init_data.cgs_device = adev->dm.cgs_device;
923 
924 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
925 
926 	switch (adev->asic_type) {
927 	case CHIP_CARRIZO:
928 	case CHIP_STONEY:
929 	case CHIP_RAVEN:
930 	case CHIP_RENOIR:
931 		init_data.flags.gpu_vm_support = true;
932 		break;
933 	default:
934 		break;
935 	}
936 
937 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
938 		init_data.flags.fbc_support = true;
939 
940 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
941 		init_data.flags.multi_mon_pp_mclk_switch = true;
942 
943 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
944 		init_data.flags.disable_fractional_pwm = true;
945 
946 	init_data.flags.power_down_display_on_boot = true;
947 
948 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
949 
950 	/* Display Core create. */
951 	adev->dm.dc = dc_create(&init_data);
952 
953 	if (adev->dm.dc) {
954 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
955 	} else {
956 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
957 		goto error;
958 	}
959 
960 	r = dm_dmub_hw_init(adev);
961 	if (r) {
962 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
963 		goto error;
964 	}
965 
966 	dc_hardware_init(adev->dm.dc);
967 
968 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
969 	if (!adev->dm.freesync_module) {
970 		DRM_ERROR(
971 		"amdgpu: failed to initialize freesync_module.\n");
972 	} else
973 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
974 				adev->dm.freesync_module);
975 
976 	amdgpu_dm_init_color_mod();
977 
978 #ifdef CONFIG_DRM_AMD_DC_HDCP
979 	if (adev->asic_type >= CHIP_RAVEN) {
980 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
981 
982 		if (!adev->dm.hdcp_workqueue)
983 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
984 		else
985 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
986 
987 		dc_init_callbacks(adev->dm.dc, &init_params);
988 	}
989 #endif
990 	if (amdgpu_dm_initialize_drm_device(adev)) {
991 		DRM_ERROR(
992 		"amdgpu: failed to initialize sw for display support.\n");
993 		goto error;
994 	}
995 
996 	/* Update the actual used number of crtc */
997 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
998 
999 	/* TODO: Add_display_info? */
1000 
1001 	/* TODO use dynamic cursor width */
1002 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1003 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1004 
1005 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1006 		DRM_ERROR(
1007 		"amdgpu: failed to initialize sw for display support.\n");
1008 		goto error;
1009 	}
1010 
1011 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1012 
1013 	return 0;
1014 error:
1015 	amdgpu_dm_fini(adev);
1016 
1017 	return -EINVAL;
1018 }
1019 
1020 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1021 {
1022 	amdgpu_dm_audio_fini(adev);
1023 
1024 	amdgpu_dm_destroy_drm_device(&adev->dm);
1025 
1026 #ifdef CONFIG_DRM_AMD_DC_HDCP
1027 	if (adev->dm.hdcp_workqueue) {
1028 		hdcp_destroy(adev->dm.hdcp_workqueue);
1029 		adev->dm.hdcp_workqueue = NULL;
1030 	}
1031 
1032 	if (adev->dm.dc)
1033 		dc_deinit_callbacks(adev->dm.dc);
1034 #endif
1035 	if (adev->dm.dc->ctx->dmub_srv) {
1036 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1037 		adev->dm.dc->ctx->dmub_srv = NULL;
1038 	}
1039 
1040 	if (adev->dm.dmub_bo)
1041 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1042 				      &adev->dm.dmub_bo_gpu_addr,
1043 				      &adev->dm.dmub_bo_cpu_addr);
1044 
1045 	/* DC Destroy TODO: Replace destroy DAL */
1046 	if (adev->dm.dc)
1047 		dc_destroy(&adev->dm.dc);
1048 	/*
1049 	 * TODO: pageflip, vlank interrupt
1050 	 *
1051 	 * amdgpu_dm_irq_fini(adev);
1052 	 */
1053 
1054 	if (adev->dm.cgs_device) {
1055 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1056 		adev->dm.cgs_device = NULL;
1057 	}
1058 	if (adev->dm.freesync_module) {
1059 		mod_freesync_destroy(adev->dm.freesync_module);
1060 		adev->dm.freesync_module = NULL;
1061 	}
1062 
1063 	mutex_destroy(&adev->dm.audio_lock);
1064 	mutex_destroy(&adev->dm.dc_lock);
1065 
1066 	return;
1067 }
1068 
1069 static int load_dmcu_fw(struct amdgpu_device *adev)
1070 {
1071 	const char *fw_name_dmcu = NULL;
1072 	int r;
1073 	const struct dmcu_firmware_header_v1_0 *hdr;
1074 
1075 	switch(adev->asic_type) {
1076 	case CHIP_BONAIRE:
1077 	case CHIP_HAWAII:
1078 	case CHIP_KAVERI:
1079 	case CHIP_KABINI:
1080 	case CHIP_MULLINS:
1081 	case CHIP_TONGA:
1082 	case CHIP_FIJI:
1083 	case CHIP_CARRIZO:
1084 	case CHIP_STONEY:
1085 	case CHIP_POLARIS11:
1086 	case CHIP_POLARIS10:
1087 	case CHIP_POLARIS12:
1088 	case CHIP_VEGAM:
1089 	case CHIP_VEGA10:
1090 	case CHIP_VEGA12:
1091 	case CHIP_VEGA20:
1092 	case CHIP_NAVI10:
1093 	case CHIP_NAVI14:
1094 	case CHIP_RENOIR:
1095 		return 0;
1096 	case CHIP_NAVI12:
1097 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1098 		break;
1099 	case CHIP_RAVEN:
1100 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1101 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1102 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1103 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1104 		else
1105 			return 0;
1106 		break;
1107 	default:
1108 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1109 		return -EINVAL;
1110 	}
1111 
1112 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1113 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1114 		return 0;
1115 	}
1116 
1117 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1118 	if (r == -ENOENT) {
1119 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1120 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1121 		adev->dm.fw_dmcu = NULL;
1122 		return 0;
1123 	}
1124 	if (r) {
1125 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1126 			fw_name_dmcu);
1127 		return r;
1128 	}
1129 
1130 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1131 	if (r) {
1132 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1133 			fw_name_dmcu);
1134 		release_firmware(adev->dm.fw_dmcu);
1135 		adev->dm.fw_dmcu = NULL;
1136 		return r;
1137 	}
1138 
1139 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1140 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1141 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1142 	adev->firmware.fw_size +=
1143 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1144 
1145 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1146 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1147 	adev->firmware.fw_size +=
1148 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1149 
1150 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1151 
1152 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1153 
1154 	return 0;
1155 }
1156 
1157 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1158 {
1159 	struct amdgpu_device *adev = ctx;
1160 
1161 	return dm_read_reg(adev->dm.dc->ctx, address);
1162 }
1163 
1164 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1165 				     uint32_t value)
1166 {
1167 	struct amdgpu_device *adev = ctx;
1168 
1169 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1170 }
1171 
1172 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1173 {
1174 	struct dmub_srv_create_params create_params;
1175 	struct dmub_srv_region_params region_params;
1176 	struct dmub_srv_region_info region_info;
1177 	struct dmub_srv_fb_params fb_params;
1178 	struct dmub_srv_fb_info *fb_info;
1179 	struct dmub_srv *dmub_srv;
1180 	const struct dmcub_firmware_header_v1_0 *hdr;
1181 	const char *fw_name_dmub;
1182 	enum dmub_asic dmub_asic;
1183 	enum dmub_status status;
1184 	int r;
1185 
1186 	switch (adev->asic_type) {
1187 	case CHIP_RENOIR:
1188 		dmub_asic = DMUB_ASIC_DCN21;
1189 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1190 		break;
1191 
1192 	default:
1193 		/* ASIC doesn't support DMUB. */
1194 		return 0;
1195 	}
1196 
1197 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1198 	if (r) {
1199 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1200 		return 0;
1201 	}
1202 
1203 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1204 	if (r) {
1205 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1206 		return 0;
1207 	}
1208 
1209 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1210 
1211 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1212 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1213 			AMDGPU_UCODE_ID_DMCUB;
1214 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1215 			adev->dm.dmub_fw;
1216 		adev->firmware.fw_size +=
1217 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1218 
1219 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1220 			 adev->dm.dmcub_fw_version);
1221 	}
1222 
1223 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1224 
1225 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1226 	dmub_srv = adev->dm.dmub_srv;
1227 
1228 	if (!dmub_srv) {
1229 		DRM_ERROR("Failed to allocate DMUB service!\n");
1230 		return -ENOMEM;
1231 	}
1232 
1233 	memset(&create_params, 0, sizeof(create_params));
1234 	create_params.user_ctx = adev;
1235 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1236 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1237 	create_params.asic = dmub_asic;
1238 
1239 	/* Create the DMUB service. */
1240 	status = dmub_srv_create(dmub_srv, &create_params);
1241 	if (status != DMUB_STATUS_OK) {
1242 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1243 		return -EINVAL;
1244 	}
1245 
1246 	/* Calculate the size of all the regions for the DMUB service. */
1247 	memset(&region_params, 0, sizeof(region_params));
1248 
1249 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1250 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1251 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1252 	region_params.vbios_size = adev->bios_size;
1253 	region_params.fw_bss_data =
1254 		adev->dm.dmub_fw->data +
1255 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1256 		le32_to_cpu(hdr->inst_const_bytes);
1257 
1258 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1259 					   &region_info);
1260 
1261 	if (status != DMUB_STATUS_OK) {
1262 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1263 		return -EINVAL;
1264 	}
1265 
1266 	/*
1267 	 * Allocate a framebuffer based on the total size of all the regions.
1268 	 * TODO: Move this into GART.
1269 	 */
1270 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1271 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1272 				    &adev->dm.dmub_bo_gpu_addr,
1273 				    &adev->dm.dmub_bo_cpu_addr);
1274 	if (r)
1275 		return r;
1276 
1277 	/* Rebase the regions on the framebuffer address. */
1278 	memset(&fb_params, 0, sizeof(fb_params));
1279 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1280 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1281 	fb_params.region_info = &region_info;
1282 
1283 	adev->dm.dmub_fb_info =
1284 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1285 	fb_info = adev->dm.dmub_fb_info;
1286 
1287 	if (!fb_info) {
1288 		DRM_ERROR(
1289 			"Failed to allocate framebuffer info for DMUB service!\n");
1290 		return -ENOMEM;
1291 	}
1292 
1293 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1294 	if (status != DMUB_STATUS_OK) {
1295 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1296 		return -EINVAL;
1297 	}
1298 
1299 	return 0;
1300 }
1301 
1302 static int dm_sw_init(void *handle)
1303 {
1304 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1305 	int r;
1306 
1307 	r = dm_dmub_sw_init(adev);
1308 	if (r)
1309 		return r;
1310 
1311 	return load_dmcu_fw(adev);
1312 }
1313 
1314 static int dm_sw_fini(void *handle)
1315 {
1316 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1317 
1318 	kfree(adev->dm.dmub_fb_info);
1319 	adev->dm.dmub_fb_info = NULL;
1320 
1321 	if (adev->dm.dmub_srv) {
1322 		dmub_srv_destroy(adev->dm.dmub_srv);
1323 		adev->dm.dmub_srv = NULL;
1324 	}
1325 
1326 	if (adev->dm.dmub_fw) {
1327 		release_firmware(adev->dm.dmub_fw);
1328 		adev->dm.dmub_fw = NULL;
1329 	}
1330 
1331 	if(adev->dm.fw_dmcu) {
1332 		release_firmware(adev->dm.fw_dmcu);
1333 		adev->dm.fw_dmcu = NULL;
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1340 {
1341 	struct amdgpu_dm_connector *aconnector;
1342 	struct drm_connector *connector;
1343 	struct drm_connector_list_iter iter;
1344 	int ret = 0;
1345 
1346 	drm_connector_list_iter_begin(dev, &iter);
1347 	drm_for_each_connector_iter(connector, &iter) {
1348 		aconnector = to_amdgpu_dm_connector(connector);
1349 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1350 		    aconnector->mst_mgr.aux) {
1351 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1352 					 aconnector,
1353 					 aconnector->base.base.id);
1354 
1355 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1356 			if (ret < 0) {
1357 				DRM_ERROR("DM_MST: Failed to start MST\n");
1358 				aconnector->dc_link->type =
1359 					dc_connection_single;
1360 				break;
1361 			}
1362 		}
1363 	}
1364 	drm_connector_list_iter_end(&iter);
1365 
1366 	return ret;
1367 }
1368 
1369 static int dm_late_init(void *handle)
1370 {
1371 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1372 
1373 	struct dmcu_iram_parameters params;
1374 	unsigned int linear_lut[16];
1375 	int i;
1376 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1377 	bool ret = false;
1378 
1379 	for (i = 0; i < 16; i++)
1380 		linear_lut[i] = 0xFFFF * i / 15;
1381 
1382 	params.set = 0;
1383 	params.backlight_ramping_start = 0xCCCC;
1384 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1385 	params.backlight_lut_array_size = 16;
1386 	params.backlight_lut_array = linear_lut;
1387 
1388 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1389 	 * 0xFFFF x 0.01 = 0x28F
1390 	 */
1391 	params.min_abm_backlight = 0x28F;
1392 
1393 	/* todo will enable for navi10 */
1394 	if (adev->asic_type <= CHIP_RAVEN) {
1395 		ret = dmcu_load_iram(dmcu, params);
1396 
1397 		if (!ret)
1398 			return -EINVAL;
1399 	}
1400 
1401 	return detect_mst_link_for_all_connectors(adev->ddev);
1402 }
1403 
1404 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1405 {
1406 	struct amdgpu_dm_connector *aconnector;
1407 	struct drm_connector *connector;
1408 	struct drm_connector_list_iter iter;
1409 	struct drm_dp_mst_topology_mgr *mgr;
1410 	int ret;
1411 	bool need_hotplug = false;
1412 
1413 	drm_connector_list_iter_begin(dev, &iter);
1414 	drm_for_each_connector_iter(connector, &iter) {
1415 		aconnector = to_amdgpu_dm_connector(connector);
1416 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1417 		    aconnector->mst_port)
1418 			continue;
1419 
1420 		mgr = &aconnector->mst_mgr;
1421 
1422 		if (suspend) {
1423 			drm_dp_mst_topology_mgr_suspend(mgr);
1424 		} else {
1425 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1426 			if (ret < 0) {
1427 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1428 				need_hotplug = true;
1429 			}
1430 		}
1431 	}
1432 	drm_connector_list_iter_end(&iter);
1433 
1434 	if (need_hotplug)
1435 		drm_kms_helper_hotplug_event(dev);
1436 }
1437 
1438 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1439 {
1440 	struct smu_context *smu = &adev->smu;
1441 	int ret = 0;
1442 
1443 	if (!is_support_sw_smu(adev))
1444 		return 0;
1445 
1446 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1447 	 * on window driver dc implementation.
1448 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1449 	 * should be passed to smu during boot up and resume from s3.
1450 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1451 	 * dcn20_resource_construct
1452 	 * then call pplib functions below to pass the settings to smu:
1453 	 * smu_set_watermarks_for_clock_ranges
1454 	 * smu_set_watermarks_table
1455 	 * navi10_set_watermarks_table
1456 	 * smu_write_watermarks_table
1457 	 *
1458 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1459 	 * dc has implemented different flow for window driver:
1460 	 * dc_hardware_init / dc_set_power_state
1461 	 * dcn10_init_hw
1462 	 * notify_wm_ranges
1463 	 * set_wm_ranges
1464 	 * -- Linux
1465 	 * smu_set_watermarks_for_clock_ranges
1466 	 * renoir_set_watermarks_table
1467 	 * smu_write_watermarks_table
1468 	 *
1469 	 * For Linux,
1470 	 * dc_hardware_init -> amdgpu_dm_init
1471 	 * dc_set_power_state --> dm_resume
1472 	 *
1473 	 * therefore, this function apply to navi10/12/14 but not Renoir
1474 	 * *
1475 	 */
1476 	switch(adev->asic_type) {
1477 	case CHIP_NAVI10:
1478 	case CHIP_NAVI14:
1479 	case CHIP_NAVI12:
1480 		break;
1481 	default:
1482 		return 0;
1483 	}
1484 
1485 	mutex_lock(&smu->mutex);
1486 
1487 	/* pass data to smu controller */
1488 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1489 			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1490 		ret = smu_write_watermarks_table(smu);
1491 
1492 		if (ret) {
1493 			mutex_unlock(&smu->mutex);
1494 			DRM_ERROR("Failed to update WMTABLE!\n");
1495 			return ret;
1496 		}
1497 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1498 	}
1499 
1500 	mutex_unlock(&smu->mutex);
1501 
1502 	return 0;
1503 }
1504 
1505 /**
1506  * dm_hw_init() - Initialize DC device
1507  * @handle: The base driver device containing the amdgpu_dm device.
1508  *
1509  * Initialize the &struct amdgpu_display_manager device. This involves calling
1510  * the initializers of each DM component, then populating the struct with them.
1511  *
1512  * Although the function implies hardware initialization, both hardware and
1513  * software are initialized here. Splitting them out to their relevant init
1514  * hooks is a future TODO item.
1515  *
1516  * Some notable things that are initialized here:
1517  *
1518  * - Display Core, both software and hardware
1519  * - DC modules that we need (freesync and color management)
1520  * - DRM software states
1521  * - Interrupt sources and handlers
1522  * - Vblank support
1523  * - Debug FS entries, if enabled
1524  */
1525 static int dm_hw_init(void *handle)
1526 {
1527 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1528 	/* Create DAL display manager */
1529 	amdgpu_dm_init(adev);
1530 	amdgpu_dm_hpd_init(adev);
1531 
1532 	return 0;
1533 }
1534 
1535 /**
1536  * dm_hw_fini() - Teardown DC device
1537  * @handle: The base driver device containing the amdgpu_dm device.
1538  *
1539  * Teardown components within &struct amdgpu_display_manager that require
1540  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1541  * were loaded. Also flush IRQ workqueues and disable them.
1542  */
1543 static int dm_hw_fini(void *handle)
1544 {
1545 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1546 
1547 	amdgpu_dm_hpd_fini(adev);
1548 
1549 	amdgpu_dm_irq_fini(adev);
1550 	amdgpu_dm_fini(adev);
1551 	return 0;
1552 }
1553 
1554 static int dm_suspend(void *handle)
1555 {
1556 	struct amdgpu_device *adev = handle;
1557 	struct amdgpu_display_manager *dm = &adev->dm;
1558 	int ret = 0;
1559 
1560 	WARN_ON(adev->dm.cached_state);
1561 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1562 
1563 	s3_handle_mst(adev->ddev, true);
1564 
1565 	amdgpu_dm_irq_suspend(adev);
1566 
1567 
1568 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1569 
1570 	return ret;
1571 }
1572 
1573 static struct amdgpu_dm_connector *
1574 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1575 					     struct drm_crtc *crtc)
1576 {
1577 	uint32_t i;
1578 	struct drm_connector_state *new_con_state;
1579 	struct drm_connector *connector;
1580 	struct drm_crtc *crtc_from_state;
1581 
1582 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1583 		crtc_from_state = new_con_state->crtc;
1584 
1585 		if (crtc_from_state == crtc)
1586 			return to_amdgpu_dm_connector(connector);
1587 	}
1588 
1589 	return NULL;
1590 }
1591 
1592 static void emulated_link_detect(struct dc_link *link)
1593 {
1594 	struct dc_sink_init_data sink_init_data = { 0 };
1595 	struct display_sink_capability sink_caps = { 0 };
1596 	enum dc_edid_status edid_status;
1597 	struct dc_context *dc_ctx = link->ctx;
1598 	struct dc_sink *sink = NULL;
1599 	struct dc_sink *prev_sink = NULL;
1600 
1601 	link->type = dc_connection_none;
1602 	prev_sink = link->local_sink;
1603 
1604 	if (prev_sink != NULL)
1605 		dc_sink_retain(prev_sink);
1606 
1607 	switch (link->connector_signal) {
1608 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1609 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1610 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1611 		break;
1612 	}
1613 
1614 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1615 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1616 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1617 		break;
1618 	}
1619 
1620 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1621 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1622 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1623 		break;
1624 	}
1625 
1626 	case SIGNAL_TYPE_LVDS: {
1627 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1628 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1629 		break;
1630 	}
1631 
1632 	case SIGNAL_TYPE_EDP: {
1633 		sink_caps.transaction_type =
1634 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1635 		sink_caps.signal = SIGNAL_TYPE_EDP;
1636 		break;
1637 	}
1638 
1639 	case SIGNAL_TYPE_DISPLAY_PORT: {
1640 		sink_caps.transaction_type =
1641 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1642 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1643 		break;
1644 	}
1645 
1646 	default:
1647 		DC_ERROR("Invalid connector type! signal:%d\n",
1648 			link->connector_signal);
1649 		return;
1650 	}
1651 
1652 	sink_init_data.link = link;
1653 	sink_init_data.sink_signal = sink_caps.signal;
1654 
1655 	sink = dc_sink_create(&sink_init_data);
1656 	if (!sink) {
1657 		DC_ERROR("Failed to create sink!\n");
1658 		return;
1659 	}
1660 
1661 	/* dc_sink_create returns a new reference */
1662 	link->local_sink = sink;
1663 
1664 	edid_status = dm_helpers_read_local_edid(
1665 			link->ctx,
1666 			link,
1667 			sink);
1668 
1669 	if (edid_status != EDID_OK)
1670 		DC_ERROR("Failed to read EDID");
1671 
1672 }
1673 
1674 static int dm_resume(void *handle)
1675 {
1676 	struct amdgpu_device *adev = handle;
1677 	struct drm_device *ddev = adev->ddev;
1678 	struct amdgpu_display_manager *dm = &adev->dm;
1679 	struct amdgpu_dm_connector *aconnector;
1680 	struct drm_connector *connector;
1681 	struct drm_connector_list_iter iter;
1682 	struct drm_crtc *crtc;
1683 	struct drm_crtc_state *new_crtc_state;
1684 	struct dm_crtc_state *dm_new_crtc_state;
1685 	struct drm_plane *plane;
1686 	struct drm_plane_state *new_plane_state;
1687 	struct dm_plane_state *dm_new_plane_state;
1688 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1689 	enum dc_connection_type new_connection_type = dc_connection_none;
1690 	int i, r;
1691 
1692 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1693 	dc_release_state(dm_state->context);
1694 	dm_state->context = dc_create_state(dm->dc);
1695 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1696 	dc_resource_state_construct(dm->dc, dm_state->context);
1697 
1698 	/* Before powering on DC we need to re-initialize DMUB. */
1699 	r = dm_dmub_hw_init(adev);
1700 	if (r)
1701 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1702 
1703 	/* power on hardware */
1704 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1705 
1706 	/* program HPD filter */
1707 	dc_resume(dm->dc);
1708 
1709 	/*
1710 	 * early enable HPD Rx IRQ, should be done before set mode as short
1711 	 * pulse interrupts are used for MST
1712 	 */
1713 	amdgpu_dm_irq_resume_early(adev);
1714 
1715 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1716 	s3_handle_mst(ddev, false);
1717 
1718 	/* Do detection*/
1719 	drm_connector_list_iter_begin(ddev, &iter);
1720 	drm_for_each_connector_iter(connector, &iter) {
1721 		aconnector = to_amdgpu_dm_connector(connector);
1722 
1723 		/*
1724 		 * this is the case when traversing through already created
1725 		 * MST connectors, should be skipped
1726 		 */
1727 		if (aconnector->mst_port)
1728 			continue;
1729 
1730 		mutex_lock(&aconnector->hpd_lock);
1731 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1732 			DRM_ERROR("KMS: Failed to detect connector\n");
1733 
1734 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1735 			emulated_link_detect(aconnector->dc_link);
1736 		else
1737 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1738 
1739 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1740 			aconnector->fake_enable = false;
1741 
1742 		if (aconnector->dc_sink)
1743 			dc_sink_release(aconnector->dc_sink);
1744 		aconnector->dc_sink = NULL;
1745 		amdgpu_dm_update_connector_after_detect(aconnector);
1746 		mutex_unlock(&aconnector->hpd_lock);
1747 	}
1748 	drm_connector_list_iter_end(&iter);
1749 
1750 	/* Force mode set in atomic commit */
1751 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1752 		new_crtc_state->active_changed = true;
1753 
1754 	/*
1755 	 * atomic_check is expected to create the dc states. We need to release
1756 	 * them here, since they were duplicated as part of the suspend
1757 	 * procedure.
1758 	 */
1759 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1760 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1761 		if (dm_new_crtc_state->stream) {
1762 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1763 			dc_stream_release(dm_new_crtc_state->stream);
1764 			dm_new_crtc_state->stream = NULL;
1765 		}
1766 	}
1767 
1768 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1769 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1770 		if (dm_new_plane_state->dc_state) {
1771 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1772 			dc_plane_state_release(dm_new_plane_state->dc_state);
1773 			dm_new_plane_state->dc_state = NULL;
1774 		}
1775 	}
1776 
1777 	drm_atomic_helper_resume(ddev, dm->cached_state);
1778 
1779 	dm->cached_state = NULL;
1780 
1781 	amdgpu_dm_irq_resume_late(adev);
1782 
1783 	amdgpu_dm_smu_write_watermarks_table(adev);
1784 
1785 	return 0;
1786 }
1787 
1788 /**
1789  * DOC: DM Lifecycle
1790  *
1791  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1792  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1793  * the base driver's device list to be initialized and torn down accordingly.
1794  *
1795  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1796  */
1797 
1798 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1799 	.name = "dm",
1800 	.early_init = dm_early_init,
1801 	.late_init = dm_late_init,
1802 	.sw_init = dm_sw_init,
1803 	.sw_fini = dm_sw_fini,
1804 	.hw_init = dm_hw_init,
1805 	.hw_fini = dm_hw_fini,
1806 	.suspend = dm_suspend,
1807 	.resume = dm_resume,
1808 	.is_idle = dm_is_idle,
1809 	.wait_for_idle = dm_wait_for_idle,
1810 	.check_soft_reset = dm_check_soft_reset,
1811 	.soft_reset = dm_soft_reset,
1812 	.set_clockgating_state = dm_set_clockgating_state,
1813 	.set_powergating_state = dm_set_powergating_state,
1814 };
1815 
1816 const struct amdgpu_ip_block_version dm_ip_block =
1817 {
1818 	.type = AMD_IP_BLOCK_TYPE_DCE,
1819 	.major = 1,
1820 	.minor = 0,
1821 	.rev = 0,
1822 	.funcs = &amdgpu_dm_funcs,
1823 };
1824 
1825 
1826 /**
1827  * DOC: atomic
1828  *
1829  * *WIP*
1830  */
1831 
1832 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1833 	.fb_create = amdgpu_display_user_framebuffer_create,
1834 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1835 	.atomic_check = amdgpu_dm_atomic_check,
1836 	.atomic_commit = amdgpu_dm_atomic_commit,
1837 };
1838 
1839 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1840 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1841 };
1842 
1843 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1844 {
1845 	u32 max_cll, min_cll, max, min, q, r;
1846 	struct amdgpu_dm_backlight_caps *caps;
1847 	struct amdgpu_display_manager *dm;
1848 	struct drm_connector *conn_base;
1849 	struct amdgpu_device *adev;
1850 	static const u8 pre_computed_values[] = {
1851 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1852 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1853 
1854 	if (!aconnector || !aconnector->dc_link)
1855 		return;
1856 
1857 	conn_base = &aconnector->base;
1858 	adev = conn_base->dev->dev_private;
1859 	dm = &adev->dm;
1860 	caps = &dm->backlight_caps;
1861 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1862 	caps->aux_support = false;
1863 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1864 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1865 
1866 	if (caps->ext_caps->bits.oled == 1 ||
1867 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1868 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1869 		caps->aux_support = true;
1870 
1871 	/* From the specification (CTA-861-G), for calculating the maximum
1872 	 * luminance we need to use:
1873 	 *	Luminance = 50*2**(CV/32)
1874 	 * Where CV is a one-byte value.
1875 	 * For calculating this expression we may need float point precision;
1876 	 * to avoid this complexity level, we take advantage that CV is divided
1877 	 * by a constant. From the Euclids division algorithm, we know that CV
1878 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
1879 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
1880 	 * need to pre-compute the value of r/32. For pre-computing the values
1881 	 * We just used the following Ruby line:
1882 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1883 	 * The results of the above expressions can be verified at
1884 	 * pre_computed_values.
1885 	 */
1886 	q = max_cll >> 5;
1887 	r = max_cll % 32;
1888 	max = (1 << q) * pre_computed_values[r];
1889 
1890 	// min luminance: maxLum * (CV/255)^2 / 100
1891 	q = DIV_ROUND_CLOSEST(min_cll, 255);
1892 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
1893 
1894 	caps->aux_max_input_signal = max;
1895 	caps->aux_min_input_signal = min;
1896 }
1897 
1898 static void
1899 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1900 {
1901 	struct drm_connector *connector = &aconnector->base;
1902 	struct drm_device *dev = connector->dev;
1903 	struct dc_sink *sink;
1904 
1905 	/* MST handled by drm_mst framework */
1906 	if (aconnector->mst_mgr.mst_state == true)
1907 		return;
1908 
1909 
1910 	sink = aconnector->dc_link->local_sink;
1911 	if (sink)
1912 		dc_sink_retain(sink);
1913 
1914 	/*
1915 	 * Edid mgmt connector gets first update only in mode_valid hook and then
1916 	 * the connector sink is set to either fake or physical sink depends on link status.
1917 	 * Skip if already done during boot.
1918 	 */
1919 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1920 			&& aconnector->dc_em_sink) {
1921 
1922 		/*
1923 		 * For S3 resume with headless use eml_sink to fake stream
1924 		 * because on resume connector->sink is set to NULL
1925 		 */
1926 		mutex_lock(&dev->mode_config.mutex);
1927 
1928 		if (sink) {
1929 			if (aconnector->dc_sink) {
1930 				amdgpu_dm_update_freesync_caps(connector, NULL);
1931 				/*
1932 				 * retain and release below are used to
1933 				 * bump up refcount for sink because the link doesn't point
1934 				 * to it anymore after disconnect, so on next crtc to connector
1935 				 * reshuffle by UMD we will get into unwanted dc_sink release
1936 				 */
1937 				dc_sink_release(aconnector->dc_sink);
1938 			}
1939 			aconnector->dc_sink = sink;
1940 			dc_sink_retain(aconnector->dc_sink);
1941 			amdgpu_dm_update_freesync_caps(connector,
1942 					aconnector->edid);
1943 		} else {
1944 			amdgpu_dm_update_freesync_caps(connector, NULL);
1945 			if (!aconnector->dc_sink) {
1946 				aconnector->dc_sink = aconnector->dc_em_sink;
1947 				dc_sink_retain(aconnector->dc_sink);
1948 			}
1949 		}
1950 
1951 		mutex_unlock(&dev->mode_config.mutex);
1952 
1953 		if (sink)
1954 			dc_sink_release(sink);
1955 		return;
1956 	}
1957 
1958 	/*
1959 	 * TODO: temporary guard to look for proper fix
1960 	 * if this sink is MST sink, we should not do anything
1961 	 */
1962 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1963 		dc_sink_release(sink);
1964 		return;
1965 	}
1966 
1967 	if (aconnector->dc_sink == sink) {
1968 		/*
1969 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1970 		 * Do nothing!!
1971 		 */
1972 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1973 				aconnector->connector_id);
1974 		if (sink)
1975 			dc_sink_release(sink);
1976 		return;
1977 	}
1978 
1979 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1980 		aconnector->connector_id, aconnector->dc_sink, sink);
1981 
1982 	mutex_lock(&dev->mode_config.mutex);
1983 
1984 	/*
1985 	 * 1. Update status of the drm connector
1986 	 * 2. Send an event and let userspace tell us what to do
1987 	 */
1988 	if (sink) {
1989 		/*
1990 		 * TODO: check if we still need the S3 mode update workaround.
1991 		 * If yes, put it here.
1992 		 */
1993 		if (aconnector->dc_sink)
1994 			amdgpu_dm_update_freesync_caps(connector, NULL);
1995 
1996 		aconnector->dc_sink = sink;
1997 		dc_sink_retain(aconnector->dc_sink);
1998 		if (sink->dc_edid.length == 0) {
1999 			aconnector->edid = NULL;
2000 			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2001 		} else {
2002 			aconnector->edid =
2003 				(struct edid *) sink->dc_edid.raw_edid;
2004 
2005 
2006 			drm_connector_update_edid_property(connector,
2007 					aconnector->edid);
2008 			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2009 					    aconnector->edid);
2010 		}
2011 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2012 		update_connector_ext_caps(aconnector);
2013 	} else {
2014 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2015 		amdgpu_dm_update_freesync_caps(connector, NULL);
2016 		drm_connector_update_edid_property(connector, NULL);
2017 		aconnector->num_modes = 0;
2018 		dc_sink_release(aconnector->dc_sink);
2019 		aconnector->dc_sink = NULL;
2020 		aconnector->edid = NULL;
2021 #ifdef CONFIG_DRM_AMD_DC_HDCP
2022 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2023 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2024 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2025 #endif
2026 	}
2027 
2028 	mutex_unlock(&dev->mode_config.mutex);
2029 
2030 	if (sink)
2031 		dc_sink_release(sink);
2032 }
2033 
2034 static void handle_hpd_irq(void *param)
2035 {
2036 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2037 	struct drm_connector *connector = &aconnector->base;
2038 	struct drm_device *dev = connector->dev;
2039 	enum dc_connection_type new_connection_type = dc_connection_none;
2040 #ifdef CONFIG_DRM_AMD_DC_HDCP
2041 	struct amdgpu_device *adev = dev->dev_private;
2042 #endif
2043 
2044 	/*
2045 	 * In case of failure or MST no need to update connector status or notify the OS
2046 	 * since (for MST case) MST does this in its own context.
2047 	 */
2048 	mutex_lock(&aconnector->hpd_lock);
2049 
2050 #ifdef CONFIG_DRM_AMD_DC_HDCP
2051 	if (adev->dm.hdcp_workqueue)
2052 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2053 #endif
2054 	if (aconnector->fake_enable)
2055 		aconnector->fake_enable = false;
2056 
2057 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2058 		DRM_ERROR("KMS: Failed to detect connector\n");
2059 
2060 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2061 		emulated_link_detect(aconnector->dc_link);
2062 
2063 
2064 		drm_modeset_lock_all(dev);
2065 		dm_restore_drm_connector_state(dev, connector);
2066 		drm_modeset_unlock_all(dev);
2067 
2068 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2069 			drm_kms_helper_hotplug_event(dev);
2070 
2071 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2072 		amdgpu_dm_update_connector_after_detect(aconnector);
2073 
2074 
2075 		drm_modeset_lock_all(dev);
2076 		dm_restore_drm_connector_state(dev, connector);
2077 		drm_modeset_unlock_all(dev);
2078 
2079 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2080 			drm_kms_helper_hotplug_event(dev);
2081 	}
2082 	mutex_unlock(&aconnector->hpd_lock);
2083 
2084 }
2085 
2086 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2087 {
2088 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2089 	uint8_t dret;
2090 	bool new_irq_handled = false;
2091 	int dpcd_addr;
2092 	int dpcd_bytes_to_read;
2093 
2094 	const int max_process_count = 30;
2095 	int process_count = 0;
2096 
2097 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2098 
2099 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2100 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2101 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2102 		dpcd_addr = DP_SINK_COUNT;
2103 	} else {
2104 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2105 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2106 		dpcd_addr = DP_SINK_COUNT_ESI;
2107 	}
2108 
2109 	dret = drm_dp_dpcd_read(
2110 		&aconnector->dm_dp_aux.aux,
2111 		dpcd_addr,
2112 		esi,
2113 		dpcd_bytes_to_read);
2114 
2115 	while (dret == dpcd_bytes_to_read &&
2116 		process_count < max_process_count) {
2117 		uint8_t retry;
2118 		dret = 0;
2119 
2120 		process_count++;
2121 
2122 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2123 		/* handle HPD short pulse irq */
2124 		if (aconnector->mst_mgr.mst_state)
2125 			drm_dp_mst_hpd_irq(
2126 				&aconnector->mst_mgr,
2127 				esi,
2128 				&new_irq_handled);
2129 
2130 		if (new_irq_handled) {
2131 			/* ACK at DPCD to notify down stream */
2132 			const int ack_dpcd_bytes_to_write =
2133 				dpcd_bytes_to_read - 1;
2134 
2135 			for (retry = 0; retry < 3; retry++) {
2136 				uint8_t wret;
2137 
2138 				wret = drm_dp_dpcd_write(
2139 					&aconnector->dm_dp_aux.aux,
2140 					dpcd_addr + 1,
2141 					&esi[1],
2142 					ack_dpcd_bytes_to_write);
2143 				if (wret == ack_dpcd_bytes_to_write)
2144 					break;
2145 			}
2146 
2147 			/* check if there is new irq to be handled */
2148 			dret = drm_dp_dpcd_read(
2149 				&aconnector->dm_dp_aux.aux,
2150 				dpcd_addr,
2151 				esi,
2152 				dpcd_bytes_to_read);
2153 
2154 			new_irq_handled = false;
2155 		} else {
2156 			break;
2157 		}
2158 	}
2159 
2160 	if (process_count == max_process_count)
2161 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2162 }
2163 
2164 static void handle_hpd_rx_irq(void *param)
2165 {
2166 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2167 	struct drm_connector *connector = &aconnector->base;
2168 	struct drm_device *dev = connector->dev;
2169 	struct dc_link *dc_link = aconnector->dc_link;
2170 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2171 	enum dc_connection_type new_connection_type = dc_connection_none;
2172 #ifdef CONFIG_DRM_AMD_DC_HDCP
2173 	union hpd_irq_data hpd_irq_data;
2174 	struct amdgpu_device *adev = dev->dev_private;
2175 
2176 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2177 #endif
2178 
2179 	/*
2180 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2181 	 * conflict, after implement i2c helper, this mutex should be
2182 	 * retired.
2183 	 */
2184 	if (dc_link->type != dc_connection_mst_branch)
2185 		mutex_lock(&aconnector->hpd_lock);
2186 
2187 
2188 #ifdef CONFIG_DRM_AMD_DC_HDCP
2189 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2190 #else
2191 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2192 #endif
2193 			!is_mst_root_connector) {
2194 		/* Downstream Port status changed. */
2195 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2196 			DRM_ERROR("KMS: Failed to detect connector\n");
2197 
2198 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2199 			emulated_link_detect(dc_link);
2200 
2201 			if (aconnector->fake_enable)
2202 				aconnector->fake_enable = false;
2203 
2204 			amdgpu_dm_update_connector_after_detect(aconnector);
2205 
2206 
2207 			drm_modeset_lock_all(dev);
2208 			dm_restore_drm_connector_state(dev, connector);
2209 			drm_modeset_unlock_all(dev);
2210 
2211 			drm_kms_helper_hotplug_event(dev);
2212 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2213 
2214 			if (aconnector->fake_enable)
2215 				aconnector->fake_enable = false;
2216 
2217 			amdgpu_dm_update_connector_after_detect(aconnector);
2218 
2219 
2220 			drm_modeset_lock_all(dev);
2221 			dm_restore_drm_connector_state(dev, connector);
2222 			drm_modeset_unlock_all(dev);
2223 
2224 			drm_kms_helper_hotplug_event(dev);
2225 		}
2226 	}
2227 #ifdef CONFIG_DRM_AMD_DC_HDCP
2228 	    if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2229 		    if (adev->dm.hdcp_workqueue)
2230 			    hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2231 	    }
2232 #endif
2233 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2234 	    (dc_link->type == dc_connection_mst_branch))
2235 		dm_handle_hpd_rx_irq(aconnector);
2236 
2237 	if (dc_link->type != dc_connection_mst_branch) {
2238 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2239 		mutex_unlock(&aconnector->hpd_lock);
2240 	}
2241 }
2242 
2243 static void register_hpd_handlers(struct amdgpu_device *adev)
2244 {
2245 	struct drm_device *dev = adev->ddev;
2246 	struct drm_connector *connector;
2247 	struct amdgpu_dm_connector *aconnector;
2248 	const struct dc_link *dc_link;
2249 	struct dc_interrupt_params int_params = {0};
2250 
2251 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2252 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2253 
2254 	list_for_each_entry(connector,
2255 			&dev->mode_config.connector_list, head)	{
2256 
2257 		aconnector = to_amdgpu_dm_connector(connector);
2258 		dc_link = aconnector->dc_link;
2259 
2260 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2261 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2262 			int_params.irq_source = dc_link->irq_source_hpd;
2263 
2264 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2265 					handle_hpd_irq,
2266 					(void *) aconnector);
2267 		}
2268 
2269 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2270 
2271 			/* Also register for DP short pulse (hpd_rx). */
2272 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2273 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2274 
2275 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2276 					handle_hpd_rx_irq,
2277 					(void *) aconnector);
2278 		}
2279 	}
2280 }
2281 
2282 /* Register IRQ sources and initialize IRQ callbacks */
2283 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2284 {
2285 	struct dc *dc = adev->dm.dc;
2286 	struct common_irq_params *c_irq_params;
2287 	struct dc_interrupt_params int_params = {0};
2288 	int r;
2289 	int i;
2290 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2291 
2292 	if (adev->asic_type >= CHIP_VEGA10)
2293 		client_id = SOC15_IH_CLIENTID_DCE;
2294 
2295 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2296 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2297 
2298 	/*
2299 	 * Actions of amdgpu_irq_add_id():
2300 	 * 1. Register a set() function with base driver.
2301 	 *    Base driver will call set() function to enable/disable an
2302 	 *    interrupt in DC hardware.
2303 	 * 2. Register amdgpu_dm_irq_handler().
2304 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2305 	 *    coming from DC hardware.
2306 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2307 	 *    for acknowledging and handling. */
2308 
2309 	/* Use VBLANK interrupt */
2310 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2311 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2312 		if (r) {
2313 			DRM_ERROR("Failed to add crtc irq id!\n");
2314 			return r;
2315 		}
2316 
2317 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2318 		int_params.irq_source =
2319 			dc_interrupt_to_irq_source(dc, i, 0);
2320 
2321 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2322 
2323 		c_irq_params->adev = adev;
2324 		c_irq_params->irq_src = int_params.irq_source;
2325 
2326 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2327 				dm_crtc_high_irq, c_irq_params);
2328 	}
2329 
2330 	/* Use VUPDATE interrupt */
2331 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2332 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2333 		if (r) {
2334 			DRM_ERROR("Failed to add vupdate irq id!\n");
2335 			return r;
2336 		}
2337 
2338 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2339 		int_params.irq_source =
2340 			dc_interrupt_to_irq_source(dc, i, 0);
2341 
2342 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2343 
2344 		c_irq_params->adev = adev;
2345 		c_irq_params->irq_src = int_params.irq_source;
2346 
2347 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2348 				dm_vupdate_high_irq, c_irq_params);
2349 	}
2350 
2351 	/* Use GRPH_PFLIP interrupt */
2352 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2353 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2354 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2355 		if (r) {
2356 			DRM_ERROR("Failed to add page flip irq id!\n");
2357 			return r;
2358 		}
2359 
2360 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2361 		int_params.irq_source =
2362 			dc_interrupt_to_irq_source(dc, i, 0);
2363 
2364 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2365 
2366 		c_irq_params->adev = adev;
2367 		c_irq_params->irq_src = int_params.irq_source;
2368 
2369 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2370 				dm_pflip_high_irq, c_irq_params);
2371 
2372 	}
2373 
2374 	/* HPD */
2375 	r = amdgpu_irq_add_id(adev, client_id,
2376 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2377 	if (r) {
2378 		DRM_ERROR("Failed to add hpd irq id!\n");
2379 		return r;
2380 	}
2381 
2382 	register_hpd_handlers(adev);
2383 
2384 	return 0;
2385 }
2386 
2387 #if defined(CONFIG_DRM_AMD_DC_DCN)
2388 /* Register IRQ sources and initialize IRQ callbacks */
2389 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2390 {
2391 	struct dc *dc = adev->dm.dc;
2392 	struct common_irq_params *c_irq_params;
2393 	struct dc_interrupt_params int_params = {0};
2394 	int r;
2395 	int i;
2396 
2397 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2398 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2399 
2400 	/*
2401 	 * Actions of amdgpu_irq_add_id():
2402 	 * 1. Register a set() function with base driver.
2403 	 *    Base driver will call set() function to enable/disable an
2404 	 *    interrupt in DC hardware.
2405 	 * 2. Register amdgpu_dm_irq_handler().
2406 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2407 	 *    coming from DC hardware.
2408 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2409 	 *    for acknowledging and handling.
2410 	 */
2411 
2412 	/* Use VSTARTUP interrupt */
2413 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2414 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2415 			i++) {
2416 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2417 
2418 		if (r) {
2419 			DRM_ERROR("Failed to add crtc irq id!\n");
2420 			return r;
2421 		}
2422 
2423 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2424 		int_params.irq_source =
2425 			dc_interrupt_to_irq_source(dc, i, 0);
2426 
2427 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2428 
2429 		c_irq_params->adev = adev;
2430 		c_irq_params->irq_src = int_params.irq_source;
2431 
2432 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2433 				dm_dcn_crtc_high_irq, c_irq_params);
2434 	}
2435 
2436 	/* Use GRPH_PFLIP interrupt */
2437 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2438 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2439 			i++) {
2440 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2441 		if (r) {
2442 			DRM_ERROR("Failed to add page flip irq id!\n");
2443 			return r;
2444 		}
2445 
2446 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2447 		int_params.irq_source =
2448 			dc_interrupt_to_irq_source(dc, i, 0);
2449 
2450 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2451 
2452 		c_irq_params->adev = adev;
2453 		c_irq_params->irq_src = int_params.irq_source;
2454 
2455 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2456 				dm_pflip_high_irq, c_irq_params);
2457 
2458 	}
2459 
2460 	/* HPD */
2461 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2462 			&adev->hpd_irq);
2463 	if (r) {
2464 		DRM_ERROR("Failed to add hpd irq id!\n");
2465 		return r;
2466 	}
2467 
2468 	register_hpd_handlers(adev);
2469 
2470 	return 0;
2471 }
2472 #endif
2473 
2474 /*
2475  * Acquires the lock for the atomic state object and returns
2476  * the new atomic state.
2477  *
2478  * This should only be called during atomic check.
2479  */
2480 static int dm_atomic_get_state(struct drm_atomic_state *state,
2481 			       struct dm_atomic_state **dm_state)
2482 {
2483 	struct drm_device *dev = state->dev;
2484 	struct amdgpu_device *adev = dev->dev_private;
2485 	struct amdgpu_display_manager *dm = &adev->dm;
2486 	struct drm_private_state *priv_state;
2487 
2488 	if (*dm_state)
2489 		return 0;
2490 
2491 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2492 	if (IS_ERR(priv_state))
2493 		return PTR_ERR(priv_state);
2494 
2495 	*dm_state = to_dm_atomic_state(priv_state);
2496 
2497 	return 0;
2498 }
2499 
2500 struct dm_atomic_state *
2501 dm_atomic_get_new_state(struct drm_atomic_state *state)
2502 {
2503 	struct drm_device *dev = state->dev;
2504 	struct amdgpu_device *adev = dev->dev_private;
2505 	struct amdgpu_display_manager *dm = &adev->dm;
2506 	struct drm_private_obj *obj;
2507 	struct drm_private_state *new_obj_state;
2508 	int i;
2509 
2510 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2511 		if (obj->funcs == dm->atomic_obj.funcs)
2512 			return to_dm_atomic_state(new_obj_state);
2513 	}
2514 
2515 	return NULL;
2516 }
2517 
2518 struct dm_atomic_state *
2519 dm_atomic_get_old_state(struct drm_atomic_state *state)
2520 {
2521 	struct drm_device *dev = state->dev;
2522 	struct amdgpu_device *adev = dev->dev_private;
2523 	struct amdgpu_display_manager *dm = &adev->dm;
2524 	struct drm_private_obj *obj;
2525 	struct drm_private_state *old_obj_state;
2526 	int i;
2527 
2528 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2529 		if (obj->funcs == dm->atomic_obj.funcs)
2530 			return to_dm_atomic_state(old_obj_state);
2531 	}
2532 
2533 	return NULL;
2534 }
2535 
2536 static struct drm_private_state *
2537 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2538 {
2539 	struct dm_atomic_state *old_state, *new_state;
2540 
2541 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2542 	if (!new_state)
2543 		return NULL;
2544 
2545 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2546 
2547 	old_state = to_dm_atomic_state(obj->state);
2548 
2549 	if (old_state && old_state->context)
2550 		new_state->context = dc_copy_state(old_state->context);
2551 
2552 	if (!new_state->context) {
2553 		kfree(new_state);
2554 		return NULL;
2555 	}
2556 
2557 	return &new_state->base;
2558 }
2559 
2560 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2561 				    struct drm_private_state *state)
2562 {
2563 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2564 
2565 	if (dm_state && dm_state->context)
2566 		dc_release_state(dm_state->context);
2567 
2568 	kfree(dm_state);
2569 }
2570 
2571 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2572 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2573 	.atomic_destroy_state = dm_atomic_destroy_state,
2574 };
2575 
2576 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2577 {
2578 	struct dm_atomic_state *state;
2579 	int r;
2580 
2581 	adev->mode_info.mode_config_initialized = true;
2582 
2583 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2584 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2585 
2586 	adev->ddev->mode_config.max_width = 16384;
2587 	adev->ddev->mode_config.max_height = 16384;
2588 
2589 	adev->ddev->mode_config.preferred_depth = 24;
2590 	adev->ddev->mode_config.prefer_shadow = 1;
2591 	/* indicates support for immediate flip */
2592 	adev->ddev->mode_config.async_page_flip = true;
2593 
2594 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2595 
2596 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2597 	if (!state)
2598 		return -ENOMEM;
2599 
2600 	state->context = dc_create_state(adev->dm.dc);
2601 	if (!state->context) {
2602 		kfree(state);
2603 		return -ENOMEM;
2604 	}
2605 
2606 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2607 
2608 	drm_atomic_private_obj_init(adev->ddev,
2609 				    &adev->dm.atomic_obj,
2610 				    &state->base,
2611 				    &dm_atomic_state_funcs);
2612 
2613 	r = amdgpu_display_modeset_create_props(adev);
2614 	if (r)
2615 		return r;
2616 
2617 	r = amdgpu_dm_audio_init(adev);
2618 	if (r)
2619 		return r;
2620 
2621 	return 0;
2622 }
2623 
2624 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2625 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2626 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2627 
2628 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2629 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2630 
2631 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2632 {
2633 #if defined(CONFIG_ACPI)
2634 	struct amdgpu_dm_backlight_caps caps;
2635 
2636 	if (dm->backlight_caps.caps_valid)
2637 		return;
2638 
2639 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2640 	if (caps.caps_valid) {
2641 		dm->backlight_caps.caps_valid = true;
2642 		if (caps.aux_support)
2643 			return;
2644 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2645 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2646 	} else {
2647 		dm->backlight_caps.min_input_signal =
2648 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2649 		dm->backlight_caps.max_input_signal =
2650 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2651 	}
2652 #else
2653 	if (dm->backlight_caps.aux_support)
2654 		return;
2655 
2656 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2657 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2658 #endif
2659 }
2660 
2661 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2662 {
2663 	bool rc;
2664 
2665 	if (!link)
2666 		return 1;
2667 
2668 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2669 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2670 
2671 	return rc ? 0 : 1;
2672 }
2673 
2674 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2675 			      const uint32_t user_brightness)
2676 {
2677 	u32 min, max, conversion_pace;
2678 	u32 brightness = user_brightness;
2679 
2680 	if (!caps)
2681 		goto out;
2682 
2683 	if (!caps->aux_support) {
2684 		max = caps->max_input_signal;
2685 		min = caps->min_input_signal;
2686 		/*
2687 		 * The brightness input is in the range 0-255
2688 		 * It needs to be rescaled to be between the
2689 		 * requested min and max input signal
2690 		 * It also needs to be scaled up by 0x101 to
2691 		 * match the DC interface which has a range of
2692 		 * 0 to 0xffff
2693 		 */
2694 		conversion_pace = 0x101;
2695 		brightness =
2696 			user_brightness
2697 			* conversion_pace
2698 			* (max - min)
2699 			/ AMDGPU_MAX_BL_LEVEL
2700 			+ min * conversion_pace;
2701 	} else {
2702 		/* TODO
2703 		 * We are doing a linear interpolation here, which is OK but
2704 		 * does not provide the optimal result. We probably want
2705 		 * something close to the Perceptual Quantizer (PQ) curve.
2706 		 */
2707 		max = caps->aux_max_input_signal;
2708 		min = caps->aux_min_input_signal;
2709 
2710 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2711 			       + user_brightness * max;
2712 		// Multiple the value by 1000 since we use millinits
2713 		brightness *= 1000;
2714 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2715 	}
2716 
2717 out:
2718 	return brightness;
2719 }
2720 
2721 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2722 {
2723 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2724 	struct amdgpu_dm_backlight_caps caps;
2725 	struct dc_link *link = NULL;
2726 	u32 brightness;
2727 	bool rc;
2728 
2729 	amdgpu_dm_update_backlight_caps(dm);
2730 	caps = dm->backlight_caps;
2731 
2732 	link = (struct dc_link *)dm->backlight_link;
2733 
2734 	brightness = convert_brightness(&caps, bd->props.brightness);
2735 	// Change brightness based on AUX property
2736 	if (caps.aux_support)
2737 		return set_backlight_via_aux(link, brightness);
2738 
2739 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2740 
2741 	return rc ? 0 : 1;
2742 }
2743 
2744 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2745 {
2746 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2747 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2748 
2749 	if (ret == DC_ERROR_UNEXPECTED)
2750 		return bd->props.brightness;
2751 	return ret;
2752 }
2753 
2754 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2755 	.options = BL_CORE_SUSPENDRESUME,
2756 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2757 	.update_status	= amdgpu_dm_backlight_update_status,
2758 };
2759 
2760 static void
2761 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2762 {
2763 	char bl_name[16];
2764 	struct backlight_properties props = { 0 };
2765 
2766 	amdgpu_dm_update_backlight_caps(dm);
2767 
2768 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2769 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2770 	props.type = BACKLIGHT_RAW;
2771 
2772 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2773 			dm->adev->ddev->primary->index);
2774 
2775 	dm->backlight_dev = backlight_device_register(bl_name,
2776 			dm->adev->ddev->dev,
2777 			dm,
2778 			&amdgpu_dm_backlight_ops,
2779 			&props);
2780 
2781 	if (IS_ERR(dm->backlight_dev))
2782 		DRM_ERROR("DM: Backlight registration failed!\n");
2783 	else
2784 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2785 }
2786 
2787 #endif
2788 
2789 static int initialize_plane(struct amdgpu_display_manager *dm,
2790 			    struct amdgpu_mode_info *mode_info, int plane_id,
2791 			    enum drm_plane_type plane_type,
2792 			    const struct dc_plane_cap *plane_cap)
2793 {
2794 	struct drm_plane *plane;
2795 	unsigned long possible_crtcs;
2796 	int ret = 0;
2797 
2798 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2799 	if (!plane) {
2800 		DRM_ERROR("KMS: Failed to allocate plane\n");
2801 		return -ENOMEM;
2802 	}
2803 	plane->type = plane_type;
2804 
2805 	/*
2806 	 * HACK: IGT tests expect that the primary plane for a CRTC
2807 	 * can only have one possible CRTC. Only expose support for
2808 	 * any CRTC if they're not going to be used as a primary plane
2809 	 * for a CRTC - like overlay or underlay planes.
2810 	 */
2811 	possible_crtcs = 1 << plane_id;
2812 	if (plane_id >= dm->dc->caps.max_streams)
2813 		possible_crtcs = 0xff;
2814 
2815 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2816 
2817 	if (ret) {
2818 		DRM_ERROR("KMS: Failed to initialize plane\n");
2819 		kfree(plane);
2820 		return ret;
2821 	}
2822 
2823 	if (mode_info)
2824 		mode_info->planes[plane_id] = plane;
2825 
2826 	return ret;
2827 }
2828 
2829 
2830 static void register_backlight_device(struct amdgpu_display_manager *dm,
2831 				      struct dc_link *link)
2832 {
2833 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2834 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2835 
2836 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2837 	    link->type != dc_connection_none) {
2838 		/*
2839 		 * Event if registration failed, we should continue with
2840 		 * DM initialization because not having a backlight control
2841 		 * is better then a black screen.
2842 		 */
2843 		amdgpu_dm_register_backlight_device(dm);
2844 
2845 		if (dm->backlight_dev)
2846 			dm->backlight_link = link;
2847 	}
2848 #endif
2849 }
2850 
2851 
2852 /*
2853  * In this architecture, the association
2854  * connector -> encoder -> crtc
2855  * id not really requried. The crtc and connector will hold the
2856  * display_index as an abstraction to use with DAL component
2857  *
2858  * Returns 0 on success
2859  */
2860 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2861 {
2862 	struct amdgpu_display_manager *dm = &adev->dm;
2863 	int32_t i;
2864 	struct amdgpu_dm_connector *aconnector = NULL;
2865 	struct amdgpu_encoder *aencoder = NULL;
2866 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2867 	uint32_t link_cnt;
2868 	int32_t primary_planes;
2869 	enum dc_connection_type new_connection_type = dc_connection_none;
2870 	const struct dc_plane_cap *plane;
2871 
2872 	link_cnt = dm->dc->caps.max_links;
2873 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2874 		DRM_ERROR("DM: Failed to initialize mode config\n");
2875 		return -EINVAL;
2876 	}
2877 
2878 	/* There is one primary plane per CRTC */
2879 	primary_planes = dm->dc->caps.max_streams;
2880 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2881 
2882 	/*
2883 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
2884 	 * Order is reversed to match iteration order in atomic check.
2885 	 */
2886 	for (i = (primary_planes - 1); i >= 0; i--) {
2887 		plane = &dm->dc->caps.planes[i];
2888 
2889 		if (initialize_plane(dm, mode_info, i,
2890 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2891 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2892 			goto fail;
2893 		}
2894 	}
2895 
2896 	/*
2897 	 * Initialize overlay planes, index starting after primary planes.
2898 	 * These planes have a higher DRM index than the primary planes since
2899 	 * they should be considered as having a higher z-order.
2900 	 * Order is reversed to match iteration order in atomic check.
2901 	 *
2902 	 * Only support DCN for now, and only expose one so we don't encourage
2903 	 * userspace to use up all the pipes.
2904 	 */
2905 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2906 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2907 
2908 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2909 			continue;
2910 
2911 		if (!plane->blends_with_above || !plane->blends_with_below)
2912 			continue;
2913 
2914 		if (!plane->pixel_format_support.argb8888)
2915 			continue;
2916 
2917 		if (initialize_plane(dm, NULL, primary_planes + i,
2918 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2919 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2920 			goto fail;
2921 		}
2922 
2923 		/* Only create one overlay plane. */
2924 		break;
2925 	}
2926 
2927 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2928 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2929 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2930 			goto fail;
2931 		}
2932 
2933 	dm->display_indexes_num = dm->dc->caps.max_streams;
2934 
2935 	/* loops over all connectors on the board */
2936 	for (i = 0; i < link_cnt; i++) {
2937 		struct dc_link *link = NULL;
2938 
2939 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2940 			DRM_ERROR(
2941 				"KMS: Cannot support more than %d display indexes\n",
2942 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2943 			continue;
2944 		}
2945 
2946 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2947 		if (!aconnector)
2948 			goto fail;
2949 
2950 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2951 		if (!aencoder)
2952 			goto fail;
2953 
2954 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2955 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2956 			goto fail;
2957 		}
2958 
2959 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2960 			DRM_ERROR("KMS: Failed to initialize connector\n");
2961 			goto fail;
2962 		}
2963 
2964 		link = dc_get_link_at_index(dm->dc, i);
2965 
2966 		if (!dc_link_detect_sink(link, &new_connection_type))
2967 			DRM_ERROR("KMS: Failed to detect connector\n");
2968 
2969 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2970 			emulated_link_detect(link);
2971 			amdgpu_dm_update_connector_after_detect(aconnector);
2972 
2973 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2974 			amdgpu_dm_update_connector_after_detect(aconnector);
2975 			register_backlight_device(dm, link);
2976 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2977 				amdgpu_dm_set_psr_caps(link);
2978 		}
2979 
2980 
2981 	}
2982 
2983 	/* Software is initialized. Now we can register interrupt handlers. */
2984 	switch (adev->asic_type) {
2985 	case CHIP_BONAIRE:
2986 	case CHIP_HAWAII:
2987 	case CHIP_KAVERI:
2988 	case CHIP_KABINI:
2989 	case CHIP_MULLINS:
2990 	case CHIP_TONGA:
2991 	case CHIP_FIJI:
2992 	case CHIP_CARRIZO:
2993 	case CHIP_STONEY:
2994 	case CHIP_POLARIS11:
2995 	case CHIP_POLARIS10:
2996 	case CHIP_POLARIS12:
2997 	case CHIP_VEGAM:
2998 	case CHIP_VEGA10:
2999 	case CHIP_VEGA12:
3000 	case CHIP_VEGA20:
3001 		if (dce110_register_irq_handlers(dm->adev)) {
3002 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3003 			goto fail;
3004 		}
3005 		break;
3006 #if defined(CONFIG_DRM_AMD_DC_DCN)
3007 	case CHIP_RAVEN:
3008 	case CHIP_NAVI12:
3009 	case CHIP_NAVI10:
3010 	case CHIP_NAVI14:
3011 	case CHIP_RENOIR:
3012 		if (dcn10_register_irq_handlers(dm->adev)) {
3013 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3014 			goto fail;
3015 		}
3016 		break;
3017 #endif
3018 	default:
3019 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3020 		goto fail;
3021 	}
3022 
3023 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3024 		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3025 
3026 	return 0;
3027 fail:
3028 	kfree(aencoder);
3029 	kfree(aconnector);
3030 
3031 	return -EINVAL;
3032 }
3033 
3034 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3035 {
3036 	drm_mode_config_cleanup(dm->ddev);
3037 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3038 	return;
3039 }
3040 
3041 /******************************************************************************
3042  * amdgpu_display_funcs functions
3043  *****************************************************************************/
3044 
3045 /*
3046  * dm_bandwidth_update - program display watermarks
3047  *
3048  * @adev: amdgpu_device pointer
3049  *
3050  * Calculate and program the display watermarks and line buffer allocation.
3051  */
3052 static void dm_bandwidth_update(struct amdgpu_device *adev)
3053 {
3054 	/* TODO: implement later */
3055 }
3056 
3057 static const struct amdgpu_display_funcs dm_display_funcs = {
3058 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3059 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3060 	.backlight_set_level = NULL, /* never called for DC */
3061 	.backlight_get_level = NULL, /* never called for DC */
3062 	.hpd_sense = NULL,/* called unconditionally */
3063 	.hpd_set_polarity = NULL, /* called unconditionally */
3064 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3065 	.page_flip_get_scanoutpos =
3066 		dm_crtc_get_scanoutpos,/* called unconditionally */
3067 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3068 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3069 };
3070 
3071 #if defined(CONFIG_DEBUG_KERNEL_DC)
3072 
3073 static ssize_t s3_debug_store(struct device *device,
3074 			      struct device_attribute *attr,
3075 			      const char *buf,
3076 			      size_t count)
3077 {
3078 	int ret;
3079 	int s3_state;
3080 	struct drm_device *drm_dev = dev_get_drvdata(device);
3081 	struct amdgpu_device *adev = drm_dev->dev_private;
3082 
3083 	ret = kstrtoint(buf, 0, &s3_state);
3084 
3085 	if (ret == 0) {
3086 		if (s3_state) {
3087 			dm_resume(adev);
3088 			drm_kms_helper_hotplug_event(adev->ddev);
3089 		} else
3090 			dm_suspend(adev);
3091 	}
3092 
3093 	return ret == 0 ? count : 0;
3094 }
3095 
3096 DEVICE_ATTR_WO(s3_debug);
3097 
3098 #endif
3099 
3100 static int dm_early_init(void *handle)
3101 {
3102 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3103 
3104 	switch (adev->asic_type) {
3105 	case CHIP_BONAIRE:
3106 	case CHIP_HAWAII:
3107 		adev->mode_info.num_crtc = 6;
3108 		adev->mode_info.num_hpd = 6;
3109 		adev->mode_info.num_dig = 6;
3110 		break;
3111 	case CHIP_KAVERI:
3112 		adev->mode_info.num_crtc = 4;
3113 		adev->mode_info.num_hpd = 6;
3114 		adev->mode_info.num_dig = 7;
3115 		break;
3116 	case CHIP_KABINI:
3117 	case CHIP_MULLINS:
3118 		adev->mode_info.num_crtc = 2;
3119 		adev->mode_info.num_hpd = 6;
3120 		adev->mode_info.num_dig = 6;
3121 		break;
3122 	case CHIP_FIJI:
3123 	case CHIP_TONGA:
3124 		adev->mode_info.num_crtc = 6;
3125 		adev->mode_info.num_hpd = 6;
3126 		adev->mode_info.num_dig = 7;
3127 		break;
3128 	case CHIP_CARRIZO:
3129 		adev->mode_info.num_crtc = 3;
3130 		adev->mode_info.num_hpd = 6;
3131 		adev->mode_info.num_dig = 9;
3132 		break;
3133 	case CHIP_STONEY:
3134 		adev->mode_info.num_crtc = 2;
3135 		adev->mode_info.num_hpd = 6;
3136 		adev->mode_info.num_dig = 9;
3137 		break;
3138 	case CHIP_POLARIS11:
3139 	case CHIP_POLARIS12:
3140 		adev->mode_info.num_crtc = 5;
3141 		adev->mode_info.num_hpd = 5;
3142 		adev->mode_info.num_dig = 5;
3143 		break;
3144 	case CHIP_POLARIS10:
3145 	case CHIP_VEGAM:
3146 		adev->mode_info.num_crtc = 6;
3147 		adev->mode_info.num_hpd = 6;
3148 		adev->mode_info.num_dig = 6;
3149 		break;
3150 	case CHIP_VEGA10:
3151 	case CHIP_VEGA12:
3152 	case CHIP_VEGA20:
3153 		adev->mode_info.num_crtc = 6;
3154 		adev->mode_info.num_hpd = 6;
3155 		adev->mode_info.num_dig = 6;
3156 		break;
3157 #if defined(CONFIG_DRM_AMD_DC_DCN)
3158 	case CHIP_RAVEN:
3159 		adev->mode_info.num_crtc = 4;
3160 		adev->mode_info.num_hpd = 4;
3161 		adev->mode_info.num_dig = 4;
3162 		break;
3163 #endif
3164 	case CHIP_NAVI10:
3165 	case CHIP_NAVI12:
3166 		adev->mode_info.num_crtc = 6;
3167 		adev->mode_info.num_hpd = 6;
3168 		adev->mode_info.num_dig = 6;
3169 		break;
3170 	case CHIP_NAVI14:
3171 		adev->mode_info.num_crtc = 5;
3172 		adev->mode_info.num_hpd = 5;
3173 		adev->mode_info.num_dig = 5;
3174 		break;
3175 	case CHIP_RENOIR:
3176 		adev->mode_info.num_crtc = 4;
3177 		adev->mode_info.num_hpd = 4;
3178 		adev->mode_info.num_dig = 4;
3179 		break;
3180 	default:
3181 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3182 		return -EINVAL;
3183 	}
3184 
3185 	amdgpu_dm_set_irq_funcs(adev);
3186 
3187 	if (adev->mode_info.funcs == NULL)
3188 		adev->mode_info.funcs = &dm_display_funcs;
3189 
3190 	/*
3191 	 * Note: Do NOT change adev->audio_endpt_rreg and
3192 	 * adev->audio_endpt_wreg because they are initialised in
3193 	 * amdgpu_device_init()
3194 	 */
3195 #if defined(CONFIG_DEBUG_KERNEL_DC)
3196 	device_create_file(
3197 		adev->ddev->dev,
3198 		&dev_attr_s3_debug);
3199 #endif
3200 
3201 	return 0;
3202 }
3203 
3204 static bool modeset_required(struct drm_crtc_state *crtc_state,
3205 			     struct dc_stream_state *new_stream,
3206 			     struct dc_stream_state *old_stream)
3207 {
3208 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3209 		return false;
3210 
3211 	if (!crtc_state->enable)
3212 		return false;
3213 
3214 	return crtc_state->active;
3215 }
3216 
3217 static bool modereset_required(struct drm_crtc_state *crtc_state)
3218 {
3219 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3220 		return false;
3221 
3222 	return !crtc_state->enable || !crtc_state->active;
3223 }
3224 
3225 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3226 {
3227 	drm_encoder_cleanup(encoder);
3228 	kfree(encoder);
3229 }
3230 
3231 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3232 	.destroy = amdgpu_dm_encoder_destroy,
3233 };
3234 
3235 
3236 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3237 				struct dc_scaling_info *scaling_info)
3238 {
3239 	int scale_w, scale_h;
3240 
3241 	memset(scaling_info, 0, sizeof(*scaling_info));
3242 
3243 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3244 	scaling_info->src_rect.x = state->src_x >> 16;
3245 	scaling_info->src_rect.y = state->src_y >> 16;
3246 
3247 	scaling_info->src_rect.width = state->src_w >> 16;
3248 	if (scaling_info->src_rect.width == 0)
3249 		return -EINVAL;
3250 
3251 	scaling_info->src_rect.height = state->src_h >> 16;
3252 	if (scaling_info->src_rect.height == 0)
3253 		return -EINVAL;
3254 
3255 	scaling_info->dst_rect.x = state->crtc_x;
3256 	scaling_info->dst_rect.y = state->crtc_y;
3257 
3258 	if (state->crtc_w == 0)
3259 		return -EINVAL;
3260 
3261 	scaling_info->dst_rect.width = state->crtc_w;
3262 
3263 	if (state->crtc_h == 0)
3264 		return -EINVAL;
3265 
3266 	scaling_info->dst_rect.height = state->crtc_h;
3267 
3268 	/* DRM doesn't specify clipping on destination output. */
3269 	scaling_info->clip_rect = scaling_info->dst_rect;
3270 
3271 	/* TODO: Validate scaling per-format with DC plane caps */
3272 	scale_w = scaling_info->dst_rect.width * 1000 /
3273 		  scaling_info->src_rect.width;
3274 
3275 	if (scale_w < 250 || scale_w > 16000)
3276 		return -EINVAL;
3277 
3278 	scale_h = scaling_info->dst_rect.height * 1000 /
3279 		  scaling_info->src_rect.height;
3280 
3281 	if (scale_h < 250 || scale_h > 16000)
3282 		return -EINVAL;
3283 
3284 	/*
3285 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3286 	 * assume reasonable defaults based on the format.
3287 	 */
3288 
3289 	return 0;
3290 }
3291 
3292 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3293 		       uint64_t *tiling_flags)
3294 {
3295 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3296 	int r = amdgpu_bo_reserve(rbo, false);
3297 
3298 	if (unlikely(r)) {
3299 		/* Don't show error message when returning -ERESTARTSYS */
3300 		if (r != -ERESTARTSYS)
3301 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3302 		return r;
3303 	}
3304 
3305 	if (tiling_flags)
3306 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3307 
3308 	amdgpu_bo_unreserve(rbo);
3309 
3310 	return r;
3311 }
3312 
3313 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3314 {
3315 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3316 
3317 	return offset ? (address + offset * 256) : 0;
3318 }
3319 
3320 static int
3321 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3322 			  const struct amdgpu_framebuffer *afb,
3323 			  const enum surface_pixel_format format,
3324 			  const enum dc_rotation_angle rotation,
3325 			  const struct plane_size *plane_size,
3326 			  const union dc_tiling_info *tiling_info,
3327 			  const uint64_t info,
3328 			  struct dc_plane_dcc_param *dcc,
3329 			  struct dc_plane_address *address)
3330 {
3331 	struct dc *dc = adev->dm.dc;
3332 	struct dc_dcc_surface_param input;
3333 	struct dc_surface_dcc_cap output;
3334 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3335 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3336 	uint64_t dcc_address;
3337 
3338 	memset(&input, 0, sizeof(input));
3339 	memset(&output, 0, sizeof(output));
3340 
3341 	if (!offset)
3342 		return 0;
3343 
3344 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3345 		return 0;
3346 
3347 	if (!dc->cap_funcs.get_dcc_compression_cap)
3348 		return -EINVAL;
3349 
3350 	input.format = format;
3351 	input.surface_size.width = plane_size->surface_size.width;
3352 	input.surface_size.height = plane_size->surface_size.height;
3353 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3354 
3355 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3356 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3357 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3358 		input.scan = SCAN_DIRECTION_VERTICAL;
3359 
3360 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3361 		return -EINVAL;
3362 
3363 	if (!output.capable)
3364 		return -EINVAL;
3365 
3366 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3367 		return -EINVAL;
3368 
3369 	dcc->enable = 1;
3370 	dcc->meta_pitch =
3371 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3372 	dcc->independent_64b_blks = i64b;
3373 
3374 	dcc_address = get_dcc_address(afb->address, info);
3375 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3376 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3377 
3378 	return 0;
3379 }
3380 
3381 static int
3382 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3383 			     const struct amdgpu_framebuffer *afb,
3384 			     const enum surface_pixel_format format,
3385 			     const enum dc_rotation_angle rotation,
3386 			     const uint64_t tiling_flags,
3387 			     union dc_tiling_info *tiling_info,
3388 			     struct plane_size *plane_size,
3389 			     struct dc_plane_dcc_param *dcc,
3390 			     struct dc_plane_address *address)
3391 {
3392 	const struct drm_framebuffer *fb = &afb->base;
3393 	int ret;
3394 
3395 	memset(tiling_info, 0, sizeof(*tiling_info));
3396 	memset(plane_size, 0, sizeof(*plane_size));
3397 	memset(dcc, 0, sizeof(*dcc));
3398 	memset(address, 0, sizeof(*address));
3399 
3400 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3401 		plane_size->surface_size.x = 0;
3402 		plane_size->surface_size.y = 0;
3403 		plane_size->surface_size.width = fb->width;
3404 		plane_size->surface_size.height = fb->height;
3405 		plane_size->surface_pitch =
3406 			fb->pitches[0] / fb->format->cpp[0];
3407 
3408 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3409 		address->grph.addr.low_part = lower_32_bits(afb->address);
3410 		address->grph.addr.high_part = upper_32_bits(afb->address);
3411 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3412 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3413 
3414 		plane_size->surface_size.x = 0;
3415 		plane_size->surface_size.y = 0;
3416 		plane_size->surface_size.width = fb->width;
3417 		plane_size->surface_size.height = fb->height;
3418 		plane_size->surface_pitch =
3419 			fb->pitches[0] / fb->format->cpp[0];
3420 
3421 		plane_size->chroma_size.x = 0;
3422 		plane_size->chroma_size.y = 0;
3423 		/* TODO: set these based on surface format */
3424 		plane_size->chroma_size.width = fb->width / 2;
3425 		plane_size->chroma_size.height = fb->height / 2;
3426 
3427 		plane_size->chroma_pitch =
3428 			fb->pitches[1] / fb->format->cpp[1];
3429 
3430 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3431 		address->video_progressive.luma_addr.low_part =
3432 			lower_32_bits(afb->address);
3433 		address->video_progressive.luma_addr.high_part =
3434 			upper_32_bits(afb->address);
3435 		address->video_progressive.chroma_addr.low_part =
3436 			lower_32_bits(chroma_addr);
3437 		address->video_progressive.chroma_addr.high_part =
3438 			upper_32_bits(chroma_addr);
3439 	}
3440 
3441 	/* Fill GFX8 params */
3442 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3443 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3444 
3445 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3446 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3447 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3448 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3449 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3450 
3451 		/* XXX fix me for VI */
3452 		tiling_info->gfx8.num_banks = num_banks;
3453 		tiling_info->gfx8.array_mode =
3454 				DC_ARRAY_2D_TILED_THIN1;
3455 		tiling_info->gfx8.tile_split = tile_split;
3456 		tiling_info->gfx8.bank_width = bankw;
3457 		tiling_info->gfx8.bank_height = bankh;
3458 		tiling_info->gfx8.tile_aspect = mtaspect;
3459 		tiling_info->gfx8.tile_mode =
3460 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3461 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3462 			== DC_ARRAY_1D_TILED_THIN1) {
3463 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3464 	}
3465 
3466 	tiling_info->gfx8.pipe_config =
3467 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3468 
3469 	if (adev->asic_type == CHIP_VEGA10 ||
3470 	    adev->asic_type == CHIP_VEGA12 ||
3471 	    adev->asic_type == CHIP_VEGA20 ||
3472 	    adev->asic_type == CHIP_NAVI10 ||
3473 	    adev->asic_type == CHIP_NAVI14 ||
3474 	    adev->asic_type == CHIP_NAVI12 ||
3475 	    adev->asic_type == CHIP_RENOIR ||
3476 	    adev->asic_type == CHIP_RAVEN) {
3477 		/* Fill GFX9 params */
3478 		tiling_info->gfx9.num_pipes =
3479 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3480 		tiling_info->gfx9.num_banks =
3481 			adev->gfx.config.gb_addr_config_fields.num_banks;
3482 		tiling_info->gfx9.pipe_interleave =
3483 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3484 		tiling_info->gfx9.num_shader_engines =
3485 			adev->gfx.config.gb_addr_config_fields.num_se;
3486 		tiling_info->gfx9.max_compressed_frags =
3487 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3488 		tiling_info->gfx9.num_rb_per_se =
3489 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3490 		tiling_info->gfx9.swizzle =
3491 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3492 		tiling_info->gfx9.shaderEnable = 1;
3493 
3494 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3495 						plane_size, tiling_info,
3496 						tiling_flags, dcc, address);
3497 		if (ret)
3498 			return ret;
3499 	}
3500 
3501 	return 0;
3502 }
3503 
3504 static void
3505 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3506 			       bool *per_pixel_alpha, bool *global_alpha,
3507 			       int *global_alpha_value)
3508 {
3509 	*per_pixel_alpha = false;
3510 	*global_alpha = false;
3511 	*global_alpha_value = 0xff;
3512 
3513 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3514 		return;
3515 
3516 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3517 		static const uint32_t alpha_formats[] = {
3518 			DRM_FORMAT_ARGB8888,
3519 			DRM_FORMAT_RGBA8888,
3520 			DRM_FORMAT_ABGR8888,
3521 		};
3522 		uint32_t format = plane_state->fb->format->format;
3523 		unsigned int i;
3524 
3525 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3526 			if (format == alpha_formats[i]) {
3527 				*per_pixel_alpha = true;
3528 				break;
3529 			}
3530 		}
3531 	}
3532 
3533 	if (plane_state->alpha < 0xffff) {
3534 		*global_alpha = true;
3535 		*global_alpha_value = plane_state->alpha >> 8;
3536 	}
3537 }
3538 
3539 static int
3540 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3541 			    const enum surface_pixel_format format,
3542 			    enum dc_color_space *color_space)
3543 {
3544 	bool full_range;
3545 
3546 	*color_space = COLOR_SPACE_SRGB;
3547 
3548 	/* DRM color properties only affect non-RGB formats. */
3549 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3550 		return 0;
3551 
3552 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3553 
3554 	switch (plane_state->color_encoding) {
3555 	case DRM_COLOR_YCBCR_BT601:
3556 		if (full_range)
3557 			*color_space = COLOR_SPACE_YCBCR601;
3558 		else
3559 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3560 		break;
3561 
3562 	case DRM_COLOR_YCBCR_BT709:
3563 		if (full_range)
3564 			*color_space = COLOR_SPACE_YCBCR709;
3565 		else
3566 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3567 		break;
3568 
3569 	case DRM_COLOR_YCBCR_BT2020:
3570 		if (full_range)
3571 			*color_space = COLOR_SPACE_2020_YCBCR;
3572 		else
3573 			return -EINVAL;
3574 		break;
3575 
3576 	default:
3577 		return -EINVAL;
3578 	}
3579 
3580 	return 0;
3581 }
3582 
3583 static int
3584 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3585 			    const struct drm_plane_state *plane_state,
3586 			    const uint64_t tiling_flags,
3587 			    struct dc_plane_info *plane_info,
3588 			    struct dc_plane_address *address)
3589 {
3590 	const struct drm_framebuffer *fb = plane_state->fb;
3591 	const struct amdgpu_framebuffer *afb =
3592 		to_amdgpu_framebuffer(plane_state->fb);
3593 	struct drm_format_name_buf format_name;
3594 	int ret;
3595 
3596 	memset(plane_info, 0, sizeof(*plane_info));
3597 
3598 	switch (fb->format->format) {
3599 	case DRM_FORMAT_C8:
3600 		plane_info->format =
3601 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3602 		break;
3603 	case DRM_FORMAT_RGB565:
3604 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3605 		break;
3606 	case DRM_FORMAT_XRGB8888:
3607 	case DRM_FORMAT_ARGB8888:
3608 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3609 		break;
3610 	case DRM_FORMAT_XRGB2101010:
3611 	case DRM_FORMAT_ARGB2101010:
3612 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3613 		break;
3614 	case DRM_FORMAT_XBGR2101010:
3615 	case DRM_FORMAT_ABGR2101010:
3616 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3617 		break;
3618 	case DRM_FORMAT_XBGR8888:
3619 	case DRM_FORMAT_ABGR8888:
3620 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3621 		break;
3622 	case DRM_FORMAT_NV21:
3623 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3624 		break;
3625 	case DRM_FORMAT_NV12:
3626 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3627 		break;
3628 	default:
3629 		DRM_ERROR(
3630 			"Unsupported screen format %s\n",
3631 			drm_get_format_name(fb->format->format, &format_name));
3632 		return -EINVAL;
3633 	}
3634 
3635 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3636 	case DRM_MODE_ROTATE_0:
3637 		plane_info->rotation = ROTATION_ANGLE_0;
3638 		break;
3639 	case DRM_MODE_ROTATE_90:
3640 		plane_info->rotation = ROTATION_ANGLE_90;
3641 		break;
3642 	case DRM_MODE_ROTATE_180:
3643 		plane_info->rotation = ROTATION_ANGLE_180;
3644 		break;
3645 	case DRM_MODE_ROTATE_270:
3646 		plane_info->rotation = ROTATION_ANGLE_270;
3647 		break;
3648 	default:
3649 		plane_info->rotation = ROTATION_ANGLE_0;
3650 		break;
3651 	}
3652 
3653 	plane_info->visible = true;
3654 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3655 
3656 	plane_info->layer_index = 0;
3657 
3658 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3659 					  &plane_info->color_space);
3660 	if (ret)
3661 		return ret;
3662 
3663 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3664 					   plane_info->rotation, tiling_flags,
3665 					   &plane_info->tiling_info,
3666 					   &plane_info->plane_size,
3667 					   &plane_info->dcc, address);
3668 	if (ret)
3669 		return ret;
3670 
3671 	fill_blending_from_plane_state(
3672 		plane_state, &plane_info->per_pixel_alpha,
3673 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3674 
3675 	return 0;
3676 }
3677 
3678 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3679 				    struct dc_plane_state *dc_plane_state,
3680 				    struct drm_plane_state *plane_state,
3681 				    struct drm_crtc_state *crtc_state)
3682 {
3683 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3684 	const struct amdgpu_framebuffer *amdgpu_fb =
3685 		to_amdgpu_framebuffer(plane_state->fb);
3686 	struct dc_scaling_info scaling_info;
3687 	struct dc_plane_info plane_info;
3688 	uint64_t tiling_flags;
3689 	int ret;
3690 
3691 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3692 	if (ret)
3693 		return ret;
3694 
3695 	dc_plane_state->src_rect = scaling_info.src_rect;
3696 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3697 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3698 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3699 
3700 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3701 	if (ret)
3702 		return ret;
3703 
3704 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3705 					  &plane_info,
3706 					  &dc_plane_state->address);
3707 	if (ret)
3708 		return ret;
3709 
3710 	dc_plane_state->format = plane_info.format;
3711 	dc_plane_state->color_space = plane_info.color_space;
3712 	dc_plane_state->format = plane_info.format;
3713 	dc_plane_state->plane_size = plane_info.plane_size;
3714 	dc_plane_state->rotation = plane_info.rotation;
3715 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3716 	dc_plane_state->stereo_format = plane_info.stereo_format;
3717 	dc_plane_state->tiling_info = plane_info.tiling_info;
3718 	dc_plane_state->visible = plane_info.visible;
3719 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3720 	dc_plane_state->global_alpha = plane_info.global_alpha;
3721 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3722 	dc_plane_state->dcc = plane_info.dcc;
3723 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3724 
3725 	/*
3726 	 * Always set input transfer function, since plane state is refreshed
3727 	 * every time.
3728 	 */
3729 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3730 	if (ret)
3731 		return ret;
3732 
3733 	return 0;
3734 }
3735 
3736 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3737 					   const struct dm_connector_state *dm_state,
3738 					   struct dc_stream_state *stream)
3739 {
3740 	enum amdgpu_rmx_type rmx_type;
3741 
3742 	struct rect src = { 0 }; /* viewport in composition space*/
3743 	struct rect dst = { 0 }; /* stream addressable area */
3744 
3745 	/* no mode. nothing to be done */
3746 	if (!mode)
3747 		return;
3748 
3749 	/* Full screen scaling by default */
3750 	src.width = mode->hdisplay;
3751 	src.height = mode->vdisplay;
3752 	dst.width = stream->timing.h_addressable;
3753 	dst.height = stream->timing.v_addressable;
3754 
3755 	if (dm_state) {
3756 		rmx_type = dm_state->scaling;
3757 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3758 			if (src.width * dst.height <
3759 					src.height * dst.width) {
3760 				/* height needs less upscaling/more downscaling */
3761 				dst.width = src.width *
3762 						dst.height / src.height;
3763 			} else {
3764 				/* width needs less upscaling/more downscaling */
3765 				dst.height = src.height *
3766 						dst.width / src.width;
3767 			}
3768 		} else if (rmx_type == RMX_CENTER) {
3769 			dst = src;
3770 		}
3771 
3772 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3773 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3774 
3775 		if (dm_state->underscan_enable) {
3776 			dst.x += dm_state->underscan_hborder / 2;
3777 			dst.y += dm_state->underscan_vborder / 2;
3778 			dst.width -= dm_state->underscan_hborder;
3779 			dst.height -= dm_state->underscan_vborder;
3780 		}
3781 	}
3782 
3783 	stream->src = src;
3784 	stream->dst = dst;
3785 
3786 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3787 			dst.x, dst.y, dst.width, dst.height);
3788 
3789 }
3790 
3791 static enum dc_color_depth
3792 convert_color_depth_from_display_info(const struct drm_connector *connector,
3793 				      const struct drm_connector_state *state,
3794 				      bool is_y420)
3795 {
3796 	uint8_t bpc;
3797 
3798 	if (is_y420) {
3799 		bpc = 8;
3800 
3801 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
3802 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3803 			bpc = 16;
3804 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3805 			bpc = 12;
3806 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3807 			bpc = 10;
3808 	} else {
3809 		bpc = (uint8_t)connector->display_info.bpc;
3810 		/* Assume 8 bpc by default if no bpc is specified. */
3811 		bpc = bpc ? bpc : 8;
3812 	}
3813 
3814 	if (!state)
3815 		state = connector->state;
3816 
3817 	if (state) {
3818 		/*
3819 		 * Cap display bpc based on the user requested value.
3820 		 *
3821 		 * The value for state->max_bpc may not correctly updated
3822 		 * depending on when the connector gets added to the state
3823 		 * or if this was called outside of atomic check, so it
3824 		 * can't be used directly.
3825 		 */
3826 		bpc = min(bpc, state->max_requested_bpc);
3827 
3828 		/* Round down to the nearest even number. */
3829 		bpc = bpc - (bpc & 1);
3830 	}
3831 
3832 	switch (bpc) {
3833 	case 0:
3834 		/*
3835 		 * Temporary Work around, DRM doesn't parse color depth for
3836 		 * EDID revision before 1.4
3837 		 * TODO: Fix edid parsing
3838 		 */
3839 		return COLOR_DEPTH_888;
3840 	case 6:
3841 		return COLOR_DEPTH_666;
3842 	case 8:
3843 		return COLOR_DEPTH_888;
3844 	case 10:
3845 		return COLOR_DEPTH_101010;
3846 	case 12:
3847 		return COLOR_DEPTH_121212;
3848 	case 14:
3849 		return COLOR_DEPTH_141414;
3850 	case 16:
3851 		return COLOR_DEPTH_161616;
3852 	default:
3853 		return COLOR_DEPTH_UNDEFINED;
3854 	}
3855 }
3856 
3857 static enum dc_aspect_ratio
3858 get_aspect_ratio(const struct drm_display_mode *mode_in)
3859 {
3860 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3861 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3862 }
3863 
3864 static enum dc_color_space
3865 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3866 {
3867 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3868 
3869 	switch (dc_crtc_timing->pixel_encoding)	{
3870 	case PIXEL_ENCODING_YCBCR422:
3871 	case PIXEL_ENCODING_YCBCR444:
3872 	case PIXEL_ENCODING_YCBCR420:
3873 	{
3874 		/*
3875 		 * 27030khz is the separation point between HDTV and SDTV
3876 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
3877 		 * respectively
3878 		 */
3879 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3880 			if (dc_crtc_timing->flags.Y_ONLY)
3881 				color_space =
3882 					COLOR_SPACE_YCBCR709_LIMITED;
3883 			else
3884 				color_space = COLOR_SPACE_YCBCR709;
3885 		} else {
3886 			if (dc_crtc_timing->flags.Y_ONLY)
3887 				color_space =
3888 					COLOR_SPACE_YCBCR601_LIMITED;
3889 			else
3890 				color_space = COLOR_SPACE_YCBCR601;
3891 		}
3892 
3893 	}
3894 	break;
3895 	case PIXEL_ENCODING_RGB:
3896 		color_space = COLOR_SPACE_SRGB;
3897 		break;
3898 
3899 	default:
3900 		WARN_ON(1);
3901 		break;
3902 	}
3903 
3904 	return color_space;
3905 }
3906 
3907 static bool adjust_colour_depth_from_display_info(
3908 	struct dc_crtc_timing *timing_out,
3909 	const struct drm_display_info *info)
3910 {
3911 	enum dc_color_depth depth = timing_out->display_color_depth;
3912 	int normalized_clk;
3913 	do {
3914 		normalized_clk = timing_out->pix_clk_100hz / 10;
3915 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3916 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3917 			normalized_clk /= 2;
3918 		/* Adjusting pix clock following on HDMI spec based on colour depth */
3919 		switch (depth) {
3920 		case COLOR_DEPTH_888:
3921 			break;
3922 		case COLOR_DEPTH_101010:
3923 			normalized_clk = (normalized_clk * 30) / 24;
3924 			break;
3925 		case COLOR_DEPTH_121212:
3926 			normalized_clk = (normalized_clk * 36) / 24;
3927 			break;
3928 		case COLOR_DEPTH_161616:
3929 			normalized_clk = (normalized_clk * 48) / 24;
3930 			break;
3931 		default:
3932 			/* The above depths are the only ones valid for HDMI. */
3933 			return false;
3934 		}
3935 		if (normalized_clk <= info->max_tmds_clock) {
3936 			timing_out->display_color_depth = depth;
3937 			return true;
3938 		}
3939 	} while (--depth > COLOR_DEPTH_666);
3940 	return false;
3941 }
3942 
3943 static void fill_stream_properties_from_drm_display_mode(
3944 	struct dc_stream_state *stream,
3945 	const struct drm_display_mode *mode_in,
3946 	const struct drm_connector *connector,
3947 	const struct drm_connector_state *connector_state,
3948 	const struct dc_stream_state *old_stream)
3949 {
3950 	struct dc_crtc_timing *timing_out = &stream->timing;
3951 	const struct drm_display_info *info = &connector->display_info;
3952 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3953 	struct hdmi_vendor_infoframe hv_frame;
3954 	struct hdmi_avi_infoframe avi_frame;
3955 
3956 	memset(&hv_frame, 0, sizeof(hv_frame));
3957 	memset(&avi_frame, 0, sizeof(avi_frame));
3958 
3959 	timing_out->h_border_left = 0;
3960 	timing_out->h_border_right = 0;
3961 	timing_out->v_border_top = 0;
3962 	timing_out->v_border_bottom = 0;
3963 	/* TODO: un-hardcode */
3964 	if (drm_mode_is_420_only(info, mode_in)
3965 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3966 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3967 	else if (drm_mode_is_420_also(info, mode_in)
3968 			&& aconnector->force_yuv420_output)
3969 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3970 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3971 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3972 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3973 	else
3974 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3975 
3976 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
3977 	timing_out->display_color_depth = convert_color_depth_from_display_info(
3978 		connector, connector_state,
3979 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
3980 	timing_out->scan_type = SCANNING_TYPE_NODATA;
3981 	timing_out->hdmi_vic = 0;
3982 
3983 	if(old_stream) {
3984 		timing_out->vic = old_stream->timing.vic;
3985 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
3986 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
3987 	} else {
3988 		timing_out->vic = drm_match_cea_mode(mode_in);
3989 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
3990 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
3991 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
3992 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
3993 	}
3994 
3995 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
3996 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
3997 		timing_out->vic = avi_frame.video_code;
3998 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
3999 		timing_out->hdmi_vic = hv_frame.vic;
4000 	}
4001 
4002 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4003 	timing_out->h_total = mode_in->crtc_htotal;
4004 	timing_out->h_sync_width =
4005 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4006 	timing_out->h_front_porch =
4007 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4008 	timing_out->v_total = mode_in->crtc_vtotal;
4009 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4010 	timing_out->v_front_porch =
4011 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4012 	timing_out->v_sync_width =
4013 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4014 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4015 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4016 
4017 	stream->output_color_space = get_output_color_space(timing_out);
4018 
4019 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4020 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4021 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4022 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4023 		    drm_mode_is_420_also(info, mode_in) &&
4024 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4025 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4026 			adjust_colour_depth_from_display_info(timing_out, info);
4027 		}
4028 	}
4029 }
4030 
4031 static void fill_audio_info(struct audio_info *audio_info,
4032 			    const struct drm_connector *drm_connector,
4033 			    const struct dc_sink *dc_sink)
4034 {
4035 	int i = 0;
4036 	int cea_revision = 0;
4037 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4038 
4039 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4040 	audio_info->product_id = edid_caps->product_id;
4041 
4042 	cea_revision = drm_connector->display_info.cea_rev;
4043 
4044 	strscpy(audio_info->display_name,
4045 		edid_caps->display_name,
4046 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4047 
4048 	if (cea_revision >= 3) {
4049 		audio_info->mode_count = edid_caps->audio_mode_count;
4050 
4051 		for (i = 0; i < audio_info->mode_count; ++i) {
4052 			audio_info->modes[i].format_code =
4053 					(enum audio_format_code)
4054 					(edid_caps->audio_modes[i].format_code);
4055 			audio_info->modes[i].channel_count =
4056 					edid_caps->audio_modes[i].channel_count;
4057 			audio_info->modes[i].sample_rates.all =
4058 					edid_caps->audio_modes[i].sample_rate;
4059 			audio_info->modes[i].sample_size =
4060 					edid_caps->audio_modes[i].sample_size;
4061 		}
4062 	}
4063 
4064 	audio_info->flags.all = edid_caps->speaker_flags;
4065 
4066 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4067 	if (drm_connector->latency_present[0]) {
4068 		audio_info->video_latency = drm_connector->video_latency[0];
4069 		audio_info->audio_latency = drm_connector->audio_latency[0];
4070 	}
4071 
4072 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4073 
4074 }
4075 
4076 static void
4077 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4078 				      struct drm_display_mode *dst_mode)
4079 {
4080 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4081 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4082 	dst_mode->crtc_clock = src_mode->crtc_clock;
4083 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4084 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4085 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4086 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4087 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4088 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4089 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4090 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4091 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4092 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4093 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4094 }
4095 
4096 static void
4097 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4098 					const struct drm_display_mode *native_mode,
4099 					bool scale_enabled)
4100 {
4101 	if (scale_enabled) {
4102 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4103 	} else if (native_mode->clock == drm_mode->clock &&
4104 			native_mode->htotal == drm_mode->htotal &&
4105 			native_mode->vtotal == drm_mode->vtotal) {
4106 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4107 	} else {
4108 		/* no scaling nor amdgpu inserted, no need to patch */
4109 	}
4110 }
4111 
4112 static struct dc_sink *
4113 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4114 {
4115 	struct dc_sink_init_data sink_init_data = { 0 };
4116 	struct dc_sink *sink = NULL;
4117 	sink_init_data.link = aconnector->dc_link;
4118 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4119 
4120 	sink = dc_sink_create(&sink_init_data);
4121 	if (!sink) {
4122 		DRM_ERROR("Failed to create sink!\n");
4123 		return NULL;
4124 	}
4125 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4126 
4127 	return sink;
4128 }
4129 
4130 static void set_multisync_trigger_params(
4131 		struct dc_stream_state *stream)
4132 {
4133 	if (stream->triggered_crtc_reset.enabled) {
4134 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4135 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4136 	}
4137 }
4138 
4139 static void set_master_stream(struct dc_stream_state *stream_set[],
4140 			      int stream_count)
4141 {
4142 	int j, highest_rfr = 0, master_stream = 0;
4143 
4144 	for (j = 0;  j < stream_count; j++) {
4145 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4146 			int refresh_rate = 0;
4147 
4148 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4149 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4150 			if (refresh_rate > highest_rfr) {
4151 				highest_rfr = refresh_rate;
4152 				master_stream = j;
4153 			}
4154 		}
4155 	}
4156 	for (j = 0;  j < stream_count; j++) {
4157 		if (stream_set[j])
4158 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4159 	}
4160 }
4161 
4162 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4163 {
4164 	int i = 0;
4165 
4166 	if (context->stream_count < 2)
4167 		return;
4168 	for (i = 0; i < context->stream_count ; i++) {
4169 		if (!context->streams[i])
4170 			continue;
4171 		/*
4172 		 * TODO: add a function to read AMD VSDB bits and set
4173 		 * crtc_sync_master.multi_sync_enabled flag
4174 		 * For now it's set to false
4175 		 */
4176 		set_multisync_trigger_params(context->streams[i]);
4177 	}
4178 	set_master_stream(context->streams, context->stream_count);
4179 }
4180 
4181 static struct dc_stream_state *
4182 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4183 		       const struct drm_display_mode *drm_mode,
4184 		       const struct dm_connector_state *dm_state,
4185 		       const struct dc_stream_state *old_stream)
4186 {
4187 	struct drm_display_mode *preferred_mode = NULL;
4188 	struct drm_connector *drm_connector;
4189 	const struct drm_connector_state *con_state =
4190 		dm_state ? &dm_state->base : NULL;
4191 	struct dc_stream_state *stream = NULL;
4192 	struct drm_display_mode mode = *drm_mode;
4193 	bool native_mode_found = false;
4194 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4195 	int mode_refresh;
4196 	int preferred_refresh = 0;
4197 #if defined(CONFIG_DRM_AMD_DC_DCN)
4198 	struct dsc_dec_dpcd_caps dsc_caps;
4199 #endif
4200 	uint32_t link_bandwidth_kbps;
4201 
4202 	struct dc_sink *sink = NULL;
4203 	if (aconnector == NULL) {
4204 		DRM_ERROR("aconnector is NULL!\n");
4205 		return stream;
4206 	}
4207 
4208 	drm_connector = &aconnector->base;
4209 
4210 	if (!aconnector->dc_sink) {
4211 		sink = create_fake_sink(aconnector);
4212 		if (!sink)
4213 			return stream;
4214 	} else {
4215 		sink = aconnector->dc_sink;
4216 		dc_sink_retain(sink);
4217 	}
4218 
4219 	stream = dc_create_stream_for_sink(sink);
4220 
4221 	if (stream == NULL) {
4222 		DRM_ERROR("Failed to create stream for sink!\n");
4223 		goto finish;
4224 	}
4225 
4226 	stream->dm_stream_context = aconnector;
4227 
4228 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4229 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4230 
4231 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4232 		/* Search for preferred mode */
4233 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4234 			native_mode_found = true;
4235 			break;
4236 		}
4237 	}
4238 	if (!native_mode_found)
4239 		preferred_mode = list_first_entry_or_null(
4240 				&aconnector->base.modes,
4241 				struct drm_display_mode,
4242 				head);
4243 
4244 	mode_refresh = drm_mode_vrefresh(&mode);
4245 
4246 	if (preferred_mode == NULL) {
4247 		/*
4248 		 * This may not be an error, the use case is when we have no
4249 		 * usermode calls to reset and set mode upon hotplug. In this
4250 		 * case, we call set mode ourselves to restore the previous mode
4251 		 * and the modelist may not be filled in in time.
4252 		 */
4253 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4254 	} else {
4255 		decide_crtc_timing_for_drm_display_mode(
4256 				&mode, preferred_mode,
4257 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4258 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4259 	}
4260 
4261 	if (!dm_state)
4262 		drm_mode_set_crtcinfo(&mode, 0);
4263 
4264 	/*
4265 	* If scaling is enabled and refresh rate didn't change
4266 	* we copy the vic and polarities of the old timings
4267 	*/
4268 	if (!scale || mode_refresh != preferred_refresh)
4269 		fill_stream_properties_from_drm_display_mode(stream,
4270 			&mode, &aconnector->base, con_state, NULL);
4271 	else
4272 		fill_stream_properties_from_drm_display_mode(stream,
4273 			&mode, &aconnector->base, con_state, old_stream);
4274 
4275 	stream->timing.flags.DSC = 0;
4276 
4277 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4278 #if defined(CONFIG_DRM_AMD_DC_DCN)
4279 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4280 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4281 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4282 				      &dsc_caps);
4283 #endif
4284 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4285 							     dc_link_get_link_cap(aconnector->dc_link));
4286 
4287 #if defined(CONFIG_DRM_AMD_DC_DCN)
4288 		if (dsc_caps.is_dsc_supported)
4289 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4290 						  &dsc_caps,
4291 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4292 						  link_bandwidth_kbps,
4293 						  &stream->timing,
4294 						  &stream->timing.dsc_cfg))
4295 				stream->timing.flags.DSC = 1;
4296 #endif
4297 	}
4298 
4299 	update_stream_scaling_settings(&mode, dm_state, stream);
4300 
4301 	fill_audio_info(
4302 		&stream->audio_info,
4303 		drm_connector,
4304 		sink);
4305 
4306 	update_stream_signal(stream, sink);
4307 
4308 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4309 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4310 	if (stream->link->psr_feature_enabled)	{
4311 		struct dc  *core_dc = stream->link->ctx->dc;
4312 
4313 		if (dc_is_dmcu_initialized(core_dc)) {
4314 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
4315 
4316 			stream->psr_version = dmcu->dmcu_version.psr_version;
4317 			mod_build_vsc_infopacket(stream,
4318 					&stream->vsc_infopacket,
4319 					&stream->use_vsc_sdp_for_colorimetry);
4320 		}
4321 	}
4322 finish:
4323 	dc_sink_release(sink);
4324 
4325 	return stream;
4326 }
4327 
4328 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4329 {
4330 	drm_crtc_cleanup(crtc);
4331 	kfree(crtc);
4332 }
4333 
4334 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4335 				  struct drm_crtc_state *state)
4336 {
4337 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4338 
4339 	/* TODO Destroy dc_stream objects are stream object is flattened */
4340 	if (cur->stream)
4341 		dc_stream_release(cur->stream);
4342 
4343 
4344 	__drm_atomic_helper_crtc_destroy_state(state);
4345 
4346 
4347 	kfree(state);
4348 }
4349 
4350 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4351 {
4352 	struct dm_crtc_state *state;
4353 
4354 	if (crtc->state)
4355 		dm_crtc_destroy_state(crtc, crtc->state);
4356 
4357 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4358 	if (WARN_ON(!state))
4359 		return;
4360 
4361 	crtc->state = &state->base;
4362 	crtc->state->crtc = crtc;
4363 
4364 }
4365 
4366 static struct drm_crtc_state *
4367 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4368 {
4369 	struct dm_crtc_state *state, *cur;
4370 
4371 	cur = to_dm_crtc_state(crtc->state);
4372 
4373 	if (WARN_ON(!crtc->state))
4374 		return NULL;
4375 
4376 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4377 	if (!state)
4378 		return NULL;
4379 
4380 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4381 
4382 	if (cur->stream) {
4383 		state->stream = cur->stream;
4384 		dc_stream_retain(state->stream);
4385 	}
4386 
4387 	state->active_planes = cur->active_planes;
4388 	state->interrupts_enabled = cur->interrupts_enabled;
4389 	state->vrr_params = cur->vrr_params;
4390 	state->vrr_infopacket = cur->vrr_infopacket;
4391 	state->abm_level = cur->abm_level;
4392 	state->vrr_supported = cur->vrr_supported;
4393 	state->freesync_config = cur->freesync_config;
4394 	state->crc_src = cur->crc_src;
4395 	state->cm_has_degamma = cur->cm_has_degamma;
4396 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4397 
4398 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4399 
4400 	return &state->base;
4401 }
4402 
4403 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4404 {
4405 	enum dc_irq_source irq_source;
4406 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4407 	struct amdgpu_device *adev = crtc->dev->dev_private;
4408 	int rc;
4409 
4410 	/* Do not set vupdate for DCN hardware */
4411 	if (adev->family > AMDGPU_FAMILY_AI)
4412 		return 0;
4413 
4414 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4415 
4416 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4417 
4418 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4419 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4420 	return rc;
4421 }
4422 
4423 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4424 {
4425 	enum dc_irq_source irq_source;
4426 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4427 	struct amdgpu_device *adev = crtc->dev->dev_private;
4428 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4429 	int rc = 0;
4430 
4431 	if (enable) {
4432 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4433 		if (amdgpu_dm_vrr_active(acrtc_state))
4434 			rc = dm_set_vupdate_irq(crtc, true);
4435 	} else {
4436 		/* vblank irq off -> vupdate irq off */
4437 		rc = dm_set_vupdate_irq(crtc, false);
4438 	}
4439 
4440 	if (rc)
4441 		return rc;
4442 
4443 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4444 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4445 }
4446 
4447 static int dm_enable_vblank(struct drm_crtc *crtc)
4448 {
4449 	return dm_set_vblank(crtc, true);
4450 }
4451 
4452 static void dm_disable_vblank(struct drm_crtc *crtc)
4453 {
4454 	dm_set_vblank(crtc, false);
4455 }
4456 
4457 /* Implemented only the options currently availible for the driver */
4458 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4459 	.reset = dm_crtc_reset_state,
4460 	.destroy = amdgpu_dm_crtc_destroy,
4461 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4462 	.set_config = drm_atomic_helper_set_config,
4463 	.page_flip = drm_atomic_helper_page_flip,
4464 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4465 	.atomic_destroy_state = dm_crtc_destroy_state,
4466 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4467 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4468 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4469 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4470 	.enable_vblank = dm_enable_vblank,
4471 	.disable_vblank = dm_disable_vblank,
4472 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4473 };
4474 
4475 static enum drm_connector_status
4476 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4477 {
4478 	bool connected;
4479 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4480 
4481 	/*
4482 	 * Notes:
4483 	 * 1. This interface is NOT called in context of HPD irq.
4484 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4485 	 * makes it a bad place for *any* MST-related activity.
4486 	 */
4487 
4488 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4489 	    !aconnector->fake_enable)
4490 		connected = (aconnector->dc_sink != NULL);
4491 	else
4492 		connected = (aconnector->base.force == DRM_FORCE_ON);
4493 
4494 	return (connected ? connector_status_connected :
4495 			connector_status_disconnected);
4496 }
4497 
4498 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4499 					    struct drm_connector_state *connector_state,
4500 					    struct drm_property *property,
4501 					    uint64_t val)
4502 {
4503 	struct drm_device *dev = connector->dev;
4504 	struct amdgpu_device *adev = dev->dev_private;
4505 	struct dm_connector_state *dm_old_state =
4506 		to_dm_connector_state(connector->state);
4507 	struct dm_connector_state *dm_new_state =
4508 		to_dm_connector_state(connector_state);
4509 
4510 	int ret = -EINVAL;
4511 
4512 	if (property == dev->mode_config.scaling_mode_property) {
4513 		enum amdgpu_rmx_type rmx_type;
4514 
4515 		switch (val) {
4516 		case DRM_MODE_SCALE_CENTER:
4517 			rmx_type = RMX_CENTER;
4518 			break;
4519 		case DRM_MODE_SCALE_ASPECT:
4520 			rmx_type = RMX_ASPECT;
4521 			break;
4522 		case DRM_MODE_SCALE_FULLSCREEN:
4523 			rmx_type = RMX_FULL;
4524 			break;
4525 		case DRM_MODE_SCALE_NONE:
4526 		default:
4527 			rmx_type = RMX_OFF;
4528 			break;
4529 		}
4530 
4531 		if (dm_old_state->scaling == rmx_type)
4532 			return 0;
4533 
4534 		dm_new_state->scaling = rmx_type;
4535 		ret = 0;
4536 	} else if (property == adev->mode_info.underscan_hborder_property) {
4537 		dm_new_state->underscan_hborder = val;
4538 		ret = 0;
4539 	} else if (property == adev->mode_info.underscan_vborder_property) {
4540 		dm_new_state->underscan_vborder = val;
4541 		ret = 0;
4542 	} else if (property == adev->mode_info.underscan_property) {
4543 		dm_new_state->underscan_enable = val;
4544 		ret = 0;
4545 	} else if (property == adev->mode_info.abm_level_property) {
4546 		dm_new_state->abm_level = val;
4547 		ret = 0;
4548 	}
4549 
4550 	return ret;
4551 }
4552 
4553 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4554 					    const struct drm_connector_state *state,
4555 					    struct drm_property *property,
4556 					    uint64_t *val)
4557 {
4558 	struct drm_device *dev = connector->dev;
4559 	struct amdgpu_device *adev = dev->dev_private;
4560 	struct dm_connector_state *dm_state =
4561 		to_dm_connector_state(state);
4562 	int ret = -EINVAL;
4563 
4564 	if (property == dev->mode_config.scaling_mode_property) {
4565 		switch (dm_state->scaling) {
4566 		case RMX_CENTER:
4567 			*val = DRM_MODE_SCALE_CENTER;
4568 			break;
4569 		case RMX_ASPECT:
4570 			*val = DRM_MODE_SCALE_ASPECT;
4571 			break;
4572 		case RMX_FULL:
4573 			*val = DRM_MODE_SCALE_FULLSCREEN;
4574 			break;
4575 		case RMX_OFF:
4576 		default:
4577 			*val = DRM_MODE_SCALE_NONE;
4578 			break;
4579 		}
4580 		ret = 0;
4581 	} else if (property == adev->mode_info.underscan_hborder_property) {
4582 		*val = dm_state->underscan_hborder;
4583 		ret = 0;
4584 	} else if (property == adev->mode_info.underscan_vborder_property) {
4585 		*val = dm_state->underscan_vborder;
4586 		ret = 0;
4587 	} else if (property == adev->mode_info.underscan_property) {
4588 		*val = dm_state->underscan_enable;
4589 		ret = 0;
4590 	} else if (property == adev->mode_info.abm_level_property) {
4591 		*val = dm_state->abm_level;
4592 		ret = 0;
4593 	}
4594 
4595 	return ret;
4596 }
4597 
4598 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4599 {
4600 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4601 
4602 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4603 }
4604 
4605 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4606 {
4607 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4608 	const struct dc_link *link = aconnector->dc_link;
4609 	struct amdgpu_device *adev = connector->dev->dev_private;
4610 	struct amdgpu_display_manager *dm = &adev->dm;
4611 
4612 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4613 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4614 
4615 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4616 	    link->type != dc_connection_none &&
4617 	    dm->backlight_dev) {
4618 		backlight_device_unregister(dm->backlight_dev);
4619 		dm->backlight_dev = NULL;
4620 	}
4621 #endif
4622 
4623 	if (aconnector->dc_em_sink)
4624 		dc_sink_release(aconnector->dc_em_sink);
4625 	aconnector->dc_em_sink = NULL;
4626 	if (aconnector->dc_sink)
4627 		dc_sink_release(aconnector->dc_sink);
4628 	aconnector->dc_sink = NULL;
4629 
4630 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4631 	drm_connector_unregister(connector);
4632 	drm_connector_cleanup(connector);
4633 	if (aconnector->i2c) {
4634 		i2c_del_adapter(&aconnector->i2c->base);
4635 		kfree(aconnector->i2c);
4636 	}
4637 
4638 	kfree(connector);
4639 }
4640 
4641 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4642 {
4643 	struct dm_connector_state *state =
4644 		to_dm_connector_state(connector->state);
4645 
4646 	if (connector->state)
4647 		__drm_atomic_helper_connector_destroy_state(connector->state);
4648 
4649 	kfree(state);
4650 
4651 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4652 
4653 	if (state) {
4654 		state->scaling = RMX_OFF;
4655 		state->underscan_enable = false;
4656 		state->underscan_hborder = 0;
4657 		state->underscan_vborder = 0;
4658 		state->base.max_requested_bpc = 8;
4659 		state->vcpi_slots = 0;
4660 		state->pbn = 0;
4661 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4662 			state->abm_level = amdgpu_dm_abm_level;
4663 
4664 		__drm_atomic_helper_connector_reset(connector, &state->base);
4665 	}
4666 }
4667 
4668 struct drm_connector_state *
4669 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4670 {
4671 	struct dm_connector_state *state =
4672 		to_dm_connector_state(connector->state);
4673 
4674 	struct dm_connector_state *new_state =
4675 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4676 
4677 	if (!new_state)
4678 		return NULL;
4679 
4680 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4681 
4682 	new_state->freesync_capable = state->freesync_capable;
4683 	new_state->abm_level = state->abm_level;
4684 	new_state->scaling = state->scaling;
4685 	new_state->underscan_enable = state->underscan_enable;
4686 	new_state->underscan_hborder = state->underscan_hborder;
4687 	new_state->underscan_vborder = state->underscan_vborder;
4688 	new_state->vcpi_slots = state->vcpi_slots;
4689 	new_state->pbn = state->pbn;
4690 	return &new_state->base;
4691 }
4692 
4693 static int
4694 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4695 {
4696 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4697 		to_amdgpu_dm_connector(connector);
4698 
4699 #if defined(CONFIG_DEBUG_FS)
4700 	connector_debugfs_init(amdgpu_dm_connector);
4701 #endif
4702 
4703 	return 0;
4704 }
4705 
4706 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4707 	.reset = amdgpu_dm_connector_funcs_reset,
4708 	.detect = amdgpu_dm_connector_detect,
4709 	.fill_modes = drm_helper_probe_single_connector_modes,
4710 	.destroy = amdgpu_dm_connector_destroy,
4711 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4712 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4713 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4714 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4715 	.late_register = amdgpu_dm_connector_late_register,
4716 	.early_unregister = amdgpu_dm_connector_unregister
4717 };
4718 
4719 static int get_modes(struct drm_connector *connector)
4720 {
4721 	return amdgpu_dm_connector_get_modes(connector);
4722 }
4723 
4724 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4725 {
4726 	struct dc_sink_init_data init_params = {
4727 			.link = aconnector->dc_link,
4728 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4729 	};
4730 	struct edid *edid;
4731 
4732 	if (!aconnector->base.edid_blob_ptr) {
4733 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4734 				aconnector->base.name);
4735 
4736 		aconnector->base.force = DRM_FORCE_OFF;
4737 		aconnector->base.override_edid = false;
4738 		return;
4739 	}
4740 
4741 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4742 
4743 	aconnector->edid = edid;
4744 
4745 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4746 		aconnector->dc_link,
4747 		(uint8_t *)edid,
4748 		(edid->extensions + 1) * EDID_LENGTH,
4749 		&init_params);
4750 
4751 	if (aconnector->base.force == DRM_FORCE_ON) {
4752 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4753 		aconnector->dc_link->local_sink :
4754 		aconnector->dc_em_sink;
4755 		dc_sink_retain(aconnector->dc_sink);
4756 	}
4757 }
4758 
4759 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4760 {
4761 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4762 
4763 	/*
4764 	 * In case of headless boot with force on for DP managed connector
4765 	 * Those settings have to be != 0 to get initial modeset
4766 	 */
4767 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4768 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4769 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4770 	}
4771 
4772 
4773 	aconnector->base.override_edid = true;
4774 	create_eml_sink(aconnector);
4775 }
4776 
4777 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4778 				   struct drm_display_mode *mode)
4779 {
4780 	int result = MODE_ERROR;
4781 	struct dc_sink *dc_sink;
4782 	struct amdgpu_device *adev = connector->dev->dev_private;
4783 	/* TODO: Unhardcode stream count */
4784 	struct dc_stream_state *stream;
4785 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4786 	enum dc_status dc_result = DC_OK;
4787 
4788 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4789 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4790 		return result;
4791 
4792 	/*
4793 	 * Only run this the first time mode_valid is called to initilialize
4794 	 * EDID mgmt
4795 	 */
4796 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4797 		!aconnector->dc_em_sink)
4798 		handle_edid_mgmt(aconnector);
4799 
4800 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4801 
4802 	if (dc_sink == NULL) {
4803 		DRM_ERROR("dc_sink is NULL!\n");
4804 		goto fail;
4805 	}
4806 
4807 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4808 	if (stream == NULL) {
4809 		DRM_ERROR("Failed to create stream for sink!\n");
4810 		goto fail;
4811 	}
4812 
4813 	dc_result = dc_validate_stream(adev->dm.dc, stream);
4814 
4815 	if (dc_result == DC_OK)
4816 		result = MODE_OK;
4817 	else
4818 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4819 			      mode->hdisplay,
4820 			      mode->vdisplay,
4821 			      mode->clock,
4822 			      dc_result);
4823 
4824 	dc_stream_release(stream);
4825 
4826 fail:
4827 	/* TODO: error handling*/
4828 	return result;
4829 }
4830 
4831 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4832 				struct dc_info_packet *out)
4833 {
4834 	struct hdmi_drm_infoframe frame;
4835 	unsigned char buf[30]; /* 26 + 4 */
4836 	ssize_t len;
4837 	int ret, i;
4838 
4839 	memset(out, 0, sizeof(*out));
4840 
4841 	if (!state->hdr_output_metadata)
4842 		return 0;
4843 
4844 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4845 	if (ret)
4846 		return ret;
4847 
4848 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4849 	if (len < 0)
4850 		return (int)len;
4851 
4852 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4853 	if (len != 30)
4854 		return -EINVAL;
4855 
4856 	/* Prepare the infopacket for DC. */
4857 	switch (state->connector->connector_type) {
4858 	case DRM_MODE_CONNECTOR_HDMIA:
4859 		out->hb0 = 0x87; /* type */
4860 		out->hb1 = 0x01; /* version */
4861 		out->hb2 = 0x1A; /* length */
4862 		out->sb[0] = buf[3]; /* checksum */
4863 		i = 1;
4864 		break;
4865 
4866 	case DRM_MODE_CONNECTOR_DisplayPort:
4867 	case DRM_MODE_CONNECTOR_eDP:
4868 		out->hb0 = 0x00; /* sdp id, zero */
4869 		out->hb1 = 0x87; /* type */
4870 		out->hb2 = 0x1D; /* payload len - 1 */
4871 		out->hb3 = (0x13 << 2); /* sdp version */
4872 		out->sb[0] = 0x01; /* version */
4873 		out->sb[1] = 0x1A; /* length */
4874 		i = 2;
4875 		break;
4876 
4877 	default:
4878 		return -EINVAL;
4879 	}
4880 
4881 	memcpy(&out->sb[i], &buf[4], 26);
4882 	out->valid = true;
4883 
4884 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4885 		       sizeof(out->sb), false);
4886 
4887 	return 0;
4888 }
4889 
4890 static bool
4891 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4892 			  const struct drm_connector_state *new_state)
4893 {
4894 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4895 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4896 
4897 	if (old_blob != new_blob) {
4898 		if (old_blob && new_blob &&
4899 		    old_blob->length == new_blob->length)
4900 			return memcmp(old_blob->data, new_blob->data,
4901 				      old_blob->length);
4902 
4903 		return true;
4904 	}
4905 
4906 	return false;
4907 }
4908 
4909 static int
4910 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4911 				 struct drm_atomic_state *state)
4912 {
4913 	struct drm_connector_state *new_con_state =
4914 		drm_atomic_get_new_connector_state(state, conn);
4915 	struct drm_connector_state *old_con_state =
4916 		drm_atomic_get_old_connector_state(state, conn);
4917 	struct drm_crtc *crtc = new_con_state->crtc;
4918 	struct drm_crtc_state *new_crtc_state;
4919 	int ret;
4920 
4921 	if (!crtc)
4922 		return 0;
4923 
4924 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4925 		struct dc_info_packet hdr_infopacket;
4926 
4927 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4928 		if (ret)
4929 			return ret;
4930 
4931 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4932 		if (IS_ERR(new_crtc_state))
4933 			return PTR_ERR(new_crtc_state);
4934 
4935 		/*
4936 		 * DC considers the stream backends changed if the
4937 		 * static metadata changes. Forcing the modeset also
4938 		 * gives a simple way for userspace to switch from
4939 		 * 8bpc to 10bpc when setting the metadata to enter
4940 		 * or exit HDR.
4941 		 *
4942 		 * Changing the static metadata after it's been
4943 		 * set is permissible, however. So only force a
4944 		 * modeset if we're entering or exiting HDR.
4945 		 */
4946 		new_crtc_state->mode_changed =
4947 			!old_con_state->hdr_output_metadata ||
4948 			!new_con_state->hdr_output_metadata;
4949 	}
4950 
4951 	return 0;
4952 }
4953 
4954 static const struct drm_connector_helper_funcs
4955 amdgpu_dm_connector_helper_funcs = {
4956 	/*
4957 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
4958 	 * modes will be filtered by drm_mode_validate_size(), and those modes
4959 	 * are missing after user start lightdm. So we need to renew modes list.
4960 	 * in get_modes call back, not just return the modes count
4961 	 */
4962 	.get_modes = get_modes,
4963 	.mode_valid = amdgpu_dm_connector_mode_valid,
4964 	.atomic_check = amdgpu_dm_connector_atomic_check,
4965 };
4966 
4967 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
4968 {
4969 }
4970 
4971 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
4972 {
4973 	struct drm_device *dev = new_crtc_state->crtc->dev;
4974 	struct drm_plane *plane;
4975 
4976 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
4977 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
4978 			return true;
4979 	}
4980 
4981 	return false;
4982 }
4983 
4984 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
4985 {
4986 	struct drm_atomic_state *state = new_crtc_state->state;
4987 	struct drm_plane *plane;
4988 	int num_active = 0;
4989 
4990 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
4991 		struct drm_plane_state *new_plane_state;
4992 
4993 		/* Cursor planes are "fake". */
4994 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
4995 			continue;
4996 
4997 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
4998 
4999 		if (!new_plane_state) {
5000 			/*
5001 			 * The plane is enable on the CRTC and hasn't changed
5002 			 * state. This means that it previously passed
5003 			 * validation and is therefore enabled.
5004 			 */
5005 			num_active += 1;
5006 			continue;
5007 		}
5008 
5009 		/* We need a framebuffer to be considered enabled. */
5010 		num_active += (new_plane_state->fb != NULL);
5011 	}
5012 
5013 	return num_active;
5014 }
5015 
5016 /*
5017  * Sets whether interrupts should be enabled on a specific CRTC.
5018  * We require that the stream be enabled and that there exist active
5019  * DC planes on the stream.
5020  */
5021 static void
5022 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5023 			       struct drm_crtc_state *new_crtc_state)
5024 {
5025 	struct dm_crtc_state *dm_new_crtc_state =
5026 		to_dm_crtc_state(new_crtc_state);
5027 
5028 	dm_new_crtc_state->active_planes = 0;
5029 	dm_new_crtc_state->interrupts_enabled = false;
5030 
5031 	if (!dm_new_crtc_state->stream)
5032 		return;
5033 
5034 	dm_new_crtc_state->active_planes =
5035 		count_crtc_active_planes(new_crtc_state);
5036 
5037 	dm_new_crtc_state->interrupts_enabled =
5038 		dm_new_crtc_state->active_planes > 0;
5039 }
5040 
5041 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5042 				       struct drm_crtc_state *state)
5043 {
5044 	struct amdgpu_device *adev = crtc->dev->dev_private;
5045 	struct dc *dc = adev->dm.dc;
5046 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5047 	int ret = -EINVAL;
5048 
5049 	/*
5050 	 * Update interrupt state for the CRTC. This needs to happen whenever
5051 	 * the CRTC has changed or whenever any of its planes have changed.
5052 	 * Atomic check satisfies both of these requirements since the CRTC
5053 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5054 	 */
5055 	dm_update_crtc_interrupt_state(crtc, state);
5056 
5057 	if (unlikely(!dm_crtc_state->stream &&
5058 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5059 		WARN_ON(1);
5060 		return ret;
5061 	}
5062 
5063 	/* In some use cases, like reset, no stream is attached */
5064 	if (!dm_crtc_state->stream)
5065 		return 0;
5066 
5067 	/*
5068 	 * We want at least one hardware plane enabled to use
5069 	 * the stream with a cursor enabled.
5070 	 */
5071 	if (state->enable && state->active &&
5072 	    does_crtc_have_active_cursor(state) &&
5073 	    dm_crtc_state->active_planes == 0)
5074 		return -EINVAL;
5075 
5076 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5077 		return 0;
5078 
5079 	return ret;
5080 }
5081 
5082 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5083 				      const struct drm_display_mode *mode,
5084 				      struct drm_display_mode *adjusted_mode)
5085 {
5086 	return true;
5087 }
5088 
5089 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5090 	.disable = dm_crtc_helper_disable,
5091 	.atomic_check = dm_crtc_helper_atomic_check,
5092 	.mode_fixup = dm_crtc_helper_mode_fixup,
5093 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5094 };
5095 
5096 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5097 {
5098 
5099 }
5100 
5101 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5102 {
5103 	switch (display_color_depth) {
5104 		case COLOR_DEPTH_666:
5105 			return 6;
5106 		case COLOR_DEPTH_888:
5107 			return 8;
5108 		case COLOR_DEPTH_101010:
5109 			return 10;
5110 		case COLOR_DEPTH_121212:
5111 			return 12;
5112 		case COLOR_DEPTH_141414:
5113 			return 14;
5114 		case COLOR_DEPTH_161616:
5115 			return 16;
5116 		default:
5117 			break;
5118 		}
5119 	return 0;
5120 }
5121 
5122 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5123 					  struct drm_crtc_state *crtc_state,
5124 					  struct drm_connector_state *conn_state)
5125 {
5126 	struct drm_atomic_state *state = crtc_state->state;
5127 	struct drm_connector *connector = conn_state->connector;
5128 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5129 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5130 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5131 	struct drm_dp_mst_topology_mgr *mst_mgr;
5132 	struct drm_dp_mst_port *mst_port;
5133 	enum dc_color_depth color_depth;
5134 	int clock, bpp = 0;
5135 	bool is_y420 = false;
5136 
5137 	if (!aconnector->port || !aconnector->dc_sink)
5138 		return 0;
5139 
5140 	mst_port = aconnector->port;
5141 	mst_mgr = &aconnector->mst_port->mst_mgr;
5142 
5143 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5144 		return 0;
5145 
5146 	if (!state->duplicated) {
5147 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5148 				aconnector->force_yuv420_output;
5149 		color_depth = convert_color_depth_from_display_info(connector, conn_state,
5150 								    is_y420);
5151 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5152 		clock = adjusted_mode->clock;
5153 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5154 	}
5155 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5156 									   mst_mgr,
5157 									   mst_port,
5158 									   dm_new_connector_state->pbn,
5159 									   0);
5160 	if (dm_new_connector_state->vcpi_slots < 0) {
5161 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5162 		return dm_new_connector_state->vcpi_slots;
5163 	}
5164 	return 0;
5165 }
5166 
5167 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5168 	.disable = dm_encoder_helper_disable,
5169 	.atomic_check = dm_encoder_helper_atomic_check
5170 };
5171 
5172 #if defined(CONFIG_DRM_AMD_DC_DCN)
5173 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5174 					    struct dc_state *dc_state)
5175 {
5176 	struct dc_stream_state *stream = NULL;
5177 	struct drm_connector *connector;
5178 	struct drm_connector_state *new_con_state, *old_con_state;
5179 	struct amdgpu_dm_connector *aconnector;
5180 	struct dm_connector_state *dm_conn_state;
5181 	int i, j, clock, bpp;
5182 	int vcpi, pbn_div, pbn = 0;
5183 
5184 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5185 
5186 		aconnector = to_amdgpu_dm_connector(connector);
5187 
5188 		if (!aconnector->port)
5189 			continue;
5190 
5191 		if (!new_con_state || !new_con_state->crtc)
5192 			continue;
5193 
5194 		dm_conn_state = to_dm_connector_state(new_con_state);
5195 
5196 		for (j = 0; j < dc_state->stream_count; j++) {
5197 			stream = dc_state->streams[j];
5198 			if (!stream)
5199 				continue;
5200 
5201 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5202 				break;
5203 
5204 			stream = NULL;
5205 		}
5206 
5207 		if (!stream)
5208 			continue;
5209 
5210 		if (stream->timing.flags.DSC != 1) {
5211 			drm_dp_mst_atomic_enable_dsc(state,
5212 						     aconnector->port,
5213 						     dm_conn_state->pbn,
5214 						     0,
5215 						     false);
5216 			continue;
5217 		}
5218 
5219 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5220 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5221 		clock = stream->timing.pix_clk_100hz / 10;
5222 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5223 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5224 						    aconnector->port,
5225 						    pbn, pbn_div,
5226 						    true);
5227 		if (vcpi < 0)
5228 			return vcpi;
5229 
5230 		dm_conn_state->pbn = pbn;
5231 		dm_conn_state->vcpi_slots = vcpi;
5232 	}
5233 	return 0;
5234 }
5235 #endif
5236 
5237 static void dm_drm_plane_reset(struct drm_plane *plane)
5238 {
5239 	struct dm_plane_state *amdgpu_state = NULL;
5240 
5241 	if (plane->state)
5242 		plane->funcs->atomic_destroy_state(plane, plane->state);
5243 
5244 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5245 	WARN_ON(amdgpu_state == NULL);
5246 
5247 	if (amdgpu_state)
5248 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5249 }
5250 
5251 static struct drm_plane_state *
5252 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5253 {
5254 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5255 
5256 	old_dm_plane_state = to_dm_plane_state(plane->state);
5257 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5258 	if (!dm_plane_state)
5259 		return NULL;
5260 
5261 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5262 
5263 	if (old_dm_plane_state->dc_state) {
5264 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5265 		dc_plane_state_retain(dm_plane_state->dc_state);
5266 	}
5267 
5268 	return &dm_plane_state->base;
5269 }
5270 
5271 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5272 				struct drm_plane_state *state)
5273 {
5274 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5275 
5276 	if (dm_plane_state->dc_state)
5277 		dc_plane_state_release(dm_plane_state->dc_state);
5278 
5279 	drm_atomic_helper_plane_destroy_state(plane, state);
5280 }
5281 
5282 static const struct drm_plane_funcs dm_plane_funcs = {
5283 	.update_plane	= drm_atomic_helper_update_plane,
5284 	.disable_plane	= drm_atomic_helper_disable_plane,
5285 	.destroy	= drm_primary_helper_destroy,
5286 	.reset = dm_drm_plane_reset,
5287 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5288 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5289 };
5290 
5291 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5292 				      struct drm_plane_state *new_state)
5293 {
5294 	struct amdgpu_framebuffer *afb;
5295 	struct drm_gem_object *obj;
5296 	struct amdgpu_device *adev;
5297 	struct amdgpu_bo *rbo;
5298 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5299 	struct list_head list;
5300 	struct ttm_validate_buffer tv;
5301 	struct ww_acquire_ctx ticket;
5302 	uint64_t tiling_flags;
5303 	uint32_t domain;
5304 	int r;
5305 
5306 	dm_plane_state_old = to_dm_plane_state(plane->state);
5307 	dm_plane_state_new = to_dm_plane_state(new_state);
5308 
5309 	if (!new_state->fb) {
5310 		DRM_DEBUG_DRIVER("No FB bound\n");
5311 		return 0;
5312 	}
5313 
5314 	afb = to_amdgpu_framebuffer(new_state->fb);
5315 	obj = new_state->fb->obj[0];
5316 	rbo = gem_to_amdgpu_bo(obj);
5317 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5318 	INIT_LIST_HEAD(&list);
5319 
5320 	tv.bo = &rbo->tbo;
5321 	tv.num_shared = 1;
5322 	list_add(&tv.head, &list);
5323 
5324 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5325 	if (r) {
5326 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5327 		return r;
5328 	}
5329 
5330 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5331 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5332 	else
5333 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5334 
5335 	r = amdgpu_bo_pin(rbo, domain);
5336 	if (unlikely(r != 0)) {
5337 		if (r != -ERESTARTSYS)
5338 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5339 		ttm_eu_backoff_reservation(&ticket, &list);
5340 		return r;
5341 	}
5342 
5343 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5344 	if (unlikely(r != 0)) {
5345 		amdgpu_bo_unpin(rbo);
5346 		ttm_eu_backoff_reservation(&ticket, &list);
5347 		DRM_ERROR("%p bind failed\n", rbo);
5348 		return r;
5349 	}
5350 
5351 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5352 
5353 	ttm_eu_backoff_reservation(&ticket, &list);
5354 
5355 	afb->address = amdgpu_bo_gpu_offset(rbo);
5356 
5357 	amdgpu_bo_ref(rbo);
5358 
5359 	if (dm_plane_state_new->dc_state &&
5360 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5361 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5362 
5363 		fill_plane_buffer_attributes(
5364 			adev, afb, plane_state->format, plane_state->rotation,
5365 			tiling_flags, &plane_state->tiling_info,
5366 			&plane_state->plane_size, &plane_state->dcc,
5367 			&plane_state->address);
5368 	}
5369 
5370 	return 0;
5371 }
5372 
5373 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5374 				       struct drm_plane_state *old_state)
5375 {
5376 	struct amdgpu_bo *rbo;
5377 	int r;
5378 
5379 	if (!old_state->fb)
5380 		return;
5381 
5382 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5383 	r = amdgpu_bo_reserve(rbo, false);
5384 	if (unlikely(r)) {
5385 		DRM_ERROR("failed to reserve rbo before unpin\n");
5386 		return;
5387 	}
5388 
5389 	amdgpu_bo_unpin(rbo);
5390 	amdgpu_bo_unreserve(rbo);
5391 	amdgpu_bo_unref(&rbo);
5392 }
5393 
5394 static int dm_plane_atomic_check(struct drm_plane *plane,
5395 				 struct drm_plane_state *state)
5396 {
5397 	struct amdgpu_device *adev = plane->dev->dev_private;
5398 	struct dc *dc = adev->dm.dc;
5399 	struct dm_plane_state *dm_plane_state;
5400 	struct dc_scaling_info scaling_info;
5401 	int ret;
5402 
5403 	dm_plane_state = to_dm_plane_state(state);
5404 
5405 	if (!dm_plane_state->dc_state)
5406 		return 0;
5407 
5408 	ret = fill_dc_scaling_info(state, &scaling_info);
5409 	if (ret)
5410 		return ret;
5411 
5412 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5413 		return 0;
5414 
5415 	return -EINVAL;
5416 }
5417 
5418 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5419 				       struct drm_plane_state *new_plane_state)
5420 {
5421 	/* Only support async updates on cursor planes. */
5422 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5423 		return -EINVAL;
5424 
5425 	return 0;
5426 }
5427 
5428 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5429 					 struct drm_plane_state *new_state)
5430 {
5431 	struct drm_plane_state *old_state =
5432 		drm_atomic_get_old_plane_state(new_state->state, plane);
5433 
5434 	swap(plane->state->fb, new_state->fb);
5435 
5436 	plane->state->src_x = new_state->src_x;
5437 	plane->state->src_y = new_state->src_y;
5438 	plane->state->src_w = new_state->src_w;
5439 	plane->state->src_h = new_state->src_h;
5440 	plane->state->crtc_x = new_state->crtc_x;
5441 	plane->state->crtc_y = new_state->crtc_y;
5442 	plane->state->crtc_w = new_state->crtc_w;
5443 	plane->state->crtc_h = new_state->crtc_h;
5444 
5445 	handle_cursor_update(plane, old_state);
5446 }
5447 
5448 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5449 	.prepare_fb = dm_plane_helper_prepare_fb,
5450 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5451 	.atomic_check = dm_plane_atomic_check,
5452 	.atomic_async_check = dm_plane_atomic_async_check,
5453 	.atomic_async_update = dm_plane_atomic_async_update
5454 };
5455 
5456 /*
5457  * TODO: these are currently initialized to rgb formats only.
5458  * For future use cases we should either initialize them dynamically based on
5459  * plane capabilities, or initialize this array to all formats, so internal drm
5460  * check will succeed, and let DC implement proper check
5461  */
5462 static const uint32_t rgb_formats[] = {
5463 	DRM_FORMAT_XRGB8888,
5464 	DRM_FORMAT_ARGB8888,
5465 	DRM_FORMAT_RGBA8888,
5466 	DRM_FORMAT_XRGB2101010,
5467 	DRM_FORMAT_XBGR2101010,
5468 	DRM_FORMAT_ARGB2101010,
5469 	DRM_FORMAT_ABGR2101010,
5470 	DRM_FORMAT_XBGR8888,
5471 	DRM_FORMAT_ABGR8888,
5472 	DRM_FORMAT_RGB565,
5473 };
5474 
5475 static const uint32_t overlay_formats[] = {
5476 	DRM_FORMAT_XRGB8888,
5477 	DRM_FORMAT_ARGB8888,
5478 	DRM_FORMAT_RGBA8888,
5479 	DRM_FORMAT_XBGR8888,
5480 	DRM_FORMAT_ABGR8888,
5481 	DRM_FORMAT_RGB565
5482 };
5483 
5484 static const u32 cursor_formats[] = {
5485 	DRM_FORMAT_ARGB8888
5486 };
5487 
5488 static int get_plane_formats(const struct drm_plane *plane,
5489 			     const struct dc_plane_cap *plane_cap,
5490 			     uint32_t *formats, int max_formats)
5491 {
5492 	int i, num_formats = 0;
5493 
5494 	/*
5495 	 * TODO: Query support for each group of formats directly from
5496 	 * DC plane caps. This will require adding more formats to the
5497 	 * caps list.
5498 	 */
5499 
5500 	switch (plane->type) {
5501 	case DRM_PLANE_TYPE_PRIMARY:
5502 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5503 			if (num_formats >= max_formats)
5504 				break;
5505 
5506 			formats[num_formats++] = rgb_formats[i];
5507 		}
5508 
5509 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5510 			formats[num_formats++] = DRM_FORMAT_NV12;
5511 		break;
5512 
5513 	case DRM_PLANE_TYPE_OVERLAY:
5514 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5515 			if (num_formats >= max_formats)
5516 				break;
5517 
5518 			formats[num_formats++] = overlay_formats[i];
5519 		}
5520 		break;
5521 
5522 	case DRM_PLANE_TYPE_CURSOR:
5523 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5524 			if (num_formats >= max_formats)
5525 				break;
5526 
5527 			formats[num_formats++] = cursor_formats[i];
5528 		}
5529 		break;
5530 	}
5531 
5532 	return num_formats;
5533 }
5534 
5535 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5536 				struct drm_plane *plane,
5537 				unsigned long possible_crtcs,
5538 				const struct dc_plane_cap *plane_cap)
5539 {
5540 	uint32_t formats[32];
5541 	int num_formats;
5542 	int res = -EPERM;
5543 
5544 	num_formats = get_plane_formats(plane, plane_cap, formats,
5545 					ARRAY_SIZE(formats));
5546 
5547 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5548 				       &dm_plane_funcs, formats, num_formats,
5549 				       NULL, plane->type, NULL);
5550 	if (res)
5551 		return res;
5552 
5553 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5554 	    plane_cap && plane_cap->per_pixel_alpha) {
5555 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5556 					  BIT(DRM_MODE_BLEND_PREMULTI);
5557 
5558 		drm_plane_create_alpha_property(plane);
5559 		drm_plane_create_blend_mode_property(plane, blend_caps);
5560 	}
5561 
5562 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5563 	    plane_cap && plane_cap->pixel_format_support.nv12) {
5564 		/* This only affects YUV formats. */
5565 		drm_plane_create_color_properties(
5566 			plane,
5567 			BIT(DRM_COLOR_YCBCR_BT601) |
5568 			BIT(DRM_COLOR_YCBCR_BT709),
5569 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5570 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5571 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5572 	}
5573 
5574 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5575 
5576 	/* Create (reset) the plane state */
5577 	if (plane->funcs->reset)
5578 		plane->funcs->reset(plane);
5579 
5580 	return 0;
5581 }
5582 
5583 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5584 			       struct drm_plane *plane,
5585 			       uint32_t crtc_index)
5586 {
5587 	struct amdgpu_crtc *acrtc = NULL;
5588 	struct drm_plane *cursor_plane;
5589 
5590 	int res = -ENOMEM;
5591 
5592 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5593 	if (!cursor_plane)
5594 		goto fail;
5595 
5596 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5597 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5598 
5599 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5600 	if (!acrtc)
5601 		goto fail;
5602 
5603 	res = drm_crtc_init_with_planes(
5604 			dm->ddev,
5605 			&acrtc->base,
5606 			plane,
5607 			cursor_plane,
5608 			&amdgpu_dm_crtc_funcs, NULL);
5609 
5610 	if (res)
5611 		goto fail;
5612 
5613 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5614 
5615 	/* Create (reset) the plane state */
5616 	if (acrtc->base.funcs->reset)
5617 		acrtc->base.funcs->reset(&acrtc->base);
5618 
5619 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5620 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5621 
5622 	acrtc->crtc_id = crtc_index;
5623 	acrtc->base.enabled = false;
5624 	acrtc->otg_inst = -1;
5625 
5626 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5627 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5628 				   true, MAX_COLOR_LUT_ENTRIES);
5629 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5630 
5631 	return 0;
5632 
5633 fail:
5634 	kfree(acrtc);
5635 	kfree(cursor_plane);
5636 	return res;
5637 }
5638 
5639 
5640 static int to_drm_connector_type(enum signal_type st)
5641 {
5642 	switch (st) {
5643 	case SIGNAL_TYPE_HDMI_TYPE_A:
5644 		return DRM_MODE_CONNECTOR_HDMIA;
5645 	case SIGNAL_TYPE_EDP:
5646 		return DRM_MODE_CONNECTOR_eDP;
5647 	case SIGNAL_TYPE_LVDS:
5648 		return DRM_MODE_CONNECTOR_LVDS;
5649 	case SIGNAL_TYPE_RGB:
5650 		return DRM_MODE_CONNECTOR_VGA;
5651 	case SIGNAL_TYPE_DISPLAY_PORT:
5652 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5653 		return DRM_MODE_CONNECTOR_DisplayPort;
5654 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5655 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5656 		return DRM_MODE_CONNECTOR_DVID;
5657 	case SIGNAL_TYPE_VIRTUAL:
5658 		return DRM_MODE_CONNECTOR_VIRTUAL;
5659 
5660 	default:
5661 		return DRM_MODE_CONNECTOR_Unknown;
5662 	}
5663 }
5664 
5665 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5666 {
5667 	struct drm_encoder *encoder;
5668 
5669 	/* There is only one encoder per connector */
5670 	drm_connector_for_each_possible_encoder(connector, encoder)
5671 		return encoder;
5672 
5673 	return NULL;
5674 }
5675 
5676 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5677 {
5678 	struct drm_encoder *encoder;
5679 	struct amdgpu_encoder *amdgpu_encoder;
5680 
5681 	encoder = amdgpu_dm_connector_to_encoder(connector);
5682 
5683 	if (encoder == NULL)
5684 		return;
5685 
5686 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5687 
5688 	amdgpu_encoder->native_mode.clock = 0;
5689 
5690 	if (!list_empty(&connector->probed_modes)) {
5691 		struct drm_display_mode *preferred_mode = NULL;
5692 
5693 		list_for_each_entry(preferred_mode,
5694 				    &connector->probed_modes,
5695 				    head) {
5696 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5697 				amdgpu_encoder->native_mode = *preferred_mode;
5698 
5699 			break;
5700 		}
5701 
5702 	}
5703 }
5704 
5705 static struct drm_display_mode *
5706 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5707 			     char *name,
5708 			     int hdisplay, int vdisplay)
5709 {
5710 	struct drm_device *dev = encoder->dev;
5711 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5712 	struct drm_display_mode *mode = NULL;
5713 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5714 
5715 	mode = drm_mode_duplicate(dev, native_mode);
5716 
5717 	if (mode == NULL)
5718 		return NULL;
5719 
5720 	mode->hdisplay = hdisplay;
5721 	mode->vdisplay = vdisplay;
5722 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5723 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5724 
5725 	return mode;
5726 
5727 }
5728 
5729 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5730 						 struct drm_connector *connector)
5731 {
5732 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5733 	struct drm_display_mode *mode = NULL;
5734 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5735 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5736 				to_amdgpu_dm_connector(connector);
5737 	int i;
5738 	int n;
5739 	struct mode_size {
5740 		char name[DRM_DISPLAY_MODE_LEN];
5741 		int w;
5742 		int h;
5743 	} common_modes[] = {
5744 		{  "640x480",  640,  480},
5745 		{  "800x600",  800,  600},
5746 		{ "1024x768", 1024,  768},
5747 		{ "1280x720", 1280,  720},
5748 		{ "1280x800", 1280,  800},
5749 		{"1280x1024", 1280, 1024},
5750 		{ "1440x900", 1440,  900},
5751 		{"1680x1050", 1680, 1050},
5752 		{"1600x1200", 1600, 1200},
5753 		{"1920x1080", 1920, 1080},
5754 		{"1920x1200", 1920, 1200}
5755 	};
5756 
5757 	n = ARRAY_SIZE(common_modes);
5758 
5759 	for (i = 0; i < n; i++) {
5760 		struct drm_display_mode *curmode = NULL;
5761 		bool mode_existed = false;
5762 
5763 		if (common_modes[i].w > native_mode->hdisplay ||
5764 		    common_modes[i].h > native_mode->vdisplay ||
5765 		   (common_modes[i].w == native_mode->hdisplay &&
5766 		    common_modes[i].h == native_mode->vdisplay))
5767 			continue;
5768 
5769 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5770 			if (common_modes[i].w == curmode->hdisplay &&
5771 			    common_modes[i].h == curmode->vdisplay) {
5772 				mode_existed = true;
5773 				break;
5774 			}
5775 		}
5776 
5777 		if (mode_existed)
5778 			continue;
5779 
5780 		mode = amdgpu_dm_create_common_mode(encoder,
5781 				common_modes[i].name, common_modes[i].w,
5782 				common_modes[i].h);
5783 		drm_mode_probed_add(connector, mode);
5784 		amdgpu_dm_connector->num_modes++;
5785 	}
5786 }
5787 
5788 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5789 					      struct edid *edid)
5790 {
5791 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5792 			to_amdgpu_dm_connector(connector);
5793 
5794 	if (edid) {
5795 		/* empty probed_modes */
5796 		INIT_LIST_HEAD(&connector->probed_modes);
5797 		amdgpu_dm_connector->num_modes =
5798 				drm_add_edid_modes(connector, edid);
5799 
5800 		/* sorting the probed modes before calling function
5801 		 * amdgpu_dm_get_native_mode() since EDID can have
5802 		 * more than one preferred mode. The modes that are
5803 		 * later in the probed mode list could be of higher
5804 		 * and preferred resolution. For example, 3840x2160
5805 		 * resolution in base EDID preferred timing and 4096x2160
5806 		 * preferred resolution in DID extension block later.
5807 		 */
5808 		drm_mode_sort(&connector->probed_modes);
5809 		amdgpu_dm_get_native_mode(connector);
5810 	} else {
5811 		amdgpu_dm_connector->num_modes = 0;
5812 	}
5813 }
5814 
5815 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5816 {
5817 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5818 			to_amdgpu_dm_connector(connector);
5819 	struct drm_encoder *encoder;
5820 	struct edid *edid = amdgpu_dm_connector->edid;
5821 
5822 	encoder = amdgpu_dm_connector_to_encoder(connector);
5823 
5824 	if (!edid || !drm_edid_is_valid(edid)) {
5825 		amdgpu_dm_connector->num_modes =
5826 				drm_add_modes_noedid(connector, 640, 480);
5827 	} else {
5828 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5829 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5830 	}
5831 	amdgpu_dm_fbc_init(connector);
5832 
5833 	return amdgpu_dm_connector->num_modes;
5834 }
5835 
5836 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5837 				     struct amdgpu_dm_connector *aconnector,
5838 				     int connector_type,
5839 				     struct dc_link *link,
5840 				     int link_index)
5841 {
5842 	struct amdgpu_device *adev = dm->ddev->dev_private;
5843 
5844 	/*
5845 	 * Some of the properties below require access to state, like bpc.
5846 	 * Allocate some default initial connector state with our reset helper.
5847 	 */
5848 	if (aconnector->base.funcs->reset)
5849 		aconnector->base.funcs->reset(&aconnector->base);
5850 
5851 	aconnector->connector_id = link_index;
5852 	aconnector->dc_link = link;
5853 	aconnector->base.interlace_allowed = false;
5854 	aconnector->base.doublescan_allowed = false;
5855 	aconnector->base.stereo_allowed = false;
5856 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5857 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5858 	aconnector->audio_inst = -1;
5859 	mutex_init(&aconnector->hpd_lock);
5860 
5861 	/*
5862 	 * configure support HPD hot plug connector_>polled default value is 0
5863 	 * which means HPD hot plug not supported
5864 	 */
5865 	switch (connector_type) {
5866 	case DRM_MODE_CONNECTOR_HDMIA:
5867 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5868 		aconnector->base.ycbcr_420_allowed =
5869 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5870 		break;
5871 	case DRM_MODE_CONNECTOR_DisplayPort:
5872 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5873 		aconnector->base.ycbcr_420_allowed =
5874 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
5875 		break;
5876 	case DRM_MODE_CONNECTOR_DVID:
5877 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5878 		break;
5879 	default:
5880 		break;
5881 	}
5882 
5883 	drm_object_attach_property(&aconnector->base.base,
5884 				dm->ddev->mode_config.scaling_mode_property,
5885 				DRM_MODE_SCALE_NONE);
5886 
5887 	drm_object_attach_property(&aconnector->base.base,
5888 				adev->mode_info.underscan_property,
5889 				UNDERSCAN_OFF);
5890 	drm_object_attach_property(&aconnector->base.base,
5891 				adev->mode_info.underscan_hborder_property,
5892 				0);
5893 	drm_object_attach_property(&aconnector->base.base,
5894 				adev->mode_info.underscan_vborder_property,
5895 				0);
5896 
5897 	drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5898 
5899 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
5900 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5901 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5902 
5903 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5904 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5905 		drm_object_attach_property(&aconnector->base.base,
5906 				adev->mode_info.abm_level_property, 0);
5907 	}
5908 
5909 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5910 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5911 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
5912 		drm_object_attach_property(
5913 			&aconnector->base.base,
5914 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
5915 
5916 		drm_connector_attach_vrr_capable_property(
5917 			&aconnector->base);
5918 #ifdef CONFIG_DRM_AMD_DC_HDCP
5919 		if (adev->dm.hdcp_workqueue)
5920 			drm_connector_attach_content_protection_property(&aconnector->base, true);
5921 #endif
5922 	}
5923 }
5924 
5925 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5926 			      struct i2c_msg *msgs, int num)
5927 {
5928 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5929 	struct ddc_service *ddc_service = i2c->ddc_service;
5930 	struct i2c_command cmd;
5931 	int i;
5932 	int result = -EIO;
5933 
5934 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5935 
5936 	if (!cmd.payloads)
5937 		return result;
5938 
5939 	cmd.number_of_payloads = num;
5940 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5941 	cmd.speed = 100;
5942 
5943 	for (i = 0; i < num; i++) {
5944 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5945 		cmd.payloads[i].address = msgs[i].addr;
5946 		cmd.payloads[i].length = msgs[i].len;
5947 		cmd.payloads[i].data = msgs[i].buf;
5948 	}
5949 
5950 	if (dc_submit_i2c(
5951 			ddc_service->ctx->dc,
5952 			ddc_service->ddc_pin->hw_info.ddc_channel,
5953 			&cmd))
5954 		result = num;
5955 
5956 	kfree(cmd.payloads);
5957 	return result;
5958 }
5959 
5960 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
5961 {
5962 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
5963 }
5964 
5965 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
5966 	.master_xfer = amdgpu_dm_i2c_xfer,
5967 	.functionality = amdgpu_dm_i2c_func,
5968 };
5969 
5970 static struct amdgpu_i2c_adapter *
5971 create_i2c(struct ddc_service *ddc_service,
5972 	   int link_index,
5973 	   int *res)
5974 {
5975 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
5976 	struct amdgpu_i2c_adapter *i2c;
5977 
5978 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
5979 	if (!i2c)
5980 		return NULL;
5981 	i2c->base.owner = THIS_MODULE;
5982 	i2c->base.class = I2C_CLASS_DDC;
5983 	i2c->base.dev.parent = &adev->pdev->dev;
5984 	i2c->base.algo = &amdgpu_dm_i2c_algo;
5985 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
5986 	i2c_set_adapdata(&i2c->base, i2c);
5987 	i2c->ddc_service = ddc_service;
5988 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
5989 
5990 	return i2c;
5991 }
5992 
5993 
5994 /*
5995  * Note: this function assumes that dc_link_detect() was called for the
5996  * dc_link which will be represented by this aconnector.
5997  */
5998 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
5999 				    struct amdgpu_dm_connector *aconnector,
6000 				    uint32_t link_index,
6001 				    struct amdgpu_encoder *aencoder)
6002 {
6003 	int res = 0;
6004 	int connector_type;
6005 	struct dc *dc = dm->dc;
6006 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6007 	struct amdgpu_i2c_adapter *i2c;
6008 
6009 	link->priv = aconnector;
6010 
6011 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6012 
6013 	i2c = create_i2c(link->ddc, link->link_index, &res);
6014 	if (!i2c) {
6015 		DRM_ERROR("Failed to create i2c adapter data\n");
6016 		return -ENOMEM;
6017 	}
6018 
6019 	aconnector->i2c = i2c;
6020 	res = i2c_add_adapter(&i2c->base);
6021 
6022 	if (res) {
6023 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6024 		goto out_free;
6025 	}
6026 
6027 	connector_type = to_drm_connector_type(link->connector_signal);
6028 
6029 	res = drm_connector_init_with_ddc(
6030 			dm->ddev,
6031 			&aconnector->base,
6032 			&amdgpu_dm_connector_funcs,
6033 			connector_type,
6034 			&i2c->base);
6035 
6036 	if (res) {
6037 		DRM_ERROR("connector_init failed\n");
6038 		aconnector->connector_id = -1;
6039 		goto out_free;
6040 	}
6041 
6042 	drm_connector_helper_add(
6043 			&aconnector->base,
6044 			&amdgpu_dm_connector_helper_funcs);
6045 
6046 	amdgpu_dm_connector_init_helper(
6047 		dm,
6048 		aconnector,
6049 		connector_type,
6050 		link,
6051 		link_index);
6052 
6053 	drm_connector_attach_encoder(
6054 		&aconnector->base, &aencoder->base);
6055 
6056 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6057 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6058 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
6059 
6060 out_free:
6061 	if (res) {
6062 		kfree(i2c);
6063 		aconnector->i2c = NULL;
6064 	}
6065 	return res;
6066 }
6067 
6068 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6069 {
6070 	switch (adev->mode_info.num_crtc) {
6071 	case 1:
6072 		return 0x1;
6073 	case 2:
6074 		return 0x3;
6075 	case 3:
6076 		return 0x7;
6077 	case 4:
6078 		return 0xf;
6079 	case 5:
6080 		return 0x1f;
6081 	case 6:
6082 	default:
6083 		return 0x3f;
6084 	}
6085 }
6086 
6087 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6088 				  struct amdgpu_encoder *aencoder,
6089 				  uint32_t link_index)
6090 {
6091 	struct amdgpu_device *adev = dev->dev_private;
6092 
6093 	int res = drm_encoder_init(dev,
6094 				   &aencoder->base,
6095 				   &amdgpu_dm_encoder_funcs,
6096 				   DRM_MODE_ENCODER_TMDS,
6097 				   NULL);
6098 
6099 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6100 
6101 	if (!res)
6102 		aencoder->encoder_id = link_index;
6103 	else
6104 		aencoder->encoder_id = -1;
6105 
6106 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6107 
6108 	return res;
6109 }
6110 
6111 static void manage_dm_interrupts(struct amdgpu_device *adev,
6112 				 struct amdgpu_crtc *acrtc,
6113 				 bool enable)
6114 {
6115 	/*
6116 	 * this is not correct translation but will work as soon as VBLANK
6117 	 * constant is the same as PFLIP
6118 	 */
6119 	int irq_type =
6120 		amdgpu_display_crtc_idx_to_irq_type(
6121 			adev,
6122 			acrtc->crtc_id);
6123 
6124 	if (enable) {
6125 		drm_crtc_vblank_on(&acrtc->base);
6126 		amdgpu_irq_get(
6127 			adev,
6128 			&adev->pageflip_irq,
6129 			irq_type);
6130 	} else {
6131 
6132 		amdgpu_irq_put(
6133 			adev,
6134 			&adev->pageflip_irq,
6135 			irq_type);
6136 		drm_crtc_vblank_off(&acrtc->base);
6137 	}
6138 }
6139 
6140 static bool
6141 is_scaling_state_different(const struct dm_connector_state *dm_state,
6142 			   const struct dm_connector_state *old_dm_state)
6143 {
6144 	if (dm_state->scaling != old_dm_state->scaling)
6145 		return true;
6146 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6147 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6148 			return true;
6149 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6150 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6151 			return true;
6152 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6153 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6154 		return true;
6155 	return false;
6156 }
6157 
6158 #ifdef CONFIG_DRM_AMD_DC_HDCP
6159 static bool is_content_protection_different(struct drm_connector_state *state,
6160 					    const struct drm_connector_state *old_state,
6161 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6162 {
6163 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6164 
6165 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6166 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6167 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6168 		return true;
6169 	}
6170 
6171 	/* CP is being re enabled, ignore this */
6172 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6173 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6174 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6175 		return false;
6176 	}
6177 
6178 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6179 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6180 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6181 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6182 
6183 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6184 	 * hot-plug, headless s3, dpms
6185 	 */
6186 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6187 	    aconnector->dc_sink != NULL)
6188 		return true;
6189 
6190 	if (old_state->content_protection == state->content_protection)
6191 		return false;
6192 
6193 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6194 		return true;
6195 
6196 	return false;
6197 }
6198 
6199 #endif
6200 static void remove_stream(struct amdgpu_device *adev,
6201 			  struct amdgpu_crtc *acrtc,
6202 			  struct dc_stream_state *stream)
6203 {
6204 	/* this is the update mode case */
6205 
6206 	acrtc->otg_inst = -1;
6207 	acrtc->enabled = false;
6208 }
6209 
6210 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6211 			       struct dc_cursor_position *position)
6212 {
6213 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6214 	int x, y;
6215 	int xorigin = 0, yorigin = 0;
6216 
6217 	position->enable = false;
6218 	position->x = 0;
6219 	position->y = 0;
6220 
6221 	if (!crtc || !plane->state->fb)
6222 		return 0;
6223 
6224 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6225 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6226 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6227 			  __func__,
6228 			  plane->state->crtc_w,
6229 			  plane->state->crtc_h);
6230 		return -EINVAL;
6231 	}
6232 
6233 	x = plane->state->crtc_x;
6234 	y = plane->state->crtc_y;
6235 
6236 	if (x <= -amdgpu_crtc->max_cursor_width ||
6237 	    y <= -amdgpu_crtc->max_cursor_height)
6238 		return 0;
6239 
6240 	if (crtc->primary->state) {
6241 		/* avivo cursor are offset into the total surface */
6242 		x += crtc->primary->state->src_x >> 16;
6243 		y += crtc->primary->state->src_y >> 16;
6244 	}
6245 
6246 	if (x < 0) {
6247 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6248 		x = 0;
6249 	}
6250 	if (y < 0) {
6251 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6252 		y = 0;
6253 	}
6254 	position->enable = true;
6255 	position->x = x;
6256 	position->y = y;
6257 	position->x_hotspot = xorigin;
6258 	position->y_hotspot = yorigin;
6259 
6260 	return 0;
6261 }
6262 
6263 static void handle_cursor_update(struct drm_plane *plane,
6264 				 struct drm_plane_state *old_plane_state)
6265 {
6266 	struct amdgpu_device *adev = plane->dev->dev_private;
6267 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6268 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6269 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6270 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6271 	uint64_t address = afb ? afb->address : 0;
6272 	struct dc_cursor_position position;
6273 	struct dc_cursor_attributes attributes;
6274 	int ret;
6275 
6276 	if (!plane->state->fb && !old_plane_state->fb)
6277 		return;
6278 
6279 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6280 			 __func__,
6281 			 amdgpu_crtc->crtc_id,
6282 			 plane->state->crtc_w,
6283 			 plane->state->crtc_h);
6284 
6285 	ret = get_cursor_position(plane, crtc, &position);
6286 	if (ret)
6287 		return;
6288 
6289 	if (!position.enable) {
6290 		/* turn off cursor */
6291 		if (crtc_state && crtc_state->stream) {
6292 			mutex_lock(&adev->dm.dc_lock);
6293 			dc_stream_set_cursor_position(crtc_state->stream,
6294 						      &position);
6295 			mutex_unlock(&adev->dm.dc_lock);
6296 		}
6297 		return;
6298 	}
6299 
6300 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6301 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6302 
6303 	memset(&attributes, 0, sizeof(attributes));
6304 	attributes.address.high_part = upper_32_bits(address);
6305 	attributes.address.low_part  = lower_32_bits(address);
6306 	attributes.width             = plane->state->crtc_w;
6307 	attributes.height            = plane->state->crtc_h;
6308 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6309 	attributes.rotation_angle    = 0;
6310 	attributes.attribute_flags.value = 0;
6311 
6312 	attributes.pitch = attributes.width;
6313 
6314 	if (crtc_state->stream) {
6315 		mutex_lock(&adev->dm.dc_lock);
6316 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6317 							 &attributes))
6318 			DRM_ERROR("DC failed to set cursor attributes\n");
6319 
6320 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6321 						   &position))
6322 			DRM_ERROR("DC failed to set cursor position\n");
6323 		mutex_unlock(&adev->dm.dc_lock);
6324 	}
6325 }
6326 
6327 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6328 {
6329 
6330 	assert_spin_locked(&acrtc->base.dev->event_lock);
6331 	WARN_ON(acrtc->event);
6332 
6333 	acrtc->event = acrtc->base.state->event;
6334 
6335 	/* Set the flip status */
6336 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6337 
6338 	/* Mark this event as consumed */
6339 	acrtc->base.state->event = NULL;
6340 
6341 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6342 						 acrtc->crtc_id);
6343 }
6344 
6345 static void update_freesync_state_on_stream(
6346 	struct amdgpu_display_manager *dm,
6347 	struct dm_crtc_state *new_crtc_state,
6348 	struct dc_stream_state *new_stream,
6349 	struct dc_plane_state *surface,
6350 	u32 flip_timestamp_in_us)
6351 {
6352 	struct mod_vrr_params vrr_params;
6353 	struct dc_info_packet vrr_infopacket = {0};
6354 	struct amdgpu_device *adev = dm->adev;
6355 	unsigned long flags;
6356 
6357 	if (!new_stream)
6358 		return;
6359 
6360 	/*
6361 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6362 	 * For now it's sufficient to just guard against these conditions.
6363 	 */
6364 
6365 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6366 		return;
6367 
6368 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6369 	vrr_params = new_crtc_state->vrr_params;
6370 
6371 	if (surface) {
6372 		mod_freesync_handle_preflip(
6373 			dm->freesync_module,
6374 			surface,
6375 			new_stream,
6376 			flip_timestamp_in_us,
6377 			&vrr_params);
6378 
6379 		if (adev->family < AMDGPU_FAMILY_AI &&
6380 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6381 			mod_freesync_handle_v_update(dm->freesync_module,
6382 						     new_stream, &vrr_params);
6383 
6384 			/* Need to call this before the frame ends. */
6385 			dc_stream_adjust_vmin_vmax(dm->dc,
6386 						   new_crtc_state->stream,
6387 						   &vrr_params.adjust);
6388 		}
6389 	}
6390 
6391 	mod_freesync_build_vrr_infopacket(
6392 		dm->freesync_module,
6393 		new_stream,
6394 		&vrr_params,
6395 		PACKET_TYPE_VRR,
6396 		TRANSFER_FUNC_UNKNOWN,
6397 		&vrr_infopacket);
6398 
6399 	new_crtc_state->freesync_timing_changed |=
6400 		(memcmp(&new_crtc_state->vrr_params.adjust,
6401 			&vrr_params.adjust,
6402 			sizeof(vrr_params.adjust)) != 0);
6403 
6404 	new_crtc_state->freesync_vrr_info_changed |=
6405 		(memcmp(&new_crtc_state->vrr_infopacket,
6406 			&vrr_infopacket,
6407 			sizeof(vrr_infopacket)) != 0);
6408 
6409 	new_crtc_state->vrr_params = vrr_params;
6410 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6411 
6412 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6413 	new_stream->vrr_infopacket = vrr_infopacket;
6414 
6415 	if (new_crtc_state->freesync_vrr_info_changed)
6416 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6417 			      new_crtc_state->base.crtc->base.id,
6418 			      (int)new_crtc_state->base.vrr_enabled,
6419 			      (int)vrr_params.state);
6420 
6421 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6422 }
6423 
6424 static void pre_update_freesync_state_on_stream(
6425 	struct amdgpu_display_manager *dm,
6426 	struct dm_crtc_state *new_crtc_state)
6427 {
6428 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6429 	struct mod_vrr_params vrr_params;
6430 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6431 	struct amdgpu_device *adev = dm->adev;
6432 	unsigned long flags;
6433 
6434 	if (!new_stream)
6435 		return;
6436 
6437 	/*
6438 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6439 	 * For now it's sufficient to just guard against these conditions.
6440 	 */
6441 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6442 		return;
6443 
6444 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6445 	vrr_params = new_crtc_state->vrr_params;
6446 
6447 	if (new_crtc_state->vrr_supported &&
6448 	    config.min_refresh_in_uhz &&
6449 	    config.max_refresh_in_uhz) {
6450 		config.state = new_crtc_state->base.vrr_enabled ?
6451 			VRR_STATE_ACTIVE_VARIABLE :
6452 			VRR_STATE_INACTIVE;
6453 	} else {
6454 		config.state = VRR_STATE_UNSUPPORTED;
6455 	}
6456 
6457 	mod_freesync_build_vrr_params(dm->freesync_module,
6458 				      new_stream,
6459 				      &config, &vrr_params);
6460 
6461 	new_crtc_state->freesync_timing_changed |=
6462 		(memcmp(&new_crtc_state->vrr_params.adjust,
6463 			&vrr_params.adjust,
6464 			sizeof(vrr_params.adjust)) != 0);
6465 
6466 	new_crtc_state->vrr_params = vrr_params;
6467 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6468 }
6469 
6470 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6471 					    struct dm_crtc_state *new_state)
6472 {
6473 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6474 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6475 
6476 	if (!old_vrr_active && new_vrr_active) {
6477 		/* Transition VRR inactive -> active:
6478 		 * While VRR is active, we must not disable vblank irq, as a
6479 		 * reenable after disable would compute bogus vblank/pflip
6480 		 * timestamps if it likely happened inside display front-porch.
6481 		 *
6482 		 * We also need vupdate irq for the actual core vblank handling
6483 		 * at end of vblank.
6484 		 */
6485 		dm_set_vupdate_irq(new_state->base.crtc, true);
6486 		drm_crtc_vblank_get(new_state->base.crtc);
6487 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6488 				 __func__, new_state->base.crtc->base.id);
6489 	} else if (old_vrr_active && !new_vrr_active) {
6490 		/* Transition VRR active -> inactive:
6491 		 * Allow vblank irq disable again for fixed refresh rate.
6492 		 */
6493 		dm_set_vupdate_irq(new_state->base.crtc, false);
6494 		drm_crtc_vblank_put(new_state->base.crtc);
6495 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6496 				 __func__, new_state->base.crtc->base.id);
6497 	}
6498 }
6499 
6500 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6501 {
6502 	struct drm_plane *plane;
6503 	struct drm_plane_state *old_plane_state, *new_plane_state;
6504 	int i;
6505 
6506 	/*
6507 	 * TODO: Make this per-stream so we don't issue redundant updates for
6508 	 * commits with multiple streams.
6509 	 */
6510 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6511 				       new_plane_state, i)
6512 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6513 			handle_cursor_update(plane, old_plane_state);
6514 }
6515 
6516 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6517 				    struct dc_state *dc_state,
6518 				    struct drm_device *dev,
6519 				    struct amdgpu_display_manager *dm,
6520 				    struct drm_crtc *pcrtc,
6521 				    bool wait_for_vblank)
6522 {
6523 	uint32_t i;
6524 	uint64_t timestamp_ns;
6525 	struct drm_plane *plane;
6526 	struct drm_plane_state *old_plane_state, *new_plane_state;
6527 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6528 	struct drm_crtc_state *new_pcrtc_state =
6529 			drm_atomic_get_new_crtc_state(state, pcrtc);
6530 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6531 	struct dm_crtc_state *dm_old_crtc_state =
6532 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6533 	int planes_count = 0, vpos, hpos;
6534 	long r;
6535 	unsigned long flags;
6536 	struct amdgpu_bo *abo;
6537 	uint64_t tiling_flags;
6538 	uint32_t target_vblank, last_flip_vblank;
6539 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6540 	bool pflip_present = false;
6541 	bool swizzle = true;
6542 	struct {
6543 		struct dc_surface_update surface_updates[MAX_SURFACES];
6544 		struct dc_plane_info plane_infos[MAX_SURFACES];
6545 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6546 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6547 		struct dc_stream_update stream_update;
6548 	} *bundle;
6549 
6550 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6551 
6552 	if (!bundle) {
6553 		dm_error("Failed to allocate update bundle\n");
6554 		goto cleanup;
6555 	}
6556 
6557 	/*
6558 	 * Disable the cursor first if we're disabling all the planes.
6559 	 * It'll remain on the screen after the planes are re-enabled
6560 	 * if we don't.
6561 	 */
6562 	if (acrtc_state->active_planes == 0)
6563 		amdgpu_dm_commit_cursors(state);
6564 
6565 	/* update planes when needed */
6566 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6567 		struct drm_crtc *crtc = new_plane_state->crtc;
6568 		struct drm_crtc_state *new_crtc_state;
6569 		struct drm_framebuffer *fb = new_plane_state->fb;
6570 		bool plane_needs_flip;
6571 		struct dc_plane_state *dc_plane;
6572 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6573 
6574 		/* Cursor plane is handled after stream updates */
6575 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6576 			continue;
6577 
6578 		if (!fb || !crtc || pcrtc != crtc)
6579 			continue;
6580 
6581 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6582 		if (!new_crtc_state->active)
6583 			continue;
6584 
6585 		dc_plane = dm_new_plane_state->dc_state;
6586 
6587 		if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
6588 			swizzle = false;
6589 
6590 		bundle->surface_updates[planes_count].surface = dc_plane;
6591 		if (new_pcrtc_state->color_mgmt_changed) {
6592 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6593 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6594 		}
6595 
6596 		fill_dc_scaling_info(new_plane_state,
6597 				     &bundle->scaling_infos[planes_count]);
6598 
6599 		bundle->surface_updates[planes_count].scaling_info =
6600 			&bundle->scaling_infos[planes_count];
6601 
6602 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6603 
6604 		pflip_present = pflip_present || plane_needs_flip;
6605 
6606 		if (!plane_needs_flip) {
6607 			planes_count += 1;
6608 			continue;
6609 		}
6610 
6611 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6612 
6613 		/*
6614 		 * Wait for all fences on this FB. Do limited wait to avoid
6615 		 * deadlock during GPU reset when this fence will not signal
6616 		 * but we hold reservation lock for the BO.
6617 		 */
6618 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6619 							false,
6620 							msecs_to_jiffies(5000));
6621 		if (unlikely(r <= 0))
6622 			DRM_ERROR("Waiting for fences timed out!");
6623 
6624 		/*
6625 		 * TODO This might fail and hence better not used, wait
6626 		 * explicitly on fences instead
6627 		 * and in general should be called for
6628 		 * blocking commit to as per framework helpers
6629 		 */
6630 		r = amdgpu_bo_reserve(abo, true);
6631 		if (unlikely(r != 0))
6632 			DRM_ERROR("failed to reserve buffer before flip\n");
6633 
6634 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6635 
6636 		amdgpu_bo_unreserve(abo);
6637 
6638 		fill_dc_plane_info_and_addr(
6639 			dm->adev, new_plane_state, tiling_flags,
6640 			&bundle->plane_infos[planes_count],
6641 			&bundle->flip_addrs[planes_count].address);
6642 
6643 		bundle->surface_updates[planes_count].plane_info =
6644 			&bundle->plane_infos[planes_count];
6645 
6646 		/*
6647 		 * Only allow immediate flips for fast updates that don't
6648 		 * change FB pitch, DCC state, rotation or mirroing.
6649 		 */
6650 		bundle->flip_addrs[planes_count].flip_immediate =
6651 			crtc->state->async_flip &&
6652 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6653 
6654 		timestamp_ns = ktime_get_ns();
6655 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6656 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6657 		bundle->surface_updates[planes_count].surface = dc_plane;
6658 
6659 		if (!bundle->surface_updates[planes_count].surface) {
6660 			DRM_ERROR("No surface for CRTC: id=%d\n",
6661 					acrtc_attach->crtc_id);
6662 			continue;
6663 		}
6664 
6665 		if (plane == pcrtc->primary)
6666 			update_freesync_state_on_stream(
6667 				dm,
6668 				acrtc_state,
6669 				acrtc_state->stream,
6670 				dc_plane,
6671 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6672 
6673 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6674 				 __func__,
6675 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6676 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6677 
6678 		planes_count += 1;
6679 
6680 	}
6681 
6682 	if (pflip_present) {
6683 		if (!vrr_active) {
6684 			/* Use old throttling in non-vrr fixed refresh rate mode
6685 			 * to keep flip scheduling based on target vblank counts
6686 			 * working in a backwards compatible way, e.g., for
6687 			 * clients using the GLX_OML_sync_control extension or
6688 			 * DRI3/Present extension with defined target_msc.
6689 			 */
6690 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6691 		}
6692 		else {
6693 			/* For variable refresh rate mode only:
6694 			 * Get vblank of last completed flip to avoid > 1 vrr
6695 			 * flips per video frame by use of throttling, but allow
6696 			 * flip programming anywhere in the possibly large
6697 			 * variable vrr vblank interval for fine-grained flip
6698 			 * timing control and more opportunity to avoid stutter
6699 			 * on late submission of flips.
6700 			 */
6701 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6702 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6703 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6704 		}
6705 
6706 		target_vblank = last_flip_vblank + wait_for_vblank;
6707 
6708 		/*
6709 		 * Wait until we're out of the vertical blank period before the one
6710 		 * targeted by the flip
6711 		 */
6712 		while ((acrtc_attach->enabled &&
6713 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6714 							    0, &vpos, &hpos, NULL,
6715 							    NULL, &pcrtc->hwmode)
6716 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6717 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6718 			(int)(target_vblank -
6719 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6720 			usleep_range(1000, 1100);
6721 		}
6722 
6723 		if (acrtc_attach->base.state->event) {
6724 			drm_crtc_vblank_get(pcrtc);
6725 
6726 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6727 
6728 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6729 			prepare_flip_isr(acrtc_attach);
6730 
6731 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6732 		}
6733 
6734 		if (acrtc_state->stream) {
6735 			if (acrtc_state->freesync_vrr_info_changed)
6736 				bundle->stream_update.vrr_infopacket =
6737 					&acrtc_state->stream->vrr_infopacket;
6738 		}
6739 	}
6740 
6741 	/* Update the planes if changed or disable if we don't have any. */
6742 	if ((planes_count || acrtc_state->active_planes == 0) &&
6743 		acrtc_state->stream) {
6744 		bundle->stream_update.stream = acrtc_state->stream;
6745 		if (new_pcrtc_state->mode_changed) {
6746 			bundle->stream_update.src = acrtc_state->stream->src;
6747 			bundle->stream_update.dst = acrtc_state->stream->dst;
6748 		}
6749 
6750 		if (new_pcrtc_state->color_mgmt_changed) {
6751 			/*
6752 			 * TODO: This isn't fully correct since we've actually
6753 			 * already modified the stream in place.
6754 			 */
6755 			bundle->stream_update.gamut_remap =
6756 				&acrtc_state->stream->gamut_remap_matrix;
6757 			bundle->stream_update.output_csc_transform =
6758 				&acrtc_state->stream->csc_color_matrix;
6759 			bundle->stream_update.out_transfer_func =
6760 				acrtc_state->stream->out_transfer_func;
6761 		}
6762 
6763 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6764 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6765 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6766 
6767 		/*
6768 		 * If FreeSync state on the stream has changed then we need to
6769 		 * re-adjust the min/max bounds now that DC doesn't handle this
6770 		 * as part of commit.
6771 		 */
6772 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6773 		    amdgpu_dm_vrr_active(acrtc_state)) {
6774 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6775 			dc_stream_adjust_vmin_vmax(
6776 				dm->dc, acrtc_state->stream,
6777 				&acrtc_state->vrr_params.adjust);
6778 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6779 		}
6780 		mutex_lock(&dm->dc_lock);
6781 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6782 				acrtc_state->stream->link->psr_allow_active)
6783 			amdgpu_dm_psr_disable(acrtc_state->stream);
6784 
6785 		dc_commit_updates_for_stream(dm->dc,
6786 						     bundle->surface_updates,
6787 						     planes_count,
6788 						     acrtc_state->stream,
6789 						     &bundle->stream_update,
6790 						     dc_state);
6791 
6792 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6793 						acrtc_state->stream->psr_version &&
6794 						!acrtc_state->stream->link->psr_feature_enabled)
6795 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
6796 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6797 						acrtc_state->stream->link->psr_feature_enabled &&
6798 						!acrtc_state->stream->link->psr_allow_active &&
6799 						swizzle) {
6800 			amdgpu_dm_psr_enable(acrtc_state->stream);
6801 		}
6802 
6803 		mutex_unlock(&dm->dc_lock);
6804 	}
6805 
6806 	/*
6807 	 * Update cursor state *after* programming all the planes.
6808 	 * This avoids redundant programming in the case where we're going
6809 	 * to be disabling a single plane - those pipes are being disabled.
6810 	 */
6811 	if (acrtc_state->active_planes)
6812 		amdgpu_dm_commit_cursors(state);
6813 
6814 cleanup:
6815 	kfree(bundle);
6816 }
6817 
6818 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6819 				   struct drm_atomic_state *state)
6820 {
6821 	struct amdgpu_device *adev = dev->dev_private;
6822 	struct amdgpu_dm_connector *aconnector;
6823 	struct drm_connector *connector;
6824 	struct drm_connector_state *old_con_state, *new_con_state;
6825 	struct drm_crtc_state *new_crtc_state;
6826 	struct dm_crtc_state *new_dm_crtc_state;
6827 	const struct dc_stream_status *status;
6828 	int i, inst;
6829 
6830 	/* Notify device removals. */
6831 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6832 		if (old_con_state->crtc != new_con_state->crtc) {
6833 			/* CRTC changes require notification. */
6834 			goto notify;
6835 		}
6836 
6837 		if (!new_con_state->crtc)
6838 			continue;
6839 
6840 		new_crtc_state = drm_atomic_get_new_crtc_state(
6841 			state, new_con_state->crtc);
6842 
6843 		if (!new_crtc_state)
6844 			continue;
6845 
6846 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6847 			continue;
6848 
6849 	notify:
6850 		aconnector = to_amdgpu_dm_connector(connector);
6851 
6852 		mutex_lock(&adev->dm.audio_lock);
6853 		inst = aconnector->audio_inst;
6854 		aconnector->audio_inst = -1;
6855 		mutex_unlock(&adev->dm.audio_lock);
6856 
6857 		amdgpu_dm_audio_eld_notify(adev, inst);
6858 	}
6859 
6860 	/* Notify audio device additions. */
6861 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6862 		if (!new_con_state->crtc)
6863 			continue;
6864 
6865 		new_crtc_state = drm_atomic_get_new_crtc_state(
6866 			state, new_con_state->crtc);
6867 
6868 		if (!new_crtc_state)
6869 			continue;
6870 
6871 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6872 			continue;
6873 
6874 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6875 		if (!new_dm_crtc_state->stream)
6876 			continue;
6877 
6878 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6879 		if (!status)
6880 			continue;
6881 
6882 		aconnector = to_amdgpu_dm_connector(connector);
6883 
6884 		mutex_lock(&adev->dm.audio_lock);
6885 		inst = status->audio_inst;
6886 		aconnector->audio_inst = inst;
6887 		mutex_unlock(&adev->dm.audio_lock);
6888 
6889 		amdgpu_dm_audio_eld_notify(adev, inst);
6890 	}
6891 }
6892 
6893 /*
6894  * Enable interrupts on CRTCs that are newly active, undergone
6895  * a modeset, or have active planes again.
6896  *
6897  * Done in two passes, based on the for_modeset flag:
6898  * Pass 1: For CRTCs going through modeset
6899  * Pass 2: For CRTCs going from 0 to n active planes
6900  *
6901  * Interrupts can only be enabled after the planes are programmed,
6902  * so this requires a two-pass approach since we don't want to
6903  * just defer the interrupts until after commit planes every time.
6904  */
6905 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6906 					     struct drm_atomic_state *state,
6907 					     bool for_modeset)
6908 {
6909 	struct amdgpu_device *adev = dev->dev_private;
6910 	struct drm_crtc *crtc;
6911 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6912 	int i;
6913 #ifdef CONFIG_DEBUG_FS
6914 	enum amdgpu_dm_pipe_crc_source source;
6915 #endif
6916 
6917 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6918 				      new_crtc_state, i) {
6919 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6920 		struct dm_crtc_state *dm_new_crtc_state =
6921 			to_dm_crtc_state(new_crtc_state);
6922 		struct dm_crtc_state *dm_old_crtc_state =
6923 			to_dm_crtc_state(old_crtc_state);
6924 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6925 		bool run_pass;
6926 
6927 		run_pass = (for_modeset && modeset) ||
6928 			   (!for_modeset && !modeset &&
6929 			    !dm_old_crtc_state->interrupts_enabled);
6930 
6931 		if (!run_pass)
6932 			continue;
6933 
6934 		if (!dm_new_crtc_state->interrupts_enabled)
6935 			continue;
6936 
6937 		manage_dm_interrupts(adev, acrtc, true);
6938 
6939 #ifdef CONFIG_DEBUG_FS
6940 		/* The stream has changed so CRC capture needs to re-enabled. */
6941 		source = dm_new_crtc_state->crc_src;
6942 		if (amdgpu_dm_is_valid_crc_source(source)) {
6943 			amdgpu_dm_crtc_configure_crc_source(
6944 				crtc, dm_new_crtc_state,
6945 				dm_new_crtc_state->crc_src);
6946 		}
6947 #endif
6948 	}
6949 }
6950 
6951 /*
6952  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6953  * @crtc_state: the DRM CRTC state
6954  * @stream_state: the DC stream state.
6955  *
6956  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6957  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6958  */
6959 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6960 						struct dc_stream_state *stream_state)
6961 {
6962 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6963 }
6964 
6965 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6966 				   struct drm_atomic_state *state,
6967 				   bool nonblock)
6968 {
6969 	struct drm_crtc *crtc;
6970 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6971 	struct amdgpu_device *adev = dev->dev_private;
6972 	int i;
6973 
6974 	/*
6975 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
6976 	 * a modeset, being disabled, or have no active planes.
6977 	 *
6978 	 * It's done in atomic commit rather than commit tail for now since
6979 	 * some of these interrupt handlers access the current CRTC state and
6980 	 * potentially the stream pointer itself.
6981 	 *
6982 	 * Since the atomic state is swapped within atomic commit and not within
6983 	 * commit tail this would leave to new state (that hasn't been committed yet)
6984 	 * being accesssed from within the handlers.
6985 	 *
6986 	 * TODO: Fix this so we can do this in commit tail and not have to block
6987 	 * in atomic check.
6988 	 */
6989 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
6990 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
6991 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
6992 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6993 
6994 		if (dm_old_crtc_state->interrupts_enabled &&
6995 		    (!dm_new_crtc_state->interrupts_enabled ||
6996 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
6997 			manage_dm_interrupts(adev, acrtc, false);
6998 	}
6999 	/*
7000 	 * Add check here for SoC's that support hardware cursor plane, to
7001 	 * unset legacy_cursor_update
7002 	 */
7003 
7004 	return drm_atomic_helper_commit(dev, state, nonblock);
7005 
7006 	/*TODO Handle EINTR, reenable IRQ*/
7007 }
7008 
7009 /**
7010  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7011  * @state: The atomic state to commit
7012  *
7013  * This will tell DC to commit the constructed DC state from atomic_check,
7014  * programming the hardware. Any failures here implies a hardware failure, since
7015  * atomic check should have filtered anything non-kosher.
7016  */
7017 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7018 {
7019 	struct drm_device *dev = state->dev;
7020 	struct amdgpu_device *adev = dev->dev_private;
7021 	struct amdgpu_display_manager *dm = &adev->dm;
7022 	struct dm_atomic_state *dm_state;
7023 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7024 	uint32_t i, j;
7025 	struct drm_crtc *crtc;
7026 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7027 	unsigned long flags;
7028 	bool wait_for_vblank = true;
7029 	struct drm_connector *connector;
7030 	struct drm_connector_state *old_con_state, *new_con_state;
7031 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7032 	int crtc_disable_count = 0;
7033 
7034 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7035 
7036 	dm_state = dm_atomic_get_new_state(state);
7037 	if (dm_state && dm_state->context) {
7038 		dc_state = dm_state->context;
7039 	} else {
7040 		/* No state changes, retain current state. */
7041 		dc_state_temp = dc_create_state(dm->dc);
7042 		ASSERT(dc_state_temp);
7043 		dc_state = dc_state_temp;
7044 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7045 	}
7046 
7047 	/* update changed items */
7048 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7049 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7050 
7051 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7052 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7053 
7054 		DRM_DEBUG_DRIVER(
7055 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7056 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7057 			"connectors_changed:%d\n",
7058 			acrtc->crtc_id,
7059 			new_crtc_state->enable,
7060 			new_crtc_state->active,
7061 			new_crtc_state->planes_changed,
7062 			new_crtc_state->mode_changed,
7063 			new_crtc_state->active_changed,
7064 			new_crtc_state->connectors_changed);
7065 
7066 		/* Copy all transient state flags into dc state */
7067 		if (dm_new_crtc_state->stream) {
7068 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7069 							    dm_new_crtc_state->stream);
7070 		}
7071 
7072 		/* handles headless hotplug case, updating new_state and
7073 		 * aconnector as needed
7074 		 */
7075 
7076 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7077 
7078 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7079 
7080 			if (!dm_new_crtc_state->stream) {
7081 				/*
7082 				 * this could happen because of issues with
7083 				 * userspace notifications delivery.
7084 				 * In this case userspace tries to set mode on
7085 				 * display which is disconnected in fact.
7086 				 * dc_sink is NULL in this case on aconnector.
7087 				 * We expect reset mode will come soon.
7088 				 *
7089 				 * This can also happen when unplug is done
7090 				 * during resume sequence ended
7091 				 *
7092 				 * In this case, we want to pretend we still
7093 				 * have a sink to keep the pipe running so that
7094 				 * hw state is consistent with the sw state
7095 				 */
7096 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7097 						__func__, acrtc->base.base.id);
7098 				continue;
7099 			}
7100 
7101 			if (dm_old_crtc_state->stream)
7102 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7103 
7104 			pm_runtime_get_noresume(dev->dev);
7105 
7106 			acrtc->enabled = true;
7107 			acrtc->hw_mode = new_crtc_state->mode;
7108 			crtc->hwmode = new_crtc_state->mode;
7109 		} else if (modereset_required(new_crtc_state)) {
7110 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7111 			/* i.e. reset mode */
7112 			if (dm_old_crtc_state->stream) {
7113 				if (dm_old_crtc_state->stream->link->psr_allow_active)
7114 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7115 
7116 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7117 			}
7118 		}
7119 	} /* for_each_crtc_in_state() */
7120 
7121 	if (dc_state) {
7122 		dm_enable_per_frame_crtc_master_sync(dc_state);
7123 		mutex_lock(&dm->dc_lock);
7124 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7125 		mutex_unlock(&dm->dc_lock);
7126 	}
7127 
7128 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7129 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7130 
7131 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7132 
7133 		if (dm_new_crtc_state->stream != NULL) {
7134 			const struct dc_stream_status *status =
7135 					dc_stream_get_status(dm_new_crtc_state->stream);
7136 
7137 			if (!status)
7138 				status = dc_stream_get_status_from_state(dc_state,
7139 									 dm_new_crtc_state->stream);
7140 
7141 			if (!status)
7142 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7143 			else
7144 				acrtc->otg_inst = status->primary_otg_inst;
7145 		}
7146 	}
7147 #ifdef CONFIG_DRM_AMD_DC_HDCP
7148 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7149 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7150 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7151 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7152 
7153 		new_crtc_state = NULL;
7154 
7155 		if (acrtc)
7156 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7157 
7158 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7159 
7160 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7161 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7162 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7163 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7164 			continue;
7165 		}
7166 
7167 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7168 			hdcp_update_display(
7169 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7170 				new_con_state->hdcp_content_type,
7171 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7172 													 : false);
7173 	}
7174 #endif
7175 
7176 	/* Handle connector state changes */
7177 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7178 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7179 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7180 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7181 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7182 		struct dc_stream_update stream_update;
7183 		struct dc_info_packet hdr_packet;
7184 		struct dc_stream_status *status = NULL;
7185 		bool abm_changed, hdr_changed, scaling_changed;
7186 
7187 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7188 		memset(&stream_update, 0, sizeof(stream_update));
7189 
7190 		if (acrtc) {
7191 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7192 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7193 		}
7194 
7195 		/* Skip any modesets/resets */
7196 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7197 			continue;
7198 
7199 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7200 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7201 
7202 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7203 							     dm_old_con_state);
7204 
7205 		abm_changed = dm_new_crtc_state->abm_level !=
7206 			      dm_old_crtc_state->abm_level;
7207 
7208 		hdr_changed =
7209 			is_hdr_metadata_different(old_con_state, new_con_state);
7210 
7211 		if (!scaling_changed && !abm_changed && !hdr_changed)
7212 			continue;
7213 
7214 		stream_update.stream = dm_new_crtc_state->stream;
7215 		if (scaling_changed) {
7216 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7217 					dm_new_con_state, dm_new_crtc_state->stream);
7218 
7219 			stream_update.src = dm_new_crtc_state->stream->src;
7220 			stream_update.dst = dm_new_crtc_state->stream->dst;
7221 		}
7222 
7223 		if (abm_changed) {
7224 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7225 
7226 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7227 		}
7228 
7229 		if (hdr_changed) {
7230 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7231 			stream_update.hdr_static_metadata = &hdr_packet;
7232 		}
7233 
7234 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7235 		WARN_ON(!status);
7236 		WARN_ON(!status->plane_count);
7237 
7238 		/*
7239 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7240 		 * Here we create an empty update on each plane.
7241 		 * To fix this, DC should permit updating only stream properties.
7242 		 */
7243 		for (j = 0; j < status->plane_count; j++)
7244 			dummy_updates[j].surface = status->plane_states[0];
7245 
7246 
7247 		mutex_lock(&dm->dc_lock);
7248 		dc_commit_updates_for_stream(dm->dc,
7249 						     dummy_updates,
7250 						     status->plane_count,
7251 						     dm_new_crtc_state->stream,
7252 						     &stream_update,
7253 						     dc_state);
7254 		mutex_unlock(&dm->dc_lock);
7255 	}
7256 
7257 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7258 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7259 				      new_crtc_state, i) {
7260 		if (old_crtc_state->active && !new_crtc_state->active)
7261 			crtc_disable_count++;
7262 
7263 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7264 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7265 
7266 		/* Update freesync active state. */
7267 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7268 
7269 		/* Handle vrr on->off / off->on transitions */
7270 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7271 						dm_new_crtc_state);
7272 	}
7273 
7274 	/* Enable interrupts for CRTCs going through a modeset. */
7275 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7276 
7277 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7278 		if (new_crtc_state->async_flip)
7279 			wait_for_vblank = false;
7280 
7281 	/* update planes when needed per crtc*/
7282 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7283 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7284 
7285 		if (dm_new_crtc_state->stream)
7286 			amdgpu_dm_commit_planes(state, dc_state, dev,
7287 						dm, crtc, wait_for_vblank);
7288 	}
7289 
7290 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7291 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7292 
7293 	/* Update audio instances for each connector. */
7294 	amdgpu_dm_commit_audio(dev, state);
7295 
7296 	/*
7297 	 * send vblank event on all events not handled in flip and
7298 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7299 	 */
7300 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7301 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7302 
7303 		if (new_crtc_state->event)
7304 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7305 
7306 		new_crtc_state->event = NULL;
7307 	}
7308 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7309 
7310 	/* Signal HW programming completion */
7311 	drm_atomic_helper_commit_hw_done(state);
7312 
7313 	if (wait_for_vblank)
7314 		drm_atomic_helper_wait_for_flip_done(dev, state);
7315 
7316 	drm_atomic_helper_cleanup_planes(dev, state);
7317 
7318 	/*
7319 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7320 	 * so we can put the GPU into runtime suspend if we're not driving any
7321 	 * displays anymore
7322 	 */
7323 	for (i = 0; i < crtc_disable_count; i++)
7324 		pm_runtime_put_autosuspend(dev->dev);
7325 	pm_runtime_mark_last_busy(dev->dev);
7326 
7327 	if (dc_state_temp)
7328 		dc_release_state(dc_state_temp);
7329 }
7330 
7331 
7332 static int dm_force_atomic_commit(struct drm_connector *connector)
7333 {
7334 	int ret = 0;
7335 	struct drm_device *ddev = connector->dev;
7336 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7337 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7338 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7339 	struct drm_connector_state *conn_state;
7340 	struct drm_crtc_state *crtc_state;
7341 	struct drm_plane_state *plane_state;
7342 
7343 	if (!state)
7344 		return -ENOMEM;
7345 
7346 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7347 
7348 	/* Construct an atomic state to restore previous display setting */
7349 
7350 	/*
7351 	 * Attach connectors to drm_atomic_state
7352 	 */
7353 	conn_state = drm_atomic_get_connector_state(state, connector);
7354 
7355 	ret = PTR_ERR_OR_ZERO(conn_state);
7356 	if (ret)
7357 		goto err;
7358 
7359 	/* Attach crtc to drm_atomic_state*/
7360 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7361 
7362 	ret = PTR_ERR_OR_ZERO(crtc_state);
7363 	if (ret)
7364 		goto err;
7365 
7366 	/* force a restore */
7367 	crtc_state->mode_changed = true;
7368 
7369 	/* Attach plane to drm_atomic_state */
7370 	plane_state = drm_atomic_get_plane_state(state, plane);
7371 
7372 	ret = PTR_ERR_OR_ZERO(plane_state);
7373 	if (ret)
7374 		goto err;
7375 
7376 
7377 	/* Call commit internally with the state we just constructed */
7378 	ret = drm_atomic_commit(state);
7379 	if (!ret)
7380 		return 0;
7381 
7382 err:
7383 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7384 	drm_atomic_state_put(state);
7385 
7386 	return ret;
7387 }
7388 
7389 /*
7390  * This function handles all cases when set mode does not come upon hotplug.
7391  * This includes when a display is unplugged then plugged back into the
7392  * same port and when running without usermode desktop manager supprot
7393  */
7394 void dm_restore_drm_connector_state(struct drm_device *dev,
7395 				    struct drm_connector *connector)
7396 {
7397 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7398 	struct amdgpu_crtc *disconnected_acrtc;
7399 	struct dm_crtc_state *acrtc_state;
7400 
7401 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7402 		return;
7403 
7404 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7405 	if (!disconnected_acrtc)
7406 		return;
7407 
7408 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7409 	if (!acrtc_state->stream)
7410 		return;
7411 
7412 	/*
7413 	 * If the previous sink is not released and different from the current,
7414 	 * we deduce we are in a state where we can not rely on usermode call
7415 	 * to turn on the display, so we do it here
7416 	 */
7417 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7418 		dm_force_atomic_commit(&aconnector->base);
7419 }
7420 
7421 /*
7422  * Grabs all modesetting locks to serialize against any blocking commits,
7423  * Waits for completion of all non blocking commits.
7424  */
7425 static int do_aquire_global_lock(struct drm_device *dev,
7426 				 struct drm_atomic_state *state)
7427 {
7428 	struct drm_crtc *crtc;
7429 	struct drm_crtc_commit *commit;
7430 	long ret;
7431 
7432 	/*
7433 	 * Adding all modeset locks to aquire_ctx will
7434 	 * ensure that when the framework release it the
7435 	 * extra locks we are locking here will get released to
7436 	 */
7437 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7438 	if (ret)
7439 		return ret;
7440 
7441 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7442 		spin_lock(&crtc->commit_lock);
7443 		commit = list_first_entry_or_null(&crtc->commit_list,
7444 				struct drm_crtc_commit, commit_entry);
7445 		if (commit)
7446 			drm_crtc_commit_get(commit);
7447 		spin_unlock(&crtc->commit_lock);
7448 
7449 		if (!commit)
7450 			continue;
7451 
7452 		/*
7453 		 * Make sure all pending HW programming completed and
7454 		 * page flips done
7455 		 */
7456 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7457 
7458 		if (ret > 0)
7459 			ret = wait_for_completion_interruptible_timeout(
7460 					&commit->flip_done, 10*HZ);
7461 
7462 		if (ret == 0)
7463 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7464 				  "timed out\n", crtc->base.id, crtc->name);
7465 
7466 		drm_crtc_commit_put(commit);
7467 	}
7468 
7469 	return ret < 0 ? ret : 0;
7470 }
7471 
7472 static void get_freesync_config_for_crtc(
7473 	struct dm_crtc_state *new_crtc_state,
7474 	struct dm_connector_state *new_con_state)
7475 {
7476 	struct mod_freesync_config config = {0};
7477 	struct amdgpu_dm_connector *aconnector =
7478 			to_amdgpu_dm_connector(new_con_state->base.connector);
7479 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7480 	int vrefresh = drm_mode_vrefresh(mode);
7481 
7482 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7483 					vrefresh >= aconnector->min_vfreq &&
7484 					vrefresh <= aconnector->max_vfreq;
7485 
7486 	if (new_crtc_state->vrr_supported) {
7487 		new_crtc_state->stream->ignore_msa_timing_param = true;
7488 		config.state = new_crtc_state->base.vrr_enabled ?
7489 				VRR_STATE_ACTIVE_VARIABLE :
7490 				VRR_STATE_INACTIVE;
7491 		config.min_refresh_in_uhz =
7492 				aconnector->min_vfreq * 1000000;
7493 		config.max_refresh_in_uhz =
7494 				aconnector->max_vfreq * 1000000;
7495 		config.vsif_supported = true;
7496 		config.btr = true;
7497 	}
7498 
7499 	new_crtc_state->freesync_config = config;
7500 }
7501 
7502 static void reset_freesync_config_for_crtc(
7503 	struct dm_crtc_state *new_crtc_state)
7504 {
7505 	new_crtc_state->vrr_supported = false;
7506 
7507 	memset(&new_crtc_state->vrr_params, 0,
7508 	       sizeof(new_crtc_state->vrr_params));
7509 	memset(&new_crtc_state->vrr_infopacket, 0,
7510 	       sizeof(new_crtc_state->vrr_infopacket));
7511 }
7512 
7513 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7514 				struct drm_atomic_state *state,
7515 				struct drm_crtc *crtc,
7516 				struct drm_crtc_state *old_crtc_state,
7517 				struct drm_crtc_state *new_crtc_state,
7518 				bool enable,
7519 				bool *lock_and_validation_needed)
7520 {
7521 	struct dm_atomic_state *dm_state = NULL;
7522 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7523 	struct dc_stream_state *new_stream;
7524 	int ret = 0;
7525 
7526 	/*
7527 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7528 	 * update changed items
7529 	 */
7530 	struct amdgpu_crtc *acrtc = NULL;
7531 	struct amdgpu_dm_connector *aconnector = NULL;
7532 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7533 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7534 
7535 	new_stream = NULL;
7536 
7537 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7538 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7539 	acrtc = to_amdgpu_crtc(crtc);
7540 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7541 
7542 	/* TODO This hack should go away */
7543 	if (aconnector && enable) {
7544 		/* Make sure fake sink is created in plug-in scenario */
7545 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7546 							    &aconnector->base);
7547 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7548 							    &aconnector->base);
7549 
7550 		if (IS_ERR(drm_new_conn_state)) {
7551 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7552 			goto fail;
7553 		}
7554 
7555 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7556 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7557 
7558 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7559 			goto skip_modeset;
7560 
7561 		new_stream = create_stream_for_sink(aconnector,
7562 						     &new_crtc_state->mode,
7563 						    dm_new_conn_state,
7564 						    dm_old_crtc_state->stream);
7565 
7566 		/*
7567 		 * we can have no stream on ACTION_SET if a display
7568 		 * was disconnected during S3, in this case it is not an
7569 		 * error, the OS will be updated after detection, and
7570 		 * will do the right thing on next atomic commit
7571 		 */
7572 
7573 		if (!new_stream) {
7574 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7575 					__func__, acrtc->base.base.id);
7576 			ret = -ENOMEM;
7577 			goto fail;
7578 		}
7579 
7580 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7581 
7582 		ret = fill_hdr_info_packet(drm_new_conn_state,
7583 					   &new_stream->hdr_static_metadata);
7584 		if (ret)
7585 			goto fail;
7586 
7587 		/*
7588 		 * If we already removed the old stream from the context
7589 		 * (and set the new stream to NULL) then we can't reuse
7590 		 * the old stream even if the stream and scaling are unchanged.
7591 		 * We'll hit the BUG_ON and black screen.
7592 		 *
7593 		 * TODO: Refactor this function to allow this check to work
7594 		 * in all conditions.
7595 		 */
7596 		if (dm_new_crtc_state->stream &&
7597 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7598 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7599 			new_crtc_state->mode_changed = false;
7600 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7601 					 new_crtc_state->mode_changed);
7602 		}
7603 	}
7604 
7605 	/* mode_changed flag may get updated above, need to check again */
7606 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7607 		goto skip_modeset;
7608 
7609 	DRM_DEBUG_DRIVER(
7610 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7611 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7612 		"connectors_changed:%d\n",
7613 		acrtc->crtc_id,
7614 		new_crtc_state->enable,
7615 		new_crtc_state->active,
7616 		new_crtc_state->planes_changed,
7617 		new_crtc_state->mode_changed,
7618 		new_crtc_state->active_changed,
7619 		new_crtc_state->connectors_changed);
7620 
7621 	/* Remove stream for any changed/disabled CRTC */
7622 	if (!enable) {
7623 
7624 		if (!dm_old_crtc_state->stream)
7625 			goto skip_modeset;
7626 
7627 		ret = dm_atomic_get_state(state, &dm_state);
7628 		if (ret)
7629 			goto fail;
7630 
7631 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7632 				crtc->base.id);
7633 
7634 		/* i.e. reset mode */
7635 		if (dc_remove_stream_from_ctx(
7636 				dm->dc,
7637 				dm_state->context,
7638 				dm_old_crtc_state->stream) != DC_OK) {
7639 			ret = -EINVAL;
7640 			goto fail;
7641 		}
7642 
7643 		dc_stream_release(dm_old_crtc_state->stream);
7644 		dm_new_crtc_state->stream = NULL;
7645 
7646 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7647 
7648 		*lock_and_validation_needed = true;
7649 
7650 	} else {/* Add stream for any updated/enabled CRTC */
7651 		/*
7652 		 * Quick fix to prevent NULL pointer on new_stream when
7653 		 * added MST connectors not found in existing crtc_state in the chained mode
7654 		 * TODO: need to dig out the root cause of that
7655 		 */
7656 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7657 			goto skip_modeset;
7658 
7659 		if (modereset_required(new_crtc_state))
7660 			goto skip_modeset;
7661 
7662 		if (modeset_required(new_crtc_state, new_stream,
7663 				     dm_old_crtc_state->stream)) {
7664 
7665 			WARN_ON(dm_new_crtc_state->stream);
7666 
7667 			ret = dm_atomic_get_state(state, &dm_state);
7668 			if (ret)
7669 				goto fail;
7670 
7671 			dm_new_crtc_state->stream = new_stream;
7672 
7673 			dc_stream_retain(new_stream);
7674 
7675 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7676 						crtc->base.id);
7677 
7678 			if (dc_add_stream_to_ctx(
7679 					dm->dc,
7680 					dm_state->context,
7681 					dm_new_crtc_state->stream) != DC_OK) {
7682 				ret = -EINVAL;
7683 				goto fail;
7684 			}
7685 
7686 			*lock_and_validation_needed = true;
7687 		}
7688 	}
7689 
7690 skip_modeset:
7691 	/* Release extra reference */
7692 	if (new_stream)
7693 		 dc_stream_release(new_stream);
7694 
7695 	/*
7696 	 * We want to do dc stream updates that do not require a
7697 	 * full modeset below.
7698 	 */
7699 	if (!(enable && aconnector && new_crtc_state->enable &&
7700 	      new_crtc_state->active))
7701 		return 0;
7702 	/*
7703 	 * Given above conditions, the dc state cannot be NULL because:
7704 	 * 1. We're in the process of enabling CRTCs (just been added
7705 	 *    to the dc context, or already is on the context)
7706 	 * 2. Has a valid connector attached, and
7707 	 * 3. Is currently active and enabled.
7708 	 * => The dc stream state currently exists.
7709 	 */
7710 	BUG_ON(dm_new_crtc_state->stream == NULL);
7711 
7712 	/* Scaling or underscan settings */
7713 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7714 		update_stream_scaling_settings(
7715 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7716 
7717 	/* ABM settings */
7718 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7719 
7720 	/*
7721 	 * Color management settings. We also update color properties
7722 	 * when a modeset is needed, to ensure it gets reprogrammed.
7723 	 */
7724 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7725 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7726 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7727 		if (ret)
7728 			goto fail;
7729 	}
7730 
7731 	/* Update Freesync settings. */
7732 	get_freesync_config_for_crtc(dm_new_crtc_state,
7733 				     dm_new_conn_state);
7734 
7735 	return ret;
7736 
7737 fail:
7738 	if (new_stream)
7739 		dc_stream_release(new_stream);
7740 	return ret;
7741 }
7742 
7743 static bool should_reset_plane(struct drm_atomic_state *state,
7744 			       struct drm_plane *plane,
7745 			       struct drm_plane_state *old_plane_state,
7746 			       struct drm_plane_state *new_plane_state)
7747 {
7748 	struct drm_plane *other;
7749 	struct drm_plane_state *old_other_state, *new_other_state;
7750 	struct drm_crtc_state *new_crtc_state;
7751 	int i;
7752 
7753 	/*
7754 	 * TODO: Remove this hack once the checks below are sufficient
7755 	 * enough to determine when we need to reset all the planes on
7756 	 * the stream.
7757 	 */
7758 	if (state->allow_modeset)
7759 		return true;
7760 
7761 	/* Exit early if we know that we're adding or removing the plane. */
7762 	if (old_plane_state->crtc != new_plane_state->crtc)
7763 		return true;
7764 
7765 	/* old crtc == new_crtc == NULL, plane not in context. */
7766 	if (!new_plane_state->crtc)
7767 		return false;
7768 
7769 	new_crtc_state =
7770 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7771 
7772 	if (!new_crtc_state)
7773 		return true;
7774 
7775 	/* CRTC Degamma changes currently require us to recreate planes. */
7776 	if (new_crtc_state->color_mgmt_changed)
7777 		return true;
7778 
7779 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7780 		return true;
7781 
7782 	/*
7783 	 * If there are any new primary or overlay planes being added or
7784 	 * removed then the z-order can potentially change. To ensure
7785 	 * correct z-order and pipe acquisition the current DC architecture
7786 	 * requires us to remove and recreate all existing planes.
7787 	 *
7788 	 * TODO: Come up with a more elegant solution for this.
7789 	 */
7790 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7791 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7792 			continue;
7793 
7794 		if (old_other_state->crtc != new_plane_state->crtc &&
7795 		    new_other_state->crtc != new_plane_state->crtc)
7796 			continue;
7797 
7798 		if (old_other_state->crtc != new_other_state->crtc)
7799 			return true;
7800 
7801 		/* TODO: Remove this once we can handle fast format changes. */
7802 		if (old_other_state->fb && new_other_state->fb &&
7803 		    old_other_state->fb->format != new_other_state->fb->format)
7804 			return true;
7805 	}
7806 
7807 	return false;
7808 }
7809 
7810 static int dm_update_plane_state(struct dc *dc,
7811 				 struct drm_atomic_state *state,
7812 				 struct drm_plane *plane,
7813 				 struct drm_plane_state *old_plane_state,
7814 				 struct drm_plane_state *new_plane_state,
7815 				 bool enable,
7816 				 bool *lock_and_validation_needed)
7817 {
7818 
7819 	struct dm_atomic_state *dm_state = NULL;
7820 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7821 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7822 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7823 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7824 	bool needs_reset;
7825 	int ret = 0;
7826 
7827 
7828 	new_plane_crtc = new_plane_state->crtc;
7829 	old_plane_crtc = old_plane_state->crtc;
7830 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7831 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7832 
7833 	/*TODO Implement atomic check for cursor plane */
7834 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
7835 		return 0;
7836 
7837 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7838 					 new_plane_state);
7839 
7840 	/* Remove any changed/removed planes */
7841 	if (!enable) {
7842 		if (!needs_reset)
7843 			return 0;
7844 
7845 		if (!old_plane_crtc)
7846 			return 0;
7847 
7848 		old_crtc_state = drm_atomic_get_old_crtc_state(
7849 				state, old_plane_crtc);
7850 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7851 
7852 		if (!dm_old_crtc_state->stream)
7853 			return 0;
7854 
7855 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7856 				plane->base.id, old_plane_crtc->base.id);
7857 
7858 		ret = dm_atomic_get_state(state, &dm_state);
7859 		if (ret)
7860 			return ret;
7861 
7862 		if (!dc_remove_plane_from_context(
7863 				dc,
7864 				dm_old_crtc_state->stream,
7865 				dm_old_plane_state->dc_state,
7866 				dm_state->context)) {
7867 
7868 			ret = EINVAL;
7869 			return ret;
7870 		}
7871 
7872 
7873 		dc_plane_state_release(dm_old_plane_state->dc_state);
7874 		dm_new_plane_state->dc_state = NULL;
7875 
7876 		*lock_and_validation_needed = true;
7877 
7878 	} else { /* Add new planes */
7879 		struct dc_plane_state *dc_new_plane_state;
7880 
7881 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7882 			return 0;
7883 
7884 		if (!new_plane_crtc)
7885 			return 0;
7886 
7887 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7888 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7889 
7890 		if (!dm_new_crtc_state->stream)
7891 			return 0;
7892 
7893 		if (!needs_reset)
7894 			return 0;
7895 
7896 		WARN_ON(dm_new_plane_state->dc_state);
7897 
7898 		dc_new_plane_state = dc_create_plane_state(dc);
7899 		if (!dc_new_plane_state)
7900 			return -ENOMEM;
7901 
7902 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7903 				plane->base.id, new_plane_crtc->base.id);
7904 
7905 		ret = fill_dc_plane_attributes(
7906 			new_plane_crtc->dev->dev_private,
7907 			dc_new_plane_state,
7908 			new_plane_state,
7909 			new_crtc_state);
7910 		if (ret) {
7911 			dc_plane_state_release(dc_new_plane_state);
7912 			return ret;
7913 		}
7914 
7915 		ret = dm_atomic_get_state(state, &dm_state);
7916 		if (ret) {
7917 			dc_plane_state_release(dc_new_plane_state);
7918 			return ret;
7919 		}
7920 
7921 		/*
7922 		 * Any atomic check errors that occur after this will
7923 		 * not need a release. The plane state will be attached
7924 		 * to the stream, and therefore part of the atomic
7925 		 * state. It'll be released when the atomic state is
7926 		 * cleaned.
7927 		 */
7928 		if (!dc_add_plane_to_context(
7929 				dc,
7930 				dm_new_crtc_state->stream,
7931 				dc_new_plane_state,
7932 				dm_state->context)) {
7933 
7934 			dc_plane_state_release(dc_new_plane_state);
7935 			return -EINVAL;
7936 		}
7937 
7938 		dm_new_plane_state->dc_state = dc_new_plane_state;
7939 
7940 		/* Tell DC to do a full surface update every time there
7941 		 * is a plane change. Inefficient, but works for now.
7942 		 */
7943 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7944 
7945 		*lock_and_validation_needed = true;
7946 	}
7947 
7948 
7949 	return ret;
7950 }
7951 
7952 static int
7953 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7954 				    struct drm_atomic_state *state,
7955 				    enum surface_update_type *out_type)
7956 {
7957 	struct dc *dc = dm->dc;
7958 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7959 	int i, j, num_plane, ret = 0;
7960 	struct drm_plane_state *old_plane_state, *new_plane_state;
7961 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7962 	struct drm_crtc *new_plane_crtc;
7963 	struct drm_plane *plane;
7964 
7965 	struct drm_crtc *crtc;
7966 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7967 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7968 	struct dc_stream_status *status = NULL;
7969 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
7970 	struct surface_info_bundle {
7971 		struct dc_surface_update surface_updates[MAX_SURFACES];
7972 		struct dc_plane_info plane_infos[MAX_SURFACES];
7973 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7974 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7975 		struct dc_stream_update stream_update;
7976 	} *bundle;
7977 
7978 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7979 
7980 	if (!bundle) {
7981 		DRM_ERROR("Failed to allocate update bundle\n");
7982 		/* Set type to FULL to avoid crashing in DC*/
7983 		update_type = UPDATE_TYPE_FULL;
7984 		goto cleanup;
7985 	}
7986 
7987 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7988 
7989 		memset(bundle, 0, sizeof(struct surface_info_bundle));
7990 
7991 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7992 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
7993 		num_plane = 0;
7994 
7995 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
7996 			update_type = UPDATE_TYPE_FULL;
7997 			goto cleanup;
7998 		}
7999 
8000 		if (!new_dm_crtc_state->stream)
8001 			continue;
8002 
8003 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8004 			const struct amdgpu_framebuffer *amdgpu_fb =
8005 				to_amdgpu_framebuffer(new_plane_state->fb);
8006 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8007 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8008 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8009 			uint64_t tiling_flags;
8010 
8011 			new_plane_crtc = new_plane_state->crtc;
8012 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8013 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8014 
8015 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8016 				continue;
8017 
8018 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8019 				update_type = UPDATE_TYPE_FULL;
8020 				goto cleanup;
8021 			}
8022 
8023 			if (crtc != new_plane_crtc)
8024 				continue;
8025 
8026 			bundle->surface_updates[num_plane].surface =
8027 					new_dm_plane_state->dc_state;
8028 
8029 			if (new_crtc_state->mode_changed) {
8030 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8031 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8032 			}
8033 
8034 			if (new_crtc_state->color_mgmt_changed) {
8035 				bundle->surface_updates[num_plane].gamma =
8036 						new_dm_plane_state->dc_state->gamma_correction;
8037 				bundle->surface_updates[num_plane].in_transfer_func =
8038 						new_dm_plane_state->dc_state->in_transfer_func;
8039 				bundle->stream_update.gamut_remap =
8040 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8041 				bundle->stream_update.output_csc_transform =
8042 						&new_dm_crtc_state->stream->csc_color_matrix;
8043 				bundle->stream_update.out_transfer_func =
8044 						new_dm_crtc_state->stream->out_transfer_func;
8045 			}
8046 
8047 			ret = fill_dc_scaling_info(new_plane_state,
8048 						   scaling_info);
8049 			if (ret)
8050 				goto cleanup;
8051 
8052 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8053 
8054 			if (amdgpu_fb) {
8055 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
8056 				if (ret)
8057 					goto cleanup;
8058 
8059 				ret = fill_dc_plane_info_and_addr(
8060 					dm->adev, new_plane_state, tiling_flags,
8061 					plane_info,
8062 					&flip_addr->address);
8063 				if (ret)
8064 					goto cleanup;
8065 
8066 				bundle->surface_updates[num_plane].plane_info = plane_info;
8067 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8068 			}
8069 
8070 			num_plane++;
8071 		}
8072 
8073 		if (num_plane == 0)
8074 			continue;
8075 
8076 		ret = dm_atomic_get_state(state, &dm_state);
8077 		if (ret)
8078 			goto cleanup;
8079 
8080 		old_dm_state = dm_atomic_get_old_state(state);
8081 		if (!old_dm_state) {
8082 			ret = -EINVAL;
8083 			goto cleanup;
8084 		}
8085 
8086 		status = dc_stream_get_status_from_state(old_dm_state->context,
8087 							 new_dm_crtc_state->stream);
8088 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8089 		/*
8090 		 * TODO: DC modifies the surface during this call so we need
8091 		 * to lock here - find a way to do this without locking.
8092 		 */
8093 		mutex_lock(&dm->dc_lock);
8094 		update_type = dc_check_update_surfaces_for_stream(
8095 				dc,	bundle->surface_updates, num_plane,
8096 				&bundle->stream_update, status);
8097 		mutex_unlock(&dm->dc_lock);
8098 
8099 		if (update_type > UPDATE_TYPE_MED) {
8100 			update_type = UPDATE_TYPE_FULL;
8101 			goto cleanup;
8102 		}
8103 	}
8104 
8105 cleanup:
8106 	kfree(bundle);
8107 
8108 	*out_type = update_type;
8109 	return ret;
8110 }
8111 
8112 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8113 {
8114 	struct drm_connector *connector;
8115 	struct drm_connector_state *conn_state;
8116 	struct amdgpu_dm_connector *aconnector = NULL;
8117 	int i;
8118 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8119 		if (conn_state->crtc != crtc)
8120 			continue;
8121 
8122 		aconnector = to_amdgpu_dm_connector(connector);
8123 		if (!aconnector->port || !aconnector->mst_port)
8124 			aconnector = NULL;
8125 		else
8126 			break;
8127 	}
8128 
8129 	if (!aconnector)
8130 		return 0;
8131 
8132 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8133 }
8134 
8135 /**
8136  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8137  * @dev: The DRM device
8138  * @state: The atomic state to commit
8139  *
8140  * Validate that the given atomic state is programmable by DC into hardware.
8141  * This involves constructing a &struct dc_state reflecting the new hardware
8142  * state we wish to commit, then querying DC to see if it is programmable. It's
8143  * important not to modify the existing DC state. Otherwise, atomic_check
8144  * may unexpectedly commit hardware changes.
8145  *
8146  * When validating the DC state, it's important that the right locks are
8147  * acquired. For full updates case which removes/adds/updates streams on one
8148  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8149  * that any such full update commit will wait for completion of any outstanding
8150  * flip using DRMs synchronization events. See
8151  * dm_determine_update_type_for_commit()
8152  *
8153  * Note that DM adds the affected connectors for all CRTCs in state, when that
8154  * might not seem necessary. This is because DC stream creation requires the
8155  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8156  * be possible but non-trivial - a possible TODO item.
8157  *
8158  * Return: -Error code if validation failed.
8159  */
8160 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8161 				  struct drm_atomic_state *state)
8162 {
8163 	struct amdgpu_device *adev = dev->dev_private;
8164 	struct dm_atomic_state *dm_state = NULL;
8165 	struct dc *dc = adev->dm.dc;
8166 	struct drm_connector *connector;
8167 	struct drm_connector_state *old_con_state, *new_con_state;
8168 	struct drm_crtc *crtc;
8169 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8170 	struct drm_plane *plane;
8171 	struct drm_plane_state *old_plane_state, *new_plane_state;
8172 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8173 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8174 
8175 	int ret, i;
8176 
8177 	/*
8178 	 * This bool will be set for true for any modeset/reset
8179 	 * or plane update which implies non fast surface update.
8180 	 */
8181 	bool lock_and_validation_needed = false;
8182 
8183 	ret = drm_atomic_helper_check_modeset(dev, state);
8184 	if (ret)
8185 		goto fail;
8186 
8187 	if (adev->asic_type >= CHIP_NAVI10) {
8188 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8189 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8190 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8191 				if (ret)
8192 					goto fail;
8193 			}
8194 		}
8195 	}
8196 
8197 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8198 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8199 		    !new_crtc_state->color_mgmt_changed &&
8200 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8201 			continue;
8202 
8203 		if (!new_crtc_state->enable)
8204 			continue;
8205 
8206 		ret = drm_atomic_add_affected_connectors(state, crtc);
8207 		if (ret)
8208 			return ret;
8209 
8210 		ret = drm_atomic_add_affected_planes(state, crtc);
8211 		if (ret)
8212 			goto fail;
8213 	}
8214 
8215 	/*
8216 	 * Add all primary and overlay planes on the CRTC to the state
8217 	 * whenever a plane is enabled to maintain correct z-ordering
8218 	 * and to enable fast surface updates.
8219 	 */
8220 	drm_for_each_crtc(crtc, dev) {
8221 		bool modified = false;
8222 
8223 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8224 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8225 				continue;
8226 
8227 			if (new_plane_state->crtc == crtc ||
8228 			    old_plane_state->crtc == crtc) {
8229 				modified = true;
8230 				break;
8231 			}
8232 		}
8233 
8234 		if (!modified)
8235 			continue;
8236 
8237 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8238 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8239 				continue;
8240 
8241 			new_plane_state =
8242 				drm_atomic_get_plane_state(state, plane);
8243 
8244 			if (IS_ERR(new_plane_state)) {
8245 				ret = PTR_ERR(new_plane_state);
8246 				goto fail;
8247 			}
8248 		}
8249 	}
8250 
8251 	/* Remove exiting planes if they are modified */
8252 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8253 		ret = dm_update_plane_state(dc, state, plane,
8254 					    old_plane_state,
8255 					    new_plane_state,
8256 					    false,
8257 					    &lock_and_validation_needed);
8258 		if (ret)
8259 			goto fail;
8260 	}
8261 
8262 	/* Disable all crtcs which require disable */
8263 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8264 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8265 					   old_crtc_state,
8266 					   new_crtc_state,
8267 					   false,
8268 					   &lock_and_validation_needed);
8269 		if (ret)
8270 			goto fail;
8271 	}
8272 
8273 	/* Enable all crtcs which require enable */
8274 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8275 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8276 					   old_crtc_state,
8277 					   new_crtc_state,
8278 					   true,
8279 					   &lock_and_validation_needed);
8280 		if (ret)
8281 			goto fail;
8282 	}
8283 
8284 	/* Add new/modified planes */
8285 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8286 		ret = dm_update_plane_state(dc, state, plane,
8287 					    old_plane_state,
8288 					    new_plane_state,
8289 					    true,
8290 					    &lock_and_validation_needed);
8291 		if (ret)
8292 			goto fail;
8293 	}
8294 
8295 	/* Run this here since we want to validate the streams we created */
8296 	ret = drm_atomic_helper_check_planes(dev, state);
8297 	if (ret)
8298 		goto fail;
8299 
8300 	if (state->legacy_cursor_update) {
8301 		/*
8302 		 * This is a fast cursor update coming from the plane update
8303 		 * helper, check if it can be done asynchronously for better
8304 		 * performance.
8305 		 */
8306 		state->async_update =
8307 			!drm_atomic_helper_async_check(dev, state);
8308 
8309 		/*
8310 		 * Skip the remaining global validation if this is an async
8311 		 * update. Cursor updates can be done without affecting
8312 		 * state or bandwidth calcs and this avoids the performance
8313 		 * penalty of locking the private state object and
8314 		 * allocating a new dc_state.
8315 		 */
8316 		if (state->async_update)
8317 			return 0;
8318 	}
8319 
8320 	/* Check scaling and underscan changes*/
8321 	/* TODO Removed scaling changes validation due to inability to commit
8322 	 * new stream into context w\o causing full reset. Need to
8323 	 * decide how to handle.
8324 	 */
8325 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8326 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8327 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8328 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8329 
8330 		/* Skip any modesets/resets */
8331 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8332 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8333 			continue;
8334 
8335 		/* Skip any thing not scale or underscan changes */
8336 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8337 			continue;
8338 
8339 		overall_update_type = UPDATE_TYPE_FULL;
8340 		lock_and_validation_needed = true;
8341 	}
8342 
8343 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8344 	if (ret)
8345 		goto fail;
8346 
8347 	if (overall_update_type < update_type)
8348 		overall_update_type = update_type;
8349 
8350 	/*
8351 	 * lock_and_validation_needed was an old way to determine if we need to set
8352 	 * the global lock. Leaving it in to check if we broke any corner cases
8353 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8354 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8355 	 */
8356 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8357 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8358 
8359 	if (overall_update_type > UPDATE_TYPE_FAST) {
8360 		ret = dm_atomic_get_state(state, &dm_state);
8361 		if (ret)
8362 			goto fail;
8363 
8364 		ret = do_aquire_global_lock(dev, state);
8365 		if (ret)
8366 			goto fail;
8367 
8368 #if defined(CONFIG_DRM_AMD_DC_DCN)
8369 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8370 			goto fail;
8371 
8372 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8373 		if (ret)
8374 			goto fail;
8375 #endif
8376 
8377 		/*
8378 		 * Perform validation of MST topology in the state:
8379 		 * We need to perform MST atomic check before calling
8380 		 * dc_validate_global_state(), or there is a chance
8381 		 * to get stuck in an infinite loop and hang eventually.
8382 		 */
8383 		ret = drm_dp_mst_atomic_check(state);
8384 		if (ret)
8385 			goto fail;
8386 
8387 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8388 			ret = -EINVAL;
8389 			goto fail;
8390 		}
8391 	} else {
8392 		/*
8393 		 * The commit is a fast update. Fast updates shouldn't change
8394 		 * the DC context, affect global validation, and can have their
8395 		 * commit work done in parallel with other commits not touching
8396 		 * the same resource. If we have a new DC context as part of
8397 		 * the DM atomic state from validation we need to free it and
8398 		 * retain the existing one instead.
8399 		 */
8400 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8401 
8402 		new_dm_state = dm_atomic_get_new_state(state);
8403 		old_dm_state = dm_atomic_get_old_state(state);
8404 
8405 		if (new_dm_state && old_dm_state) {
8406 			if (new_dm_state->context)
8407 				dc_release_state(new_dm_state->context);
8408 
8409 			new_dm_state->context = old_dm_state->context;
8410 
8411 			if (old_dm_state->context)
8412 				dc_retain_state(old_dm_state->context);
8413 		}
8414 	}
8415 
8416 	/* Store the overall update type for use later in atomic check. */
8417 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8418 		struct dm_crtc_state *dm_new_crtc_state =
8419 			to_dm_crtc_state(new_crtc_state);
8420 
8421 		dm_new_crtc_state->update_type = (int)overall_update_type;
8422 	}
8423 
8424 	/* Must be success */
8425 	WARN_ON(ret);
8426 	return ret;
8427 
8428 fail:
8429 	if (ret == -EDEADLK)
8430 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8431 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8432 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8433 	else
8434 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8435 
8436 	return ret;
8437 }
8438 
8439 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8440 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8441 {
8442 	uint8_t dpcd_data;
8443 	bool capable = false;
8444 
8445 	if (amdgpu_dm_connector->dc_link &&
8446 		dm_helpers_dp_read_dpcd(
8447 				NULL,
8448 				amdgpu_dm_connector->dc_link,
8449 				DP_DOWN_STREAM_PORT_COUNT,
8450 				&dpcd_data,
8451 				sizeof(dpcd_data))) {
8452 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8453 	}
8454 
8455 	return capable;
8456 }
8457 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8458 					struct edid *edid)
8459 {
8460 	int i;
8461 	bool edid_check_required;
8462 	struct detailed_timing *timing;
8463 	struct detailed_non_pixel *data;
8464 	struct detailed_data_monitor_range *range;
8465 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8466 			to_amdgpu_dm_connector(connector);
8467 	struct dm_connector_state *dm_con_state = NULL;
8468 
8469 	struct drm_device *dev = connector->dev;
8470 	struct amdgpu_device *adev = dev->dev_private;
8471 	bool freesync_capable = false;
8472 
8473 	if (!connector->state) {
8474 		DRM_ERROR("%s - Connector has no state", __func__);
8475 		goto update;
8476 	}
8477 
8478 	if (!edid) {
8479 		dm_con_state = to_dm_connector_state(connector->state);
8480 
8481 		amdgpu_dm_connector->min_vfreq = 0;
8482 		amdgpu_dm_connector->max_vfreq = 0;
8483 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8484 
8485 		goto update;
8486 	}
8487 
8488 	dm_con_state = to_dm_connector_state(connector->state);
8489 
8490 	edid_check_required = false;
8491 	if (!amdgpu_dm_connector->dc_sink) {
8492 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8493 		goto update;
8494 	}
8495 	if (!adev->dm.freesync_module)
8496 		goto update;
8497 	/*
8498 	 * if edid non zero restrict freesync only for dp and edp
8499 	 */
8500 	if (edid) {
8501 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8502 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8503 			edid_check_required = is_dp_capable_without_timing_msa(
8504 						adev->dm.dc,
8505 						amdgpu_dm_connector);
8506 		}
8507 	}
8508 	if (edid_check_required == true && (edid->version > 1 ||
8509 	   (edid->version == 1 && edid->revision > 1))) {
8510 		for (i = 0; i < 4; i++) {
8511 
8512 			timing	= &edid->detailed_timings[i];
8513 			data	= &timing->data.other_data;
8514 			range	= &data->data.range;
8515 			/*
8516 			 * Check if monitor has continuous frequency mode
8517 			 */
8518 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8519 				continue;
8520 			/*
8521 			 * Check for flag range limits only. If flag == 1 then
8522 			 * no additional timing information provided.
8523 			 * Default GTF, GTF Secondary curve and CVT are not
8524 			 * supported
8525 			 */
8526 			if (range->flags != 1)
8527 				continue;
8528 
8529 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8530 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8531 			amdgpu_dm_connector->pixel_clock_mhz =
8532 				range->pixel_clock_mhz * 10;
8533 			break;
8534 		}
8535 
8536 		if (amdgpu_dm_connector->max_vfreq -
8537 		    amdgpu_dm_connector->min_vfreq > 10) {
8538 
8539 			freesync_capable = true;
8540 		}
8541 	}
8542 
8543 update:
8544 	if (dm_con_state)
8545 		dm_con_state->freesync_capable = freesync_capable;
8546 
8547 	if (connector->vrr_capable_property)
8548 		drm_connector_set_vrr_capable_property(connector,
8549 						       freesync_capable);
8550 }
8551 
8552 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8553 {
8554 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8555 
8556 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8557 		return;
8558 	if (link->type == dc_connection_none)
8559 		return;
8560 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8561 					dpcd_data, sizeof(dpcd_data))) {
8562 		link->psr_feature_enabled = dpcd_data[0] ? true:false;
8563 		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8564 	}
8565 }
8566 
8567 /*
8568  * amdgpu_dm_link_setup_psr() - configure psr link
8569  * @stream: stream state
8570  *
8571  * Return: true if success
8572  */
8573 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8574 {
8575 	struct dc_link *link = NULL;
8576 	struct psr_config psr_config = {0};
8577 	struct psr_context psr_context = {0};
8578 	struct dc *dc = NULL;
8579 	bool ret = false;
8580 
8581 	if (stream == NULL)
8582 		return false;
8583 
8584 	link = stream->link;
8585 	dc = link->ctx->dc;
8586 
8587 	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8588 
8589 	if (psr_config.psr_version > 0) {
8590 		psr_config.psr_exit_link_training_required = 0x1;
8591 		psr_config.psr_frame_capture_indication_req = 0;
8592 		psr_config.psr_rfb_setup_time = 0x37;
8593 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8594 		psr_config.allow_smu_optimizations = 0x0;
8595 
8596 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8597 
8598 	}
8599 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_feature_enabled);
8600 
8601 	return ret;
8602 }
8603 
8604 /*
8605  * amdgpu_dm_psr_enable() - enable psr f/w
8606  * @stream: stream state
8607  *
8608  * Return: true if success
8609  */
8610 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8611 {
8612 	struct dc_link *link = stream->link;
8613 	unsigned int vsync_rate_hz = 0;
8614 	struct dc_static_screen_params params = {0};
8615 	/* Calculate number of static frames before generating interrupt to
8616 	 * enter PSR.
8617 	 */
8618 	// Init fail safe of 2 frames static
8619 	unsigned int num_frames_static = 2;
8620 
8621 	DRM_DEBUG_DRIVER("Enabling psr...\n");
8622 
8623 	vsync_rate_hz = div64_u64(div64_u64((
8624 			stream->timing.pix_clk_100hz * 100),
8625 			stream->timing.v_total),
8626 			stream->timing.h_total);
8627 
8628 	/* Round up
8629 	 * Calculate number of frames such that at least 30 ms of time has
8630 	 * passed.
8631 	 */
8632 	if (vsync_rate_hz != 0) {
8633 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8634 		num_frames_static = (30000 / frame_time_microsec) + 1;
8635 	}
8636 
8637 	params.triggers.cursor_update = true;
8638 	params.triggers.overlay_update = true;
8639 	params.triggers.surface_update = true;
8640 	params.num_frames = num_frames_static;
8641 
8642 	dc_stream_set_static_screen_params(link->ctx->dc,
8643 					   &stream, 1,
8644 					   &params);
8645 
8646 	return dc_link_set_psr_allow_active(link, true, false);
8647 }
8648 
8649 /*
8650  * amdgpu_dm_psr_disable() - disable psr f/w
8651  * @stream:  stream state
8652  *
8653  * Return: true if success
8654  */
8655 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8656 {
8657 
8658 	DRM_DEBUG_DRIVER("Disabling psr...\n");
8659 
8660 	return dc_link_set_psr_allow_active(stream->link, false, true);
8661 }
8662