1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/inc/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 
38 #include "vid.h"
39 #include "amdgpu.h"
40 #include "amdgpu_display.h"
41 #include "amdgpu_ucode.h"
42 #include "atom.h"
43 #include "amdgpu_dm.h"
44 #ifdef CONFIG_DRM_AMD_DC_HDCP
45 #include "amdgpu_dm_hdcp.h"
46 #include <drm/drm_hdcp.h>
47 #endif
48 #include "amdgpu_pm.h"
49 
50 #include "amd_shared.h"
51 #include "amdgpu_dm_irq.h"
52 #include "dm_helpers.h"
53 #include "amdgpu_dm_mst_types.h"
54 #if defined(CONFIG_DEBUG_FS)
55 #include "amdgpu_dm_debugfs.h"
56 #endif
57 
58 #include "ivsrcid/ivsrcid_vislands30.h"
59 
60 #include <linux/module.h>
61 #include <linux/moduleparam.h>
62 #include <linux/version.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 
98 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
100 
101 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
102 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
103 
104 /* Number of bytes in PSP header for firmware. */
105 #define PSP_HEADER_BYTES 0x100
106 
107 /* Number of bytes in PSP footer for firmware. */
108 #define PSP_FOOTER_BYTES 0x100
109 
110 /**
111  * DOC: overview
112  *
113  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
114  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
115  * requests into DC requests, and DC responses into DRM responses.
116  *
117  * The root control structure is &struct amdgpu_display_manager.
118  */
119 
120 /* basic init/fini API */
121 static int amdgpu_dm_init(struct amdgpu_device *adev);
122 static void amdgpu_dm_fini(struct amdgpu_device *adev);
123 
124 /*
125  * initializes drm_device display related structures, based on the information
126  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
127  * drm_encoder, drm_mode_config
128  *
129  * Returns 0 on success
130  */
131 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
132 /* removes and deallocates the drm structures, created by the above function */
133 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
134 
135 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
136 				struct drm_plane *plane,
137 				unsigned long possible_crtcs,
138 				const struct dc_plane_cap *plane_cap);
139 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
140 			       struct drm_plane *plane,
141 			       uint32_t link_index);
142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
143 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
144 				    uint32_t link_index,
145 				    struct amdgpu_encoder *amdgpu_encoder);
146 static int amdgpu_dm_encoder_init(struct drm_device *dev,
147 				  struct amdgpu_encoder *aencoder,
148 				  uint32_t link_index);
149 
150 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
151 
152 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
153 				   struct drm_atomic_state *state,
154 				   bool nonblock);
155 
156 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
157 
158 static int amdgpu_dm_atomic_check(struct drm_device *dev,
159 				  struct drm_atomic_state *state);
160 
161 static void handle_cursor_update(struct drm_plane *plane,
162 				 struct drm_plane_state *old_plane_state);
163 
164 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
165 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
166 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
167 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
168 
169 
170 /*
171  * dm_vblank_get_counter
172  *
173  * @brief
174  * Get counter for number of vertical blanks
175  *
176  * @param
177  * struct amdgpu_device *adev - [in] desired amdgpu device
178  * int disp_idx - [in] which CRTC to get the counter from
179  *
180  * @return
181  * Counter for vertical blanks
182  */
183 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
184 {
185 	if (crtc >= adev->mode_info.num_crtc)
186 		return 0;
187 	else {
188 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
189 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
190 				acrtc->base.state);
191 
192 
193 		if (acrtc_state->stream == NULL) {
194 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
195 				  crtc);
196 			return 0;
197 		}
198 
199 		return dc_stream_get_vblank_counter(acrtc_state->stream);
200 	}
201 }
202 
203 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
204 				  u32 *vbl, u32 *position)
205 {
206 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
207 
208 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
209 		return -EINVAL;
210 	else {
211 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
212 		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
213 						acrtc->base.state);
214 
215 		if (acrtc_state->stream ==  NULL) {
216 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
217 				  crtc);
218 			return 0;
219 		}
220 
221 		/*
222 		 * TODO rework base driver to use values directly.
223 		 * for now parse it back into reg-format
224 		 */
225 		dc_stream_get_scanoutpos(acrtc_state->stream,
226 					 &v_blank_start,
227 					 &v_blank_end,
228 					 &h_position,
229 					 &v_position);
230 
231 		*position = v_position | (h_position << 16);
232 		*vbl = v_blank_start | (v_blank_end << 16);
233 	}
234 
235 	return 0;
236 }
237 
238 static bool dm_is_idle(void *handle)
239 {
240 	/* XXX todo */
241 	return true;
242 }
243 
244 static int dm_wait_for_idle(void *handle)
245 {
246 	/* XXX todo */
247 	return 0;
248 }
249 
250 static bool dm_check_soft_reset(void *handle)
251 {
252 	return false;
253 }
254 
255 static int dm_soft_reset(void *handle)
256 {
257 	/* XXX todo */
258 	return 0;
259 }
260 
261 static struct amdgpu_crtc *
262 get_crtc_by_otg_inst(struct amdgpu_device *adev,
263 		     int otg_inst)
264 {
265 	struct drm_device *dev = adev->ddev;
266 	struct drm_crtc *crtc;
267 	struct amdgpu_crtc *amdgpu_crtc;
268 
269 	if (otg_inst == -1) {
270 		WARN_ON(1);
271 		return adev->mode_info.crtcs[0];
272 	}
273 
274 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
275 		amdgpu_crtc = to_amdgpu_crtc(crtc);
276 
277 		if (amdgpu_crtc->otg_inst == otg_inst)
278 			return amdgpu_crtc;
279 	}
280 
281 	return NULL;
282 }
283 
284 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
285 {
286 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
287 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
288 }
289 
290 /**
291  * dm_pflip_high_irq() - Handle pageflip interrupt
292  * @interrupt_params: ignored
293  *
294  * Handles the pageflip interrupt by notifying all interested parties
295  * that the pageflip has been completed.
296  */
297 static void dm_pflip_high_irq(void *interrupt_params)
298 {
299 	struct amdgpu_crtc *amdgpu_crtc;
300 	struct common_irq_params *irq_params = interrupt_params;
301 	struct amdgpu_device *adev = irq_params->adev;
302 	unsigned long flags;
303 	struct drm_pending_vblank_event *e;
304 	struct dm_crtc_state *acrtc_state;
305 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
306 	bool vrr_active;
307 
308 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
309 
310 	/* IRQ could occur when in initial stage */
311 	/* TODO work and BO cleanup */
312 	if (amdgpu_crtc == NULL) {
313 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
314 		return;
315 	}
316 
317 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
318 
319 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
320 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
321 						 amdgpu_crtc->pflip_status,
322 						 AMDGPU_FLIP_SUBMITTED,
323 						 amdgpu_crtc->crtc_id,
324 						 amdgpu_crtc);
325 		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
326 		return;
327 	}
328 
329 	/* page flip completed. */
330 	e = amdgpu_crtc->event;
331 	amdgpu_crtc->event = NULL;
332 
333 	if (!e)
334 		WARN_ON(1);
335 
336 	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
337 	vrr_active = amdgpu_dm_vrr_active(acrtc_state);
338 
339 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
340 	if (!vrr_active ||
341 	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
342 				      &v_blank_end, &hpos, &vpos) ||
343 	    (vpos < v_blank_start)) {
344 		/* Update to correct count and vblank timestamp if racing with
345 		 * vblank irq. This also updates to the correct vblank timestamp
346 		 * even in VRR mode, as scanout is past the front-porch atm.
347 		 */
348 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
349 
350 		/* Wake up userspace by sending the pageflip event with proper
351 		 * count and timestamp of vblank of flip completion.
352 		 */
353 		if (e) {
354 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
355 
356 			/* Event sent, so done with vblank for this flip */
357 			drm_crtc_vblank_put(&amdgpu_crtc->base);
358 		}
359 	} else if (e) {
360 		/* VRR active and inside front-porch: vblank count and
361 		 * timestamp for pageflip event will only be up to date after
362 		 * drm_crtc_handle_vblank() has been executed from late vblank
363 		 * irq handler after start of back-porch (vline 0). We queue the
364 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
365 		 * updated timestamp and count, once it runs after us.
366 		 *
367 		 * We need to open-code this instead of using the helper
368 		 * drm_crtc_arm_vblank_event(), as that helper would
369 		 * call drm_crtc_accurate_vblank_count(), which we must
370 		 * not call in VRR mode while we are in front-porch!
371 		 */
372 
373 		/* sequence will be replaced by real count during send-out. */
374 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
375 		e->pipe = amdgpu_crtc->crtc_id;
376 
377 		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
378 		e = NULL;
379 	}
380 
381 	/* Keep track of vblank of this flip for flip throttling. We use the
382 	 * cooked hw counter, as that one incremented at start of this vblank
383 	 * of pageflip completion, so last_flip_vblank is the forbidden count
384 	 * for queueing new pageflips if vsync + VRR is enabled.
385 	 */
386 	amdgpu_crtc->last_flip_vblank =
387 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
388 
389 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
390 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
391 
392 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
393 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
394 			 vrr_active, (int) !e);
395 }
396 
397 static void dm_vupdate_high_irq(void *interrupt_params)
398 {
399 	struct common_irq_params *irq_params = interrupt_params;
400 	struct amdgpu_device *adev = irq_params->adev;
401 	struct amdgpu_crtc *acrtc;
402 	struct dm_crtc_state *acrtc_state;
403 	unsigned long flags;
404 
405 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
406 
407 	if (acrtc) {
408 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
409 
410 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
411 			      acrtc->crtc_id,
412 			      amdgpu_dm_vrr_active(acrtc_state));
413 
414 		/* Core vblank handling is done here after end of front-porch in
415 		 * vrr mode, as vblank timestamping will give valid results
416 		 * while now done after front-porch. This will also deliver
417 		 * page-flip completion events that have been queued to us
418 		 * if a pageflip happened inside front-porch.
419 		 */
420 		if (amdgpu_dm_vrr_active(acrtc_state)) {
421 			drm_crtc_handle_vblank(&acrtc->base);
422 
423 			/* BTR processing for pre-DCE12 ASICs */
424 			if (acrtc_state->stream &&
425 			    adev->family < AMDGPU_FAMILY_AI) {
426 				spin_lock_irqsave(&adev->ddev->event_lock, flags);
427 				mod_freesync_handle_v_update(
428 				    adev->dm.freesync_module,
429 				    acrtc_state->stream,
430 				    &acrtc_state->vrr_params);
431 
432 				dc_stream_adjust_vmin_vmax(
433 				    adev->dm.dc,
434 				    acrtc_state->stream,
435 				    &acrtc_state->vrr_params.adjust);
436 				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
437 			}
438 		}
439 	}
440 }
441 
442 /**
443  * dm_crtc_high_irq() - Handles CRTC interrupt
444  * @interrupt_params: ignored
445  *
446  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
447  * event handler.
448  */
449 static void dm_crtc_high_irq(void *interrupt_params)
450 {
451 	struct common_irq_params *irq_params = interrupt_params;
452 	struct amdgpu_device *adev = irq_params->adev;
453 	struct amdgpu_crtc *acrtc;
454 	struct dm_crtc_state *acrtc_state;
455 	unsigned long flags;
456 
457 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
458 
459 	if (acrtc) {
460 		acrtc_state = to_dm_crtc_state(acrtc->base.state);
461 
462 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
463 			      acrtc->crtc_id,
464 			      amdgpu_dm_vrr_active(acrtc_state));
465 
466 		/* Core vblank handling at start of front-porch is only possible
467 		 * in non-vrr mode, as only there vblank timestamping will give
468 		 * valid results while done in front-porch. Otherwise defer it
469 		 * to dm_vupdate_high_irq after end of front-porch.
470 		 */
471 		if (!amdgpu_dm_vrr_active(acrtc_state))
472 			drm_crtc_handle_vblank(&acrtc->base);
473 
474 		/* Following stuff must happen at start of vblank, for crc
475 		 * computation and below-the-range btr support in vrr mode.
476 		 */
477 		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
478 
479 		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
480 		    acrtc_state->vrr_params.supported &&
481 		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
482 			spin_lock_irqsave(&adev->ddev->event_lock, flags);
483 			mod_freesync_handle_v_update(
484 				adev->dm.freesync_module,
485 				acrtc_state->stream,
486 				&acrtc_state->vrr_params);
487 
488 			dc_stream_adjust_vmin_vmax(
489 				adev->dm.dc,
490 				acrtc_state->stream,
491 				&acrtc_state->vrr_params.adjust);
492 			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
493 		}
494 	}
495 }
496 
497 #if defined(CONFIG_DRM_AMD_DC_DCN)
498 /**
499  * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
500  * @interrupt params - interrupt parameters
501  *
502  * Notify DRM's vblank event handler at VSTARTUP
503  *
504  * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
505  * * We are close enough to VUPDATE - the point of no return for hw
506  * * We are in the fixed portion of variable front porch when vrr is enabled
507  * * We are before VUPDATE, where double-buffered vrr registers are swapped
508  *
509  * It is therefore the correct place to signal vblank, send user flip events,
510  * and update VRR.
511  */
512 static void dm_dcn_crtc_high_irq(void *interrupt_params)
513 {
514 	struct common_irq_params *irq_params = interrupt_params;
515 	struct amdgpu_device *adev = irq_params->adev;
516 	struct amdgpu_crtc *acrtc;
517 	struct dm_crtc_state *acrtc_state;
518 	unsigned long flags;
519 
520 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
521 
522 	if (!acrtc)
523 		return;
524 
525 	acrtc_state = to_dm_crtc_state(acrtc->base.state);
526 
527 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
528 			 amdgpu_dm_vrr_active(acrtc_state),
529 			 acrtc_state->active_planes);
530 
531 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
532 	drm_crtc_handle_vblank(&acrtc->base);
533 
534 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
535 
536 	if (acrtc_state->vrr_params.supported &&
537 	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
538 		mod_freesync_handle_v_update(
539 		adev->dm.freesync_module,
540 		acrtc_state->stream,
541 		&acrtc_state->vrr_params);
542 
543 		dc_stream_adjust_vmin_vmax(
544 			adev->dm.dc,
545 			acrtc_state->stream,
546 			&acrtc_state->vrr_params.adjust);
547 	}
548 
549 	/*
550 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
551 	 * In that case, pageflip completion interrupts won't fire and pageflip
552 	 * completion events won't get delivered. Prevent this by sending
553 	 * pending pageflip events from here if a flip is still pending.
554 	 *
555 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
556 	 * avoid race conditions between flip programming and completion,
557 	 * which could cause too early flip completion events.
558 	 */
559 	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
560 	    acrtc_state->active_planes == 0) {
561 		if (acrtc->event) {
562 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
563 			acrtc->event = NULL;
564 			drm_crtc_vblank_put(&acrtc->base);
565 		}
566 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
567 	}
568 
569 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
570 }
571 #endif
572 
573 static int dm_set_clockgating_state(void *handle,
574 		  enum amd_clockgating_state state)
575 {
576 	return 0;
577 }
578 
579 static int dm_set_powergating_state(void *handle,
580 		  enum amd_powergating_state state)
581 {
582 	return 0;
583 }
584 
585 /* Prototypes of private functions */
586 static int dm_early_init(void* handle);
587 
588 /* Allocate memory for FBC compressed data  */
589 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
590 {
591 	struct drm_device *dev = connector->dev;
592 	struct amdgpu_device *adev = dev->dev_private;
593 	struct dm_comressor_info *compressor = &adev->dm.compressor;
594 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
595 	struct drm_display_mode *mode;
596 	unsigned long max_size = 0;
597 
598 	if (adev->dm.dc->fbc_compressor == NULL)
599 		return;
600 
601 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
602 		return;
603 
604 	if (compressor->bo_ptr)
605 		return;
606 
607 
608 	list_for_each_entry(mode, &connector->modes, head) {
609 		if (max_size < mode->htotal * mode->vtotal)
610 			max_size = mode->htotal * mode->vtotal;
611 	}
612 
613 	if (max_size) {
614 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
615 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
616 			    &compressor->gpu_addr, &compressor->cpu_addr);
617 
618 		if (r)
619 			DRM_ERROR("DM: Failed to initialize FBC\n");
620 		else {
621 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
622 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
623 		}
624 
625 	}
626 
627 }
628 
629 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
630 					  int pipe, bool *enabled,
631 					  unsigned char *buf, int max_bytes)
632 {
633 	struct drm_device *dev = dev_get_drvdata(kdev);
634 	struct amdgpu_device *adev = dev->dev_private;
635 	struct drm_connector *connector;
636 	struct drm_connector_list_iter conn_iter;
637 	struct amdgpu_dm_connector *aconnector;
638 	int ret = 0;
639 
640 	*enabled = false;
641 
642 	mutex_lock(&adev->dm.audio_lock);
643 
644 	drm_connector_list_iter_begin(dev, &conn_iter);
645 	drm_for_each_connector_iter(connector, &conn_iter) {
646 		aconnector = to_amdgpu_dm_connector(connector);
647 		if (aconnector->audio_inst != port)
648 			continue;
649 
650 		*enabled = true;
651 		ret = drm_eld_size(connector->eld);
652 		memcpy(buf, connector->eld, min(max_bytes, ret));
653 
654 		break;
655 	}
656 	drm_connector_list_iter_end(&conn_iter);
657 
658 	mutex_unlock(&adev->dm.audio_lock);
659 
660 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661 
662 	return ret;
663 }
664 
665 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
666 	.get_eld = amdgpu_dm_audio_component_get_eld,
667 };
668 
669 static int amdgpu_dm_audio_component_bind(struct device *kdev,
670 				       struct device *hda_kdev, void *data)
671 {
672 	struct drm_device *dev = dev_get_drvdata(kdev);
673 	struct amdgpu_device *adev = dev->dev_private;
674 	struct drm_audio_component *acomp = data;
675 
676 	acomp->ops = &amdgpu_dm_audio_component_ops;
677 	acomp->dev = kdev;
678 	adev->dm.audio_component = acomp;
679 
680 	return 0;
681 }
682 
683 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
684 					  struct device *hda_kdev, void *data)
685 {
686 	struct drm_device *dev = dev_get_drvdata(kdev);
687 	struct amdgpu_device *adev = dev->dev_private;
688 	struct drm_audio_component *acomp = data;
689 
690 	acomp->ops = NULL;
691 	acomp->dev = NULL;
692 	adev->dm.audio_component = NULL;
693 }
694 
695 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
696 	.bind	= amdgpu_dm_audio_component_bind,
697 	.unbind	= amdgpu_dm_audio_component_unbind,
698 };
699 
700 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
701 {
702 	int i, ret;
703 
704 	if (!amdgpu_audio)
705 		return 0;
706 
707 	adev->mode_info.audio.enabled = true;
708 
709 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
710 
711 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
712 		adev->mode_info.audio.pin[i].channels = -1;
713 		adev->mode_info.audio.pin[i].rate = -1;
714 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
715 		adev->mode_info.audio.pin[i].status_bits = 0;
716 		adev->mode_info.audio.pin[i].category_code = 0;
717 		adev->mode_info.audio.pin[i].connected = false;
718 		adev->mode_info.audio.pin[i].id =
719 			adev->dm.dc->res_pool->audios[i]->inst;
720 		adev->mode_info.audio.pin[i].offset = 0;
721 	}
722 
723 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724 	if (ret < 0)
725 		return ret;
726 
727 	adev->dm.audio_registered = true;
728 
729 	return 0;
730 }
731 
732 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 {
734 	if (!amdgpu_audio)
735 		return;
736 
737 	if (!adev->mode_info.audio.enabled)
738 		return;
739 
740 	if (adev->dm.audio_registered) {
741 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
742 		adev->dm.audio_registered = false;
743 	}
744 
745 	/* TODO: Disable audio? */
746 
747 	adev->mode_info.audio.enabled = false;
748 }
749 
750 void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
751 {
752 	struct drm_audio_component *acomp = adev->dm.audio_component;
753 
754 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
755 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
756 
757 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758 						 pin, -1);
759 	}
760 }
761 
762 static int dm_dmub_hw_init(struct amdgpu_device *adev)
763 {
764 	const struct dmcub_firmware_header_v1_0 *hdr;
765 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
766 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
767 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
768 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
769 	struct abm *abm = adev->dm.dc->res_pool->abm;
770 	struct dmub_srv_hw_params hw_params;
771 	enum dmub_status status;
772 	const unsigned char *fw_inst_const, *fw_bss_data;
773 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
774 	bool has_hw_support;
775 
776 	if (!dmub_srv)
777 		/* DMUB isn't supported on the ASIC. */
778 		return 0;
779 
780 	if (!fb_info) {
781 		DRM_ERROR("No framebuffer info for DMUB service.\n");
782 		return -EINVAL;
783 	}
784 
785 	if (!dmub_fw) {
786 		/* Firmware required for DMUB support. */
787 		DRM_ERROR("No firmware provided for DMUB.\n");
788 		return -EINVAL;
789 	}
790 
791 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
792 	if (status != DMUB_STATUS_OK) {
793 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
794 		return -EINVAL;
795 	}
796 
797 	if (!has_hw_support) {
798 		DRM_INFO("DMUB unsupported on ASIC\n");
799 		return 0;
800 	}
801 
802 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
803 
804 	fw_inst_const = dmub_fw->data +
805 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 			PSP_HEADER_BYTES;
807 
808 	fw_bss_data = dmub_fw->data +
809 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
810 		      le32_to_cpu(hdr->inst_const_bytes);
811 
812 	/* Copy firmware and bios info into FB memory. */
813 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
814 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
815 
816 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
817 
818 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
819 	 * amdgpu_ucode_init_single_fw will load dmub firmware
820 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
821 	 * will be done by dm_dmub_hw_init
822 	 */
823 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
824 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
825 				fw_inst_const_size);
826 	}
827 
828 	if (fw_bss_data_size)
829 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
830 		       fw_bss_data, fw_bss_data_size);
831 
832 	/* Copy firmware bios info into FB memory. */
833 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
834 	       adev->bios_size);
835 
836 	/* Reset regions that need to be reset. */
837 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
838 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
839 
840 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
841 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
842 
843 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
844 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
845 
846 	/* Initialize hardware. */
847 	memset(&hw_params, 0, sizeof(hw_params));
848 	hw_params.fb_base = adev->gmc.fb_start;
849 	hw_params.fb_offset = adev->gmc.aper_base;
850 
851 	/* backdoor load firmware and trigger dmub running */
852 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
853 		hw_params.load_inst_const = true;
854 
855 	if (dmcu)
856 		hw_params.psp_version = dmcu->psp_version;
857 
858 	for (i = 0; i < fb_info->num_fb; ++i)
859 		hw_params.fb[i] = &fb_info->fb[i];
860 
861 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
862 	if (status != DMUB_STATUS_OK) {
863 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
864 		return -EINVAL;
865 	}
866 
867 	/* Wait for firmware load to finish. */
868 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
869 	if (status != DMUB_STATUS_OK)
870 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
871 
872 	/* Init DMCU and ABM if available. */
873 	if (dmcu && abm) {
874 		dmcu->funcs->dmcu_init(dmcu);
875 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
876 	}
877 
878 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
879 	if (!adev->dm.dc->ctx->dmub_srv) {
880 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
881 		return -ENOMEM;
882 	}
883 
884 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
885 		 adev->dm.dmcub_fw_version);
886 
887 	return 0;
888 }
889 
890 static int amdgpu_dm_init(struct amdgpu_device *adev)
891 {
892 	struct dc_init_data init_data;
893 #ifdef CONFIG_DRM_AMD_DC_HDCP
894 	struct dc_callback_init init_params;
895 #endif
896 	int r;
897 
898 	adev->dm.ddev = adev->ddev;
899 	adev->dm.adev = adev;
900 
901 	/* Zero all the fields */
902 	memset(&init_data, 0, sizeof(init_data));
903 #ifdef CONFIG_DRM_AMD_DC_HDCP
904 	memset(&init_params, 0, sizeof(init_params));
905 #endif
906 
907 	mutex_init(&adev->dm.dc_lock);
908 	mutex_init(&adev->dm.audio_lock);
909 
910 	if(amdgpu_dm_irq_init(adev)) {
911 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
912 		goto error;
913 	}
914 
915 	init_data.asic_id.chip_family = adev->family;
916 
917 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
918 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
919 
920 	init_data.asic_id.vram_width = adev->gmc.vram_width;
921 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
922 	init_data.asic_id.atombios_base_address =
923 		adev->mode_info.atom_context->bios;
924 
925 	init_data.driver = adev;
926 
927 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
928 
929 	if (!adev->dm.cgs_device) {
930 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
931 		goto error;
932 	}
933 
934 	init_data.cgs_device = adev->dm.cgs_device;
935 
936 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
937 
938 	switch (adev->asic_type) {
939 	case CHIP_CARRIZO:
940 	case CHIP_STONEY:
941 	case CHIP_RAVEN:
942 	case CHIP_RENOIR:
943 		init_data.flags.gpu_vm_support = true;
944 		break;
945 	default:
946 		break;
947 	}
948 
949 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
950 		init_data.flags.fbc_support = true;
951 
952 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
953 		init_data.flags.multi_mon_pp_mclk_switch = true;
954 
955 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
956 		init_data.flags.disable_fractional_pwm = true;
957 
958 	init_data.flags.power_down_display_on_boot = true;
959 
960 	init_data.soc_bounding_box = adev->dm.soc_bounding_box;
961 
962 	/* Display Core create. */
963 	adev->dm.dc = dc_create(&init_data);
964 
965 	if (adev->dm.dc) {
966 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
967 	} else {
968 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
969 		goto error;
970 	}
971 
972 	r = dm_dmub_hw_init(adev);
973 	if (r) {
974 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
975 		goto error;
976 	}
977 
978 	dc_hardware_init(adev->dm.dc);
979 
980 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
981 	if (!adev->dm.freesync_module) {
982 		DRM_ERROR(
983 		"amdgpu: failed to initialize freesync_module.\n");
984 	} else
985 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
986 				adev->dm.freesync_module);
987 
988 	amdgpu_dm_init_color_mod();
989 
990 #ifdef CONFIG_DRM_AMD_DC_HDCP
991 	if (adev->asic_type >= CHIP_RAVEN) {
992 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
993 
994 		if (!adev->dm.hdcp_workqueue)
995 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
996 		else
997 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
998 
999 		dc_init_callbacks(adev->dm.dc, &init_params);
1000 	}
1001 #endif
1002 	if (amdgpu_dm_initialize_drm_device(adev)) {
1003 		DRM_ERROR(
1004 		"amdgpu: failed to initialize sw for display support.\n");
1005 		goto error;
1006 	}
1007 
1008 	/* Update the actual used number of crtc */
1009 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
1010 
1011 	/* TODO: Add_display_info? */
1012 
1013 	/* TODO use dynamic cursor width */
1014 	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1015 	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1016 
1017 	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
1018 		DRM_ERROR(
1019 		"amdgpu: failed to initialize sw for display support.\n");
1020 		goto error;
1021 	}
1022 
1023 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1024 
1025 	return 0;
1026 error:
1027 	amdgpu_dm_fini(adev);
1028 
1029 	return -EINVAL;
1030 }
1031 
1032 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1033 {
1034 	amdgpu_dm_audio_fini(adev);
1035 
1036 	amdgpu_dm_destroy_drm_device(&adev->dm);
1037 
1038 #ifdef CONFIG_DRM_AMD_DC_HDCP
1039 	if (adev->dm.hdcp_workqueue) {
1040 		hdcp_destroy(adev->dm.hdcp_workqueue);
1041 		adev->dm.hdcp_workqueue = NULL;
1042 	}
1043 
1044 	if (adev->dm.dc)
1045 		dc_deinit_callbacks(adev->dm.dc);
1046 #endif
1047 	if (adev->dm.dc->ctx->dmub_srv) {
1048 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1049 		adev->dm.dc->ctx->dmub_srv = NULL;
1050 	}
1051 
1052 	if (adev->dm.dmub_bo)
1053 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1054 				      &adev->dm.dmub_bo_gpu_addr,
1055 				      &adev->dm.dmub_bo_cpu_addr);
1056 
1057 	/* DC Destroy TODO: Replace destroy DAL */
1058 	if (adev->dm.dc)
1059 		dc_destroy(&adev->dm.dc);
1060 	/*
1061 	 * TODO: pageflip, vlank interrupt
1062 	 *
1063 	 * amdgpu_dm_irq_fini(adev);
1064 	 */
1065 
1066 	if (adev->dm.cgs_device) {
1067 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1068 		adev->dm.cgs_device = NULL;
1069 	}
1070 	if (adev->dm.freesync_module) {
1071 		mod_freesync_destroy(adev->dm.freesync_module);
1072 		adev->dm.freesync_module = NULL;
1073 	}
1074 
1075 	mutex_destroy(&adev->dm.audio_lock);
1076 	mutex_destroy(&adev->dm.dc_lock);
1077 
1078 	return;
1079 }
1080 
1081 static int load_dmcu_fw(struct amdgpu_device *adev)
1082 {
1083 	const char *fw_name_dmcu = NULL;
1084 	int r;
1085 	const struct dmcu_firmware_header_v1_0 *hdr;
1086 
1087 	switch(adev->asic_type) {
1088 	case CHIP_BONAIRE:
1089 	case CHIP_HAWAII:
1090 	case CHIP_KAVERI:
1091 	case CHIP_KABINI:
1092 	case CHIP_MULLINS:
1093 	case CHIP_TONGA:
1094 	case CHIP_FIJI:
1095 	case CHIP_CARRIZO:
1096 	case CHIP_STONEY:
1097 	case CHIP_POLARIS11:
1098 	case CHIP_POLARIS10:
1099 	case CHIP_POLARIS12:
1100 	case CHIP_VEGAM:
1101 	case CHIP_VEGA10:
1102 	case CHIP_VEGA12:
1103 	case CHIP_VEGA20:
1104 	case CHIP_NAVI10:
1105 	case CHIP_NAVI14:
1106 	case CHIP_RENOIR:
1107 		return 0;
1108 	case CHIP_NAVI12:
1109 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1110 		break;
1111 	case CHIP_RAVEN:
1112 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1113 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1114 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1115 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1116 		else
1117 			return 0;
1118 		break;
1119 	default:
1120 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1121 		return -EINVAL;
1122 	}
1123 
1124 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1125 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1126 		return 0;
1127 	}
1128 
1129 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1130 	if (r == -ENOENT) {
1131 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1132 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1133 		adev->dm.fw_dmcu = NULL;
1134 		return 0;
1135 	}
1136 	if (r) {
1137 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1138 			fw_name_dmcu);
1139 		return r;
1140 	}
1141 
1142 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1143 	if (r) {
1144 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1145 			fw_name_dmcu);
1146 		release_firmware(adev->dm.fw_dmcu);
1147 		adev->dm.fw_dmcu = NULL;
1148 		return r;
1149 	}
1150 
1151 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1152 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1153 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1154 	adev->firmware.fw_size +=
1155 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1156 
1157 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1158 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1159 	adev->firmware.fw_size +=
1160 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1161 
1162 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1163 
1164 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1165 
1166 	return 0;
1167 }
1168 
1169 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1170 {
1171 	struct amdgpu_device *adev = ctx;
1172 
1173 	return dm_read_reg(adev->dm.dc->ctx, address);
1174 }
1175 
1176 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1177 				     uint32_t value)
1178 {
1179 	struct amdgpu_device *adev = ctx;
1180 
1181 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1182 }
1183 
1184 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1185 {
1186 	struct dmub_srv_create_params create_params;
1187 	struct dmub_srv_region_params region_params;
1188 	struct dmub_srv_region_info region_info;
1189 	struct dmub_srv_fb_params fb_params;
1190 	struct dmub_srv_fb_info *fb_info;
1191 	struct dmub_srv *dmub_srv;
1192 	const struct dmcub_firmware_header_v1_0 *hdr;
1193 	const char *fw_name_dmub;
1194 	enum dmub_asic dmub_asic;
1195 	enum dmub_status status;
1196 	int r;
1197 
1198 	switch (adev->asic_type) {
1199 	case CHIP_RENOIR:
1200 		dmub_asic = DMUB_ASIC_DCN21;
1201 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1202 		break;
1203 
1204 	default:
1205 		/* ASIC doesn't support DMUB. */
1206 		return 0;
1207 	}
1208 
1209 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1210 	if (r) {
1211 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1212 		return 0;
1213 	}
1214 
1215 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1216 	if (r) {
1217 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1218 		return 0;
1219 	}
1220 
1221 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1222 
1223 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1224 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1225 			AMDGPU_UCODE_ID_DMCUB;
1226 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1227 			adev->dm.dmub_fw;
1228 		adev->firmware.fw_size +=
1229 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1230 
1231 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1232 			 adev->dm.dmcub_fw_version);
1233 	}
1234 
1235 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1236 
1237 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1238 	dmub_srv = adev->dm.dmub_srv;
1239 
1240 	if (!dmub_srv) {
1241 		DRM_ERROR("Failed to allocate DMUB service!\n");
1242 		return -ENOMEM;
1243 	}
1244 
1245 	memset(&create_params, 0, sizeof(create_params));
1246 	create_params.user_ctx = adev;
1247 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1248 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1249 	create_params.asic = dmub_asic;
1250 
1251 	/* Create the DMUB service. */
1252 	status = dmub_srv_create(dmub_srv, &create_params);
1253 	if (status != DMUB_STATUS_OK) {
1254 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1255 		return -EINVAL;
1256 	}
1257 
1258 	/* Calculate the size of all the regions for the DMUB service. */
1259 	memset(&region_params, 0, sizeof(region_params));
1260 
1261 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1262 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1263 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1264 	region_params.vbios_size = adev->bios_size;
1265 	region_params.fw_bss_data =
1266 		adev->dm.dmub_fw->data +
1267 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1268 		le32_to_cpu(hdr->inst_const_bytes);
1269 	region_params.fw_inst_const =
1270 		adev->dm.dmub_fw->data +
1271 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1272 		PSP_HEADER_BYTES;
1273 
1274 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1275 					   &region_info);
1276 
1277 	if (status != DMUB_STATUS_OK) {
1278 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1279 		return -EINVAL;
1280 	}
1281 
1282 	/*
1283 	 * Allocate a framebuffer based on the total size of all the regions.
1284 	 * TODO: Move this into GART.
1285 	 */
1286 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1287 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1288 				    &adev->dm.dmub_bo_gpu_addr,
1289 				    &adev->dm.dmub_bo_cpu_addr);
1290 	if (r)
1291 		return r;
1292 
1293 	/* Rebase the regions on the framebuffer address. */
1294 	memset(&fb_params, 0, sizeof(fb_params));
1295 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1296 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1297 	fb_params.region_info = &region_info;
1298 
1299 	adev->dm.dmub_fb_info =
1300 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1301 	fb_info = adev->dm.dmub_fb_info;
1302 
1303 	if (!fb_info) {
1304 		DRM_ERROR(
1305 			"Failed to allocate framebuffer info for DMUB service!\n");
1306 		return -ENOMEM;
1307 	}
1308 
1309 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1310 	if (status != DMUB_STATUS_OK) {
1311 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1312 		return -EINVAL;
1313 	}
1314 
1315 	return 0;
1316 }
1317 
1318 static int dm_sw_init(void *handle)
1319 {
1320 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1321 	int r;
1322 
1323 	r = dm_dmub_sw_init(adev);
1324 	if (r)
1325 		return r;
1326 
1327 	return load_dmcu_fw(adev);
1328 }
1329 
1330 static int dm_sw_fini(void *handle)
1331 {
1332 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1333 
1334 	kfree(adev->dm.dmub_fb_info);
1335 	adev->dm.dmub_fb_info = NULL;
1336 
1337 	if (adev->dm.dmub_srv) {
1338 		dmub_srv_destroy(adev->dm.dmub_srv);
1339 		adev->dm.dmub_srv = NULL;
1340 	}
1341 
1342 	if (adev->dm.dmub_fw) {
1343 		release_firmware(adev->dm.dmub_fw);
1344 		adev->dm.dmub_fw = NULL;
1345 	}
1346 
1347 	if(adev->dm.fw_dmcu) {
1348 		release_firmware(adev->dm.fw_dmcu);
1349 		adev->dm.fw_dmcu = NULL;
1350 	}
1351 
1352 	return 0;
1353 }
1354 
1355 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1356 {
1357 	struct amdgpu_dm_connector *aconnector;
1358 	struct drm_connector *connector;
1359 	struct drm_connector_list_iter iter;
1360 	int ret = 0;
1361 
1362 	drm_connector_list_iter_begin(dev, &iter);
1363 	drm_for_each_connector_iter(connector, &iter) {
1364 		aconnector = to_amdgpu_dm_connector(connector);
1365 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1366 		    aconnector->mst_mgr.aux) {
1367 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1368 					 aconnector,
1369 					 aconnector->base.base.id);
1370 
1371 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1372 			if (ret < 0) {
1373 				DRM_ERROR("DM_MST: Failed to start MST\n");
1374 				aconnector->dc_link->type =
1375 					dc_connection_single;
1376 				break;
1377 			}
1378 		}
1379 	}
1380 	drm_connector_list_iter_end(&iter);
1381 
1382 	return ret;
1383 }
1384 
1385 static int dm_late_init(void *handle)
1386 {
1387 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1388 
1389 	struct dmcu_iram_parameters params;
1390 	unsigned int linear_lut[16];
1391 	int i;
1392 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1393 	bool ret = false;
1394 
1395 	for (i = 0; i < 16; i++)
1396 		linear_lut[i] = 0xFFFF * i / 15;
1397 
1398 	params.set = 0;
1399 	params.backlight_ramping_start = 0xCCCC;
1400 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1401 	params.backlight_lut_array_size = 16;
1402 	params.backlight_lut_array = linear_lut;
1403 
1404 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1405 	 * 0xFFFF x 0.01 = 0x28F
1406 	 */
1407 	params.min_abm_backlight = 0x28F;
1408 
1409 	/* todo will enable for navi10 */
1410 	if (adev->asic_type <= CHIP_RAVEN) {
1411 		ret = dmcu_load_iram(dmcu, params);
1412 
1413 		if (!ret)
1414 			return -EINVAL;
1415 	}
1416 
1417 	return detect_mst_link_for_all_connectors(adev->ddev);
1418 }
1419 
1420 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1421 {
1422 	struct amdgpu_dm_connector *aconnector;
1423 	struct drm_connector *connector;
1424 	struct drm_connector_list_iter iter;
1425 	struct drm_dp_mst_topology_mgr *mgr;
1426 	int ret;
1427 	bool need_hotplug = false;
1428 
1429 	drm_connector_list_iter_begin(dev, &iter);
1430 	drm_for_each_connector_iter(connector, &iter) {
1431 		aconnector = to_amdgpu_dm_connector(connector);
1432 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1433 		    aconnector->mst_port)
1434 			continue;
1435 
1436 		mgr = &aconnector->mst_mgr;
1437 
1438 		if (suspend) {
1439 			drm_dp_mst_topology_mgr_suspend(mgr);
1440 		} else {
1441 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1442 			if (ret < 0) {
1443 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1444 				need_hotplug = true;
1445 			}
1446 		}
1447 	}
1448 	drm_connector_list_iter_end(&iter);
1449 
1450 	if (need_hotplug)
1451 		drm_kms_helper_hotplug_event(dev);
1452 }
1453 
1454 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1455 {
1456 	struct smu_context *smu = &adev->smu;
1457 	int ret = 0;
1458 
1459 	if (!is_support_sw_smu(adev))
1460 		return 0;
1461 
1462 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1463 	 * on window driver dc implementation.
1464 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1465 	 * should be passed to smu during boot up and resume from s3.
1466 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1467 	 * dcn20_resource_construct
1468 	 * then call pplib functions below to pass the settings to smu:
1469 	 * smu_set_watermarks_for_clock_ranges
1470 	 * smu_set_watermarks_table
1471 	 * navi10_set_watermarks_table
1472 	 * smu_write_watermarks_table
1473 	 *
1474 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1475 	 * dc has implemented different flow for window driver:
1476 	 * dc_hardware_init / dc_set_power_state
1477 	 * dcn10_init_hw
1478 	 * notify_wm_ranges
1479 	 * set_wm_ranges
1480 	 * -- Linux
1481 	 * smu_set_watermarks_for_clock_ranges
1482 	 * renoir_set_watermarks_table
1483 	 * smu_write_watermarks_table
1484 	 *
1485 	 * For Linux,
1486 	 * dc_hardware_init -> amdgpu_dm_init
1487 	 * dc_set_power_state --> dm_resume
1488 	 *
1489 	 * therefore, this function apply to navi10/12/14 but not Renoir
1490 	 * *
1491 	 */
1492 	switch(adev->asic_type) {
1493 	case CHIP_NAVI10:
1494 	case CHIP_NAVI14:
1495 	case CHIP_NAVI12:
1496 		break;
1497 	default:
1498 		return 0;
1499 	}
1500 
1501 	mutex_lock(&smu->mutex);
1502 
1503 	/* pass data to smu controller */
1504 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1505 			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1506 		ret = smu_write_watermarks_table(smu);
1507 
1508 		if (ret) {
1509 			mutex_unlock(&smu->mutex);
1510 			DRM_ERROR("Failed to update WMTABLE!\n");
1511 			return ret;
1512 		}
1513 		smu->watermarks_bitmap |= WATERMARKS_LOADED;
1514 	}
1515 
1516 	mutex_unlock(&smu->mutex);
1517 
1518 	return 0;
1519 }
1520 
1521 /**
1522  * dm_hw_init() - Initialize DC device
1523  * @handle: The base driver device containing the amdgpu_dm device.
1524  *
1525  * Initialize the &struct amdgpu_display_manager device. This involves calling
1526  * the initializers of each DM component, then populating the struct with them.
1527  *
1528  * Although the function implies hardware initialization, both hardware and
1529  * software are initialized here. Splitting them out to their relevant init
1530  * hooks is a future TODO item.
1531  *
1532  * Some notable things that are initialized here:
1533  *
1534  * - Display Core, both software and hardware
1535  * - DC modules that we need (freesync and color management)
1536  * - DRM software states
1537  * - Interrupt sources and handlers
1538  * - Vblank support
1539  * - Debug FS entries, if enabled
1540  */
1541 static int dm_hw_init(void *handle)
1542 {
1543 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1544 	/* Create DAL display manager */
1545 	amdgpu_dm_init(adev);
1546 	amdgpu_dm_hpd_init(adev);
1547 
1548 	return 0;
1549 }
1550 
1551 /**
1552  * dm_hw_fini() - Teardown DC device
1553  * @handle: The base driver device containing the amdgpu_dm device.
1554  *
1555  * Teardown components within &struct amdgpu_display_manager that require
1556  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1557  * were loaded. Also flush IRQ workqueues and disable them.
1558  */
1559 static int dm_hw_fini(void *handle)
1560 {
1561 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1562 
1563 	amdgpu_dm_hpd_fini(adev);
1564 
1565 	amdgpu_dm_irq_fini(adev);
1566 	amdgpu_dm_fini(adev);
1567 	return 0;
1568 }
1569 
1570 static int dm_suspend(void *handle)
1571 {
1572 	struct amdgpu_device *adev = handle;
1573 	struct amdgpu_display_manager *dm = &adev->dm;
1574 	int ret = 0;
1575 
1576 	WARN_ON(adev->dm.cached_state);
1577 	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1578 
1579 	s3_handle_mst(adev->ddev, true);
1580 
1581 	amdgpu_dm_irq_suspend(adev);
1582 
1583 
1584 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1585 
1586 	return ret;
1587 }
1588 
1589 static struct amdgpu_dm_connector *
1590 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1591 					     struct drm_crtc *crtc)
1592 {
1593 	uint32_t i;
1594 	struct drm_connector_state *new_con_state;
1595 	struct drm_connector *connector;
1596 	struct drm_crtc *crtc_from_state;
1597 
1598 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1599 		crtc_from_state = new_con_state->crtc;
1600 
1601 		if (crtc_from_state == crtc)
1602 			return to_amdgpu_dm_connector(connector);
1603 	}
1604 
1605 	return NULL;
1606 }
1607 
1608 static void emulated_link_detect(struct dc_link *link)
1609 {
1610 	struct dc_sink_init_data sink_init_data = { 0 };
1611 	struct display_sink_capability sink_caps = { 0 };
1612 	enum dc_edid_status edid_status;
1613 	struct dc_context *dc_ctx = link->ctx;
1614 	struct dc_sink *sink = NULL;
1615 	struct dc_sink *prev_sink = NULL;
1616 
1617 	link->type = dc_connection_none;
1618 	prev_sink = link->local_sink;
1619 
1620 	if (prev_sink != NULL)
1621 		dc_sink_retain(prev_sink);
1622 
1623 	switch (link->connector_signal) {
1624 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1625 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1626 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1627 		break;
1628 	}
1629 
1630 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1631 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1632 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1633 		break;
1634 	}
1635 
1636 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1637 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1638 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1639 		break;
1640 	}
1641 
1642 	case SIGNAL_TYPE_LVDS: {
1643 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1644 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1645 		break;
1646 	}
1647 
1648 	case SIGNAL_TYPE_EDP: {
1649 		sink_caps.transaction_type =
1650 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1651 		sink_caps.signal = SIGNAL_TYPE_EDP;
1652 		break;
1653 	}
1654 
1655 	case SIGNAL_TYPE_DISPLAY_PORT: {
1656 		sink_caps.transaction_type =
1657 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1658 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1659 		break;
1660 	}
1661 
1662 	default:
1663 		DC_ERROR("Invalid connector type! signal:%d\n",
1664 			link->connector_signal);
1665 		return;
1666 	}
1667 
1668 	sink_init_data.link = link;
1669 	sink_init_data.sink_signal = sink_caps.signal;
1670 
1671 	sink = dc_sink_create(&sink_init_data);
1672 	if (!sink) {
1673 		DC_ERROR("Failed to create sink!\n");
1674 		return;
1675 	}
1676 
1677 	/* dc_sink_create returns a new reference */
1678 	link->local_sink = sink;
1679 
1680 	edid_status = dm_helpers_read_local_edid(
1681 			link->ctx,
1682 			link,
1683 			sink);
1684 
1685 	if (edid_status != EDID_OK)
1686 		DC_ERROR("Failed to read EDID");
1687 
1688 }
1689 
1690 static int dm_resume(void *handle)
1691 {
1692 	struct amdgpu_device *adev = handle;
1693 	struct drm_device *ddev = adev->ddev;
1694 	struct amdgpu_display_manager *dm = &adev->dm;
1695 	struct amdgpu_dm_connector *aconnector;
1696 	struct drm_connector *connector;
1697 	struct drm_connector_list_iter iter;
1698 	struct drm_crtc *crtc;
1699 	struct drm_crtc_state *new_crtc_state;
1700 	struct dm_crtc_state *dm_new_crtc_state;
1701 	struct drm_plane *plane;
1702 	struct drm_plane_state *new_plane_state;
1703 	struct dm_plane_state *dm_new_plane_state;
1704 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1705 	enum dc_connection_type new_connection_type = dc_connection_none;
1706 	int i, r;
1707 
1708 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1709 	dc_release_state(dm_state->context);
1710 	dm_state->context = dc_create_state(dm->dc);
1711 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1712 	dc_resource_state_construct(dm->dc, dm_state->context);
1713 
1714 	/* Before powering on DC we need to re-initialize DMUB. */
1715 	r = dm_dmub_hw_init(adev);
1716 	if (r)
1717 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1718 
1719 	/* power on hardware */
1720 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1721 
1722 	/* program HPD filter */
1723 	dc_resume(dm->dc);
1724 
1725 	/*
1726 	 * early enable HPD Rx IRQ, should be done before set mode as short
1727 	 * pulse interrupts are used for MST
1728 	 */
1729 	amdgpu_dm_irq_resume_early(adev);
1730 
1731 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
1732 	s3_handle_mst(ddev, false);
1733 
1734 	/* Do detection*/
1735 	drm_connector_list_iter_begin(ddev, &iter);
1736 	drm_for_each_connector_iter(connector, &iter) {
1737 		aconnector = to_amdgpu_dm_connector(connector);
1738 
1739 		/*
1740 		 * this is the case when traversing through already created
1741 		 * MST connectors, should be skipped
1742 		 */
1743 		if (aconnector->mst_port)
1744 			continue;
1745 
1746 		mutex_lock(&aconnector->hpd_lock);
1747 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1748 			DRM_ERROR("KMS: Failed to detect connector\n");
1749 
1750 		if (aconnector->base.force && new_connection_type == dc_connection_none)
1751 			emulated_link_detect(aconnector->dc_link);
1752 		else
1753 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1754 
1755 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1756 			aconnector->fake_enable = false;
1757 
1758 		if (aconnector->dc_sink)
1759 			dc_sink_release(aconnector->dc_sink);
1760 		aconnector->dc_sink = NULL;
1761 		amdgpu_dm_update_connector_after_detect(aconnector);
1762 		mutex_unlock(&aconnector->hpd_lock);
1763 	}
1764 	drm_connector_list_iter_end(&iter);
1765 
1766 	/* Force mode set in atomic commit */
1767 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1768 		new_crtc_state->active_changed = true;
1769 
1770 	/*
1771 	 * atomic_check is expected to create the dc states. We need to release
1772 	 * them here, since they were duplicated as part of the suspend
1773 	 * procedure.
1774 	 */
1775 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1776 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1777 		if (dm_new_crtc_state->stream) {
1778 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1779 			dc_stream_release(dm_new_crtc_state->stream);
1780 			dm_new_crtc_state->stream = NULL;
1781 		}
1782 	}
1783 
1784 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1785 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
1786 		if (dm_new_plane_state->dc_state) {
1787 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1788 			dc_plane_state_release(dm_new_plane_state->dc_state);
1789 			dm_new_plane_state->dc_state = NULL;
1790 		}
1791 	}
1792 
1793 	drm_atomic_helper_resume(ddev, dm->cached_state);
1794 
1795 	dm->cached_state = NULL;
1796 
1797 	amdgpu_dm_irq_resume_late(adev);
1798 
1799 	amdgpu_dm_smu_write_watermarks_table(adev);
1800 
1801 	return 0;
1802 }
1803 
1804 /**
1805  * DOC: DM Lifecycle
1806  *
1807  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
1808  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1809  * the base driver's device list to be initialized and torn down accordingly.
1810  *
1811  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1812  */
1813 
1814 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1815 	.name = "dm",
1816 	.early_init = dm_early_init,
1817 	.late_init = dm_late_init,
1818 	.sw_init = dm_sw_init,
1819 	.sw_fini = dm_sw_fini,
1820 	.hw_init = dm_hw_init,
1821 	.hw_fini = dm_hw_fini,
1822 	.suspend = dm_suspend,
1823 	.resume = dm_resume,
1824 	.is_idle = dm_is_idle,
1825 	.wait_for_idle = dm_wait_for_idle,
1826 	.check_soft_reset = dm_check_soft_reset,
1827 	.soft_reset = dm_soft_reset,
1828 	.set_clockgating_state = dm_set_clockgating_state,
1829 	.set_powergating_state = dm_set_powergating_state,
1830 };
1831 
1832 const struct amdgpu_ip_block_version dm_ip_block =
1833 {
1834 	.type = AMD_IP_BLOCK_TYPE_DCE,
1835 	.major = 1,
1836 	.minor = 0,
1837 	.rev = 0,
1838 	.funcs = &amdgpu_dm_funcs,
1839 };
1840 
1841 
1842 /**
1843  * DOC: atomic
1844  *
1845  * *WIP*
1846  */
1847 
1848 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1849 	.fb_create = amdgpu_display_user_framebuffer_create,
1850 	.output_poll_changed = drm_fb_helper_output_poll_changed,
1851 	.atomic_check = amdgpu_dm_atomic_check,
1852 	.atomic_commit = amdgpu_dm_atomic_commit,
1853 };
1854 
1855 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1856 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1857 };
1858 
1859 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1860 {
1861 	u32 max_cll, min_cll, max, min, q, r;
1862 	struct amdgpu_dm_backlight_caps *caps;
1863 	struct amdgpu_display_manager *dm;
1864 	struct drm_connector *conn_base;
1865 	struct amdgpu_device *adev;
1866 	static const u8 pre_computed_values[] = {
1867 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1868 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1869 
1870 	if (!aconnector || !aconnector->dc_link)
1871 		return;
1872 
1873 	conn_base = &aconnector->base;
1874 	adev = conn_base->dev->dev_private;
1875 	dm = &adev->dm;
1876 	caps = &dm->backlight_caps;
1877 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1878 	caps->aux_support = false;
1879 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1880 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1881 
1882 	if (caps->ext_caps->bits.oled == 1 ||
1883 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1884 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1885 		caps->aux_support = true;
1886 
1887 	/* From the specification (CTA-861-G), for calculating the maximum
1888 	 * luminance we need to use:
1889 	 *	Luminance = 50*2**(CV/32)
1890 	 * Where CV is a one-byte value.
1891 	 * For calculating this expression we may need float point precision;
1892 	 * to avoid this complexity level, we take advantage that CV is divided
1893 	 * by a constant. From the Euclids division algorithm, we know that CV
1894 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
1895 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
1896 	 * need to pre-compute the value of r/32. For pre-computing the values
1897 	 * We just used the following Ruby line:
1898 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1899 	 * The results of the above expressions can be verified at
1900 	 * pre_computed_values.
1901 	 */
1902 	q = max_cll >> 5;
1903 	r = max_cll % 32;
1904 	max = (1 << q) * pre_computed_values[r];
1905 
1906 	// min luminance: maxLum * (CV/255)^2 / 100
1907 	q = DIV_ROUND_CLOSEST(min_cll, 255);
1908 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
1909 
1910 	caps->aux_max_input_signal = max;
1911 	caps->aux_min_input_signal = min;
1912 }
1913 
1914 void amdgpu_dm_update_connector_after_detect(
1915 		struct amdgpu_dm_connector *aconnector)
1916 {
1917 	struct drm_connector *connector = &aconnector->base;
1918 	struct drm_device *dev = connector->dev;
1919 	struct dc_sink *sink;
1920 
1921 	/* MST handled by drm_mst framework */
1922 	if (aconnector->mst_mgr.mst_state == true)
1923 		return;
1924 
1925 
1926 	sink = aconnector->dc_link->local_sink;
1927 	if (sink)
1928 		dc_sink_retain(sink);
1929 
1930 	/*
1931 	 * Edid mgmt connector gets first update only in mode_valid hook and then
1932 	 * the connector sink is set to either fake or physical sink depends on link status.
1933 	 * Skip if already done during boot.
1934 	 */
1935 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1936 			&& aconnector->dc_em_sink) {
1937 
1938 		/*
1939 		 * For S3 resume with headless use eml_sink to fake stream
1940 		 * because on resume connector->sink is set to NULL
1941 		 */
1942 		mutex_lock(&dev->mode_config.mutex);
1943 
1944 		if (sink) {
1945 			if (aconnector->dc_sink) {
1946 				amdgpu_dm_update_freesync_caps(connector, NULL);
1947 				/*
1948 				 * retain and release below are used to
1949 				 * bump up refcount for sink because the link doesn't point
1950 				 * to it anymore after disconnect, so on next crtc to connector
1951 				 * reshuffle by UMD we will get into unwanted dc_sink release
1952 				 */
1953 				dc_sink_release(aconnector->dc_sink);
1954 			}
1955 			aconnector->dc_sink = sink;
1956 			dc_sink_retain(aconnector->dc_sink);
1957 			amdgpu_dm_update_freesync_caps(connector,
1958 					aconnector->edid);
1959 		} else {
1960 			amdgpu_dm_update_freesync_caps(connector, NULL);
1961 			if (!aconnector->dc_sink) {
1962 				aconnector->dc_sink = aconnector->dc_em_sink;
1963 				dc_sink_retain(aconnector->dc_sink);
1964 			}
1965 		}
1966 
1967 		mutex_unlock(&dev->mode_config.mutex);
1968 
1969 		if (sink)
1970 			dc_sink_release(sink);
1971 		return;
1972 	}
1973 
1974 	/*
1975 	 * TODO: temporary guard to look for proper fix
1976 	 * if this sink is MST sink, we should not do anything
1977 	 */
1978 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1979 		dc_sink_release(sink);
1980 		return;
1981 	}
1982 
1983 	if (aconnector->dc_sink == sink) {
1984 		/*
1985 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
1986 		 * Do nothing!!
1987 		 */
1988 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1989 				aconnector->connector_id);
1990 		if (sink)
1991 			dc_sink_release(sink);
1992 		return;
1993 	}
1994 
1995 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1996 		aconnector->connector_id, aconnector->dc_sink, sink);
1997 
1998 	mutex_lock(&dev->mode_config.mutex);
1999 
2000 	/*
2001 	 * 1. Update status of the drm connector
2002 	 * 2. Send an event and let userspace tell us what to do
2003 	 */
2004 	if (sink) {
2005 		/*
2006 		 * TODO: check if we still need the S3 mode update workaround.
2007 		 * If yes, put it here.
2008 		 */
2009 		if (aconnector->dc_sink)
2010 			amdgpu_dm_update_freesync_caps(connector, NULL);
2011 
2012 		aconnector->dc_sink = sink;
2013 		dc_sink_retain(aconnector->dc_sink);
2014 		if (sink->dc_edid.length == 0) {
2015 			aconnector->edid = NULL;
2016 			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2017 		} else {
2018 			aconnector->edid =
2019 				(struct edid *) sink->dc_edid.raw_edid;
2020 
2021 
2022 			drm_connector_update_edid_property(connector,
2023 					aconnector->edid);
2024 			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2025 					    aconnector->edid);
2026 		}
2027 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2028 		update_connector_ext_caps(aconnector);
2029 	} else {
2030 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2031 		amdgpu_dm_update_freesync_caps(connector, NULL);
2032 		drm_connector_update_edid_property(connector, NULL);
2033 		aconnector->num_modes = 0;
2034 		dc_sink_release(aconnector->dc_sink);
2035 		aconnector->dc_sink = NULL;
2036 		aconnector->edid = NULL;
2037 #ifdef CONFIG_DRM_AMD_DC_HDCP
2038 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2039 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2040 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2041 #endif
2042 	}
2043 
2044 	mutex_unlock(&dev->mode_config.mutex);
2045 
2046 	if (sink)
2047 		dc_sink_release(sink);
2048 }
2049 
2050 static void handle_hpd_irq(void *param)
2051 {
2052 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2053 	struct drm_connector *connector = &aconnector->base;
2054 	struct drm_device *dev = connector->dev;
2055 	enum dc_connection_type new_connection_type = dc_connection_none;
2056 #ifdef CONFIG_DRM_AMD_DC_HDCP
2057 	struct amdgpu_device *adev = dev->dev_private;
2058 #endif
2059 
2060 	/*
2061 	 * In case of failure or MST no need to update connector status or notify the OS
2062 	 * since (for MST case) MST does this in its own context.
2063 	 */
2064 	mutex_lock(&aconnector->hpd_lock);
2065 
2066 #ifdef CONFIG_DRM_AMD_DC_HDCP
2067 	if (adev->dm.hdcp_workqueue)
2068 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2069 #endif
2070 	if (aconnector->fake_enable)
2071 		aconnector->fake_enable = false;
2072 
2073 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2074 		DRM_ERROR("KMS: Failed to detect connector\n");
2075 
2076 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2077 		emulated_link_detect(aconnector->dc_link);
2078 
2079 
2080 		drm_modeset_lock_all(dev);
2081 		dm_restore_drm_connector_state(dev, connector);
2082 		drm_modeset_unlock_all(dev);
2083 
2084 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2085 			drm_kms_helper_hotplug_event(dev);
2086 
2087 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2088 		amdgpu_dm_update_connector_after_detect(aconnector);
2089 
2090 
2091 		drm_modeset_lock_all(dev);
2092 		dm_restore_drm_connector_state(dev, connector);
2093 		drm_modeset_unlock_all(dev);
2094 
2095 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2096 			drm_kms_helper_hotplug_event(dev);
2097 	}
2098 	mutex_unlock(&aconnector->hpd_lock);
2099 
2100 }
2101 
2102 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2103 {
2104 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2105 	uint8_t dret;
2106 	bool new_irq_handled = false;
2107 	int dpcd_addr;
2108 	int dpcd_bytes_to_read;
2109 
2110 	const int max_process_count = 30;
2111 	int process_count = 0;
2112 
2113 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2114 
2115 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2116 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2117 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2118 		dpcd_addr = DP_SINK_COUNT;
2119 	} else {
2120 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2121 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2122 		dpcd_addr = DP_SINK_COUNT_ESI;
2123 	}
2124 
2125 	dret = drm_dp_dpcd_read(
2126 		&aconnector->dm_dp_aux.aux,
2127 		dpcd_addr,
2128 		esi,
2129 		dpcd_bytes_to_read);
2130 
2131 	while (dret == dpcd_bytes_to_read &&
2132 		process_count < max_process_count) {
2133 		uint8_t retry;
2134 		dret = 0;
2135 
2136 		process_count++;
2137 
2138 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2139 		/* handle HPD short pulse irq */
2140 		if (aconnector->mst_mgr.mst_state)
2141 			drm_dp_mst_hpd_irq(
2142 				&aconnector->mst_mgr,
2143 				esi,
2144 				&new_irq_handled);
2145 
2146 		if (new_irq_handled) {
2147 			/* ACK at DPCD to notify down stream */
2148 			const int ack_dpcd_bytes_to_write =
2149 				dpcd_bytes_to_read - 1;
2150 
2151 			for (retry = 0; retry < 3; retry++) {
2152 				uint8_t wret;
2153 
2154 				wret = drm_dp_dpcd_write(
2155 					&aconnector->dm_dp_aux.aux,
2156 					dpcd_addr + 1,
2157 					&esi[1],
2158 					ack_dpcd_bytes_to_write);
2159 				if (wret == ack_dpcd_bytes_to_write)
2160 					break;
2161 			}
2162 
2163 			/* check if there is new irq to be handled */
2164 			dret = drm_dp_dpcd_read(
2165 				&aconnector->dm_dp_aux.aux,
2166 				dpcd_addr,
2167 				esi,
2168 				dpcd_bytes_to_read);
2169 
2170 			new_irq_handled = false;
2171 		} else {
2172 			break;
2173 		}
2174 	}
2175 
2176 	if (process_count == max_process_count)
2177 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2178 }
2179 
2180 static void handle_hpd_rx_irq(void *param)
2181 {
2182 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2183 	struct drm_connector *connector = &aconnector->base;
2184 	struct drm_device *dev = connector->dev;
2185 	struct dc_link *dc_link = aconnector->dc_link;
2186 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2187 	enum dc_connection_type new_connection_type = dc_connection_none;
2188 #ifdef CONFIG_DRM_AMD_DC_HDCP
2189 	union hpd_irq_data hpd_irq_data;
2190 	struct amdgpu_device *adev = dev->dev_private;
2191 
2192 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2193 #endif
2194 
2195 	/*
2196 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2197 	 * conflict, after implement i2c helper, this mutex should be
2198 	 * retired.
2199 	 */
2200 	if (dc_link->type != dc_connection_mst_branch)
2201 		mutex_lock(&aconnector->hpd_lock);
2202 
2203 
2204 #ifdef CONFIG_DRM_AMD_DC_HDCP
2205 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2206 #else
2207 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2208 #endif
2209 			!is_mst_root_connector) {
2210 		/* Downstream Port status changed. */
2211 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2212 			DRM_ERROR("KMS: Failed to detect connector\n");
2213 
2214 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2215 			emulated_link_detect(dc_link);
2216 
2217 			if (aconnector->fake_enable)
2218 				aconnector->fake_enable = false;
2219 
2220 			amdgpu_dm_update_connector_after_detect(aconnector);
2221 
2222 
2223 			drm_modeset_lock_all(dev);
2224 			dm_restore_drm_connector_state(dev, connector);
2225 			drm_modeset_unlock_all(dev);
2226 
2227 			drm_kms_helper_hotplug_event(dev);
2228 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2229 
2230 			if (aconnector->fake_enable)
2231 				aconnector->fake_enable = false;
2232 
2233 			amdgpu_dm_update_connector_after_detect(aconnector);
2234 
2235 
2236 			drm_modeset_lock_all(dev);
2237 			dm_restore_drm_connector_state(dev, connector);
2238 			drm_modeset_unlock_all(dev);
2239 
2240 			drm_kms_helper_hotplug_event(dev);
2241 		}
2242 	}
2243 #ifdef CONFIG_DRM_AMD_DC_HDCP
2244 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2245 		if (adev->dm.hdcp_workqueue)
2246 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2247 	}
2248 #endif
2249 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2250 	    (dc_link->type == dc_connection_mst_branch))
2251 		dm_handle_hpd_rx_irq(aconnector);
2252 
2253 	if (dc_link->type != dc_connection_mst_branch) {
2254 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2255 		mutex_unlock(&aconnector->hpd_lock);
2256 	}
2257 }
2258 
2259 static void register_hpd_handlers(struct amdgpu_device *adev)
2260 {
2261 	struct drm_device *dev = adev->ddev;
2262 	struct drm_connector *connector;
2263 	struct amdgpu_dm_connector *aconnector;
2264 	const struct dc_link *dc_link;
2265 	struct dc_interrupt_params int_params = {0};
2266 
2267 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2268 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2269 
2270 	list_for_each_entry(connector,
2271 			&dev->mode_config.connector_list, head)	{
2272 
2273 		aconnector = to_amdgpu_dm_connector(connector);
2274 		dc_link = aconnector->dc_link;
2275 
2276 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2277 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2278 			int_params.irq_source = dc_link->irq_source_hpd;
2279 
2280 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2281 					handle_hpd_irq,
2282 					(void *) aconnector);
2283 		}
2284 
2285 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2286 
2287 			/* Also register for DP short pulse (hpd_rx). */
2288 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2289 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2290 
2291 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2292 					handle_hpd_rx_irq,
2293 					(void *) aconnector);
2294 		}
2295 	}
2296 }
2297 
2298 /* Register IRQ sources and initialize IRQ callbacks */
2299 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2300 {
2301 	struct dc *dc = adev->dm.dc;
2302 	struct common_irq_params *c_irq_params;
2303 	struct dc_interrupt_params int_params = {0};
2304 	int r;
2305 	int i;
2306 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2307 
2308 	if (adev->asic_type >= CHIP_VEGA10)
2309 		client_id = SOC15_IH_CLIENTID_DCE;
2310 
2311 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2312 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2313 
2314 	/*
2315 	 * Actions of amdgpu_irq_add_id():
2316 	 * 1. Register a set() function with base driver.
2317 	 *    Base driver will call set() function to enable/disable an
2318 	 *    interrupt in DC hardware.
2319 	 * 2. Register amdgpu_dm_irq_handler().
2320 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2321 	 *    coming from DC hardware.
2322 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2323 	 *    for acknowledging and handling. */
2324 
2325 	/* Use VBLANK interrupt */
2326 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2327 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2328 		if (r) {
2329 			DRM_ERROR("Failed to add crtc irq id!\n");
2330 			return r;
2331 		}
2332 
2333 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2334 		int_params.irq_source =
2335 			dc_interrupt_to_irq_source(dc, i, 0);
2336 
2337 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2338 
2339 		c_irq_params->adev = adev;
2340 		c_irq_params->irq_src = int_params.irq_source;
2341 
2342 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2343 				dm_crtc_high_irq, c_irq_params);
2344 	}
2345 
2346 	/* Use VUPDATE interrupt */
2347 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2348 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2349 		if (r) {
2350 			DRM_ERROR("Failed to add vupdate irq id!\n");
2351 			return r;
2352 		}
2353 
2354 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2355 		int_params.irq_source =
2356 			dc_interrupt_to_irq_source(dc, i, 0);
2357 
2358 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2359 
2360 		c_irq_params->adev = adev;
2361 		c_irq_params->irq_src = int_params.irq_source;
2362 
2363 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2364 				dm_vupdate_high_irq, c_irq_params);
2365 	}
2366 
2367 	/* Use GRPH_PFLIP interrupt */
2368 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2369 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2370 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2371 		if (r) {
2372 			DRM_ERROR("Failed to add page flip irq id!\n");
2373 			return r;
2374 		}
2375 
2376 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2377 		int_params.irq_source =
2378 			dc_interrupt_to_irq_source(dc, i, 0);
2379 
2380 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2381 
2382 		c_irq_params->adev = adev;
2383 		c_irq_params->irq_src = int_params.irq_source;
2384 
2385 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2386 				dm_pflip_high_irq, c_irq_params);
2387 
2388 	}
2389 
2390 	/* HPD */
2391 	r = amdgpu_irq_add_id(adev, client_id,
2392 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2393 	if (r) {
2394 		DRM_ERROR("Failed to add hpd irq id!\n");
2395 		return r;
2396 	}
2397 
2398 	register_hpd_handlers(adev);
2399 
2400 	return 0;
2401 }
2402 
2403 #if defined(CONFIG_DRM_AMD_DC_DCN)
2404 /* Register IRQ sources and initialize IRQ callbacks */
2405 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2406 {
2407 	struct dc *dc = adev->dm.dc;
2408 	struct common_irq_params *c_irq_params;
2409 	struct dc_interrupt_params int_params = {0};
2410 	int r;
2411 	int i;
2412 
2413 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2414 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2415 
2416 	/*
2417 	 * Actions of amdgpu_irq_add_id():
2418 	 * 1. Register a set() function with base driver.
2419 	 *    Base driver will call set() function to enable/disable an
2420 	 *    interrupt in DC hardware.
2421 	 * 2. Register amdgpu_dm_irq_handler().
2422 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2423 	 *    coming from DC hardware.
2424 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2425 	 *    for acknowledging and handling.
2426 	 */
2427 
2428 	/* Use VSTARTUP interrupt */
2429 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2430 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2431 			i++) {
2432 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2433 
2434 		if (r) {
2435 			DRM_ERROR("Failed to add crtc irq id!\n");
2436 			return r;
2437 		}
2438 
2439 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2440 		int_params.irq_source =
2441 			dc_interrupt_to_irq_source(dc, i, 0);
2442 
2443 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2444 
2445 		c_irq_params->adev = adev;
2446 		c_irq_params->irq_src = int_params.irq_source;
2447 
2448 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2449 				dm_dcn_crtc_high_irq, c_irq_params);
2450 	}
2451 
2452 	/* Use GRPH_PFLIP interrupt */
2453 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2454 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2455 			i++) {
2456 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2457 		if (r) {
2458 			DRM_ERROR("Failed to add page flip irq id!\n");
2459 			return r;
2460 		}
2461 
2462 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2463 		int_params.irq_source =
2464 			dc_interrupt_to_irq_source(dc, i, 0);
2465 
2466 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2467 
2468 		c_irq_params->adev = adev;
2469 		c_irq_params->irq_src = int_params.irq_source;
2470 
2471 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2472 				dm_pflip_high_irq, c_irq_params);
2473 
2474 	}
2475 
2476 	/* HPD */
2477 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2478 			&adev->hpd_irq);
2479 	if (r) {
2480 		DRM_ERROR("Failed to add hpd irq id!\n");
2481 		return r;
2482 	}
2483 
2484 	register_hpd_handlers(adev);
2485 
2486 	return 0;
2487 }
2488 #endif
2489 
2490 /*
2491  * Acquires the lock for the atomic state object and returns
2492  * the new atomic state.
2493  *
2494  * This should only be called during atomic check.
2495  */
2496 static int dm_atomic_get_state(struct drm_atomic_state *state,
2497 			       struct dm_atomic_state **dm_state)
2498 {
2499 	struct drm_device *dev = state->dev;
2500 	struct amdgpu_device *adev = dev->dev_private;
2501 	struct amdgpu_display_manager *dm = &adev->dm;
2502 	struct drm_private_state *priv_state;
2503 
2504 	if (*dm_state)
2505 		return 0;
2506 
2507 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2508 	if (IS_ERR(priv_state))
2509 		return PTR_ERR(priv_state);
2510 
2511 	*dm_state = to_dm_atomic_state(priv_state);
2512 
2513 	return 0;
2514 }
2515 
2516 struct dm_atomic_state *
2517 dm_atomic_get_new_state(struct drm_atomic_state *state)
2518 {
2519 	struct drm_device *dev = state->dev;
2520 	struct amdgpu_device *adev = dev->dev_private;
2521 	struct amdgpu_display_manager *dm = &adev->dm;
2522 	struct drm_private_obj *obj;
2523 	struct drm_private_state *new_obj_state;
2524 	int i;
2525 
2526 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2527 		if (obj->funcs == dm->atomic_obj.funcs)
2528 			return to_dm_atomic_state(new_obj_state);
2529 	}
2530 
2531 	return NULL;
2532 }
2533 
2534 struct dm_atomic_state *
2535 dm_atomic_get_old_state(struct drm_atomic_state *state)
2536 {
2537 	struct drm_device *dev = state->dev;
2538 	struct amdgpu_device *adev = dev->dev_private;
2539 	struct amdgpu_display_manager *dm = &adev->dm;
2540 	struct drm_private_obj *obj;
2541 	struct drm_private_state *old_obj_state;
2542 	int i;
2543 
2544 	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2545 		if (obj->funcs == dm->atomic_obj.funcs)
2546 			return to_dm_atomic_state(old_obj_state);
2547 	}
2548 
2549 	return NULL;
2550 }
2551 
2552 static struct drm_private_state *
2553 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2554 {
2555 	struct dm_atomic_state *old_state, *new_state;
2556 
2557 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2558 	if (!new_state)
2559 		return NULL;
2560 
2561 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2562 
2563 	old_state = to_dm_atomic_state(obj->state);
2564 
2565 	if (old_state && old_state->context)
2566 		new_state->context = dc_copy_state(old_state->context);
2567 
2568 	if (!new_state->context) {
2569 		kfree(new_state);
2570 		return NULL;
2571 	}
2572 
2573 	return &new_state->base;
2574 }
2575 
2576 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2577 				    struct drm_private_state *state)
2578 {
2579 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2580 
2581 	if (dm_state && dm_state->context)
2582 		dc_release_state(dm_state->context);
2583 
2584 	kfree(dm_state);
2585 }
2586 
2587 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2588 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2589 	.atomic_destroy_state = dm_atomic_destroy_state,
2590 };
2591 
2592 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2593 {
2594 	struct dm_atomic_state *state;
2595 	int r;
2596 
2597 	adev->mode_info.mode_config_initialized = true;
2598 
2599 	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2600 	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2601 
2602 	adev->ddev->mode_config.max_width = 16384;
2603 	adev->ddev->mode_config.max_height = 16384;
2604 
2605 	adev->ddev->mode_config.preferred_depth = 24;
2606 	adev->ddev->mode_config.prefer_shadow = 1;
2607 	/* indicates support for immediate flip */
2608 	adev->ddev->mode_config.async_page_flip = true;
2609 
2610 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2611 
2612 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2613 	if (!state)
2614 		return -ENOMEM;
2615 
2616 	state->context = dc_create_state(adev->dm.dc);
2617 	if (!state->context) {
2618 		kfree(state);
2619 		return -ENOMEM;
2620 	}
2621 
2622 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2623 
2624 	drm_atomic_private_obj_init(adev->ddev,
2625 				    &adev->dm.atomic_obj,
2626 				    &state->base,
2627 				    &dm_atomic_state_funcs);
2628 
2629 	r = amdgpu_display_modeset_create_props(adev);
2630 	if (r)
2631 		return r;
2632 
2633 	r = amdgpu_dm_audio_init(adev);
2634 	if (r)
2635 		return r;
2636 
2637 	return 0;
2638 }
2639 
2640 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2641 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2642 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2643 
2644 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2645 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2646 
2647 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2648 {
2649 #if defined(CONFIG_ACPI)
2650 	struct amdgpu_dm_backlight_caps caps;
2651 
2652 	if (dm->backlight_caps.caps_valid)
2653 		return;
2654 
2655 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2656 	if (caps.caps_valid) {
2657 		dm->backlight_caps.caps_valid = true;
2658 		if (caps.aux_support)
2659 			return;
2660 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
2661 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
2662 	} else {
2663 		dm->backlight_caps.min_input_signal =
2664 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2665 		dm->backlight_caps.max_input_signal =
2666 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2667 	}
2668 #else
2669 	if (dm->backlight_caps.aux_support)
2670 		return;
2671 
2672 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2673 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2674 #endif
2675 }
2676 
2677 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2678 {
2679 	bool rc;
2680 
2681 	if (!link)
2682 		return 1;
2683 
2684 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
2685 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2686 
2687 	return rc ? 0 : 1;
2688 }
2689 
2690 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2691 			      const uint32_t user_brightness)
2692 {
2693 	u32 min, max, conversion_pace;
2694 	u32 brightness = user_brightness;
2695 
2696 	if (!caps)
2697 		goto out;
2698 
2699 	if (!caps->aux_support) {
2700 		max = caps->max_input_signal;
2701 		min = caps->min_input_signal;
2702 		/*
2703 		 * The brightness input is in the range 0-255
2704 		 * It needs to be rescaled to be between the
2705 		 * requested min and max input signal
2706 		 * It also needs to be scaled up by 0x101 to
2707 		 * match the DC interface which has a range of
2708 		 * 0 to 0xffff
2709 		 */
2710 		conversion_pace = 0x101;
2711 		brightness =
2712 			user_brightness
2713 			* conversion_pace
2714 			* (max - min)
2715 			/ AMDGPU_MAX_BL_LEVEL
2716 			+ min * conversion_pace;
2717 	} else {
2718 		/* TODO
2719 		 * We are doing a linear interpolation here, which is OK but
2720 		 * does not provide the optimal result. We probably want
2721 		 * something close to the Perceptual Quantizer (PQ) curve.
2722 		 */
2723 		max = caps->aux_max_input_signal;
2724 		min = caps->aux_min_input_signal;
2725 
2726 		brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2727 			       + user_brightness * max;
2728 		// Multiple the value by 1000 since we use millinits
2729 		brightness *= 1000;
2730 		brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
2731 	}
2732 
2733 out:
2734 	return brightness;
2735 }
2736 
2737 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2738 {
2739 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2740 	struct amdgpu_dm_backlight_caps caps;
2741 	struct dc_link *link = NULL;
2742 	u32 brightness;
2743 	bool rc;
2744 
2745 	amdgpu_dm_update_backlight_caps(dm);
2746 	caps = dm->backlight_caps;
2747 
2748 	link = (struct dc_link *)dm->backlight_link;
2749 
2750 	brightness = convert_brightness(&caps, bd->props.brightness);
2751 	// Change brightness based on AUX property
2752 	if (caps.aux_support)
2753 		return set_backlight_via_aux(link, brightness);
2754 
2755 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2756 
2757 	return rc ? 0 : 1;
2758 }
2759 
2760 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2761 {
2762 	struct amdgpu_display_manager *dm = bl_get_data(bd);
2763 	int ret = dc_link_get_backlight_level(dm->backlight_link);
2764 
2765 	if (ret == DC_ERROR_UNEXPECTED)
2766 		return bd->props.brightness;
2767 	return ret;
2768 }
2769 
2770 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2771 	.options = BL_CORE_SUSPENDRESUME,
2772 	.get_brightness = amdgpu_dm_backlight_get_brightness,
2773 	.update_status	= amdgpu_dm_backlight_update_status,
2774 };
2775 
2776 static void
2777 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2778 {
2779 	char bl_name[16];
2780 	struct backlight_properties props = { 0 };
2781 
2782 	amdgpu_dm_update_backlight_caps(dm);
2783 
2784 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2785 	props.brightness = AMDGPU_MAX_BL_LEVEL;
2786 	props.type = BACKLIGHT_RAW;
2787 
2788 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2789 			dm->adev->ddev->primary->index);
2790 
2791 	dm->backlight_dev = backlight_device_register(bl_name,
2792 			dm->adev->ddev->dev,
2793 			dm,
2794 			&amdgpu_dm_backlight_ops,
2795 			&props);
2796 
2797 	if (IS_ERR(dm->backlight_dev))
2798 		DRM_ERROR("DM: Backlight registration failed!\n");
2799 	else
2800 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2801 }
2802 
2803 #endif
2804 
2805 static int initialize_plane(struct amdgpu_display_manager *dm,
2806 			    struct amdgpu_mode_info *mode_info, int plane_id,
2807 			    enum drm_plane_type plane_type,
2808 			    const struct dc_plane_cap *plane_cap)
2809 {
2810 	struct drm_plane *plane;
2811 	unsigned long possible_crtcs;
2812 	int ret = 0;
2813 
2814 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2815 	if (!plane) {
2816 		DRM_ERROR("KMS: Failed to allocate plane\n");
2817 		return -ENOMEM;
2818 	}
2819 	plane->type = plane_type;
2820 
2821 	/*
2822 	 * HACK: IGT tests expect that the primary plane for a CRTC
2823 	 * can only have one possible CRTC. Only expose support for
2824 	 * any CRTC if they're not going to be used as a primary plane
2825 	 * for a CRTC - like overlay or underlay planes.
2826 	 */
2827 	possible_crtcs = 1 << plane_id;
2828 	if (plane_id >= dm->dc->caps.max_streams)
2829 		possible_crtcs = 0xff;
2830 
2831 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2832 
2833 	if (ret) {
2834 		DRM_ERROR("KMS: Failed to initialize plane\n");
2835 		kfree(plane);
2836 		return ret;
2837 	}
2838 
2839 	if (mode_info)
2840 		mode_info->planes[plane_id] = plane;
2841 
2842 	return ret;
2843 }
2844 
2845 
2846 static void register_backlight_device(struct amdgpu_display_manager *dm,
2847 				      struct dc_link *link)
2848 {
2849 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2850 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2851 
2852 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2853 	    link->type != dc_connection_none) {
2854 		/*
2855 		 * Event if registration failed, we should continue with
2856 		 * DM initialization because not having a backlight control
2857 		 * is better then a black screen.
2858 		 */
2859 		amdgpu_dm_register_backlight_device(dm);
2860 
2861 		if (dm->backlight_dev)
2862 			dm->backlight_link = link;
2863 	}
2864 #endif
2865 }
2866 
2867 
2868 /*
2869  * In this architecture, the association
2870  * connector -> encoder -> crtc
2871  * id not really requried. The crtc and connector will hold the
2872  * display_index as an abstraction to use with DAL component
2873  *
2874  * Returns 0 on success
2875  */
2876 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2877 {
2878 	struct amdgpu_display_manager *dm = &adev->dm;
2879 	int32_t i;
2880 	struct amdgpu_dm_connector *aconnector = NULL;
2881 	struct amdgpu_encoder *aencoder = NULL;
2882 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
2883 	uint32_t link_cnt;
2884 	int32_t primary_planes;
2885 	enum dc_connection_type new_connection_type = dc_connection_none;
2886 	const struct dc_plane_cap *plane;
2887 
2888 	link_cnt = dm->dc->caps.max_links;
2889 	if (amdgpu_dm_mode_config_init(dm->adev)) {
2890 		DRM_ERROR("DM: Failed to initialize mode config\n");
2891 		return -EINVAL;
2892 	}
2893 
2894 	/* There is one primary plane per CRTC */
2895 	primary_planes = dm->dc->caps.max_streams;
2896 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2897 
2898 	/*
2899 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
2900 	 * Order is reversed to match iteration order in atomic check.
2901 	 */
2902 	for (i = (primary_planes - 1); i >= 0; i--) {
2903 		plane = &dm->dc->caps.planes[i];
2904 
2905 		if (initialize_plane(dm, mode_info, i,
2906 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
2907 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
2908 			goto fail;
2909 		}
2910 	}
2911 
2912 	/*
2913 	 * Initialize overlay planes, index starting after primary planes.
2914 	 * These planes have a higher DRM index than the primary planes since
2915 	 * they should be considered as having a higher z-order.
2916 	 * Order is reversed to match iteration order in atomic check.
2917 	 *
2918 	 * Only support DCN for now, and only expose one so we don't encourage
2919 	 * userspace to use up all the pipes.
2920 	 */
2921 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2922 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2923 
2924 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2925 			continue;
2926 
2927 		if (!plane->blends_with_above || !plane->blends_with_below)
2928 			continue;
2929 
2930 		if (!plane->pixel_format_support.argb8888)
2931 			continue;
2932 
2933 		if (initialize_plane(dm, NULL, primary_planes + i,
2934 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
2935 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2936 			goto fail;
2937 		}
2938 
2939 		/* Only create one overlay plane. */
2940 		break;
2941 	}
2942 
2943 	for (i = 0; i < dm->dc->caps.max_streams; i++)
2944 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2945 			DRM_ERROR("KMS: Failed to initialize crtc\n");
2946 			goto fail;
2947 		}
2948 
2949 	dm->display_indexes_num = dm->dc->caps.max_streams;
2950 
2951 	/* loops over all connectors on the board */
2952 	for (i = 0; i < link_cnt; i++) {
2953 		struct dc_link *link = NULL;
2954 
2955 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2956 			DRM_ERROR(
2957 				"KMS: Cannot support more than %d display indexes\n",
2958 					AMDGPU_DM_MAX_DISPLAY_INDEX);
2959 			continue;
2960 		}
2961 
2962 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2963 		if (!aconnector)
2964 			goto fail;
2965 
2966 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2967 		if (!aencoder)
2968 			goto fail;
2969 
2970 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2971 			DRM_ERROR("KMS: Failed to initialize encoder\n");
2972 			goto fail;
2973 		}
2974 
2975 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2976 			DRM_ERROR("KMS: Failed to initialize connector\n");
2977 			goto fail;
2978 		}
2979 
2980 		link = dc_get_link_at_index(dm->dc, i);
2981 
2982 		if (!dc_link_detect_sink(link, &new_connection_type))
2983 			DRM_ERROR("KMS: Failed to detect connector\n");
2984 
2985 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2986 			emulated_link_detect(link);
2987 			amdgpu_dm_update_connector_after_detect(aconnector);
2988 
2989 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2990 			amdgpu_dm_update_connector_after_detect(aconnector);
2991 			register_backlight_device(dm, link);
2992 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2993 				amdgpu_dm_set_psr_caps(link);
2994 		}
2995 
2996 
2997 	}
2998 
2999 	/* Software is initialized. Now we can register interrupt handlers. */
3000 	switch (adev->asic_type) {
3001 	case CHIP_BONAIRE:
3002 	case CHIP_HAWAII:
3003 	case CHIP_KAVERI:
3004 	case CHIP_KABINI:
3005 	case CHIP_MULLINS:
3006 	case CHIP_TONGA:
3007 	case CHIP_FIJI:
3008 	case CHIP_CARRIZO:
3009 	case CHIP_STONEY:
3010 	case CHIP_POLARIS11:
3011 	case CHIP_POLARIS10:
3012 	case CHIP_POLARIS12:
3013 	case CHIP_VEGAM:
3014 	case CHIP_VEGA10:
3015 	case CHIP_VEGA12:
3016 	case CHIP_VEGA20:
3017 		if (dce110_register_irq_handlers(dm->adev)) {
3018 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3019 			goto fail;
3020 		}
3021 		break;
3022 #if defined(CONFIG_DRM_AMD_DC_DCN)
3023 	case CHIP_RAVEN:
3024 	case CHIP_NAVI12:
3025 	case CHIP_NAVI10:
3026 	case CHIP_NAVI14:
3027 	case CHIP_RENOIR:
3028 		if (dcn10_register_irq_handlers(dm->adev)) {
3029 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3030 			goto fail;
3031 		}
3032 		break;
3033 #endif
3034 	default:
3035 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3036 		goto fail;
3037 	}
3038 
3039 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3040 		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
3041 
3042 	/* No userspace support. */
3043 	dm->dc->debug.disable_tri_buf = true;
3044 
3045 	return 0;
3046 fail:
3047 	kfree(aencoder);
3048 	kfree(aconnector);
3049 
3050 	return -EINVAL;
3051 }
3052 
3053 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3054 {
3055 	drm_mode_config_cleanup(dm->ddev);
3056 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3057 	return;
3058 }
3059 
3060 /******************************************************************************
3061  * amdgpu_display_funcs functions
3062  *****************************************************************************/
3063 
3064 /*
3065  * dm_bandwidth_update - program display watermarks
3066  *
3067  * @adev: amdgpu_device pointer
3068  *
3069  * Calculate and program the display watermarks and line buffer allocation.
3070  */
3071 static void dm_bandwidth_update(struct amdgpu_device *adev)
3072 {
3073 	/* TODO: implement later */
3074 }
3075 
3076 static const struct amdgpu_display_funcs dm_display_funcs = {
3077 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3078 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3079 	.backlight_set_level = NULL, /* never called for DC */
3080 	.backlight_get_level = NULL, /* never called for DC */
3081 	.hpd_sense = NULL,/* called unconditionally */
3082 	.hpd_set_polarity = NULL, /* called unconditionally */
3083 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3084 	.page_flip_get_scanoutpos =
3085 		dm_crtc_get_scanoutpos,/* called unconditionally */
3086 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3087 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3088 };
3089 
3090 #if defined(CONFIG_DEBUG_KERNEL_DC)
3091 
3092 static ssize_t s3_debug_store(struct device *device,
3093 			      struct device_attribute *attr,
3094 			      const char *buf,
3095 			      size_t count)
3096 {
3097 	int ret;
3098 	int s3_state;
3099 	struct drm_device *drm_dev = dev_get_drvdata(device);
3100 	struct amdgpu_device *adev = drm_dev->dev_private;
3101 
3102 	ret = kstrtoint(buf, 0, &s3_state);
3103 
3104 	if (ret == 0) {
3105 		if (s3_state) {
3106 			dm_resume(adev);
3107 			drm_kms_helper_hotplug_event(adev->ddev);
3108 		} else
3109 			dm_suspend(adev);
3110 	}
3111 
3112 	return ret == 0 ? count : 0;
3113 }
3114 
3115 DEVICE_ATTR_WO(s3_debug);
3116 
3117 #endif
3118 
3119 static int dm_early_init(void *handle)
3120 {
3121 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3122 
3123 	switch (adev->asic_type) {
3124 	case CHIP_BONAIRE:
3125 	case CHIP_HAWAII:
3126 		adev->mode_info.num_crtc = 6;
3127 		adev->mode_info.num_hpd = 6;
3128 		adev->mode_info.num_dig = 6;
3129 		break;
3130 	case CHIP_KAVERI:
3131 		adev->mode_info.num_crtc = 4;
3132 		adev->mode_info.num_hpd = 6;
3133 		adev->mode_info.num_dig = 7;
3134 		break;
3135 	case CHIP_KABINI:
3136 	case CHIP_MULLINS:
3137 		adev->mode_info.num_crtc = 2;
3138 		adev->mode_info.num_hpd = 6;
3139 		adev->mode_info.num_dig = 6;
3140 		break;
3141 	case CHIP_FIJI:
3142 	case CHIP_TONGA:
3143 		adev->mode_info.num_crtc = 6;
3144 		adev->mode_info.num_hpd = 6;
3145 		adev->mode_info.num_dig = 7;
3146 		break;
3147 	case CHIP_CARRIZO:
3148 		adev->mode_info.num_crtc = 3;
3149 		adev->mode_info.num_hpd = 6;
3150 		adev->mode_info.num_dig = 9;
3151 		break;
3152 	case CHIP_STONEY:
3153 		adev->mode_info.num_crtc = 2;
3154 		adev->mode_info.num_hpd = 6;
3155 		adev->mode_info.num_dig = 9;
3156 		break;
3157 	case CHIP_POLARIS11:
3158 	case CHIP_POLARIS12:
3159 		adev->mode_info.num_crtc = 5;
3160 		adev->mode_info.num_hpd = 5;
3161 		adev->mode_info.num_dig = 5;
3162 		break;
3163 	case CHIP_POLARIS10:
3164 	case CHIP_VEGAM:
3165 		adev->mode_info.num_crtc = 6;
3166 		adev->mode_info.num_hpd = 6;
3167 		adev->mode_info.num_dig = 6;
3168 		break;
3169 	case CHIP_VEGA10:
3170 	case CHIP_VEGA12:
3171 	case CHIP_VEGA20:
3172 		adev->mode_info.num_crtc = 6;
3173 		adev->mode_info.num_hpd = 6;
3174 		adev->mode_info.num_dig = 6;
3175 		break;
3176 #if defined(CONFIG_DRM_AMD_DC_DCN)
3177 	case CHIP_RAVEN:
3178 		adev->mode_info.num_crtc = 4;
3179 		adev->mode_info.num_hpd = 4;
3180 		adev->mode_info.num_dig = 4;
3181 		break;
3182 #endif
3183 	case CHIP_NAVI10:
3184 	case CHIP_NAVI12:
3185 		adev->mode_info.num_crtc = 6;
3186 		adev->mode_info.num_hpd = 6;
3187 		adev->mode_info.num_dig = 6;
3188 		break;
3189 	case CHIP_NAVI14:
3190 		adev->mode_info.num_crtc = 5;
3191 		adev->mode_info.num_hpd = 5;
3192 		adev->mode_info.num_dig = 5;
3193 		break;
3194 	case CHIP_RENOIR:
3195 		adev->mode_info.num_crtc = 4;
3196 		adev->mode_info.num_hpd = 4;
3197 		adev->mode_info.num_dig = 4;
3198 		break;
3199 	default:
3200 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3201 		return -EINVAL;
3202 	}
3203 
3204 	amdgpu_dm_set_irq_funcs(adev);
3205 
3206 	if (adev->mode_info.funcs == NULL)
3207 		adev->mode_info.funcs = &dm_display_funcs;
3208 
3209 	/*
3210 	 * Note: Do NOT change adev->audio_endpt_rreg and
3211 	 * adev->audio_endpt_wreg because they are initialised in
3212 	 * amdgpu_device_init()
3213 	 */
3214 #if defined(CONFIG_DEBUG_KERNEL_DC)
3215 	device_create_file(
3216 		adev->ddev->dev,
3217 		&dev_attr_s3_debug);
3218 #endif
3219 
3220 	return 0;
3221 }
3222 
3223 static bool modeset_required(struct drm_crtc_state *crtc_state,
3224 			     struct dc_stream_state *new_stream,
3225 			     struct dc_stream_state *old_stream)
3226 {
3227 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3228 		return false;
3229 
3230 	if (!crtc_state->enable)
3231 		return false;
3232 
3233 	return crtc_state->active;
3234 }
3235 
3236 static bool modereset_required(struct drm_crtc_state *crtc_state)
3237 {
3238 	if (!drm_atomic_crtc_needs_modeset(crtc_state))
3239 		return false;
3240 
3241 	return !crtc_state->enable || !crtc_state->active;
3242 }
3243 
3244 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3245 {
3246 	drm_encoder_cleanup(encoder);
3247 	kfree(encoder);
3248 }
3249 
3250 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3251 	.destroy = amdgpu_dm_encoder_destroy,
3252 };
3253 
3254 
3255 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3256 				struct dc_scaling_info *scaling_info)
3257 {
3258 	int scale_w, scale_h;
3259 
3260 	memset(scaling_info, 0, sizeof(*scaling_info));
3261 
3262 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3263 	scaling_info->src_rect.x = state->src_x >> 16;
3264 	scaling_info->src_rect.y = state->src_y >> 16;
3265 
3266 	scaling_info->src_rect.width = state->src_w >> 16;
3267 	if (scaling_info->src_rect.width == 0)
3268 		return -EINVAL;
3269 
3270 	scaling_info->src_rect.height = state->src_h >> 16;
3271 	if (scaling_info->src_rect.height == 0)
3272 		return -EINVAL;
3273 
3274 	scaling_info->dst_rect.x = state->crtc_x;
3275 	scaling_info->dst_rect.y = state->crtc_y;
3276 
3277 	if (state->crtc_w == 0)
3278 		return -EINVAL;
3279 
3280 	scaling_info->dst_rect.width = state->crtc_w;
3281 
3282 	if (state->crtc_h == 0)
3283 		return -EINVAL;
3284 
3285 	scaling_info->dst_rect.height = state->crtc_h;
3286 
3287 	/* DRM doesn't specify clipping on destination output. */
3288 	scaling_info->clip_rect = scaling_info->dst_rect;
3289 
3290 	/* TODO: Validate scaling per-format with DC plane caps */
3291 	scale_w = scaling_info->dst_rect.width * 1000 /
3292 		  scaling_info->src_rect.width;
3293 
3294 	if (scale_w < 250 || scale_w > 16000)
3295 		return -EINVAL;
3296 
3297 	scale_h = scaling_info->dst_rect.height * 1000 /
3298 		  scaling_info->src_rect.height;
3299 
3300 	if (scale_h < 250 || scale_h > 16000)
3301 		return -EINVAL;
3302 
3303 	/*
3304 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3305 	 * assume reasonable defaults based on the format.
3306 	 */
3307 
3308 	return 0;
3309 }
3310 
3311 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3312 		       uint64_t *tiling_flags)
3313 {
3314 	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3315 	int r = amdgpu_bo_reserve(rbo, false);
3316 
3317 	if (unlikely(r)) {
3318 		/* Don't show error message when returning -ERESTARTSYS */
3319 		if (r != -ERESTARTSYS)
3320 			DRM_ERROR("Unable to reserve buffer: %d\n", r);
3321 		return r;
3322 	}
3323 
3324 	if (tiling_flags)
3325 		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3326 
3327 	amdgpu_bo_unreserve(rbo);
3328 
3329 	return r;
3330 }
3331 
3332 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3333 {
3334 	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3335 
3336 	return offset ? (address + offset * 256) : 0;
3337 }
3338 
3339 static int
3340 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3341 			  const struct amdgpu_framebuffer *afb,
3342 			  const enum surface_pixel_format format,
3343 			  const enum dc_rotation_angle rotation,
3344 			  const struct plane_size *plane_size,
3345 			  const union dc_tiling_info *tiling_info,
3346 			  const uint64_t info,
3347 			  struct dc_plane_dcc_param *dcc,
3348 			  struct dc_plane_address *address)
3349 {
3350 	struct dc *dc = adev->dm.dc;
3351 	struct dc_dcc_surface_param input;
3352 	struct dc_surface_dcc_cap output;
3353 	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3354 	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3355 	uint64_t dcc_address;
3356 
3357 	memset(&input, 0, sizeof(input));
3358 	memset(&output, 0, sizeof(output));
3359 
3360 	if (!offset)
3361 		return 0;
3362 
3363 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3364 		return 0;
3365 
3366 	if (!dc->cap_funcs.get_dcc_compression_cap)
3367 		return -EINVAL;
3368 
3369 	input.format = format;
3370 	input.surface_size.width = plane_size->surface_size.width;
3371 	input.surface_size.height = plane_size->surface_size.height;
3372 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3373 
3374 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3375 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3376 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3377 		input.scan = SCAN_DIRECTION_VERTICAL;
3378 
3379 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3380 		return -EINVAL;
3381 
3382 	if (!output.capable)
3383 		return -EINVAL;
3384 
3385 	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3386 		return -EINVAL;
3387 
3388 	dcc->enable = 1;
3389 	dcc->meta_pitch =
3390 		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3391 	dcc->independent_64b_blks = i64b;
3392 
3393 	dcc_address = get_dcc_address(afb->address, info);
3394 	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3395 	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3396 
3397 	return 0;
3398 }
3399 
3400 static int
3401 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3402 			     const struct amdgpu_framebuffer *afb,
3403 			     const enum surface_pixel_format format,
3404 			     const enum dc_rotation_angle rotation,
3405 			     const uint64_t tiling_flags,
3406 			     union dc_tiling_info *tiling_info,
3407 			     struct plane_size *plane_size,
3408 			     struct dc_plane_dcc_param *dcc,
3409 			     struct dc_plane_address *address)
3410 {
3411 	const struct drm_framebuffer *fb = &afb->base;
3412 	int ret;
3413 
3414 	memset(tiling_info, 0, sizeof(*tiling_info));
3415 	memset(plane_size, 0, sizeof(*plane_size));
3416 	memset(dcc, 0, sizeof(*dcc));
3417 	memset(address, 0, sizeof(*address));
3418 
3419 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3420 		plane_size->surface_size.x = 0;
3421 		plane_size->surface_size.y = 0;
3422 		plane_size->surface_size.width = fb->width;
3423 		plane_size->surface_size.height = fb->height;
3424 		plane_size->surface_pitch =
3425 			fb->pitches[0] / fb->format->cpp[0];
3426 
3427 		address->type = PLN_ADDR_TYPE_GRAPHICS;
3428 		address->grph.addr.low_part = lower_32_bits(afb->address);
3429 		address->grph.addr.high_part = upper_32_bits(afb->address);
3430 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3431 		uint64_t chroma_addr = afb->address + fb->offsets[1];
3432 
3433 		plane_size->surface_size.x = 0;
3434 		plane_size->surface_size.y = 0;
3435 		plane_size->surface_size.width = fb->width;
3436 		plane_size->surface_size.height = fb->height;
3437 		plane_size->surface_pitch =
3438 			fb->pitches[0] / fb->format->cpp[0];
3439 
3440 		plane_size->chroma_size.x = 0;
3441 		plane_size->chroma_size.y = 0;
3442 		/* TODO: set these based on surface format */
3443 		plane_size->chroma_size.width = fb->width / 2;
3444 		plane_size->chroma_size.height = fb->height / 2;
3445 
3446 		plane_size->chroma_pitch =
3447 			fb->pitches[1] / fb->format->cpp[1];
3448 
3449 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3450 		address->video_progressive.luma_addr.low_part =
3451 			lower_32_bits(afb->address);
3452 		address->video_progressive.luma_addr.high_part =
3453 			upper_32_bits(afb->address);
3454 		address->video_progressive.chroma_addr.low_part =
3455 			lower_32_bits(chroma_addr);
3456 		address->video_progressive.chroma_addr.high_part =
3457 			upper_32_bits(chroma_addr);
3458 	}
3459 
3460 	/* Fill GFX8 params */
3461 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3462 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3463 
3464 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3465 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3466 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3467 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3468 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3469 
3470 		/* XXX fix me for VI */
3471 		tiling_info->gfx8.num_banks = num_banks;
3472 		tiling_info->gfx8.array_mode =
3473 				DC_ARRAY_2D_TILED_THIN1;
3474 		tiling_info->gfx8.tile_split = tile_split;
3475 		tiling_info->gfx8.bank_width = bankw;
3476 		tiling_info->gfx8.bank_height = bankh;
3477 		tiling_info->gfx8.tile_aspect = mtaspect;
3478 		tiling_info->gfx8.tile_mode =
3479 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3480 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3481 			== DC_ARRAY_1D_TILED_THIN1) {
3482 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3483 	}
3484 
3485 	tiling_info->gfx8.pipe_config =
3486 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3487 
3488 	if (adev->asic_type == CHIP_VEGA10 ||
3489 	    adev->asic_type == CHIP_VEGA12 ||
3490 	    adev->asic_type == CHIP_VEGA20 ||
3491 	    adev->asic_type == CHIP_NAVI10 ||
3492 	    adev->asic_type == CHIP_NAVI14 ||
3493 	    adev->asic_type == CHIP_NAVI12 ||
3494 	    adev->asic_type == CHIP_RENOIR ||
3495 	    adev->asic_type == CHIP_RAVEN) {
3496 		/* Fill GFX9 params */
3497 		tiling_info->gfx9.num_pipes =
3498 			adev->gfx.config.gb_addr_config_fields.num_pipes;
3499 		tiling_info->gfx9.num_banks =
3500 			adev->gfx.config.gb_addr_config_fields.num_banks;
3501 		tiling_info->gfx9.pipe_interleave =
3502 			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3503 		tiling_info->gfx9.num_shader_engines =
3504 			adev->gfx.config.gb_addr_config_fields.num_se;
3505 		tiling_info->gfx9.max_compressed_frags =
3506 			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3507 		tiling_info->gfx9.num_rb_per_se =
3508 			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3509 		tiling_info->gfx9.swizzle =
3510 			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3511 		tiling_info->gfx9.shaderEnable = 1;
3512 
3513 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3514 						plane_size, tiling_info,
3515 						tiling_flags, dcc, address);
3516 		if (ret)
3517 			return ret;
3518 	}
3519 
3520 	return 0;
3521 }
3522 
3523 static void
3524 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3525 			       bool *per_pixel_alpha, bool *global_alpha,
3526 			       int *global_alpha_value)
3527 {
3528 	*per_pixel_alpha = false;
3529 	*global_alpha = false;
3530 	*global_alpha_value = 0xff;
3531 
3532 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3533 		return;
3534 
3535 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3536 		static const uint32_t alpha_formats[] = {
3537 			DRM_FORMAT_ARGB8888,
3538 			DRM_FORMAT_RGBA8888,
3539 			DRM_FORMAT_ABGR8888,
3540 		};
3541 		uint32_t format = plane_state->fb->format->format;
3542 		unsigned int i;
3543 
3544 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3545 			if (format == alpha_formats[i]) {
3546 				*per_pixel_alpha = true;
3547 				break;
3548 			}
3549 		}
3550 	}
3551 
3552 	if (plane_state->alpha < 0xffff) {
3553 		*global_alpha = true;
3554 		*global_alpha_value = plane_state->alpha >> 8;
3555 	}
3556 }
3557 
3558 static int
3559 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3560 			    const enum surface_pixel_format format,
3561 			    enum dc_color_space *color_space)
3562 {
3563 	bool full_range;
3564 
3565 	*color_space = COLOR_SPACE_SRGB;
3566 
3567 	/* DRM color properties only affect non-RGB formats. */
3568 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3569 		return 0;
3570 
3571 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3572 
3573 	switch (plane_state->color_encoding) {
3574 	case DRM_COLOR_YCBCR_BT601:
3575 		if (full_range)
3576 			*color_space = COLOR_SPACE_YCBCR601;
3577 		else
3578 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
3579 		break;
3580 
3581 	case DRM_COLOR_YCBCR_BT709:
3582 		if (full_range)
3583 			*color_space = COLOR_SPACE_YCBCR709;
3584 		else
3585 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
3586 		break;
3587 
3588 	case DRM_COLOR_YCBCR_BT2020:
3589 		if (full_range)
3590 			*color_space = COLOR_SPACE_2020_YCBCR;
3591 		else
3592 			return -EINVAL;
3593 		break;
3594 
3595 	default:
3596 		return -EINVAL;
3597 	}
3598 
3599 	return 0;
3600 }
3601 
3602 static int
3603 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3604 			    const struct drm_plane_state *plane_state,
3605 			    const uint64_t tiling_flags,
3606 			    struct dc_plane_info *plane_info,
3607 			    struct dc_plane_address *address)
3608 {
3609 	const struct drm_framebuffer *fb = plane_state->fb;
3610 	const struct amdgpu_framebuffer *afb =
3611 		to_amdgpu_framebuffer(plane_state->fb);
3612 	struct drm_format_name_buf format_name;
3613 	int ret;
3614 
3615 	memset(plane_info, 0, sizeof(*plane_info));
3616 
3617 	switch (fb->format->format) {
3618 	case DRM_FORMAT_C8:
3619 		plane_info->format =
3620 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3621 		break;
3622 	case DRM_FORMAT_RGB565:
3623 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3624 		break;
3625 	case DRM_FORMAT_XRGB8888:
3626 	case DRM_FORMAT_ARGB8888:
3627 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3628 		break;
3629 	case DRM_FORMAT_XRGB2101010:
3630 	case DRM_FORMAT_ARGB2101010:
3631 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3632 		break;
3633 	case DRM_FORMAT_XBGR2101010:
3634 	case DRM_FORMAT_ABGR2101010:
3635 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3636 		break;
3637 	case DRM_FORMAT_XBGR8888:
3638 	case DRM_FORMAT_ABGR8888:
3639 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3640 		break;
3641 	case DRM_FORMAT_NV21:
3642 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3643 		break;
3644 	case DRM_FORMAT_NV12:
3645 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3646 		break;
3647 	case DRM_FORMAT_P010:
3648 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3649 		break;
3650 	default:
3651 		DRM_ERROR(
3652 			"Unsupported screen format %s\n",
3653 			drm_get_format_name(fb->format->format, &format_name));
3654 		return -EINVAL;
3655 	}
3656 
3657 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3658 	case DRM_MODE_ROTATE_0:
3659 		plane_info->rotation = ROTATION_ANGLE_0;
3660 		break;
3661 	case DRM_MODE_ROTATE_90:
3662 		plane_info->rotation = ROTATION_ANGLE_90;
3663 		break;
3664 	case DRM_MODE_ROTATE_180:
3665 		plane_info->rotation = ROTATION_ANGLE_180;
3666 		break;
3667 	case DRM_MODE_ROTATE_270:
3668 		plane_info->rotation = ROTATION_ANGLE_270;
3669 		break;
3670 	default:
3671 		plane_info->rotation = ROTATION_ANGLE_0;
3672 		break;
3673 	}
3674 
3675 	plane_info->visible = true;
3676 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3677 
3678 	plane_info->layer_index = 0;
3679 
3680 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
3681 					  &plane_info->color_space);
3682 	if (ret)
3683 		return ret;
3684 
3685 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3686 					   plane_info->rotation, tiling_flags,
3687 					   &plane_info->tiling_info,
3688 					   &plane_info->plane_size,
3689 					   &plane_info->dcc, address);
3690 	if (ret)
3691 		return ret;
3692 
3693 	fill_blending_from_plane_state(
3694 		plane_state, &plane_info->per_pixel_alpha,
3695 		&plane_info->global_alpha, &plane_info->global_alpha_value);
3696 
3697 	return 0;
3698 }
3699 
3700 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3701 				    struct dm_plane_state *dm_plane_state,
3702 				    struct drm_plane_state *plane_state,
3703 				    struct drm_crtc_state *crtc_state)
3704 {
3705 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3706 	struct dc_plane_state *dc_plane_state = dm_plane_state->dc_state;
3707 	const struct amdgpu_framebuffer *amdgpu_fb =
3708 		to_amdgpu_framebuffer(plane_state->fb);
3709 	struct dc_scaling_info scaling_info;
3710 	struct dc_plane_info plane_info;
3711 	uint64_t tiling_flags;
3712 	int ret;
3713 
3714 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
3715 	if (ret)
3716 		return ret;
3717 
3718 	dc_plane_state->src_rect = scaling_info.src_rect;
3719 	dc_plane_state->dst_rect = scaling_info.dst_rect;
3720 	dc_plane_state->clip_rect = scaling_info.clip_rect;
3721 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3722 
3723 	ret = get_fb_info(amdgpu_fb, &tiling_flags);
3724 	if (ret)
3725 		return ret;
3726 
3727 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3728 					  &plane_info,
3729 					  &dc_plane_state->address);
3730 	if (ret)
3731 		return ret;
3732 
3733 	dc_plane_state->format = plane_info.format;
3734 	dc_plane_state->color_space = plane_info.color_space;
3735 	dc_plane_state->format = plane_info.format;
3736 	dc_plane_state->plane_size = plane_info.plane_size;
3737 	dc_plane_state->rotation = plane_info.rotation;
3738 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3739 	dc_plane_state->stereo_format = plane_info.stereo_format;
3740 	dc_plane_state->tiling_info = plane_info.tiling_info;
3741 	dc_plane_state->visible = plane_info.visible;
3742 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3743 	dc_plane_state->global_alpha = plane_info.global_alpha;
3744 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3745 	dc_plane_state->dcc = plane_info.dcc;
3746 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
3747 
3748 	/*
3749 	 * Always set input transfer function, since plane state is refreshed
3750 	 * every time.
3751 	 */
3752 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dm_plane_state);
3753 	if (ret)
3754 		return ret;
3755 
3756 	return 0;
3757 }
3758 
3759 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3760 					   const struct dm_connector_state *dm_state,
3761 					   struct dc_stream_state *stream)
3762 {
3763 	enum amdgpu_rmx_type rmx_type;
3764 
3765 	struct rect src = { 0 }; /* viewport in composition space*/
3766 	struct rect dst = { 0 }; /* stream addressable area */
3767 
3768 	/* no mode. nothing to be done */
3769 	if (!mode)
3770 		return;
3771 
3772 	/* Full screen scaling by default */
3773 	src.width = mode->hdisplay;
3774 	src.height = mode->vdisplay;
3775 	dst.width = stream->timing.h_addressable;
3776 	dst.height = stream->timing.v_addressable;
3777 
3778 	if (dm_state) {
3779 		rmx_type = dm_state->scaling;
3780 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3781 			if (src.width * dst.height <
3782 					src.height * dst.width) {
3783 				/* height needs less upscaling/more downscaling */
3784 				dst.width = src.width *
3785 						dst.height / src.height;
3786 			} else {
3787 				/* width needs less upscaling/more downscaling */
3788 				dst.height = src.height *
3789 						dst.width / src.width;
3790 			}
3791 		} else if (rmx_type == RMX_CENTER) {
3792 			dst = src;
3793 		}
3794 
3795 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
3796 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
3797 
3798 		if (dm_state->underscan_enable) {
3799 			dst.x += dm_state->underscan_hborder / 2;
3800 			dst.y += dm_state->underscan_vborder / 2;
3801 			dst.width -= dm_state->underscan_hborder;
3802 			dst.height -= dm_state->underscan_vborder;
3803 		}
3804 	}
3805 
3806 	stream->src = src;
3807 	stream->dst = dst;
3808 
3809 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3810 			dst.x, dst.y, dst.width, dst.height);
3811 
3812 }
3813 
3814 static enum dc_color_depth
3815 convert_color_depth_from_display_info(const struct drm_connector *connector,
3816 				      const struct drm_connector_state *state,
3817 				      bool is_y420)
3818 {
3819 	uint8_t bpc;
3820 
3821 	if (is_y420) {
3822 		bpc = 8;
3823 
3824 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
3825 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3826 			bpc = 16;
3827 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3828 			bpc = 12;
3829 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3830 			bpc = 10;
3831 	} else {
3832 		bpc = (uint8_t)connector->display_info.bpc;
3833 		/* Assume 8 bpc by default if no bpc is specified. */
3834 		bpc = bpc ? bpc : 8;
3835 	}
3836 
3837 	if (!state)
3838 		state = connector->state;
3839 
3840 	if (state) {
3841 		/*
3842 		 * Cap display bpc based on the user requested value.
3843 		 *
3844 		 * The value for state->max_bpc may not correctly updated
3845 		 * depending on when the connector gets added to the state
3846 		 * or if this was called outside of atomic check, so it
3847 		 * can't be used directly.
3848 		 */
3849 		bpc = min(bpc, state->max_requested_bpc);
3850 
3851 		/* Round down to the nearest even number. */
3852 		bpc = bpc - (bpc & 1);
3853 	}
3854 
3855 	switch (bpc) {
3856 	case 0:
3857 		/*
3858 		 * Temporary Work around, DRM doesn't parse color depth for
3859 		 * EDID revision before 1.4
3860 		 * TODO: Fix edid parsing
3861 		 */
3862 		return COLOR_DEPTH_888;
3863 	case 6:
3864 		return COLOR_DEPTH_666;
3865 	case 8:
3866 		return COLOR_DEPTH_888;
3867 	case 10:
3868 		return COLOR_DEPTH_101010;
3869 	case 12:
3870 		return COLOR_DEPTH_121212;
3871 	case 14:
3872 		return COLOR_DEPTH_141414;
3873 	case 16:
3874 		return COLOR_DEPTH_161616;
3875 	default:
3876 		return COLOR_DEPTH_UNDEFINED;
3877 	}
3878 }
3879 
3880 static enum dc_aspect_ratio
3881 get_aspect_ratio(const struct drm_display_mode *mode_in)
3882 {
3883 	/* 1-1 mapping, since both enums follow the HDMI spec. */
3884 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3885 }
3886 
3887 static enum dc_color_space
3888 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3889 {
3890 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
3891 
3892 	switch (dc_crtc_timing->pixel_encoding)	{
3893 	case PIXEL_ENCODING_YCBCR422:
3894 	case PIXEL_ENCODING_YCBCR444:
3895 	case PIXEL_ENCODING_YCBCR420:
3896 	{
3897 		/*
3898 		 * 27030khz is the separation point between HDTV and SDTV
3899 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
3900 		 * respectively
3901 		 */
3902 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
3903 			if (dc_crtc_timing->flags.Y_ONLY)
3904 				color_space =
3905 					COLOR_SPACE_YCBCR709_LIMITED;
3906 			else
3907 				color_space = COLOR_SPACE_YCBCR709;
3908 		} else {
3909 			if (dc_crtc_timing->flags.Y_ONLY)
3910 				color_space =
3911 					COLOR_SPACE_YCBCR601_LIMITED;
3912 			else
3913 				color_space = COLOR_SPACE_YCBCR601;
3914 		}
3915 
3916 	}
3917 	break;
3918 	case PIXEL_ENCODING_RGB:
3919 		color_space = COLOR_SPACE_SRGB;
3920 		break;
3921 
3922 	default:
3923 		WARN_ON(1);
3924 		break;
3925 	}
3926 
3927 	return color_space;
3928 }
3929 
3930 static bool adjust_colour_depth_from_display_info(
3931 	struct dc_crtc_timing *timing_out,
3932 	const struct drm_display_info *info)
3933 {
3934 	enum dc_color_depth depth = timing_out->display_color_depth;
3935 	int normalized_clk;
3936 	do {
3937 		normalized_clk = timing_out->pix_clk_100hz / 10;
3938 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3939 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3940 			normalized_clk /= 2;
3941 		/* Adjusting pix clock following on HDMI spec based on colour depth */
3942 		switch (depth) {
3943 		case COLOR_DEPTH_888:
3944 			break;
3945 		case COLOR_DEPTH_101010:
3946 			normalized_clk = (normalized_clk * 30) / 24;
3947 			break;
3948 		case COLOR_DEPTH_121212:
3949 			normalized_clk = (normalized_clk * 36) / 24;
3950 			break;
3951 		case COLOR_DEPTH_161616:
3952 			normalized_clk = (normalized_clk * 48) / 24;
3953 			break;
3954 		default:
3955 			/* The above depths are the only ones valid for HDMI. */
3956 			return false;
3957 		}
3958 		if (normalized_clk <= info->max_tmds_clock) {
3959 			timing_out->display_color_depth = depth;
3960 			return true;
3961 		}
3962 	} while (--depth > COLOR_DEPTH_666);
3963 	return false;
3964 }
3965 
3966 static void fill_stream_properties_from_drm_display_mode(
3967 	struct dc_stream_state *stream,
3968 	const struct drm_display_mode *mode_in,
3969 	const struct drm_connector *connector,
3970 	const struct drm_connector_state *connector_state,
3971 	const struct dc_stream_state *old_stream)
3972 {
3973 	struct dc_crtc_timing *timing_out = &stream->timing;
3974 	const struct drm_display_info *info = &connector->display_info;
3975 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3976 	struct hdmi_vendor_infoframe hv_frame;
3977 	struct hdmi_avi_infoframe avi_frame;
3978 
3979 	memset(&hv_frame, 0, sizeof(hv_frame));
3980 	memset(&avi_frame, 0, sizeof(avi_frame));
3981 
3982 	timing_out->h_border_left = 0;
3983 	timing_out->h_border_right = 0;
3984 	timing_out->v_border_top = 0;
3985 	timing_out->v_border_bottom = 0;
3986 	/* TODO: un-hardcode */
3987 	if (drm_mode_is_420_only(info, mode_in)
3988 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3989 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3990 	else if (drm_mode_is_420_also(info, mode_in)
3991 			&& aconnector->force_yuv420_output)
3992 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3993 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
3994 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3995 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
3996 	else
3997 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
3998 
3999 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4000 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4001 		connector, connector_state,
4002 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
4003 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4004 	timing_out->hdmi_vic = 0;
4005 
4006 	if(old_stream) {
4007 		timing_out->vic = old_stream->timing.vic;
4008 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4009 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4010 	} else {
4011 		timing_out->vic = drm_match_cea_mode(mode_in);
4012 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4013 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4014 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4015 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4016 	}
4017 
4018 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4019 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4020 		timing_out->vic = avi_frame.video_code;
4021 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4022 		timing_out->hdmi_vic = hv_frame.vic;
4023 	}
4024 
4025 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4026 	timing_out->h_total = mode_in->crtc_htotal;
4027 	timing_out->h_sync_width =
4028 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4029 	timing_out->h_front_porch =
4030 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4031 	timing_out->v_total = mode_in->crtc_vtotal;
4032 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4033 	timing_out->v_front_porch =
4034 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4035 	timing_out->v_sync_width =
4036 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4037 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4038 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4039 
4040 	stream->output_color_space = get_output_color_space(timing_out);
4041 
4042 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4043 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4044 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4045 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4046 		    drm_mode_is_420_also(info, mode_in) &&
4047 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4048 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4049 			adjust_colour_depth_from_display_info(timing_out, info);
4050 		}
4051 	}
4052 }
4053 
4054 static void fill_audio_info(struct audio_info *audio_info,
4055 			    const struct drm_connector *drm_connector,
4056 			    const struct dc_sink *dc_sink)
4057 {
4058 	int i = 0;
4059 	int cea_revision = 0;
4060 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4061 
4062 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4063 	audio_info->product_id = edid_caps->product_id;
4064 
4065 	cea_revision = drm_connector->display_info.cea_rev;
4066 
4067 	strscpy(audio_info->display_name,
4068 		edid_caps->display_name,
4069 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4070 
4071 	if (cea_revision >= 3) {
4072 		audio_info->mode_count = edid_caps->audio_mode_count;
4073 
4074 		for (i = 0; i < audio_info->mode_count; ++i) {
4075 			audio_info->modes[i].format_code =
4076 					(enum audio_format_code)
4077 					(edid_caps->audio_modes[i].format_code);
4078 			audio_info->modes[i].channel_count =
4079 					edid_caps->audio_modes[i].channel_count;
4080 			audio_info->modes[i].sample_rates.all =
4081 					edid_caps->audio_modes[i].sample_rate;
4082 			audio_info->modes[i].sample_size =
4083 					edid_caps->audio_modes[i].sample_size;
4084 		}
4085 	}
4086 
4087 	audio_info->flags.all = edid_caps->speaker_flags;
4088 
4089 	/* TODO: We only check for the progressive mode, check for interlace mode too */
4090 	if (drm_connector->latency_present[0]) {
4091 		audio_info->video_latency = drm_connector->video_latency[0];
4092 		audio_info->audio_latency = drm_connector->audio_latency[0];
4093 	}
4094 
4095 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4096 
4097 }
4098 
4099 static void
4100 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4101 				      struct drm_display_mode *dst_mode)
4102 {
4103 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4104 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4105 	dst_mode->crtc_clock = src_mode->crtc_clock;
4106 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4107 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4108 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
4109 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4110 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
4111 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
4112 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4113 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4114 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4115 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4116 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4117 }
4118 
4119 static void
4120 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4121 					const struct drm_display_mode *native_mode,
4122 					bool scale_enabled)
4123 {
4124 	if (scale_enabled) {
4125 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4126 	} else if (native_mode->clock == drm_mode->clock &&
4127 			native_mode->htotal == drm_mode->htotal &&
4128 			native_mode->vtotal == drm_mode->vtotal) {
4129 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4130 	} else {
4131 		/* no scaling nor amdgpu inserted, no need to patch */
4132 	}
4133 }
4134 
4135 static struct dc_sink *
4136 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4137 {
4138 	struct dc_sink_init_data sink_init_data = { 0 };
4139 	struct dc_sink *sink = NULL;
4140 	sink_init_data.link = aconnector->dc_link;
4141 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4142 
4143 	sink = dc_sink_create(&sink_init_data);
4144 	if (!sink) {
4145 		DRM_ERROR("Failed to create sink!\n");
4146 		return NULL;
4147 	}
4148 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4149 
4150 	return sink;
4151 }
4152 
4153 static void set_multisync_trigger_params(
4154 		struct dc_stream_state *stream)
4155 {
4156 	if (stream->triggered_crtc_reset.enabled) {
4157 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4158 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4159 	}
4160 }
4161 
4162 static void set_master_stream(struct dc_stream_state *stream_set[],
4163 			      int stream_count)
4164 {
4165 	int j, highest_rfr = 0, master_stream = 0;
4166 
4167 	for (j = 0;  j < stream_count; j++) {
4168 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4169 			int refresh_rate = 0;
4170 
4171 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4172 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4173 			if (refresh_rate > highest_rfr) {
4174 				highest_rfr = refresh_rate;
4175 				master_stream = j;
4176 			}
4177 		}
4178 	}
4179 	for (j = 0;  j < stream_count; j++) {
4180 		if (stream_set[j])
4181 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4182 	}
4183 }
4184 
4185 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4186 {
4187 	int i = 0;
4188 
4189 	if (context->stream_count < 2)
4190 		return;
4191 	for (i = 0; i < context->stream_count ; i++) {
4192 		if (!context->streams[i])
4193 			continue;
4194 		/*
4195 		 * TODO: add a function to read AMD VSDB bits and set
4196 		 * crtc_sync_master.multi_sync_enabled flag
4197 		 * For now it's set to false
4198 		 */
4199 		set_multisync_trigger_params(context->streams[i]);
4200 	}
4201 	set_master_stream(context->streams, context->stream_count);
4202 }
4203 
4204 static struct dc_stream_state *
4205 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4206 		       const struct drm_display_mode *drm_mode,
4207 		       const struct dm_connector_state *dm_state,
4208 		       const struct dc_stream_state *old_stream)
4209 {
4210 	struct drm_display_mode *preferred_mode = NULL;
4211 	struct drm_connector *drm_connector;
4212 	const struct drm_connector_state *con_state =
4213 		dm_state ? &dm_state->base : NULL;
4214 	struct dc_stream_state *stream = NULL;
4215 	struct drm_display_mode mode = *drm_mode;
4216 	bool native_mode_found = false;
4217 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4218 	int mode_refresh;
4219 	int preferred_refresh = 0;
4220 #if defined(CONFIG_DRM_AMD_DC_DCN)
4221 	struct dsc_dec_dpcd_caps dsc_caps;
4222 #endif
4223 	uint32_t link_bandwidth_kbps;
4224 
4225 	struct dc_sink *sink = NULL;
4226 	if (aconnector == NULL) {
4227 		DRM_ERROR("aconnector is NULL!\n");
4228 		return stream;
4229 	}
4230 
4231 	drm_connector = &aconnector->base;
4232 
4233 	if (!aconnector->dc_sink) {
4234 		sink = create_fake_sink(aconnector);
4235 		if (!sink)
4236 			return stream;
4237 	} else {
4238 		sink = aconnector->dc_sink;
4239 		dc_sink_retain(sink);
4240 	}
4241 
4242 	stream = dc_create_stream_for_sink(sink);
4243 
4244 	if (stream == NULL) {
4245 		DRM_ERROR("Failed to create stream for sink!\n");
4246 		goto finish;
4247 	}
4248 
4249 	stream->dm_stream_context = aconnector;
4250 
4251 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4252 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4253 
4254 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4255 		/* Search for preferred mode */
4256 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4257 			native_mode_found = true;
4258 			break;
4259 		}
4260 	}
4261 	if (!native_mode_found)
4262 		preferred_mode = list_first_entry_or_null(
4263 				&aconnector->base.modes,
4264 				struct drm_display_mode,
4265 				head);
4266 
4267 	mode_refresh = drm_mode_vrefresh(&mode);
4268 
4269 	if (preferred_mode == NULL) {
4270 		/*
4271 		 * This may not be an error, the use case is when we have no
4272 		 * usermode calls to reset and set mode upon hotplug. In this
4273 		 * case, we call set mode ourselves to restore the previous mode
4274 		 * and the modelist may not be filled in in time.
4275 		 */
4276 		DRM_DEBUG_DRIVER("No preferred mode found\n");
4277 	} else {
4278 		decide_crtc_timing_for_drm_display_mode(
4279 				&mode, preferred_mode,
4280 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
4281 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
4282 	}
4283 
4284 	if (!dm_state)
4285 		drm_mode_set_crtcinfo(&mode, 0);
4286 
4287 	/*
4288 	* If scaling is enabled and refresh rate didn't change
4289 	* we copy the vic and polarities of the old timings
4290 	*/
4291 	if (!scale || mode_refresh != preferred_refresh)
4292 		fill_stream_properties_from_drm_display_mode(stream,
4293 			&mode, &aconnector->base, con_state, NULL);
4294 	else
4295 		fill_stream_properties_from_drm_display_mode(stream,
4296 			&mode, &aconnector->base, con_state, old_stream);
4297 
4298 	stream->timing.flags.DSC = 0;
4299 
4300 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4301 #if defined(CONFIG_DRM_AMD_DC_DCN)
4302 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4303 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4304 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4305 				      &dsc_caps);
4306 #endif
4307 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4308 							     dc_link_get_link_cap(aconnector->dc_link));
4309 
4310 #if defined(CONFIG_DRM_AMD_DC_DCN)
4311 		if (dsc_caps.is_dsc_supported)
4312 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4313 						  &dsc_caps,
4314 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4315 						  link_bandwidth_kbps,
4316 						  &stream->timing,
4317 						  &stream->timing.dsc_cfg))
4318 				stream->timing.flags.DSC = 1;
4319 #endif
4320 	}
4321 
4322 	update_stream_scaling_settings(&mode, dm_state, stream);
4323 
4324 	fill_audio_info(
4325 		&stream->audio_info,
4326 		drm_connector,
4327 		sink);
4328 
4329 	update_stream_signal(stream, sink);
4330 
4331 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4332 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4333 	if (stream->link->psr_feature_enabled)	{
4334 		struct dc  *core_dc = stream->link->ctx->dc;
4335 
4336 		if (dc_is_dmcu_initialized(core_dc)) {
4337 			struct dmcu *dmcu = core_dc->res_pool->dmcu;
4338 
4339 			stream->psr_version = dmcu->dmcu_version.psr_version;
4340 
4341 			//
4342 			// should decide stream support vsc sdp colorimetry capability
4343 			// before building vsc info packet
4344 			//
4345 			stream->use_vsc_sdp_for_colorimetry = false;
4346 			if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4347 				stream->use_vsc_sdp_for_colorimetry =
4348 					aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4349 			} else {
4350 				if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4351 					stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4352 					stream->use_vsc_sdp_for_colorimetry = true;
4353 				}
4354 			}
4355 			mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4356 		}
4357 	}
4358 finish:
4359 	dc_sink_release(sink);
4360 
4361 	return stream;
4362 }
4363 
4364 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4365 {
4366 	drm_crtc_cleanup(crtc);
4367 	kfree(crtc);
4368 }
4369 
4370 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4371 				  struct drm_crtc_state *state)
4372 {
4373 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
4374 
4375 	/* TODO Destroy dc_stream objects are stream object is flattened */
4376 	if (cur->stream)
4377 		dc_stream_release(cur->stream);
4378 
4379 
4380 	__drm_atomic_helper_crtc_destroy_state(state);
4381 
4382 
4383 	kfree(state);
4384 }
4385 
4386 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4387 {
4388 	struct dm_crtc_state *state;
4389 
4390 	if (crtc->state)
4391 		dm_crtc_destroy_state(crtc, crtc->state);
4392 
4393 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4394 	if (WARN_ON(!state))
4395 		return;
4396 
4397 	crtc->state = &state->base;
4398 	crtc->state->crtc = crtc;
4399 
4400 }
4401 
4402 static struct drm_crtc_state *
4403 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4404 {
4405 	struct dm_crtc_state *state, *cur;
4406 
4407 	cur = to_dm_crtc_state(crtc->state);
4408 
4409 	if (WARN_ON(!crtc->state))
4410 		return NULL;
4411 
4412 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4413 	if (!state)
4414 		return NULL;
4415 
4416 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4417 
4418 	if (cur->stream) {
4419 		state->stream = cur->stream;
4420 		dc_stream_retain(state->stream);
4421 	}
4422 
4423 	state->active_planes = cur->active_planes;
4424 	state->interrupts_enabled = cur->interrupts_enabled;
4425 	state->vrr_params = cur->vrr_params;
4426 	state->vrr_infopacket = cur->vrr_infopacket;
4427 	state->abm_level = cur->abm_level;
4428 	state->vrr_supported = cur->vrr_supported;
4429 	state->freesync_config = cur->freesync_config;
4430 	state->crc_src = cur->crc_src;
4431 	state->cm_has_degamma = cur->cm_has_degamma;
4432 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4433 
4434 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
4435 
4436 	return &state->base;
4437 }
4438 
4439 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4440 {
4441 	enum dc_irq_source irq_source;
4442 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4443 	struct amdgpu_device *adev = crtc->dev->dev_private;
4444 	int rc;
4445 
4446 	/* Do not set vupdate for DCN hardware */
4447 	if (adev->family > AMDGPU_FAMILY_AI)
4448 		return 0;
4449 
4450 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4451 
4452 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4453 
4454 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4455 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4456 	return rc;
4457 }
4458 
4459 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4460 {
4461 	enum dc_irq_source irq_source;
4462 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4463 	struct amdgpu_device *adev = crtc->dev->dev_private;
4464 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4465 	int rc = 0;
4466 
4467 	if (enable) {
4468 		/* vblank irq on -> Only need vupdate irq in vrr mode */
4469 		if (amdgpu_dm_vrr_active(acrtc_state))
4470 			rc = dm_set_vupdate_irq(crtc, true);
4471 	} else {
4472 		/* vblank irq off -> vupdate irq off */
4473 		rc = dm_set_vupdate_irq(crtc, false);
4474 	}
4475 
4476 	if (rc)
4477 		return rc;
4478 
4479 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4480 	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4481 }
4482 
4483 static int dm_enable_vblank(struct drm_crtc *crtc)
4484 {
4485 	return dm_set_vblank(crtc, true);
4486 }
4487 
4488 static void dm_disable_vblank(struct drm_crtc *crtc)
4489 {
4490 	dm_set_vblank(crtc, false);
4491 }
4492 
4493 /* Implemented only the options currently availible for the driver */
4494 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4495 	.reset = dm_crtc_reset_state,
4496 	.destroy = amdgpu_dm_crtc_destroy,
4497 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
4498 	.set_config = drm_atomic_helper_set_config,
4499 	.page_flip = drm_atomic_helper_page_flip,
4500 	.atomic_duplicate_state = dm_crtc_duplicate_state,
4501 	.atomic_destroy_state = dm_crtc_destroy_state,
4502 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
4503 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4504 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4505 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
4506 	.enable_vblank = dm_enable_vblank,
4507 	.disable_vblank = dm_disable_vblank,
4508 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4509 };
4510 
4511 static enum drm_connector_status
4512 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4513 {
4514 	bool connected;
4515 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4516 
4517 	/*
4518 	 * Notes:
4519 	 * 1. This interface is NOT called in context of HPD irq.
4520 	 * 2. This interface *is called* in context of user-mode ioctl. Which
4521 	 * makes it a bad place for *any* MST-related activity.
4522 	 */
4523 
4524 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4525 	    !aconnector->fake_enable)
4526 		connected = (aconnector->dc_sink != NULL);
4527 	else
4528 		connected = (aconnector->base.force == DRM_FORCE_ON);
4529 
4530 	return (connected ? connector_status_connected :
4531 			connector_status_disconnected);
4532 }
4533 
4534 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4535 					    struct drm_connector_state *connector_state,
4536 					    struct drm_property *property,
4537 					    uint64_t val)
4538 {
4539 	struct drm_device *dev = connector->dev;
4540 	struct amdgpu_device *adev = dev->dev_private;
4541 	struct dm_connector_state *dm_old_state =
4542 		to_dm_connector_state(connector->state);
4543 	struct dm_connector_state *dm_new_state =
4544 		to_dm_connector_state(connector_state);
4545 
4546 	int ret = -EINVAL;
4547 
4548 	if (property == dev->mode_config.scaling_mode_property) {
4549 		enum amdgpu_rmx_type rmx_type;
4550 
4551 		switch (val) {
4552 		case DRM_MODE_SCALE_CENTER:
4553 			rmx_type = RMX_CENTER;
4554 			break;
4555 		case DRM_MODE_SCALE_ASPECT:
4556 			rmx_type = RMX_ASPECT;
4557 			break;
4558 		case DRM_MODE_SCALE_FULLSCREEN:
4559 			rmx_type = RMX_FULL;
4560 			break;
4561 		case DRM_MODE_SCALE_NONE:
4562 		default:
4563 			rmx_type = RMX_OFF;
4564 			break;
4565 		}
4566 
4567 		if (dm_old_state->scaling == rmx_type)
4568 			return 0;
4569 
4570 		dm_new_state->scaling = rmx_type;
4571 		ret = 0;
4572 	} else if (property == adev->mode_info.underscan_hborder_property) {
4573 		dm_new_state->underscan_hborder = val;
4574 		ret = 0;
4575 	} else if (property == adev->mode_info.underscan_vborder_property) {
4576 		dm_new_state->underscan_vborder = val;
4577 		ret = 0;
4578 	} else if (property == adev->mode_info.underscan_property) {
4579 		dm_new_state->underscan_enable = val;
4580 		ret = 0;
4581 	} else if (property == adev->mode_info.abm_level_property) {
4582 		dm_new_state->abm_level = val;
4583 		ret = 0;
4584 	}
4585 
4586 	return ret;
4587 }
4588 
4589 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4590 					    const struct drm_connector_state *state,
4591 					    struct drm_property *property,
4592 					    uint64_t *val)
4593 {
4594 	struct drm_device *dev = connector->dev;
4595 	struct amdgpu_device *adev = dev->dev_private;
4596 	struct dm_connector_state *dm_state =
4597 		to_dm_connector_state(state);
4598 	int ret = -EINVAL;
4599 
4600 	if (property == dev->mode_config.scaling_mode_property) {
4601 		switch (dm_state->scaling) {
4602 		case RMX_CENTER:
4603 			*val = DRM_MODE_SCALE_CENTER;
4604 			break;
4605 		case RMX_ASPECT:
4606 			*val = DRM_MODE_SCALE_ASPECT;
4607 			break;
4608 		case RMX_FULL:
4609 			*val = DRM_MODE_SCALE_FULLSCREEN;
4610 			break;
4611 		case RMX_OFF:
4612 		default:
4613 			*val = DRM_MODE_SCALE_NONE;
4614 			break;
4615 		}
4616 		ret = 0;
4617 	} else if (property == adev->mode_info.underscan_hborder_property) {
4618 		*val = dm_state->underscan_hborder;
4619 		ret = 0;
4620 	} else if (property == adev->mode_info.underscan_vborder_property) {
4621 		*val = dm_state->underscan_vborder;
4622 		ret = 0;
4623 	} else if (property == adev->mode_info.underscan_property) {
4624 		*val = dm_state->underscan_enable;
4625 		ret = 0;
4626 	} else if (property == adev->mode_info.abm_level_property) {
4627 		*val = dm_state->abm_level;
4628 		ret = 0;
4629 	}
4630 
4631 	return ret;
4632 }
4633 
4634 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4635 {
4636 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4637 
4638 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4639 }
4640 
4641 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4642 {
4643 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4644 	const struct dc_link *link = aconnector->dc_link;
4645 	struct amdgpu_device *adev = connector->dev->dev_private;
4646 	struct amdgpu_display_manager *dm = &adev->dm;
4647 
4648 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4649 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4650 
4651 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4652 	    link->type != dc_connection_none &&
4653 	    dm->backlight_dev) {
4654 		backlight_device_unregister(dm->backlight_dev);
4655 		dm->backlight_dev = NULL;
4656 	}
4657 #endif
4658 
4659 	if (aconnector->dc_em_sink)
4660 		dc_sink_release(aconnector->dc_em_sink);
4661 	aconnector->dc_em_sink = NULL;
4662 	if (aconnector->dc_sink)
4663 		dc_sink_release(aconnector->dc_sink);
4664 	aconnector->dc_sink = NULL;
4665 
4666 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4667 	drm_connector_unregister(connector);
4668 	drm_connector_cleanup(connector);
4669 	if (aconnector->i2c) {
4670 		i2c_del_adapter(&aconnector->i2c->base);
4671 		kfree(aconnector->i2c);
4672 	}
4673 
4674 	kfree(connector);
4675 }
4676 
4677 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4678 {
4679 	struct dm_connector_state *state =
4680 		to_dm_connector_state(connector->state);
4681 
4682 	if (connector->state)
4683 		__drm_atomic_helper_connector_destroy_state(connector->state);
4684 
4685 	kfree(state);
4686 
4687 	state = kzalloc(sizeof(*state), GFP_KERNEL);
4688 
4689 	if (state) {
4690 		state->scaling = RMX_OFF;
4691 		state->underscan_enable = false;
4692 		state->underscan_hborder = 0;
4693 		state->underscan_vborder = 0;
4694 		state->base.max_requested_bpc = 8;
4695 		state->vcpi_slots = 0;
4696 		state->pbn = 0;
4697 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4698 			state->abm_level = amdgpu_dm_abm_level;
4699 
4700 		__drm_atomic_helper_connector_reset(connector, &state->base);
4701 	}
4702 }
4703 
4704 struct drm_connector_state *
4705 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4706 {
4707 	struct dm_connector_state *state =
4708 		to_dm_connector_state(connector->state);
4709 
4710 	struct dm_connector_state *new_state =
4711 			kmemdup(state, sizeof(*state), GFP_KERNEL);
4712 
4713 	if (!new_state)
4714 		return NULL;
4715 
4716 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4717 
4718 	new_state->freesync_capable = state->freesync_capable;
4719 	new_state->abm_level = state->abm_level;
4720 	new_state->scaling = state->scaling;
4721 	new_state->underscan_enable = state->underscan_enable;
4722 	new_state->underscan_hborder = state->underscan_hborder;
4723 	new_state->underscan_vborder = state->underscan_vborder;
4724 	new_state->vcpi_slots = state->vcpi_slots;
4725 	new_state->pbn = state->pbn;
4726 	return &new_state->base;
4727 }
4728 
4729 static int
4730 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4731 {
4732 #if defined(CONFIG_DEBUG_FS)
4733 	struct amdgpu_dm_connector *amdgpu_dm_connector =
4734 		to_amdgpu_dm_connector(connector);
4735 
4736 	connector_debugfs_init(amdgpu_dm_connector);
4737 #endif
4738 
4739 	return 0;
4740 }
4741 
4742 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4743 	.reset = amdgpu_dm_connector_funcs_reset,
4744 	.detect = amdgpu_dm_connector_detect,
4745 	.fill_modes = drm_helper_probe_single_connector_modes,
4746 	.destroy = amdgpu_dm_connector_destroy,
4747 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4748 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4749 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4750 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4751 	.late_register = amdgpu_dm_connector_late_register,
4752 	.early_unregister = amdgpu_dm_connector_unregister
4753 };
4754 
4755 static int get_modes(struct drm_connector *connector)
4756 {
4757 	return amdgpu_dm_connector_get_modes(connector);
4758 }
4759 
4760 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4761 {
4762 	struct dc_sink_init_data init_params = {
4763 			.link = aconnector->dc_link,
4764 			.sink_signal = SIGNAL_TYPE_VIRTUAL
4765 	};
4766 	struct edid *edid;
4767 
4768 	if (!aconnector->base.edid_blob_ptr) {
4769 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
4770 				aconnector->base.name);
4771 
4772 		aconnector->base.force = DRM_FORCE_OFF;
4773 		aconnector->base.override_edid = false;
4774 		return;
4775 	}
4776 
4777 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4778 
4779 	aconnector->edid = edid;
4780 
4781 	aconnector->dc_em_sink = dc_link_add_remote_sink(
4782 		aconnector->dc_link,
4783 		(uint8_t *)edid,
4784 		(edid->extensions + 1) * EDID_LENGTH,
4785 		&init_params);
4786 
4787 	if (aconnector->base.force == DRM_FORCE_ON) {
4788 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
4789 		aconnector->dc_link->local_sink :
4790 		aconnector->dc_em_sink;
4791 		dc_sink_retain(aconnector->dc_sink);
4792 	}
4793 }
4794 
4795 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4796 {
4797 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4798 
4799 	/*
4800 	 * In case of headless boot with force on for DP managed connector
4801 	 * Those settings have to be != 0 to get initial modeset
4802 	 */
4803 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4804 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4805 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4806 	}
4807 
4808 
4809 	aconnector->base.override_edid = true;
4810 	create_eml_sink(aconnector);
4811 }
4812 
4813 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4814 				   struct drm_display_mode *mode)
4815 {
4816 	int result = MODE_ERROR;
4817 	struct dc_sink *dc_sink;
4818 	struct amdgpu_device *adev = connector->dev->dev_private;
4819 	/* TODO: Unhardcode stream count */
4820 	struct dc_stream_state *stream;
4821 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4822 	enum dc_status dc_result = DC_OK;
4823 
4824 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4825 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
4826 		return result;
4827 
4828 	/*
4829 	 * Only run this the first time mode_valid is called to initilialize
4830 	 * EDID mgmt
4831 	 */
4832 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4833 		!aconnector->dc_em_sink)
4834 		handle_edid_mgmt(aconnector);
4835 
4836 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4837 
4838 	if (dc_sink == NULL) {
4839 		DRM_ERROR("dc_sink is NULL!\n");
4840 		goto fail;
4841 	}
4842 
4843 	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4844 	if (stream == NULL) {
4845 		DRM_ERROR("Failed to create stream for sink!\n");
4846 		goto fail;
4847 	}
4848 
4849 	dc_result = dc_validate_stream(adev->dm.dc, stream);
4850 
4851 	if (dc_result == DC_OK)
4852 		result = MODE_OK;
4853 	else
4854 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4855 			      mode->hdisplay,
4856 			      mode->vdisplay,
4857 			      mode->clock,
4858 			      dc_result);
4859 
4860 	dc_stream_release(stream);
4861 
4862 fail:
4863 	/* TODO: error handling*/
4864 	return result;
4865 }
4866 
4867 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4868 				struct dc_info_packet *out)
4869 {
4870 	struct hdmi_drm_infoframe frame;
4871 	unsigned char buf[30]; /* 26 + 4 */
4872 	ssize_t len;
4873 	int ret, i;
4874 
4875 	memset(out, 0, sizeof(*out));
4876 
4877 	if (!state->hdr_output_metadata)
4878 		return 0;
4879 
4880 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4881 	if (ret)
4882 		return ret;
4883 
4884 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4885 	if (len < 0)
4886 		return (int)len;
4887 
4888 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
4889 	if (len != 30)
4890 		return -EINVAL;
4891 
4892 	/* Prepare the infopacket for DC. */
4893 	switch (state->connector->connector_type) {
4894 	case DRM_MODE_CONNECTOR_HDMIA:
4895 		out->hb0 = 0x87; /* type */
4896 		out->hb1 = 0x01; /* version */
4897 		out->hb2 = 0x1A; /* length */
4898 		out->sb[0] = buf[3]; /* checksum */
4899 		i = 1;
4900 		break;
4901 
4902 	case DRM_MODE_CONNECTOR_DisplayPort:
4903 	case DRM_MODE_CONNECTOR_eDP:
4904 		out->hb0 = 0x00; /* sdp id, zero */
4905 		out->hb1 = 0x87; /* type */
4906 		out->hb2 = 0x1D; /* payload len - 1 */
4907 		out->hb3 = (0x13 << 2); /* sdp version */
4908 		out->sb[0] = 0x01; /* version */
4909 		out->sb[1] = 0x1A; /* length */
4910 		i = 2;
4911 		break;
4912 
4913 	default:
4914 		return -EINVAL;
4915 	}
4916 
4917 	memcpy(&out->sb[i], &buf[4], 26);
4918 	out->valid = true;
4919 
4920 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4921 		       sizeof(out->sb), false);
4922 
4923 	return 0;
4924 }
4925 
4926 static bool
4927 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4928 			  const struct drm_connector_state *new_state)
4929 {
4930 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4931 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4932 
4933 	if (old_blob != new_blob) {
4934 		if (old_blob && new_blob &&
4935 		    old_blob->length == new_blob->length)
4936 			return memcmp(old_blob->data, new_blob->data,
4937 				      old_blob->length);
4938 
4939 		return true;
4940 	}
4941 
4942 	return false;
4943 }
4944 
4945 static int
4946 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4947 				 struct drm_atomic_state *state)
4948 {
4949 	struct drm_connector_state *new_con_state =
4950 		drm_atomic_get_new_connector_state(state, conn);
4951 	struct drm_connector_state *old_con_state =
4952 		drm_atomic_get_old_connector_state(state, conn);
4953 	struct drm_crtc *crtc = new_con_state->crtc;
4954 	struct drm_crtc_state *new_crtc_state;
4955 	int ret;
4956 
4957 	if (!crtc)
4958 		return 0;
4959 
4960 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4961 		struct dc_info_packet hdr_infopacket;
4962 
4963 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4964 		if (ret)
4965 			return ret;
4966 
4967 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4968 		if (IS_ERR(new_crtc_state))
4969 			return PTR_ERR(new_crtc_state);
4970 
4971 		/*
4972 		 * DC considers the stream backends changed if the
4973 		 * static metadata changes. Forcing the modeset also
4974 		 * gives a simple way for userspace to switch from
4975 		 * 8bpc to 10bpc when setting the metadata to enter
4976 		 * or exit HDR.
4977 		 *
4978 		 * Changing the static metadata after it's been
4979 		 * set is permissible, however. So only force a
4980 		 * modeset if we're entering or exiting HDR.
4981 		 */
4982 		new_crtc_state->mode_changed =
4983 			!old_con_state->hdr_output_metadata ||
4984 			!new_con_state->hdr_output_metadata;
4985 	}
4986 
4987 	return 0;
4988 }
4989 
4990 static const struct drm_connector_helper_funcs
4991 amdgpu_dm_connector_helper_funcs = {
4992 	/*
4993 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
4994 	 * modes will be filtered by drm_mode_validate_size(), and those modes
4995 	 * are missing after user start lightdm. So we need to renew modes list.
4996 	 * in get_modes call back, not just return the modes count
4997 	 */
4998 	.get_modes = get_modes,
4999 	.mode_valid = amdgpu_dm_connector_mode_valid,
5000 	.atomic_check = amdgpu_dm_connector_atomic_check,
5001 };
5002 
5003 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5004 {
5005 }
5006 
5007 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5008 {
5009 	struct drm_device *dev = new_crtc_state->crtc->dev;
5010 	struct drm_plane *plane;
5011 
5012 	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5013 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5014 			return true;
5015 	}
5016 
5017 	return false;
5018 }
5019 
5020 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5021 {
5022 	struct drm_atomic_state *state = new_crtc_state->state;
5023 	struct drm_plane *plane;
5024 	int num_active = 0;
5025 
5026 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5027 		struct drm_plane_state *new_plane_state;
5028 
5029 		/* Cursor planes are "fake". */
5030 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
5031 			continue;
5032 
5033 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5034 
5035 		if (!new_plane_state) {
5036 			/*
5037 			 * The plane is enable on the CRTC and hasn't changed
5038 			 * state. This means that it previously passed
5039 			 * validation and is therefore enabled.
5040 			 */
5041 			num_active += 1;
5042 			continue;
5043 		}
5044 
5045 		/* We need a framebuffer to be considered enabled. */
5046 		num_active += (new_plane_state->fb != NULL);
5047 	}
5048 
5049 	return num_active;
5050 }
5051 
5052 /*
5053  * Sets whether interrupts should be enabled on a specific CRTC.
5054  * We require that the stream be enabled and that there exist active
5055  * DC planes on the stream.
5056  */
5057 static void
5058 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5059 			       struct drm_crtc_state *new_crtc_state)
5060 {
5061 	struct dm_crtc_state *dm_new_crtc_state =
5062 		to_dm_crtc_state(new_crtc_state);
5063 
5064 	dm_new_crtc_state->active_planes = 0;
5065 	dm_new_crtc_state->interrupts_enabled = false;
5066 
5067 	if (!dm_new_crtc_state->stream)
5068 		return;
5069 
5070 	dm_new_crtc_state->active_planes =
5071 		count_crtc_active_planes(new_crtc_state);
5072 
5073 	dm_new_crtc_state->interrupts_enabled =
5074 		dm_new_crtc_state->active_planes > 0;
5075 }
5076 
5077 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5078 				       struct drm_crtc_state *state)
5079 {
5080 	struct amdgpu_device *adev = crtc->dev->dev_private;
5081 	struct dc *dc = adev->dm.dc;
5082 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5083 	int ret = -EINVAL;
5084 
5085 	/*
5086 	 * Update interrupt state for the CRTC. This needs to happen whenever
5087 	 * the CRTC has changed or whenever any of its planes have changed.
5088 	 * Atomic check satisfies both of these requirements since the CRTC
5089 	 * is added to the state by DRM during drm_atomic_helper_check_planes.
5090 	 */
5091 	dm_update_crtc_interrupt_state(crtc, state);
5092 
5093 	if (unlikely(!dm_crtc_state->stream &&
5094 		     modeset_required(state, NULL, dm_crtc_state->stream))) {
5095 		WARN_ON(1);
5096 		return ret;
5097 	}
5098 
5099 	/* In some use cases, like reset, no stream is attached */
5100 	if (!dm_crtc_state->stream)
5101 		return 0;
5102 
5103 	/*
5104 	 * We want at least one hardware plane enabled to use
5105 	 * the stream with a cursor enabled.
5106 	 */
5107 	if (state->enable && state->active &&
5108 	    does_crtc_have_active_cursor(state) &&
5109 	    dm_crtc_state->active_planes == 0)
5110 		return -EINVAL;
5111 
5112 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5113 		return 0;
5114 
5115 	return ret;
5116 }
5117 
5118 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5119 				      const struct drm_display_mode *mode,
5120 				      struct drm_display_mode *adjusted_mode)
5121 {
5122 	return true;
5123 }
5124 
5125 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5126 	.disable = dm_crtc_helper_disable,
5127 	.atomic_check = dm_crtc_helper_atomic_check,
5128 	.mode_fixup = dm_crtc_helper_mode_fixup,
5129 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
5130 };
5131 
5132 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5133 {
5134 
5135 }
5136 
5137 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
5138 {
5139 	switch (display_color_depth) {
5140 		case COLOR_DEPTH_666:
5141 			return 6;
5142 		case COLOR_DEPTH_888:
5143 			return 8;
5144 		case COLOR_DEPTH_101010:
5145 			return 10;
5146 		case COLOR_DEPTH_121212:
5147 			return 12;
5148 		case COLOR_DEPTH_141414:
5149 			return 14;
5150 		case COLOR_DEPTH_161616:
5151 			return 16;
5152 		default:
5153 			break;
5154 		}
5155 	return 0;
5156 }
5157 
5158 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5159 					  struct drm_crtc_state *crtc_state,
5160 					  struct drm_connector_state *conn_state)
5161 {
5162 	struct drm_atomic_state *state = crtc_state->state;
5163 	struct drm_connector *connector = conn_state->connector;
5164 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5165 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5166 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5167 	struct drm_dp_mst_topology_mgr *mst_mgr;
5168 	struct drm_dp_mst_port *mst_port;
5169 	enum dc_color_depth color_depth;
5170 	int clock, bpp = 0;
5171 	bool is_y420 = false;
5172 
5173 	if (!aconnector->port || !aconnector->dc_sink)
5174 		return 0;
5175 
5176 	mst_port = aconnector->port;
5177 	mst_mgr = &aconnector->mst_port->mst_mgr;
5178 
5179 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5180 		return 0;
5181 
5182 	if (!state->duplicated) {
5183 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5184 				aconnector->force_yuv420_output;
5185 		color_depth = convert_color_depth_from_display_info(connector, conn_state,
5186 								    is_y420);
5187 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5188 		clock = adjusted_mode->clock;
5189 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5190 	}
5191 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5192 									   mst_mgr,
5193 									   mst_port,
5194 									   dm_new_connector_state->pbn,
5195 									   0);
5196 	if (dm_new_connector_state->vcpi_slots < 0) {
5197 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5198 		return dm_new_connector_state->vcpi_slots;
5199 	}
5200 	return 0;
5201 }
5202 
5203 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5204 	.disable = dm_encoder_helper_disable,
5205 	.atomic_check = dm_encoder_helper_atomic_check
5206 };
5207 
5208 #if defined(CONFIG_DRM_AMD_DC_DCN)
5209 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5210 					    struct dc_state *dc_state)
5211 {
5212 	struct dc_stream_state *stream = NULL;
5213 	struct drm_connector *connector;
5214 	struct drm_connector_state *new_con_state, *old_con_state;
5215 	struct amdgpu_dm_connector *aconnector;
5216 	struct dm_connector_state *dm_conn_state;
5217 	int i, j, clock, bpp;
5218 	int vcpi, pbn_div, pbn = 0;
5219 
5220 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5221 
5222 		aconnector = to_amdgpu_dm_connector(connector);
5223 
5224 		if (!aconnector->port)
5225 			continue;
5226 
5227 		if (!new_con_state || !new_con_state->crtc)
5228 			continue;
5229 
5230 		dm_conn_state = to_dm_connector_state(new_con_state);
5231 
5232 		for (j = 0; j < dc_state->stream_count; j++) {
5233 			stream = dc_state->streams[j];
5234 			if (!stream)
5235 				continue;
5236 
5237 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
5238 				break;
5239 
5240 			stream = NULL;
5241 		}
5242 
5243 		if (!stream)
5244 			continue;
5245 
5246 		if (stream->timing.flags.DSC != 1) {
5247 			drm_dp_mst_atomic_enable_dsc(state,
5248 						     aconnector->port,
5249 						     dm_conn_state->pbn,
5250 						     0,
5251 						     false);
5252 			continue;
5253 		}
5254 
5255 		pbn_div = dm_mst_get_pbn_divider(stream->link);
5256 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
5257 		clock = stream->timing.pix_clk_100hz / 10;
5258 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5259 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
5260 						    aconnector->port,
5261 						    pbn, pbn_div,
5262 						    true);
5263 		if (vcpi < 0)
5264 			return vcpi;
5265 
5266 		dm_conn_state->pbn = pbn;
5267 		dm_conn_state->vcpi_slots = vcpi;
5268 	}
5269 	return 0;
5270 }
5271 #endif
5272 
5273 static void dm_drm_plane_reset(struct drm_plane *plane)
5274 {
5275 	struct dm_plane_state *amdgpu_state = NULL;
5276 
5277 	if (plane->state)
5278 		plane->funcs->atomic_destroy_state(plane, plane->state);
5279 
5280 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5281 	WARN_ON(amdgpu_state == NULL);
5282 
5283 	if (amdgpu_state)
5284 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5285 }
5286 
5287 static struct drm_plane_state *
5288 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5289 {
5290 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5291 
5292 	old_dm_plane_state = to_dm_plane_state(plane->state);
5293 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5294 	if (!dm_plane_state)
5295 		return NULL;
5296 
5297 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5298 
5299 	if (old_dm_plane_state->dc_state) {
5300 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5301 		dc_plane_state_retain(dm_plane_state->dc_state);
5302 	}
5303 
5304 	return &dm_plane_state->base;
5305 }
5306 
5307 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5308 				struct drm_plane_state *state)
5309 {
5310 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5311 
5312 	if (dm_plane_state->dc_state)
5313 		dc_plane_state_release(dm_plane_state->dc_state);
5314 
5315 	drm_atomic_helper_plane_destroy_state(plane, state);
5316 }
5317 
5318 static const struct drm_plane_funcs dm_plane_funcs = {
5319 	.update_plane	= drm_atomic_helper_update_plane,
5320 	.disable_plane	= drm_atomic_helper_disable_plane,
5321 	.destroy	= drm_primary_helper_destroy,
5322 	.reset = dm_drm_plane_reset,
5323 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
5324 	.atomic_destroy_state = dm_drm_plane_destroy_state,
5325 };
5326 
5327 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5328 				      struct drm_plane_state *new_state)
5329 {
5330 	struct amdgpu_framebuffer *afb;
5331 	struct drm_gem_object *obj;
5332 	struct amdgpu_device *adev;
5333 	struct amdgpu_bo *rbo;
5334 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5335 	struct list_head list;
5336 	struct ttm_validate_buffer tv;
5337 	struct ww_acquire_ctx ticket;
5338 	uint64_t tiling_flags;
5339 	uint32_t domain;
5340 	int r;
5341 
5342 	dm_plane_state_old = to_dm_plane_state(plane->state);
5343 	dm_plane_state_new = to_dm_plane_state(new_state);
5344 
5345 	if (!new_state->fb) {
5346 		DRM_DEBUG_DRIVER("No FB bound\n");
5347 		return 0;
5348 	}
5349 
5350 	afb = to_amdgpu_framebuffer(new_state->fb);
5351 	obj = new_state->fb->obj[0];
5352 	rbo = gem_to_amdgpu_bo(obj);
5353 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5354 	INIT_LIST_HEAD(&list);
5355 
5356 	tv.bo = &rbo->tbo;
5357 	tv.num_shared = 1;
5358 	list_add(&tv.head, &list);
5359 
5360 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5361 	if (r) {
5362 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5363 		return r;
5364 	}
5365 
5366 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5367 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
5368 	else
5369 		domain = AMDGPU_GEM_DOMAIN_VRAM;
5370 
5371 	r = amdgpu_bo_pin(rbo, domain);
5372 	if (unlikely(r != 0)) {
5373 		if (r != -ERESTARTSYS)
5374 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5375 		ttm_eu_backoff_reservation(&ticket, &list);
5376 		return r;
5377 	}
5378 
5379 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5380 	if (unlikely(r != 0)) {
5381 		amdgpu_bo_unpin(rbo);
5382 		ttm_eu_backoff_reservation(&ticket, &list);
5383 		DRM_ERROR("%p bind failed\n", rbo);
5384 		return r;
5385 	}
5386 
5387 	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5388 
5389 	ttm_eu_backoff_reservation(&ticket, &list);
5390 
5391 	afb->address = amdgpu_bo_gpu_offset(rbo);
5392 
5393 	amdgpu_bo_ref(rbo);
5394 
5395 	if (dm_plane_state_new->dc_state &&
5396 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5397 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5398 
5399 		fill_plane_buffer_attributes(
5400 			adev, afb, plane_state->format, plane_state->rotation,
5401 			tiling_flags, &plane_state->tiling_info,
5402 			&plane_state->plane_size, &plane_state->dcc,
5403 			&plane_state->address);
5404 	}
5405 
5406 	return 0;
5407 }
5408 
5409 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5410 				       struct drm_plane_state *old_state)
5411 {
5412 	struct amdgpu_bo *rbo;
5413 	int r;
5414 
5415 	if (!old_state->fb)
5416 		return;
5417 
5418 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5419 	r = amdgpu_bo_reserve(rbo, false);
5420 	if (unlikely(r)) {
5421 		DRM_ERROR("failed to reserve rbo before unpin\n");
5422 		return;
5423 	}
5424 
5425 	amdgpu_bo_unpin(rbo);
5426 	amdgpu_bo_unreserve(rbo);
5427 	amdgpu_bo_unref(&rbo);
5428 }
5429 
5430 static int dm_plane_atomic_check(struct drm_plane *plane,
5431 				 struct drm_plane_state *state)
5432 {
5433 	struct amdgpu_device *adev = plane->dev->dev_private;
5434 	struct dc *dc = adev->dm.dc;
5435 	struct dm_plane_state *dm_plane_state;
5436 	struct dc_scaling_info scaling_info;
5437 	int ret;
5438 
5439 	dm_plane_state = to_dm_plane_state(state);
5440 
5441 	if (!dm_plane_state->dc_state)
5442 		return 0;
5443 
5444 	ret = fill_dc_scaling_info(state, &scaling_info);
5445 	if (ret)
5446 		return ret;
5447 
5448 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5449 		return 0;
5450 
5451 	return -EINVAL;
5452 }
5453 
5454 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5455 				       struct drm_plane_state *new_plane_state)
5456 {
5457 	/* Only support async updates on cursor planes. */
5458 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
5459 		return -EINVAL;
5460 
5461 	return 0;
5462 }
5463 
5464 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5465 					 struct drm_plane_state *new_state)
5466 {
5467 	struct drm_plane_state *old_state =
5468 		drm_atomic_get_old_plane_state(new_state->state, plane);
5469 
5470 	swap(plane->state->fb, new_state->fb);
5471 
5472 	plane->state->src_x = new_state->src_x;
5473 	plane->state->src_y = new_state->src_y;
5474 	plane->state->src_w = new_state->src_w;
5475 	plane->state->src_h = new_state->src_h;
5476 	plane->state->crtc_x = new_state->crtc_x;
5477 	plane->state->crtc_y = new_state->crtc_y;
5478 	plane->state->crtc_w = new_state->crtc_w;
5479 	plane->state->crtc_h = new_state->crtc_h;
5480 
5481 	handle_cursor_update(plane, old_state);
5482 }
5483 
5484 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5485 	.prepare_fb = dm_plane_helper_prepare_fb,
5486 	.cleanup_fb = dm_plane_helper_cleanup_fb,
5487 	.atomic_check = dm_plane_atomic_check,
5488 	.atomic_async_check = dm_plane_atomic_async_check,
5489 	.atomic_async_update = dm_plane_atomic_async_update
5490 };
5491 
5492 /*
5493  * TODO: these are currently initialized to rgb formats only.
5494  * For future use cases we should either initialize them dynamically based on
5495  * plane capabilities, or initialize this array to all formats, so internal drm
5496  * check will succeed, and let DC implement proper check
5497  */
5498 static const uint32_t rgb_formats[] = {
5499 	DRM_FORMAT_XRGB8888,
5500 	DRM_FORMAT_ARGB8888,
5501 	DRM_FORMAT_RGBA8888,
5502 	DRM_FORMAT_XRGB2101010,
5503 	DRM_FORMAT_XBGR2101010,
5504 	DRM_FORMAT_ARGB2101010,
5505 	DRM_FORMAT_ABGR2101010,
5506 	DRM_FORMAT_XBGR8888,
5507 	DRM_FORMAT_ABGR8888,
5508 	DRM_FORMAT_RGB565,
5509 };
5510 
5511 static const uint32_t overlay_formats[] = {
5512 	DRM_FORMAT_XRGB8888,
5513 	DRM_FORMAT_ARGB8888,
5514 	DRM_FORMAT_RGBA8888,
5515 	DRM_FORMAT_XBGR8888,
5516 	DRM_FORMAT_ABGR8888,
5517 	DRM_FORMAT_RGB565
5518 };
5519 
5520 static const u32 cursor_formats[] = {
5521 	DRM_FORMAT_ARGB8888
5522 };
5523 
5524 static int get_plane_formats(const struct drm_plane *plane,
5525 			     const struct dc_plane_cap *plane_cap,
5526 			     uint32_t *formats, int max_formats)
5527 {
5528 	int i, num_formats = 0;
5529 
5530 	/*
5531 	 * TODO: Query support for each group of formats directly from
5532 	 * DC plane caps. This will require adding more formats to the
5533 	 * caps list.
5534 	 */
5535 
5536 	switch (plane->type) {
5537 	case DRM_PLANE_TYPE_PRIMARY:
5538 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5539 			if (num_formats >= max_formats)
5540 				break;
5541 
5542 			formats[num_formats++] = rgb_formats[i];
5543 		}
5544 
5545 		if (plane_cap && plane_cap->pixel_format_support.nv12)
5546 			formats[num_formats++] = DRM_FORMAT_NV12;
5547 		if (plane_cap && plane_cap->pixel_format_support.p010)
5548 			formats[num_formats++] = DRM_FORMAT_P010;
5549 		break;
5550 
5551 	case DRM_PLANE_TYPE_OVERLAY:
5552 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5553 			if (num_formats >= max_formats)
5554 				break;
5555 
5556 			formats[num_formats++] = overlay_formats[i];
5557 		}
5558 		break;
5559 
5560 	case DRM_PLANE_TYPE_CURSOR:
5561 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5562 			if (num_formats >= max_formats)
5563 				break;
5564 
5565 			formats[num_formats++] = cursor_formats[i];
5566 		}
5567 		break;
5568 	}
5569 
5570 	return num_formats;
5571 }
5572 
5573 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5574 				struct drm_plane *plane,
5575 				unsigned long possible_crtcs,
5576 				const struct dc_plane_cap *plane_cap)
5577 {
5578 	uint32_t formats[32];
5579 	int num_formats;
5580 	int res = -EPERM;
5581 
5582 	num_formats = get_plane_formats(plane, plane_cap, formats,
5583 					ARRAY_SIZE(formats));
5584 
5585 	res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5586 				       &dm_plane_funcs, formats, num_formats,
5587 				       NULL, plane->type, NULL);
5588 	if (res)
5589 		return res;
5590 
5591 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5592 	    plane_cap && plane_cap->per_pixel_alpha) {
5593 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5594 					  BIT(DRM_MODE_BLEND_PREMULTI);
5595 
5596 		drm_plane_create_alpha_property(plane);
5597 		drm_plane_create_blend_mode_property(plane, blend_caps);
5598 	}
5599 
5600 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5601 	    plane_cap &&
5602 	    (plane_cap->pixel_format_support.nv12 ||
5603 	     plane_cap->pixel_format_support.p010)) {
5604 		/* This only affects YUV formats. */
5605 		drm_plane_create_color_properties(
5606 			plane,
5607 			BIT(DRM_COLOR_YCBCR_BT601) |
5608 			BIT(DRM_COLOR_YCBCR_BT709) |
5609 			BIT(DRM_COLOR_YCBCR_BT2020),
5610 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5611 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5612 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5613 	}
5614 
5615 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5616 
5617 	/* Create (reset) the plane state */
5618 	if (plane->funcs->reset)
5619 		plane->funcs->reset(plane);
5620 
5621 	return 0;
5622 }
5623 
5624 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5625 			       struct drm_plane *plane,
5626 			       uint32_t crtc_index)
5627 {
5628 	struct amdgpu_crtc *acrtc = NULL;
5629 	struct drm_plane *cursor_plane;
5630 
5631 	int res = -ENOMEM;
5632 
5633 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5634 	if (!cursor_plane)
5635 		goto fail;
5636 
5637 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
5638 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
5639 
5640 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
5641 	if (!acrtc)
5642 		goto fail;
5643 
5644 	res = drm_crtc_init_with_planes(
5645 			dm->ddev,
5646 			&acrtc->base,
5647 			plane,
5648 			cursor_plane,
5649 			&amdgpu_dm_crtc_funcs, NULL);
5650 
5651 	if (res)
5652 		goto fail;
5653 
5654 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5655 
5656 	/* Create (reset) the plane state */
5657 	if (acrtc->base.funcs->reset)
5658 		acrtc->base.funcs->reset(&acrtc->base);
5659 
5660 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5661 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5662 
5663 	acrtc->crtc_id = crtc_index;
5664 	acrtc->base.enabled = false;
5665 	acrtc->otg_inst = -1;
5666 
5667 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5668 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5669 				   true, MAX_COLOR_LUT_ENTRIES);
5670 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5671 
5672 	return 0;
5673 
5674 fail:
5675 	kfree(acrtc);
5676 	kfree(cursor_plane);
5677 	return res;
5678 }
5679 
5680 
5681 static int to_drm_connector_type(enum signal_type st)
5682 {
5683 	switch (st) {
5684 	case SIGNAL_TYPE_HDMI_TYPE_A:
5685 		return DRM_MODE_CONNECTOR_HDMIA;
5686 	case SIGNAL_TYPE_EDP:
5687 		return DRM_MODE_CONNECTOR_eDP;
5688 	case SIGNAL_TYPE_LVDS:
5689 		return DRM_MODE_CONNECTOR_LVDS;
5690 	case SIGNAL_TYPE_RGB:
5691 		return DRM_MODE_CONNECTOR_VGA;
5692 	case SIGNAL_TYPE_DISPLAY_PORT:
5693 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
5694 		return DRM_MODE_CONNECTOR_DisplayPort;
5695 	case SIGNAL_TYPE_DVI_DUAL_LINK:
5696 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
5697 		return DRM_MODE_CONNECTOR_DVID;
5698 	case SIGNAL_TYPE_VIRTUAL:
5699 		return DRM_MODE_CONNECTOR_VIRTUAL;
5700 
5701 	default:
5702 		return DRM_MODE_CONNECTOR_Unknown;
5703 	}
5704 }
5705 
5706 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5707 {
5708 	struct drm_encoder *encoder;
5709 
5710 	/* There is only one encoder per connector */
5711 	drm_connector_for_each_possible_encoder(connector, encoder)
5712 		return encoder;
5713 
5714 	return NULL;
5715 }
5716 
5717 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5718 {
5719 	struct drm_encoder *encoder;
5720 	struct amdgpu_encoder *amdgpu_encoder;
5721 
5722 	encoder = amdgpu_dm_connector_to_encoder(connector);
5723 
5724 	if (encoder == NULL)
5725 		return;
5726 
5727 	amdgpu_encoder = to_amdgpu_encoder(encoder);
5728 
5729 	amdgpu_encoder->native_mode.clock = 0;
5730 
5731 	if (!list_empty(&connector->probed_modes)) {
5732 		struct drm_display_mode *preferred_mode = NULL;
5733 
5734 		list_for_each_entry(preferred_mode,
5735 				    &connector->probed_modes,
5736 				    head) {
5737 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5738 				amdgpu_encoder->native_mode = *preferred_mode;
5739 
5740 			break;
5741 		}
5742 
5743 	}
5744 }
5745 
5746 static struct drm_display_mode *
5747 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5748 			     char *name,
5749 			     int hdisplay, int vdisplay)
5750 {
5751 	struct drm_device *dev = encoder->dev;
5752 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5753 	struct drm_display_mode *mode = NULL;
5754 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5755 
5756 	mode = drm_mode_duplicate(dev, native_mode);
5757 
5758 	if (mode == NULL)
5759 		return NULL;
5760 
5761 	mode->hdisplay = hdisplay;
5762 	mode->vdisplay = vdisplay;
5763 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5764 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5765 
5766 	return mode;
5767 
5768 }
5769 
5770 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5771 						 struct drm_connector *connector)
5772 {
5773 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5774 	struct drm_display_mode *mode = NULL;
5775 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5776 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5777 				to_amdgpu_dm_connector(connector);
5778 	int i;
5779 	int n;
5780 	struct mode_size {
5781 		char name[DRM_DISPLAY_MODE_LEN];
5782 		int w;
5783 		int h;
5784 	} common_modes[] = {
5785 		{  "640x480",  640,  480},
5786 		{  "800x600",  800,  600},
5787 		{ "1024x768", 1024,  768},
5788 		{ "1280x720", 1280,  720},
5789 		{ "1280x800", 1280,  800},
5790 		{"1280x1024", 1280, 1024},
5791 		{ "1440x900", 1440,  900},
5792 		{"1680x1050", 1680, 1050},
5793 		{"1600x1200", 1600, 1200},
5794 		{"1920x1080", 1920, 1080},
5795 		{"1920x1200", 1920, 1200}
5796 	};
5797 
5798 	n = ARRAY_SIZE(common_modes);
5799 
5800 	for (i = 0; i < n; i++) {
5801 		struct drm_display_mode *curmode = NULL;
5802 		bool mode_existed = false;
5803 
5804 		if (common_modes[i].w > native_mode->hdisplay ||
5805 		    common_modes[i].h > native_mode->vdisplay ||
5806 		   (common_modes[i].w == native_mode->hdisplay &&
5807 		    common_modes[i].h == native_mode->vdisplay))
5808 			continue;
5809 
5810 		list_for_each_entry(curmode, &connector->probed_modes, head) {
5811 			if (common_modes[i].w == curmode->hdisplay &&
5812 			    common_modes[i].h == curmode->vdisplay) {
5813 				mode_existed = true;
5814 				break;
5815 			}
5816 		}
5817 
5818 		if (mode_existed)
5819 			continue;
5820 
5821 		mode = amdgpu_dm_create_common_mode(encoder,
5822 				common_modes[i].name, common_modes[i].w,
5823 				common_modes[i].h);
5824 		drm_mode_probed_add(connector, mode);
5825 		amdgpu_dm_connector->num_modes++;
5826 	}
5827 }
5828 
5829 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5830 					      struct edid *edid)
5831 {
5832 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5833 			to_amdgpu_dm_connector(connector);
5834 
5835 	if (edid) {
5836 		/* empty probed_modes */
5837 		INIT_LIST_HEAD(&connector->probed_modes);
5838 		amdgpu_dm_connector->num_modes =
5839 				drm_add_edid_modes(connector, edid);
5840 
5841 		/* sorting the probed modes before calling function
5842 		 * amdgpu_dm_get_native_mode() since EDID can have
5843 		 * more than one preferred mode. The modes that are
5844 		 * later in the probed mode list could be of higher
5845 		 * and preferred resolution. For example, 3840x2160
5846 		 * resolution in base EDID preferred timing and 4096x2160
5847 		 * preferred resolution in DID extension block later.
5848 		 */
5849 		drm_mode_sort(&connector->probed_modes);
5850 		amdgpu_dm_get_native_mode(connector);
5851 	} else {
5852 		amdgpu_dm_connector->num_modes = 0;
5853 	}
5854 }
5855 
5856 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5857 {
5858 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5859 			to_amdgpu_dm_connector(connector);
5860 	struct drm_encoder *encoder;
5861 	struct edid *edid = amdgpu_dm_connector->edid;
5862 
5863 	encoder = amdgpu_dm_connector_to_encoder(connector);
5864 
5865 	if (!edid || !drm_edid_is_valid(edid)) {
5866 		amdgpu_dm_connector->num_modes =
5867 				drm_add_modes_noedid(connector, 640, 480);
5868 	} else {
5869 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
5870 		amdgpu_dm_connector_add_common_modes(encoder, connector);
5871 	}
5872 	amdgpu_dm_fbc_init(connector);
5873 
5874 	return amdgpu_dm_connector->num_modes;
5875 }
5876 
5877 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5878 				     struct amdgpu_dm_connector *aconnector,
5879 				     int connector_type,
5880 				     struct dc_link *link,
5881 				     int link_index)
5882 {
5883 	struct amdgpu_device *adev = dm->ddev->dev_private;
5884 
5885 	/*
5886 	 * Some of the properties below require access to state, like bpc.
5887 	 * Allocate some default initial connector state with our reset helper.
5888 	 */
5889 	if (aconnector->base.funcs->reset)
5890 		aconnector->base.funcs->reset(&aconnector->base);
5891 
5892 	aconnector->connector_id = link_index;
5893 	aconnector->dc_link = link;
5894 	aconnector->base.interlace_allowed = false;
5895 	aconnector->base.doublescan_allowed = false;
5896 	aconnector->base.stereo_allowed = false;
5897 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5898 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5899 	aconnector->audio_inst = -1;
5900 	mutex_init(&aconnector->hpd_lock);
5901 
5902 	/*
5903 	 * configure support HPD hot plug connector_>polled default value is 0
5904 	 * which means HPD hot plug not supported
5905 	 */
5906 	switch (connector_type) {
5907 	case DRM_MODE_CONNECTOR_HDMIA:
5908 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5909 		aconnector->base.ycbcr_420_allowed =
5910 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5911 		break;
5912 	case DRM_MODE_CONNECTOR_DisplayPort:
5913 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5914 		aconnector->base.ycbcr_420_allowed =
5915 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
5916 		break;
5917 	case DRM_MODE_CONNECTOR_DVID:
5918 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5919 		break;
5920 	default:
5921 		break;
5922 	}
5923 
5924 	drm_object_attach_property(&aconnector->base.base,
5925 				dm->ddev->mode_config.scaling_mode_property,
5926 				DRM_MODE_SCALE_NONE);
5927 
5928 	drm_object_attach_property(&aconnector->base.base,
5929 				adev->mode_info.underscan_property,
5930 				UNDERSCAN_OFF);
5931 	drm_object_attach_property(&aconnector->base.base,
5932 				adev->mode_info.underscan_hborder_property,
5933 				0);
5934 	drm_object_attach_property(&aconnector->base.base,
5935 				adev->mode_info.underscan_vborder_property,
5936 				0);
5937 
5938 	if (!aconnector->mst_port)
5939 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5940 
5941 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
5942 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5943 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5944 
5945 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5946 	    dc_is_dmcu_initialized(adev->dm.dc)) {
5947 		drm_object_attach_property(&aconnector->base.base,
5948 				adev->mode_info.abm_level_property, 0);
5949 	}
5950 
5951 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5952 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5953 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
5954 		drm_object_attach_property(
5955 			&aconnector->base.base,
5956 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
5957 
5958 		if (!aconnector->mst_port)
5959 			drm_connector_attach_vrr_capable_property(&aconnector->base);
5960 
5961 #ifdef CONFIG_DRM_AMD_DC_HDCP
5962 		if (adev->dm.hdcp_workqueue)
5963 			drm_connector_attach_content_protection_property(&aconnector->base, true);
5964 #endif
5965 	}
5966 }
5967 
5968 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5969 			      struct i2c_msg *msgs, int num)
5970 {
5971 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5972 	struct ddc_service *ddc_service = i2c->ddc_service;
5973 	struct i2c_command cmd;
5974 	int i;
5975 	int result = -EIO;
5976 
5977 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5978 
5979 	if (!cmd.payloads)
5980 		return result;
5981 
5982 	cmd.number_of_payloads = num;
5983 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
5984 	cmd.speed = 100;
5985 
5986 	for (i = 0; i < num; i++) {
5987 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
5988 		cmd.payloads[i].address = msgs[i].addr;
5989 		cmd.payloads[i].length = msgs[i].len;
5990 		cmd.payloads[i].data = msgs[i].buf;
5991 	}
5992 
5993 	if (dc_submit_i2c(
5994 			ddc_service->ctx->dc,
5995 			ddc_service->ddc_pin->hw_info.ddc_channel,
5996 			&cmd))
5997 		result = num;
5998 
5999 	kfree(cmd.payloads);
6000 	return result;
6001 }
6002 
6003 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6004 {
6005 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6006 }
6007 
6008 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6009 	.master_xfer = amdgpu_dm_i2c_xfer,
6010 	.functionality = amdgpu_dm_i2c_func,
6011 };
6012 
6013 static struct amdgpu_i2c_adapter *
6014 create_i2c(struct ddc_service *ddc_service,
6015 	   int link_index,
6016 	   int *res)
6017 {
6018 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6019 	struct amdgpu_i2c_adapter *i2c;
6020 
6021 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6022 	if (!i2c)
6023 		return NULL;
6024 	i2c->base.owner = THIS_MODULE;
6025 	i2c->base.class = I2C_CLASS_DDC;
6026 	i2c->base.dev.parent = &adev->pdev->dev;
6027 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6028 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6029 	i2c_set_adapdata(&i2c->base, i2c);
6030 	i2c->ddc_service = ddc_service;
6031 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6032 
6033 	return i2c;
6034 }
6035 
6036 
6037 /*
6038  * Note: this function assumes that dc_link_detect() was called for the
6039  * dc_link which will be represented by this aconnector.
6040  */
6041 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6042 				    struct amdgpu_dm_connector *aconnector,
6043 				    uint32_t link_index,
6044 				    struct amdgpu_encoder *aencoder)
6045 {
6046 	int res = 0;
6047 	int connector_type;
6048 	struct dc *dc = dm->dc;
6049 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6050 	struct amdgpu_i2c_adapter *i2c;
6051 
6052 	link->priv = aconnector;
6053 
6054 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6055 
6056 	i2c = create_i2c(link->ddc, link->link_index, &res);
6057 	if (!i2c) {
6058 		DRM_ERROR("Failed to create i2c adapter data\n");
6059 		return -ENOMEM;
6060 	}
6061 
6062 	aconnector->i2c = i2c;
6063 	res = i2c_add_adapter(&i2c->base);
6064 
6065 	if (res) {
6066 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6067 		goto out_free;
6068 	}
6069 
6070 	connector_type = to_drm_connector_type(link->connector_signal);
6071 
6072 	res = drm_connector_init_with_ddc(
6073 			dm->ddev,
6074 			&aconnector->base,
6075 			&amdgpu_dm_connector_funcs,
6076 			connector_type,
6077 			&i2c->base);
6078 
6079 	if (res) {
6080 		DRM_ERROR("connector_init failed\n");
6081 		aconnector->connector_id = -1;
6082 		goto out_free;
6083 	}
6084 
6085 	drm_connector_helper_add(
6086 			&aconnector->base,
6087 			&amdgpu_dm_connector_helper_funcs);
6088 
6089 	amdgpu_dm_connector_init_helper(
6090 		dm,
6091 		aconnector,
6092 		connector_type,
6093 		link,
6094 		link_index);
6095 
6096 	drm_connector_attach_encoder(
6097 		&aconnector->base, &aencoder->base);
6098 
6099 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6100 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6101 		amdgpu_dm_initialize_dp_connector(dm, aconnector);
6102 
6103 out_free:
6104 	if (res) {
6105 		kfree(i2c);
6106 		aconnector->i2c = NULL;
6107 	}
6108 	return res;
6109 }
6110 
6111 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6112 {
6113 	switch (adev->mode_info.num_crtc) {
6114 	case 1:
6115 		return 0x1;
6116 	case 2:
6117 		return 0x3;
6118 	case 3:
6119 		return 0x7;
6120 	case 4:
6121 		return 0xf;
6122 	case 5:
6123 		return 0x1f;
6124 	case 6:
6125 	default:
6126 		return 0x3f;
6127 	}
6128 }
6129 
6130 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6131 				  struct amdgpu_encoder *aencoder,
6132 				  uint32_t link_index)
6133 {
6134 	struct amdgpu_device *adev = dev->dev_private;
6135 
6136 	int res = drm_encoder_init(dev,
6137 				   &aencoder->base,
6138 				   &amdgpu_dm_encoder_funcs,
6139 				   DRM_MODE_ENCODER_TMDS,
6140 				   NULL);
6141 
6142 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6143 
6144 	if (!res)
6145 		aencoder->encoder_id = link_index;
6146 	else
6147 		aencoder->encoder_id = -1;
6148 
6149 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6150 
6151 	return res;
6152 }
6153 
6154 static void manage_dm_interrupts(struct amdgpu_device *adev,
6155 				 struct amdgpu_crtc *acrtc,
6156 				 bool enable)
6157 {
6158 	/*
6159 	 * this is not correct translation but will work as soon as VBLANK
6160 	 * constant is the same as PFLIP
6161 	 */
6162 	int irq_type =
6163 		amdgpu_display_crtc_idx_to_irq_type(
6164 			adev,
6165 			acrtc->crtc_id);
6166 
6167 	if (enable) {
6168 		drm_crtc_vblank_on(&acrtc->base);
6169 		amdgpu_irq_get(
6170 			adev,
6171 			&adev->pageflip_irq,
6172 			irq_type);
6173 	} else {
6174 
6175 		amdgpu_irq_put(
6176 			adev,
6177 			&adev->pageflip_irq,
6178 			irq_type);
6179 		drm_crtc_vblank_off(&acrtc->base);
6180 	}
6181 }
6182 
6183 static bool
6184 is_scaling_state_different(const struct dm_connector_state *dm_state,
6185 			   const struct dm_connector_state *old_dm_state)
6186 {
6187 	if (dm_state->scaling != old_dm_state->scaling)
6188 		return true;
6189 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6190 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6191 			return true;
6192 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6193 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6194 			return true;
6195 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6196 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6197 		return true;
6198 	return false;
6199 }
6200 
6201 #ifdef CONFIG_DRM_AMD_DC_HDCP
6202 static bool is_content_protection_different(struct drm_connector_state *state,
6203 					    const struct drm_connector_state *old_state,
6204 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6205 {
6206 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6207 
6208 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6209 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6210 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6211 		return true;
6212 	}
6213 
6214 	/* CP is being re enabled, ignore this */
6215 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6216 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6217 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6218 		return false;
6219 	}
6220 
6221 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6222 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6223 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6224 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6225 
6226 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
6227 	 * hot-plug, headless s3, dpms
6228 	 */
6229 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6230 	    aconnector->dc_sink != NULL)
6231 		return true;
6232 
6233 	if (old_state->content_protection == state->content_protection)
6234 		return false;
6235 
6236 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6237 		return true;
6238 
6239 	return false;
6240 }
6241 
6242 #endif
6243 static void remove_stream(struct amdgpu_device *adev,
6244 			  struct amdgpu_crtc *acrtc,
6245 			  struct dc_stream_state *stream)
6246 {
6247 	/* this is the update mode case */
6248 
6249 	acrtc->otg_inst = -1;
6250 	acrtc->enabled = false;
6251 }
6252 
6253 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6254 			       struct dc_cursor_position *position)
6255 {
6256 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6257 	int x, y;
6258 	int xorigin = 0, yorigin = 0;
6259 
6260 	position->enable = false;
6261 	position->x = 0;
6262 	position->y = 0;
6263 
6264 	if (!crtc || !plane->state->fb)
6265 		return 0;
6266 
6267 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6268 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6269 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6270 			  __func__,
6271 			  plane->state->crtc_w,
6272 			  plane->state->crtc_h);
6273 		return -EINVAL;
6274 	}
6275 
6276 	x = plane->state->crtc_x;
6277 	y = plane->state->crtc_y;
6278 
6279 	if (x <= -amdgpu_crtc->max_cursor_width ||
6280 	    y <= -amdgpu_crtc->max_cursor_height)
6281 		return 0;
6282 
6283 	if (x < 0) {
6284 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6285 		x = 0;
6286 	}
6287 	if (y < 0) {
6288 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6289 		y = 0;
6290 	}
6291 	position->enable = true;
6292 	position->translate_by_source = true;
6293 	position->x = x;
6294 	position->y = y;
6295 	position->x_hotspot = xorigin;
6296 	position->y_hotspot = yorigin;
6297 
6298 	return 0;
6299 }
6300 
6301 static void handle_cursor_update(struct drm_plane *plane,
6302 				 struct drm_plane_state *old_plane_state)
6303 {
6304 	struct amdgpu_device *adev = plane->dev->dev_private;
6305 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6306 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6307 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6308 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6309 	uint64_t address = afb ? afb->address : 0;
6310 	struct dc_cursor_position position;
6311 	struct dc_cursor_attributes attributes;
6312 	int ret;
6313 
6314 	if (!plane->state->fb && !old_plane_state->fb)
6315 		return;
6316 
6317 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6318 			 __func__,
6319 			 amdgpu_crtc->crtc_id,
6320 			 plane->state->crtc_w,
6321 			 plane->state->crtc_h);
6322 
6323 	ret = get_cursor_position(plane, crtc, &position);
6324 	if (ret)
6325 		return;
6326 
6327 	if (!position.enable) {
6328 		/* turn off cursor */
6329 		if (crtc_state && crtc_state->stream) {
6330 			mutex_lock(&adev->dm.dc_lock);
6331 			dc_stream_set_cursor_position(crtc_state->stream,
6332 						      &position);
6333 			mutex_unlock(&adev->dm.dc_lock);
6334 		}
6335 		return;
6336 	}
6337 
6338 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6339 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6340 
6341 	memset(&attributes, 0, sizeof(attributes));
6342 	attributes.address.high_part = upper_32_bits(address);
6343 	attributes.address.low_part  = lower_32_bits(address);
6344 	attributes.width             = plane->state->crtc_w;
6345 	attributes.height            = plane->state->crtc_h;
6346 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6347 	attributes.rotation_angle    = 0;
6348 	attributes.attribute_flags.value = 0;
6349 
6350 	attributes.pitch = attributes.width;
6351 
6352 	if (crtc_state->stream) {
6353 		mutex_lock(&adev->dm.dc_lock);
6354 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6355 							 &attributes))
6356 			DRM_ERROR("DC failed to set cursor attributes\n");
6357 
6358 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6359 						   &position))
6360 			DRM_ERROR("DC failed to set cursor position\n");
6361 		mutex_unlock(&adev->dm.dc_lock);
6362 	}
6363 }
6364 
6365 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6366 {
6367 
6368 	assert_spin_locked(&acrtc->base.dev->event_lock);
6369 	WARN_ON(acrtc->event);
6370 
6371 	acrtc->event = acrtc->base.state->event;
6372 
6373 	/* Set the flip status */
6374 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6375 
6376 	/* Mark this event as consumed */
6377 	acrtc->base.state->event = NULL;
6378 
6379 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6380 						 acrtc->crtc_id);
6381 }
6382 
6383 static void update_freesync_state_on_stream(
6384 	struct amdgpu_display_manager *dm,
6385 	struct dm_crtc_state *new_crtc_state,
6386 	struct dc_stream_state *new_stream,
6387 	struct dc_plane_state *surface,
6388 	u32 flip_timestamp_in_us)
6389 {
6390 	struct mod_vrr_params vrr_params;
6391 	struct dc_info_packet vrr_infopacket = {0};
6392 	struct amdgpu_device *adev = dm->adev;
6393 	unsigned long flags;
6394 
6395 	if (!new_stream)
6396 		return;
6397 
6398 	/*
6399 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6400 	 * For now it's sufficient to just guard against these conditions.
6401 	 */
6402 
6403 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6404 		return;
6405 
6406 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6407 	vrr_params = new_crtc_state->vrr_params;
6408 
6409 	if (surface) {
6410 		mod_freesync_handle_preflip(
6411 			dm->freesync_module,
6412 			surface,
6413 			new_stream,
6414 			flip_timestamp_in_us,
6415 			&vrr_params);
6416 
6417 		if (adev->family < AMDGPU_FAMILY_AI &&
6418 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6419 			mod_freesync_handle_v_update(dm->freesync_module,
6420 						     new_stream, &vrr_params);
6421 
6422 			/* Need to call this before the frame ends. */
6423 			dc_stream_adjust_vmin_vmax(dm->dc,
6424 						   new_crtc_state->stream,
6425 						   &vrr_params.adjust);
6426 		}
6427 	}
6428 
6429 	mod_freesync_build_vrr_infopacket(
6430 		dm->freesync_module,
6431 		new_stream,
6432 		&vrr_params,
6433 		PACKET_TYPE_VRR,
6434 		TRANSFER_FUNC_UNKNOWN,
6435 		&vrr_infopacket);
6436 
6437 	new_crtc_state->freesync_timing_changed |=
6438 		(memcmp(&new_crtc_state->vrr_params.adjust,
6439 			&vrr_params.adjust,
6440 			sizeof(vrr_params.adjust)) != 0);
6441 
6442 	new_crtc_state->freesync_vrr_info_changed |=
6443 		(memcmp(&new_crtc_state->vrr_infopacket,
6444 			&vrr_infopacket,
6445 			sizeof(vrr_infopacket)) != 0);
6446 
6447 	new_crtc_state->vrr_params = vrr_params;
6448 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6449 
6450 	new_stream->adjust = new_crtc_state->vrr_params.adjust;
6451 	new_stream->vrr_infopacket = vrr_infopacket;
6452 
6453 	if (new_crtc_state->freesync_vrr_info_changed)
6454 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6455 			      new_crtc_state->base.crtc->base.id,
6456 			      (int)new_crtc_state->base.vrr_enabled,
6457 			      (int)vrr_params.state);
6458 
6459 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6460 }
6461 
6462 static void pre_update_freesync_state_on_stream(
6463 	struct amdgpu_display_manager *dm,
6464 	struct dm_crtc_state *new_crtc_state)
6465 {
6466 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6467 	struct mod_vrr_params vrr_params;
6468 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6469 	struct amdgpu_device *adev = dm->adev;
6470 	unsigned long flags;
6471 
6472 	if (!new_stream)
6473 		return;
6474 
6475 	/*
6476 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6477 	 * For now it's sufficient to just guard against these conditions.
6478 	 */
6479 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6480 		return;
6481 
6482 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
6483 	vrr_params = new_crtc_state->vrr_params;
6484 
6485 	if (new_crtc_state->vrr_supported &&
6486 	    config.min_refresh_in_uhz &&
6487 	    config.max_refresh_in_uhz) {
6488 		config.state = new_crtc_state->base.vrr_enabled ?
6489 			VRR_STATE_ACTIVE_VARIABLE :
6490 			VRR_STATE_INACTIVE;
6491 	} else {
6492 		config.state = VRR_STATE_UNSUPPORTED;
6493 	}
6494 
6495 	mod_freesync_build_vrr_params(dm->freesync_module,
6496 				      new_stream,
6497 				      &config, &vrr_params);
6498 
6499 	new_crtc_state->freesync_timing_changed |=
6500 		(memcmp(&new_crtc_state->vrr_params.adjust,
6501 			&vrr_params.adjust,
6502 			sizeof(vrr_params.adjust)) != 0);
6503 
6504 	new_crtc_state->vrr_params = vrr_params;
6505 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6506 }
6507 
6508 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6509 					    struct dm_crtc_state *new_state)
6510 {
6511 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6512 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6513 
6514 	if (!old_vrr_active && new_vrr_active) {
6515 		/* Transition VRR inactive -> active:
6516 		 * While VRR is active, we must not disable vblank irq, as a
6517 		 * reenable after disable would compute bogus vblank/pflip
6518 		 * timestamps if it likely happened inside display front-porch.
6519 		 *
6520 		 * We also need vupdate irq for the actual core vblank handling
6521 		 * at end of vblank.
6522 		 */
6523 		dm_set_vupdate_irq(new_state->base.crtc, true);
6524 		drm_crtc_vblank_get(new_state->base.crtc);
6525 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6526 				 __func__, new_state->base.crtc->base.id);
6527 	} else if (old_vrr_active && !new_vrr_active) {
6528 		/* Transition VRR active -> inactive:
6529 		 * Allow vblank irq disable again for fixed refresh rate.
6530 		 */
6531 		dm_set_vupdate_irq(new_state->base.crtc, false);
6532 		drm_crtc_vblank_put(new_state->base.crtc);
6533 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6534 				 __func__, new_state->base.crtc->base.id);
6535 	}
6536 }
6537 
6538 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6539 {
6540 	struct drm_plane *plane;
6541 	struct drm_plane_state *old_plane_state, *new_plane_state;
6542 	int i;
6543 
6544 	/*
6545 	 * TODO: Make this per-stream so we don't issue redundant updates for
6546 	 * commits with multiple streams.
6547 	 */
6548 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6549 				       new_plane_state, i)
6550 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6551 			handle_cursor_update(plane, old_plane_state);
6552 }
6553 
6554 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6555 				    struct dc_state *dc_state,
6556 				    struct drm_device *dev,
6557 				    struct amdgpu_display_manager *dm,
6558 				    struct drm_crtc *pcrtc,
6559 				    bool wait_for_vblank)
6560 {
6561 	uint32_t i;
6562 	uint64_t timestamp_ns;
6563 	struct drm_plane *plane;
6564 	struct drm_plane_state *old_plane_state, *new_plane_state;
6565 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6566 	struct drm_crtc_state *new_pcrtc_state =
6567 			drm_atomic_get_new_crtc_state(state, pcrtc);
6568 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6569 	struct dm_crtc_state *dm_old_crtc_state =
6570 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6571 	int planes_count = 0, vpos, hpos;
6572 	long r;
6573 	unsigned long flags;
6574 	struct amdgpu_bo *abo;
6575 	uint64_t tiling_flags;
6576 	uint32_t target_vblank, last_flip_vblank;
6577 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6578 	bool pflip_present = false;
6579 	struct {
6580 		struct dc_surface_update surface_updates[MAX_SURFACES];
6581 		struct dc_plane_info plane_infos[MAX_SURFACES];
6582 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
6583 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6584 		struct dc_stream_update stream_update;
6585 	} *bundle;
6586 
6587 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6588 
6589 	if (!bundle) {
6590 		dm_error("Failed to allocate update bundle\n");
6591 		goto cleanup;
6592 	}
6593 
6594 	/*
6595 	 * Disable the cursor first if we're disabling all the planes.
6596 	 * It'll remain on the screen after the planes are re-enabled
6597 	 * if we don't.
6598 	 */
6599 	if (acrtc_state->active_planes == 0)
6600 		amdgpu_dm_commit_cursors(state);
6601 
6602 	/* update planes when needed */
6603 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6604 		struct drm_crtc *crtc = new_plane_state->crtc;
6605 		struct drm_crtc_state *new_crtc_state;
6606 		struct drm_framebuffer *fb = new_plane_state->fb;
6607 		bool plane_needs_flip;
6608 		struct dc_plane_state *dc_plane;
6609 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6610 
6611 		/* Cursor plane is handled after stream updates */
6612 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6613 			continue;
6614 
6615 		if (!fb || !crtc || pcrtc != crtc)
6616 			continue;
6617 
6618 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6619 		if (!new_crtc_state->active)
6620 			continue;
6621 
6622 		dc_plane = dm_new_plane_state->dc_state;
6623 
6624 		bundle->surface_updates[planes_count].surface = dc_plane;
6625 		if (new_pcrtc_state->color_mgmt_changed) {
6626 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6627 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6628 		}
6629 
6630 		fill_dc_scaling_info(new_plane_state,
6631 				     &bundle->scaling_infos[planes_count]);
6632 
6633 		bundle->surface_updates[planes_count].scaling_info =
6634 			&bundle->scaling_infos[planes_count];
6635 
6636 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6637 
6638 		pflip_present = pflip_present || plane_needs_flip;
6639 
6640 		if (!plane_needs_flip) {
6641 			planes_count += 1;
6642 			continue;
6643 		}
6644 
6645 		abo = gem_to_amdgpu_bo(fb->obj[0]);
6646 
6647 		/*
6648 		 * Wait for all fences on this FB. Do limited wait to avoid
6649 		 * deadlock during GPU reset when this fence will not signal
6650 		 * but we hold reservation lock for the BO.
6651 		 */
6652 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6653 							false,
6654 							msecs_to_jiffies(5000));
6655 		if (unlikely(r <= 0))
6656 			DRM_ERROR("Waiting for fences timed out!");
6657 
6658 		/*
6659 		 * TODO This might fail and hence better not used, wait
6660 		 * explicitly on fences instead
6661 		 * and in general should be called for
6662 		 * blocking commit to as per framework helpers
6663 		 */
6664 		r = amdgpu_bo_reserve(abo, true);
6665 		if (unlikely(r != 0))
6666 			DRM_ERROR("failed to reserve buffer before flip\n");
6667 
6668 		amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6669 
6670 		amdgpu_bo_unreserve(abo);
6671 
6672 		fill_dc_plane_info_and_addr(
6673 			dm->adev, new_plane_state, tiling_flags,
6674 			&bundle->plane_infos[planes_count],
6675 			&bundle->flip_addrs[planes_count].address);
6676 
6677 		bundle->surface_updates[planes_count].plane_info =
6678 			&bundle->plane_infos[planes_count];
6679 
6680 		/*
6681 		 * Only allow immediate flips for fast updates that don't
6682 		 * change FB pitch, DCC state, rotation or mirroing.
6683 		 */
6684 		bundle->flip_addrs[planes_count].flip_immediate =
6685 			crtc->state->async_flip &&
6686 			acrtc_state->update_type == UPDATE_TYPE_FAST;
6687 
6688 		timestamp_ns = ktime_get_ns();
6689 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6690 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6691 		bundle->surface_updates[planes_count].surface = dc_plane;
6692 
6693 		if (!bundle->surface_updates[planes_count].surface) {
6694 			DRM_ERROR("No surface for CRTC: id=%d\n",
6695 					acrtc_attach->crtc_id);
6696 			continue;
6697 		}
6698 
6699 		if (plane == pcrtc->primary)
6700 			update_freesync_state_on_stream(
6701 				dm,
6702 				acrtc_state,
6703 				acrtc_state->stream,
6704 				dc_plane,
6705 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6706 
6707 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6708 				 __func__,
6709 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6710 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6711 
6712 		planes_count += 1;
6713 
6714 	}
6715 
6716 	if (pflip_present) {
6717 		if (!vrr_active) {
6718 			/* Use old throttling in non-vrr fixed refresh rate mode
6719 			 * to keep flip scheduling based on target vblank counts
6720 			 * working in a backwards compatible way, e.g., for
6721 			 * clients using the GLX_OML_sync_control extension or
6722 			 * DRI3/Present extension with defined target_msc.
6723 			 */
6724 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
6725 		}
6726 		else {
6727 			/* For variable refresh rate mode only:
6728 			 * Get vblank of last completed flip to avoid > 1 vrr
6729 			 * flips per video frame by use of throttling, but allow
6730 			 * flip programming anywhere in the possibly large
6731 			 * variable vrr vblank interval for fine-grained flip
6732 			 * timing control and more opportunity to avoid stutter
6733 			 * on late submission of flips.
6734 			 */
6735 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6736 			last_flip_vblank = acrtc_attach->last_flip_vblank;
6737 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6738 		}
6739 
6740 		target_vblank = last_flip_vblank + wait_for_vblank;
6741 
6742 		/*
6743 		 * Wait until we're out of the vertical blank period before the one
6744 		 * targeted by the flip
6745 		 */
6746 		while ((acrtc_attach->enabled &&
6747 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6748 							    0, &vpos, &hpos, NULL,
6749 							    NULL, &pcrtc->hwmode)
6750 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6751 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6752 			(int)(target_vblank -
6753 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6754 			usleep_range(1000, 1100);
6755 		}
6756 
6757 		if (acrtc_attach->base.state->event) {
6758 			drm_crtc_vblank_get(pcrtc);
6759 
6760 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6761 
6762 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6763 			prepare_flip_isr(acrtc_attach);
6764 
6765 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6766 		}
6767 
6768 		if (acrtc_state->stream) {
6769 			if (acrtc_state->freesync_vrr_info_changed)
6770 				bundle->stream_update.vrr_infopacket =
6771 					&acrtc_state->stream->vrr_infopacket;
6772 		}
6773 	}
6774 
6775 	/* Update the planes if changed or disable if we don't have any. */
6776 	if ((planes_count || acrtc_state->active_planes == 0) &&
6777 		acrtc_state->stream) {
6778 		bundle->stream_update.stream = acrtc_state->stream;
6779 		if (new_pcrtc_state->mode_changed) {
6780 			bundle->stream_update.src = acrtc_state->stream->src;
6781 			bundle->stream_update.dst = acrtc_state->stream->dst;
6782 		}
6783 
6784 		if (new_pcrtc_state->color_mgmt_changed) {
6785 			/*
6786 			 * TODO: This isn't fully correct since we've actually
6787 			 * already modified the stream in place.
6788 			 */
6789 			bundle->stream_update.gamut_remap =
6790 				&acrtc_state->stream->gamut_remap_matrix;
6791 			bundle->stream_update.output_csc_transform =
6792 				&acrtc_state->stream->csc_color_matrix;
6793 			bundle->stream_update.out_transfer_func =
6794 				acrtc_state->stream->out_transfer_func;
6795 		}
6796 
6797 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
6798 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6799 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
6800 
6801 		/*
6802 		 * If FreeSync state on the stream has changed then we need to
6803 		 * re-adjust the min/max bounds now that DC doesn't handle this
6804 		 * as part of commit.
6805 		 */
6806 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6807 		    amdgpu_dm_vrr_active(acrtc_state)) {
6808 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6809 			dc_stream_adjust_vmin_vmax(
6810 				dm->dc, acrtc_state->stream,
6811 				&acrtc_state->vrr_params.adjust);
6812 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6813 		}
6814 		mutex_lock(&dm->dc_lock);
6815 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6816 				acrtc_state->stream->link->psr_allow_active)
6817 			amdgpu_dm_psr_disable(acrtc_state->stream);
6818 
6819 		dc_commit_updates_for_stream(dm->dc,
6820 						     bundle->surface_updates,
6821 						     planes_count,
6822 						     acrtc_state->stream,
6823 						     &bundle->stream_update,
6824 						     dc_state);
6825 
6826 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6827 						acrtc_state->stream->psr_version &&
6828 						!acrtc_state->stream->link->psr_feature_enabled)
6829 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
6830 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6831 						acrtc_state->stream->link->psr_feature_enabled &&
6832 						!acrtc_state->stream->link->psr_allow_active) {
6833 			amdgpu_dm_psr_enable(acrtc_state->stream);
6834 		}
6835 
6836 		mutex_unlock(&dm->dc_lock);
6837 	}
6838 
6839 	/*
6840 	 * Update cursor state *after* programming all the planes.
6841 	 * This avoids redundant programming in the case where we're going
6842 	 * to be disabling a single plane - those pipes are being disabled.
6843 	 */
6844 	if (acrtc_state->active_planes)
6845 		amdgpu_dm_commit_cursors(state);
6846 
6847 cleanup:
6848 	kfree(bundle);
6849 }
6850 
6851 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6852 				   struct drm_atomic_state *state)
6853 {
6854 	struct amdgpu_device *adev = dev->dev_private;
6855 	struct amdgpu_dm_connector *aconnector;
6856 	struct drm_connector *connector;
6857 	struct drm_connector_state *old_con_state, *new_con_state;
6858 	struct drm_crtc_state *new_crtc_state;
6859 	struct dm_crtc_state *new_dm_crtc_state;
6860 	const struct dc_stream_status *status;
6861 	int i, inst;
6862 
6863 	/* Notify device removals. */
6864 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6865 		if (old_con_state->crtc != new_con_state->crtc) {
6866 			/* CRTC changes require notification. */
6867 			goto notify;
6868 		}
6869 
6870 		if (!new_con_state->crtc)
6871 			continue;
6872 
6873 		new_crtc_state = drm_atomic_get_new_crtc_state(
6874 			state, new_con_state->crtc);
6875 
6876 		if (!new_crtc_state)
6877 			continue;
6878 
6879 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6880 			continue;
6881 
6882 	notify:
6883 		aconnector = to_amdgpu_dm_connector(connector);
6884 
6885 		mutex_lock(&adev->dm.audio_lock);
6886 		inst = aconnector->audio_inst;
6887 		aconnector->audio_inst = -1;
6888 		mutex_unlock(&adev->dm.audio_lock);
6889 
6890 		amdgpu_dm_audio_eld_notify(adev, inst);
6891 	}
6892 
6893 	/* Notify audio device additions. */
6894 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6895 		if (!new_con_state->crtc)
6896 			continue;
6897 
6898 		new_crtc_state = drm_atomic_get_new_crtc_state(
6899 			state, new_con_state->crtc);
6900 
6901 		if (!new_crtc_state)
6902 			continue;
6903 
6904 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6905 			continue;
6906 
6907 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6908 		if (!new_dm_crtc_state->stream)
6909 			continue;
6910 
6911 		status = dc_stream_get_status(new_dm_crtc_state->stream);
6912 		if (!status)
6913 			continue;
6914 
6915 		aconnector = to_amdgpu_dm_connector(connector);
6916 
6917 		mutex_lock(&adev->dm.audio_lock);
6918 		inst = status->audio_inst;
6919 		aconnector->audio_inst = inst;
6920 		mutex_unlock(&adev->dm.audio_lock);
6921 
6922 		amdgpu_dm_audio_eld_notify(adev, inst);
6923 	}
6924 }
6925 
6926 /*
6927  * Enable interrupts on CRTCs that are newly active, undergone
6928  * a modeset, or have active planes again.
6929  *
6930  * Done in two passes, based on the for_modeset flag:
6931  * Pass 1: For CRTCs going through modeset
6932  * Pass 2: For CRTCs going from 0 to n active planes
6933  *
6934  * Interrupts can only be enabled after the planes are programmed,
6935  * so this requires a two-pass approach since we don't want to
6936  * just defer the interrupts until after commit planes every time.
6937  */
6938 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6939 					     struct drm_atomic_state *state,
6940 					     bool for_modeset)
6941 {
6942 	struct amdgpu_device *adev = dev->dev_private;
6943 	struct drm_crtc *crtc;
6944 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6945 	int i;
6946 #ifdef CONFIG_DEBUG_FS
6947 	enum amdgpu_dm_pipe_crc_source source;
6948 #endif
6949 
6950 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6951 				      new_crtc_state, i) {
6952 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6953 		struct dm_crtc_state *dm_new_crtc_state =
6954 			to_dm_crtc_state(new_crtc_state);
6955 		struct dm_crtc_state *dm_old_crtc_state =
6956 			to_dm_crtc_state(old_crtc_state);
6957 		bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6958 		bool run_pass;
6959 
6960 		run_pass = (for_modeset && modeset) ||
6961 			   (!for_modeset && !modeset &&
6962 			    !dm_old_crtc_state->interrupts_enabled);
6963 
6964 		if (!run_pass)
6965 			continue;
6966 
6967 		if (!dm_new_crtc_state->interrupts_enabled)
6968 			continue;
6969 
6970 		manage_dm_interrupts(adev, acrtc, true);
6971 
6972 #ifdef CONFIG_DEBUG_FS
6973 		/* The stream has changed so CRC capture needs to re-enabled. */
6974 		source = dm_new_crtc_state->crc_src;
6975 		if (amdgpu_dm_is_valid_crc_source(source)) {
6976 			amdgpu_dm_crtc_configure_crc_source(
6977 				crtc, dm_new_crtc_state,
6978 				dm_new_crtc_state->crc_src);
6979 		}
6980 #endif
6981 	}
6982 }
6983 
6984 /*
6985  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
6986  * @crtc_state: the DRM CRTC state
6987  * @stream_state: the DC stream state.
6988  *
6989  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
6990  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
6991  */
6992 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
6993 						struct dc_stream_state *stream_state)
6994 {
6995 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
6996 }
6997 
6998 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
6999 				   struct drm_atomic_state *state,
7000 				   bool nonblock)
7001 {
7002 	struct drm_crtc *crtc;
7003 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7004 	struct amdgpu_device *adev = dev->dev_private;
7005 	int i;
7006 
7007 	/*
7008 	 * We evade vblank and pflip interrupts on CRTCs that are undergoing
7009 	 * a modeset, being disabled, or have no active planes.
7010 	 *
7011 	 * It's done in atomic commit rather than commit tail for now since
7012 	 * some of these interrupt handlers access the current CRTC state and
7013 	 * potentially the stream pointer itself.
7014 	 *
7015 	 * Since the atomic state is swapped within atomic commit and not within
7016 	 * commit tail this would leave to new state (that hasn't been committed yet)
7017 	 * being accesssed from within the handlers.
7018 	 *
7019 	 * TODO: Fix this so we can do this in commit tail and not have to block
7020 	 * in atomic check.
7021 	 */
7022 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7023 		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7024 		struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7025 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7026 
7027 		if (dm_old_crtc_state->interrupts_enabled &&
7028 		    (!dm_new_crtc_state->interrupts_enabled ||
7029 		     drm_atomic_crtc_needs_modeset(new_crtc_state)))
7030 			manage_dm_interrupts(adev, acrtc, false);
7031 	}
7032 	/*
7033 	 * Add check here for SoC's that support hardware cursor plane, to
7034 	 * unset legacy_cursor_update
7035 	 */
7036 
7037 	return drm_atomic_helper_commit(dev, state, nonblock);
7038 
7039 	/*TODO Handle EINTR, reenable IRQ*/
7040 }
7041 
7042 /**
7043  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7044  * @state: The atomic state to commit
7045  *
7046  * This will tell DC to commit the constructed DC state from atomic_check,
7047  * programming the hardware. Any failures here implies a hardware failure, since
7048  * atomic check should have filtered anything non-kosher.
7049  */
7050 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7051 {
7052 	struct drm_device *dev = state->dev;
7053 	struct amdgpu_device *adev = dev->dev_private;
7054 	struct amdgpu_display_manager *dm = &adev->dm;
7055 	struct dm_atomic_state *dm_state;
7056 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7057 	uint32_t i, j;
7058 	struct drm_crtc *crtc;
7059 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7060 	unsigned long flags;
7061 	bool wait_for_vblank = true;
7062 	struct drm_connector *connector;
7063 	struct drm_connector_state *old_con_state, *new_con_state;
7064 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7065 	int crtc_disable_count = 0;
7066 
7067 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7068 
7069 	dm_state = dm_atomic_get_new_state(state);
7070 	if (dm_state && dm_state->context) {
7071 		dc_state = dm_state->context;
7072 	} else {
7073 		/* No state changes, retain current state. */
7074 		dc_state_temp = dc_create_state(dm->dc);
7075 		ASSERT(dc_state_temp);
7076 		dc_state = dc_state_temp;
7077 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7078 	}
7079 
7080 	/* update changed items */
7081 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7082 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7083 
7084 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7085 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7086 
7087 		DRM_DEBUG_DRIVER(
7088 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7089 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7090 			"connectors_changed:%d\n",
7091 			acrtc->crtc_id,
7092 			new_crtc_state->enable,
7093 			new_crtc_state->active,
7094 			new_crtc_state->planes_changed,
7095 			new_crtc_state->mode_changed,
7096 			new_crtc_state->active_changed,
7097 			new_crtc_state->connectors_changed);
7098 
7099 		/* Copy all transient state flags into dc state */
7100 		if (dm_new_crtc_state->stream) {
7101 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7102 							    dm_new_crtc_state->stream);
7103 		}
7104 
7105 		/* handles headless hotplug case, updating new_state and
7106 		 * aconnector as needed
7107 		 */
7108 
7109 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7110 
7111 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7112 
7113 			if (!dm_new_crtc_state->stream) {
7114 				/*
7115 				 * this could happen because of issues with
7116 				 * userspace notifications delivery.
7117 				 * In this case userspace tries to set mode on
7118 				 * display which is disconnected in fact.
7119 				 * dc_sink is NULL in this case on aconnector.
7120 				 * We expect reset mode will come soon.
7121 				 *
7122 				 * This can also happen when unplug is done
7123 				 * during resume sequence ended
7124 				 *
7125 				 * In this case, we want to pretend we still
7126 				 * have a sink to keep the pipe running so that
7127 				 * hw state is consistent with the sw state
7128 				 */
7129 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7130 						__func__, acrtc->base.base.id);
7131 				continue;
7132 			}
7133 
7134 			if (dm_old_crtc_state->stream)
7135 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7136 
7137 			pm_runtime_get_noresume(dev->dev);
7138 
7139 			acrtc->enabled = true;
7140 			acrtc->hw_mode = new_crtc_state->mode;
7141 			crtc->hwmode = new_crtc_state->mode;
7142 		} else if (modereset_required(new_crtc_state)) {
7143 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7144 			/* i.e. reset mode */
7145 			if (dm_old_crtc_state->stream) {
7146 				if (dm_old_crtc_state->stream->link->psr_allow_active)
7147 					amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7148 
7149 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7150 			}
7151 		}
7152 	} /* for_each_crtc_in_state() */
7153 
7154 	if (dc_state) {
7155 		dm_enable_per_frame_crtc_master_sync(dc_state);
7156 		mutex_lock(&dm->dc_lock);
7157 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7158 		mutex_unlock(&dm->dc_lock);
7159 	}
7160 
7161 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7162 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7163 
7164 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7165 
7166 		if (dm_new_crtc_state->stream != NULL) {
7167 			const struct dc_stream_status *status =
7168 					dc_stream_get_status(dm_new_crtc_state->stream);
7169 
7170 			if (!status)
7171 				status = dc_stream_get_status_from_state(dc_state,
7172 									 dm_new_crtc_state->stream);
7173 
7174 			if (!status)
7175 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7176 			else
7177 				acrtc->otg_inst = status->primary_otg_inst;
7178 		}
7179 	}
7180 #ifdef CONFIG_DRM_AMD_DC_HDCP
7181 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7182 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7183 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7184 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7185 
7186 		new_crtc_state = NULL;
7187 
7188 		if (acrtc)
7189 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7190 
7191 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7192 
7193 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7194 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7195 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7196 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7197 			continue;
7198 		}
7199 
7200 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7201 			hdcp_update_display(
7202 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7203 				new_con_state->hdcp_content_type,
7204 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7205 													 : false);
7206 	}
7207 #endif
7208 
7209 	/* Handle connector state changes */
7210 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7211 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7212 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7213 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7214 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7215 		struct dc_stream_update stream_update;
7216 		struct dc_info_packet hdr_packet;
7217 		struct dc_stream_status *status = NULL;
7218 		bool abm_changed, hdr_changed, scaling_changed;
7219 
7220 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7221 		memset(&stream_update, 0, sizeof(stream_update));
7222 
7223 		if (acrtc) {
7224 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7225 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7226 		}
7227 
7228 		/* Skip any modesets/resets */
7229 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7230 			continue;
7231 
7232 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7233 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7234 
7235 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7236 							     dm_old_con_state);
7237 
7238 		abm_changed = dm_new_crtc_state->abm_level !=
7239 			      dm_old_crtc_state->abm_level;
7240 
7241 		hdr_changed =
7242 			is_hdr_metadata_different(old_con_state, new_con_state);
7243 
7244 		if (!scaling_changed && !abm_changed && !hdr_changed)
7245 			continue;
7246 
7247 		stream_update.stream = dm_new_crtc_state->stream;
7248 		if (scaling_changed) {
7249 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7250 					dm_new_con_state, dm_new_crtc_state->stream);
7251 
7252 			stream_update.src = dm_new_crtc_state->stream->src;
7253 			stream_update.dst = dm_new_crtc_state->stream->dst;
7254 		}
7255 
7256 		if (abm_changed) {
7257 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7258 
7259 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7260 		}
7261 
7262 		if (hdr_changed) {
7263 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7264 			stream_update.hdr_static_metadata = &hdr_packet;
7265 		}
7266 
7267 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7268 		WARN_ON(!status);
7269 		WARN_ON(!status->plane_count);
7270 
7271 		/*
7272 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7273 		 * Here we create an empty update on each plane.
7274 		 * To fix this, DC should permit updating only stream properties.
7275 		 */
7276 		for (j = 0; j < status->plane_count; j++)
7277 			dummy_updates[j].surface = status->plane_states[0];
7278 
7279 
7280 		mutex_lock(&dm->dc_lock);
7281 		dc_commit_updates_for_stream(dm->dc,
7282 						     dummy_updates,
7283 						     status->plane_count,
7284 						     dm_new_crtc_state->stream,
7285 						     &stream_update,
7286 						     dc_state);
7287 		mutex_unlock(&dm->dc_lock);
7288 	}
7289 
7290 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7291 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7292 				      new_crtc_state, i) {
7293 		if (old_crtc_state->active && !new_crtc_state->active)
7294 			crtc_disable_count++;
7295 
7296 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7297 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7298 
7299 		/* Update freesync active state. */
7300 		pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7301 
7302 		/* Handle vrr on->off / off->on transitions */
7303 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7304 						dm_new_crtc_state);
7305 	}
7306 
7307 	/* Enable interrupts for CRTCs going through a modeset. */
7308 	amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7309 
7310 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7311 		if (new_crtc_state->async_flip)
7312 			wait_for_vblank = false;
7313 
7314 	/* update planes when needed per crtc*/
7315 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7316 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7317 
7318 		if (dm_new_crtc_state->stream)
7319 			amdgpu_dm_commit_planes(state, dc_state, dev,
7320 						dm, crtc, wait_for_vblank);
7321 	}
7322 
7323 	/* Enable interrupts for CRTCs going from 0 to n active planes. */
7324 	amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7325 
7326 	/* Update audio instances for each connector. */
7327 	amdgpu_dm_commit_audio(dev, state);
7328 
7329 	/*
7330 	 * send vblank event on all events not handled in flip and
7331 	 * mark consumed event for drm_atomic_helper_commit_hw_done
7332 	 */
7333 	spin_lock_irqsave(&adev->ddev->event_lock, flags);
7334 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7335 
7336 		if (new_crtc_state->event)
7337 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7338 
7339 		new_crtc_state->event = NULL;
7340 	}
7341 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7342 
7343 	/* Signal HW programming completion */
7344 	drm_atomic_helper_commit_hw_done(state);
7345 
7346 	if (wait_for_vblank)
7347 		drm_atomic_helper_wait_for_flip_done(dev, state);
7348 
7349 	drm_atomic_helper_cleanup_planes(dev, state);
7350 
7351 	/*
7352 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7353 	 * so we can put the GPU into runtime suspend if we're not driving any
7354 	 * displays anymore
7355 	 */
7356 	for (i = 0; i < crtc_disable_count; i++)
7357 		pm_runtime_put_autosuspend(dev->dev);
7358 	pm_runtime_mark_last_busy(dev->dev);
7359 
7360 	if (dc_state_temp)
7361 		dc_release_state(dc_state_temp);
7362 }
7363 
7364 
7365 static int dm_force_atomic_commit(struct drm_connector *connector)
7366 {
7367 	int ret = 0;
7368 	struct drm_device *ddev = connector->dev;
7369 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7370 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7371 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7372 	struct drm_connector_state *conn_state;
7373 	struct drm_crtc_state *crtc_state;
7374 	struct drm_plane_state *plane_state;
7375 
7376 	if (!state)
7377 		return -ENOMEM;
7378 
7379 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7380 
7381 	/* Construct an atomic state to restore previous display setting */
7382 
7383 	/*
7384 	 * Attach connectors to drm_atomic_state
7385 	 */
7386 	conn_state = drm_atomic_get_connector_state(state, connector);
7387 
7388 	ret = PTR_ERR_OR_ZERO(conn_state);
7389 	if (ret)
7390 		goto err;
7391 
7392 	/* Attach crtc to drm_atomic_state*/
7393 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7394 
7395 	ret = PTR_ERR_OR_ZERO(crtc_state);
7396 	if (ret)
7397 		goto err;
7398 
7399 	/* force a restore */
7400 	crtc_state->mode_changed = true;
7401 
7402 	/* Attach plane to drm_atomic_state */
7403 	plane_state = drm_atomic_get_plane_state(state, plane);
7404 
7405 	ret = PTR_ERR_OR_ZERO(plane_state);
7406 	if (ret)
7407 		goto err;
7408 
7409 
7410 	/* Call commit internally with the state we just constructed */
7411 	ret = drm_atomic_commit(state);
7412 	if (!ret)
7413 		return 0;
7414 
7415 err:
7416 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7417 	drm_atomic_state_put(state);
7418 
7419 	return ret;
7420 }
7421 
7422 /*
7423  * This function handles all cases when set mode does not come upon hotplug.
7424  * This includes when a display is unplugged then plugged back into the
7425  * same port and when running without usermode desktop manager supprot
7426  */
7427 void dm_restore_drm_connector_state(struct drm_device *dev,
7428 				    struct drm_connector *connector)
7429 {
7430 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7431 	struct amdgpu_crtc *disconnected_acrtc;
7432 	struct dm_crtc_state *acrtc_state;
7433 
7434 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7435 		return;
7436 
7437 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7438 	if (!disconnected_acrtc)
7439 		return;
7440 
7441 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7442 	if (!acrtc_state->stream)
7443 		return;
7444 
7445 	/*
7446 	 * If the previous sink is not released and different from the current,
7447 	 * we deduce we are in a state where we can not rely on usermode call
7448 	 * to turn on the display, so we do it here
7449 	 */
7450 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7451 		dm_force_atomic_commit(&aconnector->base);
7452 }
7453 
7454 /*
7455  * Grabs all modesetting locks to serialize against any blocking commits,
7456  * Waits for completion of all non blocking commits.
7457  */
7458 static int do_aquire_global_lock(struct drm_device *dev,
7459 				 struct drm_atomic_state *state)
7460 {
7461 	struct drm_crtc *crtc;
7462 	struct drm_crtc_commit *commit;
7463 	long ret;
7464 
7465 	/*
7466 	 * Adding all modeset locks to aquire_ctx will
7467 	 * ensure that when the framework release it the
7468 	 * extra locks we are locking here will get released to
7469 	 */
7470 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7471 	if (ret)
7472 		return ret;
7473 
7474 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7475 		spin_lock(&crtc->commit_lock);
7476 		commit = list_first_entry_or_null(&crtc->commit_list,
7477 				struct drm_crtc_commit, commit_entry);
7478 		if (commit)
7479 			drm_crtc_commit_get(commit);
7480 		spin_unlock(&crtc->commit_lock);
7481 
7482 		if (!commit)
7483 			continue;
7484 
7485 		/*
7486 		 * Make sure all pending HW programming completed and
7487 		 * page flips done
7488 		 */
7489 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7490 
7491 		if (ret > 0)
7492 			ret = wait_for_completion_interruptible_timeout(
7493 					&commit->flip_done, 10*HZ);
7494 
7495 		if (ret == 0)
7496 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7497 				  "timed out\n", crtc->base.id, crtc->name);
7498 
7499 		drm_crtc_commit_put(commit);
7500 	}
7501 
7502 	return ret < 0 ? ret : 0;
7503 }
7504 
7505 static void get_freesync_config_for_crtc(
7506 	struct dm_crtc_state *new_crtc_state,
7507 	struct dm_connector_state *new_con_state)
7508 {
7509 	struct mod_freesync_config config = {0};
7510 	struct amdgpu_dm_connector *aconnector =
7511 			to_amdgpu_dm_connector(new_con_state->base.connector);
7512 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7513 	int vrefresh = drm_mode_vrefresh(mode);
7514 
7515 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7516 					vrefresh >= aconnector->min_vfreq &&
7517 					vrefresh <= aconnector->max_vfreq;
7518 
7519 	if (new_crtc_state->vrr_supported) {
7520 		new_crtc_state->stream->ignore_msa_timing_param = true;
7521 		config.state = new_crtc_state->base.vrr_enabled ?
7522 				VRR_STATE_ACTIVE_VARIABLE :
7523 				VRR_STATE_INACTIVE;
7524 		config.min_refresh_in_uhz =
7525 				aconnector->min_vfreq * 1000000;
7526 		config.max_refresh_in_uhz =
7527 				aconnector->max_vfreq * 1000000;
7528 		config.vsif_supported = true;
7529 		config.btr = true;
7530 	}
7531 
7532 	new_crtc_state->freesync_config = config;
7533 }
7534 
7535 static void reset_freesync_config_for_crtc(
7536 	struct dm_crtc_state *new_crtc_state)
7537 {
7538 	new_crtc_state->vrr_supported = false;
7539 
7540 	memset(&new_crtc_state->vrr_params, 0,
7541 	       sizeof(new_crtc_state->vrr_params));
7542 	memset(&new_crtc_state->vrr_infopacket, 0,
7543 	       sizeof(new_crtc_state->vrr_infopacket));
7544 }
7545 
7546 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7547 				struct drm_atomic_state *state,
7548 				struct drm_crtc *crtc,
7549 				struct drm_crtc_state *old_crtc_state,
7550 				struct drm_crtc_state *new_crtc_state,
7551 				bool enable,
7552 				bool *lock_and_validation_needed)
7553 {
7554 	struct dm_atomic_state *dm_state = NULL;
7555 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7556 	struct dc_stream_state *new_stream;
7557 	int ret = 0;
7558 
7559 	/*
7560 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
7561 	 * update changed items
7562 	 */
7563 	struct amdgpu_crtc *acrtc = NULL;
7564 	struct amdgpu_dm_connector *aconnector = NULL;
7565 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7566 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7567 
7568 	new_stream = NULL;
7569 
7570 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7571 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7572 	acrtc = to_amdgpu_crtc(crtc);
7573 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7574 
7575 	/* TODO This hack should go away */
7576 	if (aconnector && enable) {
7577 		/* Make sure fake sink is created in plug-in scenario */
7578 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7579 							    &aconnector->base);
7580 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7581 							    &aconnector->base);
7582 
7583 		if (IS_ERR(drm_new_conn_state)) {
7584 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7585 			goto fail;
7586 		}
7587 
7588 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7589 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7590 
7591 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7592 			goto skip_modeset;
7593 
7594 		new_stream = create_stream_for_sink(aconnector,
7595 						     &new_crtc_state->mode,
7596 						    dm_new_conn_state,
7597 						    dm_old_crtc_state->stream);
7598 
7599 		/*
7600 		 * we can have no stream on ACTION_SET if a display
7601 		 * was disconnected during S3, in this case it is not an
7602 		 * error, the OS will be updated after detection, and
7603 		 * will do the right thing on next atomic commit
7604 		 */
7605 
7606 		if (!new_stream) {
7607 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7608 					__func__, acrtc->base.base.id);
7609 			ret = -ENOMEM;
7610 			goto fail;
7611 		}
7612 
7613 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7614 
7615 		ret = fill_hdr_info_packet(drm_new_conn_state,
7616 					   &new_stream->hdr_static_metadata);
7617 		if (ret)
7618 			goto fail;
7619 
7620 		/*
7621 		 * If we already removed the old stream from the context
7622 		 * (and set the new stream to NULL) then we can't reuse
7623 		 * the old stream even if the stream and scaling are unchanged.
7624 		 * We'll hit the BUG_ON and black screen.
7625 		 *
7626 		 * TODO: Refactor this function to allow this check to work
7627 		 * in all conditions.
7628 		 */
7629 		if (dm_new_crtc_state->stream &&
7630 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7631 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7632 			new_crtc_state->mode_changed = false;
7633 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7634 					 new_crtc_state->mode_changed);
7635 		}
7636 	}
7637 
7638 	/* mode_changed flag may get updated above, need to check again */
7639 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7640 		goto skip_modeset;
7641 
7642 	DRM_DEBUG_DRIVER(
7643 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7644 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
7645 		"connectors_changed:%d\n",
7646 		acrtc->crtc_id,
7647 		new_crtc_state->enable,
7648 		new_crtc_state->active,
7649 		new_crtc_state->planes_changed,
7650 		new_crtc_state->mode_changed,
7651 		new_crtc_state->active_changed,
7652 		new_crtc_state->connectors_changed);
7653 
7654 	/* Remove stream for any changed/disabled CRTC */
7655 	if (!enable) {
7656 
7657 		if (!dm_old_crtc_state->stream)
7658 			goto skip_modeset;
7659 
7660 		ret = dm_atomic_get_state(state, &dm_state);
7661 		if (ret)
7662 			goto fail;
7663 
7664 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7665 				crtc->base.id);
7666 
7667 		/* i.e. reset mode */
7668 		if (dc_remove_stream_from_ctx(
7669 				dm->dc,
7670 				dm_state->context,
7671 				dm_old_crtc_state->stream) != DC_OK) {
7672 			ret = -EINVAL;
7673 			goto fail;
7674 		}
7675 
7676 		dc_stream_release(dm_old_crtc_state->stream);
7677 		dm_new_crtc_state->stream = NULL;
7678 
7679 		reset_freesync_config_for_crtc(dm_new_crtc_state);
7680 
7681 		*lock_and_validation_needed = true;
7682 
7683 	} else {/* Add stream for any updated/enabled CRTC */
7684 		/*
7685 		 * Quick fix to prevent NULL pointer on new_stream when
7686 		 * added MST connectors not found in existing crtc_state in the chained mode
7687 		 * TODO: need to dig out the root cause of that
7688 		 */
7689 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7690 			goto skip_modeset;
7691 
7692 		if (modereset_required(new_crtc_state))
7693 			goto skip_modeset;
7694 
7695 		if (modeset_required(new_crtc_state, new_stream,
7696 				     dm_old_crtc_state->stream)) {
7697 
7698 			WARN_ON(dm_new_crtc_state->stream);
7699 
7700 			ret = dm_atomic_get_state(state, &dm_state);
7701 			if (ret)
7702 				goto fail;
7703 
7704 			dm_new_crtc_state->stream = new_stream;
7705 
7706 			dc_stream_retain(new_stream);
7707 
7708 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7709 						crtc->base.id);
7710 
7711 			if (dc_add_stream_to_ctx(
7712 					dm->dc,
7713 					dm_state->context,
7714 					dm_new_crtc_state->stream) != DC_OK) {
7715 				ret = -EINVAL;
7716 				goto fail;
7717 			}
7718 
7719 			*lock_and_validation_needed = true;
7720 		}
7721 	}
7722 
7723 skip_modeset:
7724 	/* Release extra reference */
7725 	if (new_stream)
7726 		 dc_stream_release(new_stream);
7727 
7728 	/*
7729 	 * We want to do dc stream updates that do not require a
7730 	 * full modeset below.
7731 	 */
7732 	if (!(enable && aconnector && new_crtc_state->enable &&
7733 	      new_crtc_state->active))
7734 		return 0;
7735 	/*
7736 	 * Given above conditions, the dc state cannot be NULL because:
7737 	 * 1. We're in the process of enabling CRTCs (just been added
7738 	 *    to the dc context, or already is on the context)
7739 	 * 2. Has a valid connector attached, and
7740 	 * 3. Is currently active and enabled.
7741 	 * => The dc stream state currently exists.
7742 	 */
7743 	BUG_ON(dm_new_crtc_state->stream == NULL);
7744 
7745 	/* Scaling or underscan settings */
7746 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7747 		update_stream_scaling_settings(
7748 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7749 
7750 	/* ABM settings */
7751 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7752 
7753 	/*
7754 	 * Color management settings. We also update color properties
7755 	 * when a modeset is needed, to ensure it gets reprogrammed.
7756 	 */
7757 	if (dm_new_crtc_state->base.color_mgmt_changed ||
7758 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7759 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7760 		if (ret)
7761 			goto fail;
7762 	}
7763 
7764 	/* Update Freesync settings. */
7765 	get_freesync_config_for_crtc(dm_new_crtc_state,
7766 				     dm_new_conn_state);
7767 
7768 	return ret;
7769 
7770 fail:
7771 	if (new_stream)
7772 		dc_stream_release(new_stream);
7773 	return ret;
7774 }
7775 
7776 static bool should_reset_plane(struct drm_atomic_state *state,
7777 			       struct drm_plane *plane,
7778 			       struct drm_plane_state *old_plane_state,
7779 			       struct drm_plane_state *new_plane_state)
7780 {
7781 	struct drm_plane *other;
7782 	struct drm_plane_state *old_other_state, *new_other_state;
7783 	struct drm_crtc_state *new_crtc_state;
7784 	int i;
7785 
7786 	/*
7787 	 * TODO: Remove this hack once the checks below are sufficient
7788 	 * enough to determine when we need to reset all the planes on
7789 	 * the stream.
7790 	 */
7791 	if (state->allow_modeset)
7792 		return true;
7793 
7794 	/* Exit early if we know that we're adding or removing the plane. */
7795 	if (old_plane_state->crtc != new_plane_state->crtc)
7796 		return true;
7797 
7798 	/* old crtc == new_crtc == NULL, plane not in context. */
7799 	if (!new_plane_state->crtc)
7800 		return false;
7801 
7802 	new_crtc_state =
7803 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7804 
7805 	if (!new_crtc_state)
7806 		return true;
7807 
7808 	/* CRTC Degamma changes currently require us to recreate planes. */
7809 	if (new_crtc_state->color_mgmt_changed)
7810 		return true;
7811 
7812 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7813 		return true;
7814 
7815 	/*
7816 	 * If there are any new primary or overlay planes being added or
7817 	 * removed then the z-order can potentially change. To ensure
7818 	 * correct z-order and pipe acquisition the current DC architecture
7819 	 * requires us to remove and recreate all existing planes.
7820 	 *
7821 	 * TODO: Come up with a more elegant solution for this.
7822 	 */
7823 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7824 		if (other->type == DRM_PLANE_TYPE_CURSOR)
7825 			continue;
7826 
7827 		if (old_other_state->crtc != new_plane_state->crtc &&
7828 		    new_other_state->crtc != new_plane_state->crtc)
7829 			continue;
7830 
7831 		if (old_other_state->crtc != new_other_state->crtc)
7832 			return true;
7833 
7834 		/* TODO: Remove this once we can handle fast format changes. */
7835 		if (old_other_state->fb && new_other_state->fb &&
7836 		    old_other_state->fb->format != new_other_state->fb->format)
7837 			return true;
7838 	}
7839 
7840 	return false;
7841 }
7842 
7843 static int dm_update_plane_state(struct dc *dc,
7844 				 struct drm_atomic_state *state,
7845 				 struct drm_plane *plane,
7846 				 struct drm_plane_state *old_plane_state,
7847 				 struct drm_plane_state *new_plane_state,
7848 				 bool enable,
7849 				 bool *lock_and_validation_needed)
7850 {
7851 
7852 	struct dm_atomic_state *dm_state = NULL;
7853 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7854 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7855 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7856 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7857 	bool needs_reset;
7858 	int ret = 0;
7859 
7860 
7861 	new_plane_crtc = new_plane_state->crtc;
7862 	old_plane_crtc = old_plane_state->crtc;
7863 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
7864 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
7865 
7866 	/*TODO Implement atomic check for cursor plane */
7867 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
7868 		return 0;
7869 
7870 	needs_reset = should_reset_plane(state, plane, old_plane_state,
7871 					 new_plane_state);
7872 
7873 	/* Remove any changed/removed planes */
7874 	if (!enable) {
7875 		if (!needs_reset)
7876 			return 0;
7877 
7878 		if (!old_plane_crtc)
7879 			return 0;
7880 
7881 		old_crtc_state = drm_atomic_get_old_crtc_state(
7882 				state, old_plane_crtc);
7883 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7884 
7885 		if (!dm_old_crtc_state->stream)
7886 			return 0;
7887 
7888 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7889 				plane->base.id, old_plane_crtc->base.id);
7890 
7891 		ret = dm_atomic_get_state(state, &dm_state);
7892 		if (ret)
7893 			return ret;
7894 
7895 		if (!dc_remove_plane_from_context(
7896 				dc,
7897 				dm_old_crtc_state->stream,
7898 				dm_old_plane_state->dc_state,
7899 				dm_state->context)) {
7900 
7901 			ret = EINVAL;
7902 			return ret;
7903 		}
7904 
7905 
7906 		dc_plane_state_release(dm_old_plane_state->dc_state);
7907 		dm_new_plane_state->dc_state = NULL;
7908 
7909 		*lock_and_validation_needed = true;
7910 
7911 	} else { /* Add new planes */
7912 		struct dc_plane_state *dc_new_plane_state;
7913 
7914 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7915 			return 0;
7916 
7917 		if (!new_plane_crtc)
7918 			return 0;
7919 
7920 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7921 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7922 
7923 		if (!dm_new_crtc_state->stream)
7924 			return 0;
7925 
7926 		if (!needs_reset)
7927 			return 0;
7928 
7929 		WARN_ON(dm_new_plane_state->dc_state);
7930 
7931 		dc_new_plane_state = dc_create_plane_state(dc);
7932 		if (!dc_new_plane_state)
7933 			return -ENOMEM;
7934 
7935 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7936 				plane->base.id, new_plane_crtc->base.id);
7937 
7938 		ret = dm_atomic_get_state(state, &dm_state);
7939 		if (ret) {
7940 			dc_plane_state_release(dc_new_plane_state);
7941 			return ret;
7942 		}
7943 
7944 		/*
7945 		 * Any atomic check errors that occur after this will
7946 		 * not need a release. The plane state will be attached
7947 		 * to the stream, and therefore part of the atomic
7948 		 * state. It'll be released when the atomic state is
7949 		 * cleaned.
7950 		 */
7951 		if (!dc_add_plane_to_context(
7952 				dc,
7953 				dm_new_crtc_state->stream,
7954 				dc_new_plane_state,
7955 				dm_state->context)) {
7956 
7957 			dc_plane_state_release(dc_new_plane_state);
7958 			return -EINVAL;
7959 		}
7960 
7961 		dm_new_plane_state->dc_state = dc_new_plane_state;
7962 
7963 		ret = fill_dc_plane_attributes(
7964 			new_plane_crtc->dev->dev_private,
7965 			dm_new_plane_state,
7966 			new_plane_state,
7967 			new_crtc_state);
7968 		if (ret)
7969 			return ret;
7970 
7971 		/* Tell DC to do a full surface update every time there
7972 		 * is a plane change. Inefficient, but works for now.
7973 		 */
7974 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
7975 
7976 		*lock_and_validation_needed = true;
7977 	}
7978 
7979 
7980 	return ret;
7981 }
7982 
7983 static int
7984 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
7985 				    struct drm_atomic_state *state,
7986 				    enum surface_update_type *out_type)
7987 {
7988 	struct dc *dc = dm->dc;
7989 	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
7990 	int i, j, num_plane, ret = 0;
7991 	struct drm_plane_state *old_plane_state, *new_plane_state;
7992 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
7993 	struct drm_crtc *new_plane_crtc;
7994 	struct drm_plane *plane;
7995 
7996 	struct drm_crtc *crtc;
7997 	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
7998 	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
7999 	struct dc_stream_status *status = NULL;
8000 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8001 	struct surface_info_bundle {
8002 		struct dc_surface_update surface_updates[MAX_SURFACES];
8003 		struct dc_plane_info plane_infos[MAX_SURFACES];
8004 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8005 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8006 		struct dc_stream_update stream_update;
8007 	} *bundle;
8008 
8009 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8010 
8011 	if (!bundle) {
8012 		DRM_ERROR("Failed to allocate update bundle\n");
8013 		/* Set type to FULL to avoid crashing in DC*/
8014 		update_type = UPDATE_TYPE_FULL;
8015 		goto cleanup;
8016 	}
8017 
8018 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8019 
8020 		memset(bundle, 0, sizeof(struct surface_info_bundle));
8021 
8022 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8023 		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8024 		num_plane = 0;
8025 
8026 		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8027 			update_type = UPDATE_TYPE_FULL;
8028 			goto cleanup;
8029 		}
8030 
8031 		if (!new_dm_crtc_state->stream)
8032 			continue;
8033 
8034 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8035 			const struct amdgpu_framebuffer *amdgpu_fb =
8036 				to_amdgpu_framebuffer(new_plane_state->fb);
8037 			struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8038 			struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8039 			struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8040 			uint64_t tiling_flags;
8041 
8042 			new_plane_crtc = new_plane_state->crtc;
8043 			new_dm_plane_state = to_dm_plane_state(new_plane_state);
8044 			old_dm_plane_state = to_dm_plane_state(old_plane_state);
8045 
8046 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8047 				continue;
8048 
8049 			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8050 				update_type = UPDATE_TYPE_FULL;
8051 				goto cleanup;
8052 			}
8053 
8054 			if (crtc != new_plane_crtc)
8055 				continue;
8056 
8057 			bundle->surface_updates[num_plane].surface =
8058 					new_dm_plane_state->dc_state;
8059 
8060 			if (new_crtc_state->mode_changed) {
8061 				bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8062 				bundle->stream_update.src = new_dm_crtc_state->stream->src;
8063 			}
8064 
8065 			if (new_crtc_state->color_mgmt_changed) {
8066 				bundle->surface_updates[num_plane].gamma =
8067 						new_dm_plane_state->dc_state->gamma_correction;
8068 				bundle->surface_updates[num_plane].in_transfer_func =
8069 						new_dm_plane_state->dc_state->in_transfer_func;
8070 				bundle->stream_update.gamut_remap =
8071 						&new_dm_crtc_state->stream->gamut_remap_matrix;
8072 				bundle->stream_update.output_csc_transform =
8073 						&new_dm_crtc_state->stream->csc_color_matrix;
8074 				bundle->stream_update.out_transfer_func =
8075 						new_dm_crtc_state->stream->out_transfer_func;
8076 			}
8077 
8078 			ret = fill_dc_scaling_info(new_plane_state,
8079 						   scaling_info);
8080 			if (ret)
8081 				goto cleanup;
8082 
8083 			bundle->surface_updates[num_plane].scaling_info = scaling_info;
8084 
8085 			if (amdgpu_fb) {
8086 				ret = get_fb_info(amdgpu_fb, &tiling_flags);
8087 				if (ret)
8088 					goto cleanup;
8089 
8090 				ret = fill_dc_plane_info_and_addr(
8091 					dm->adev, new_plane_state, tiling_flags,
8092 					plane_info,
8093 					&flip_addr->address);
8094 				if (ret)
8095 					goto cleanup;
8096 
8097 				bundle->surface_updates[num_plane].plane_info = plane_info;
8098 				bundle->surface_updates[num_plane].flip_addr = flip_addr;
8099 			}
8100 
8101 			num_plane++;
8102 		}
8103 
8104 		if (num_plane == 0)
8105 			continue;
8106 
8107 		ret = dm_atomic_get_state(state, &dm_state);
8108 		if (ret)
8109 			goto cleanup;
8110 
8111 		old_dm_state = dm_atomic_get_old_state(state);
8112 		if (!old_dm_state) {
8113 			ret = -EINVAL;
8114 			goto cleanup;
8115 		}
8116 
8117 		status = dc_stream_get_status_from_state(old_dm_state->context,
8118 							 new_dm_crtc_state->stream);
8119 		bundle->stream_update.stream = new_dm_crtc_state->stream;
8120 		/*
8121 		 * TODO: DC modifies the surface during this call so we need
8122 		 * to lock here - find a way to do this without locking.
8123 		 */
8124 		mutex_lock(&dm->dc_lock);
8125 		update_type = dc_check_update_surfaces_for_stream(
8126 				dc,	bundle->surface_updates, num_plane,
8127 				&bundle->stream_update, status);
8128 		mutex_unlock(&dm->dc_lock);
8129 
8130 		if (update_type > UPDATE_TYPE_MED) {
8131 			update_type = UPDATE_TYPE_FULL;
8132 			goto cleanup;
8133 		}
8134 	}
8135 
8136 cleanup:
8137 	kfree(bundle);
8138 
8139 	*out_type = update_type;
8140 	return ret;
8141 }
8142 
8143 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8144 {
8145 	struct drm_connector *connector;
8146 	struct drm_connector_state *conn_state;
8147 	struct amdgpu_dm_connector *aconnector = NULL;
8148 	int i;
8149 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8150 		if (conn_state->crtc != crtc)
8151 			continue;
8152 
8153 		aconnector = to_amdgpu_dm_connector(connector);
8154 		if (!aconnector->port || !aconnector->mst_port)
8155 			aconnector = NULL;
8156 		else
8157 			break;
8158 	}
8159 
8160 	if (!aconnector)
8161 		return 0;
8162 
8163 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8164 }
8165 
8166 /**
8167  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8168  * @dev: The DRM device
8169  * @state: The atomic state to commit
8170  *
8171  * Validate that the given atomic state is programmable by DC into hardware.
8172  * This involves constructing a &struct dc_state reflecting the new hardware
8173  * state we wish to commit, then querying DC to see if it is programmable. It's
8174  * important not to modify the existing DC state. Otherwise, atomic_check
8175  * may unexpectedly commit hardware changes.
8176  *
8177  * When validating the DC state, it's important that the right locks are
8178  * acquired. For full updates case which removes/adds/updates streams on one
8179  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
8180  * that any such full update commit will wait for completion of any outstanding
8181  * flip using DRMs synchronization events. See
8182  * dm_determine_update_type_for_commit()
8183  *
8184  * Note that DM adds the affected connectors for all CRTCs in state, when that
8185  * might not seem necessary. This is because DC stream creation requires the
8186  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8187  * be possible but non-trivial - a possible TODO item.
8188  *
8189  * Return: -Error code if validation failed.
8190  */
8191 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8192 				  struct drm_atomic_state *state)
8193 {
8194 	struct amdgpu_device *adev = dev->dev_private;
8195 	struct dm_atomic_state *dm_state = NULL;
8196 	struct dc *dc = adev->dm.dc;
8197 	struct drm_connector *connector;
8198 	struct drm_connector_state *old_con_state, *new_con_state;
8199 	struct drm_crtc *crtc;
8200 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8201 	struct drm_plane *plane;
8202 	struct drm_plane_state *old_plane_state, *new_plane_state;
8203 	enum surface_update_type update_type = UPDATE_TYPE_FAST;
8204 	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8205 
8206 	int ret, i;
8207 
8208 	/*
8209 	 * This bool will be set for true for any modeset/reset
8210 	 * or plane update which implies non fast surface update.
8211 	 */
8212 	bool lock_and_validation_needed = false;
8213 
8214 	ret = drm_atomic_helper_check_modeset(dev, state);
8215 	if (ret)
8216 		goto fail;
8217 
8218 	if (adev->asic_type >= CHIP_NAVI10) {
8219 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8220 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8221 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8222 				if (ret)
8223 					goto fail;
8224 			}
8225 		}
8226 	}
8227 
8228 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8229 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8230 		    !new_crtc_state->color_mgmt_changed &&
8231 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8232 			continue;
8233 
8234 		if (!new_crtc_state->enable)
8235 			continue;
8236 
8237 		ret = drm_atomic_add_affected_connectors(state, crtc);
8238 		if (ret)
8239 			return ret;
8240 
8241 		ret = drm_atomic_add_affected_planes(state, crtc);
8242 		if (ret)
8243 			goto fail;
8244 	}
8245 
8246 	/*
8247 	 * Add all primary and overlay planes on the CRTC to the state
8248 	 * whenever a plane is enabled to maintain correct z-ordering
8249 	 * and to enable fast surface updates.
8250 	 */
8251 	drm_for_each_crtc(crtc, dev) {
8252 		bool modified = false;
8253 
8254 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8255 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8256 				continue;
8257 
8258 			if (new_plane_state->crtc == crtc ||
8259 			    old_plane_state->crtc == crtc) {
8260 				modified = true;
8261 				break;
8262 			}
8263 		}
8264 
8265 		if (!modified)
8266 			continue;
8267 
8268 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8269 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8270 				continue;
8271 
8272 			new_plane_state =
8273 				drm_atomic_get_plane_state(state, plane);
8274 
8275 			if (IS_ERR(new_plane_state)) {
8276 				ret = PTR_ERR(new_plane_state);
8277 				goto fail;
8278 			}
8279 		}
8280 	}
8281 
8282 	/* Remove exiting planes if they are modified */
8283 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8284 		ret = dm_update_plane_state(dc, state, plane,
8285 					    old_plane_state,
8286 					    new_plane_state,
8287 					    false,
8288 					    &lock_and_validation_needed);
8289 		if (ret)
8290 			goto fail;
8291 	}
8292 
8293 	/* Disable all crtcs which require disable */
8294 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8295 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8296 					   old_crtc_state,
8297 					   new_crtc_state,
8298 					   false,
8299 					   &lock_and_validation_needed);
8300 		if (ret)
8301 			goto fail;
8302 	}
8303 
8304 	/* Enable all crtcs which require enable */
8305 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8306 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8307 					   old_crtc_state,
8308 					   new_crtc_state,
8309 					   true,
8310 					   &lock_and_validation_needed);
8311 		if (ret)
8312 			goto fail;
8313 	}
8314 
8315 	/* Add new/modified planes */
8316 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8317 		ret = dm_update_plane_state(dc, state, plane,
8318 					    old_plane_state,
8319 					    new_plane_state,
8320 					    true,
8321 					    &lock_and_validation_needed);
8322 		if (ret)
8323 			goto fail;
8324 	}
8325 
8326 	/* Run this here since we want to validate the streams we created */
8327 	ret = drm_atomic_helper_check_planes(dev, state);
8328 	if (ret)
8329 		goto fail;
8330 
8331 	if (state->legacy_cursor_update) {
8332 		/*
8333 		 * This is a fast cursor update coming from the plane update
8334 		 * helper, check if it can be done asynchronously for better
8335 		 * performance.
8336 		 */
8337 		state->async_update =
8338 			!drm_atomic_helper_async_check(dev, state);
8339 
8340 		/*
8341 		 * Skip the remaining global validation if this is an async
8342 		 * update. Cursor updates can be done without affecting
8343 		 * state or bandwidth calcs and this avoids the performance
8344 		 * penalty of locking the private state object and
8345 		 * allocating a new dc_state.
8346 		 */
8347 		if (state->async_update)
8348 			return 0;
8349 	}
8350 
8351 	/* Check scaling and underscan changes*/
8352 	/* TODO Removed scaling changes validation due to inability to commit
8353 	 * new stream into context w\o causing full reset. Need to
8354 	 * decide how to handle.
8355 	 */
8356 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8357 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8358 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8359 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8360 
8361 		/* Skip any modesets/resets */
8362 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8363 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8364 			continue;
8365 
8366 		/* Skip any thing not scale or underscan changes */
8367 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8368 			continue;
8369 
8370 		overall_update_type = UPDATE_TYPE_FULL;
8371 		lock_and_validation_needed = true;
8372 	}
8373 
8374 	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8375 	if (ret)
8376 		goto fail;
8377 
8378 	if (overall_update_type < update_type)
8379 		overall_update_type = update_type;
8380 
8381 	/*
8382 	 * lock_and_validation_needed was an old way to determine if we need to set
8383 	 * the global lock. Leaving it in to check if we broke any corner cases
8384 	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8385 	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
8386 	 */
8387 	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8388 		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8389 
8390 	if (overall_update_type > UPDATE_TYPE_FAST) {
8391 		ret = dm_atomic_get_state(state, &dm_state);
8392 		if (ret)
8393 			goto fail;
8394 
8395 		ret = do_aquire_global_lock(dev, state);
8396 		if (ret)
8397 			goto fail;
8398 
8399 #if defined(CONFIG_DRM_AMD_DC_DCN)
8400 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8401 			goto fail;
8402 
8403 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8404 		if (ret)
8405 			goto fail;
8406 #endif
8407 
8408 		/*
8409 		 * Perform validation of MST topology in the state:
8410 		 * We need to perform MST atomic check before calling
8411 		 * dc_validate_global_state(), or there is a chance
8412 		 * to get stuck in an infinite loop and hang eventually.
8413 		 */
8414 		ret = drm_dp_mst_atomic_check(state);
8415 		if (ret)
8416 			goto fail;
8417 
8418 		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8419 			ret = -EINVAL;
8420 			goto fail;
8421 		}
8422 	} else {
8423 		/*
8424 		 * The commit is a fast update. Fast updates shouldn't change
8425 		 * the DC context, affect global validation, and can have their
8426 		 * commit work done in parallel with other commits not touching
8427 		 * the same resource. If we have a new DC context as part of
8428 		 * the DM atomic state from validation we need to free it and
8429 		 * retain the existing one instead.
8430 		 */
8431 		struct dm_atomic_state *new_dm_state, *old_dm_state;
8432 
8433 		new_dm_state = dm_atomic_get_new_state(state);
8434 		old_dm_state = dm_atomic_get_old_state(state);
8435 
8436 		if (new_dm_state && old_dm_state) {
8437 			if (new_dm_state->context)
8438 				dc_release_state(new_dm_state->context);
8439 
8440 			new_dm_state->context = old_dm_state->context;
8441 
8442 			if (old_dm_state->context)
8443 				dc_retain_state(old_dm_state->context);
8444 		}
8445 	}
8446 
8447 	/* Store the overall update type for use later in atomic check. */
8448 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
8449 		struct dm_crtc_state *dm_new_crtc_state =
8450 			to_dm_crtc_state(new_crtc_state);
8451 
8452 		dm_new_crtc_state->update_type = (int)overall_update_type;
8453 	}
8454 
8455 	/* Must be success */
8456 	WARN_ON(ret);
8457 	return ret;
8458 
8459 fail:
8460 	if (ret == -EDEADLK)
8461 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8462 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8463 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8464 	else
8465 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
8466 
8467 	return ret;
8468 }
8469 
8470 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8471 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8472 {
8473 	uint8_t dpcd_data;
8474 	bool capable = false;
8475 
8476 	if (amdgpu_dm_connector->dc_link &&
8477 		dm_helpers_dp_read_dpcd(
8478 				NULL,
8479 				amdgpu_dm_connector->dc_link,
8480 				DP_DOWN_STREAM_PORT_COUNT,
8481 				&dpcd_data,
8482 				sizeof(dpcd_data))) {
8483 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
8484 	}
8485 
8486 	return capable;
8487 }
8488 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8489 					struct edid *edid)
8490 {
8491 	int i;
8492 	bool edid_check_required;
8493 	struct detailed_timing *timing;
8494 	struct detailed_non_pixel *data;
8495 	struct detailed_data_monitor_range *range;
8496 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8497 			to_amdgpu_dm_connector(connector);
8498 	struct dm_connector_state *dm_con_state = NULL;
8499 
8500 	struct drm_device *dev = connector->dev;
8501 	struct amdgpu_device *adev = dev->dev_private;
8502 	bool freesync_capable = false;
8503 
8504 	if (!connector->state) {
8505 		DRM_ERROR("%s - Connector has no state", __func__);
8506 		goto update;
8507 	}
8508 
8509 	if (!edid) {
8510 		dm_con_state = to_dm_connector_state(connector->state);
8511 
8512 		amdgpu_dm_connector->min_vfreq = 0;
8513 		amdgpu_dm_connector->max_vfreq = 0;
8514 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8515 
8516 		goto update;
8517 	}
8518 
8519 	dm_con_state = to_dm_connector_state(connector->state);
8520 
8521 	edid_check_required = false;
8522 	if (!amdgpu_dm_connector->dc_sink) {
8523 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
8524 		goto update;
8525 	}
8526 	if (!adev->dm.freesync_module)
8527 		goto update;
8528 	/*
8529 	 * if edid non zero restrict freesync only for dp and edp
8530 	 */
8531 	if (edid) {
8532 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8533 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8534 			edid_check_required = is_dp_capable_without_timing_msa(
8535 						adev->dm.dc,
8536 						amdgpu_dm_connector);
8537 		}
8538 	}
8539 	if (edid_check_required == true && (edid->version > 1 ||
8540 	   (edid->version == 1 && edid->revision > 1))) {
8541 		for (i = 0; i < 4; i++) {
8542 
8543 			timing	= &edid->detailed_timings[i];
8544 			data	= &timing->data.other_data;
8545 			range	= &data->data.range;
8546 			/*
8547 			 * Check if monitor has continuous frequency mode
8548 			 */
8549 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8550 				continue;
8551 			/*
8552 			 * Check for flag range limits only. If flag == 1 then
8553 			 * no additional timing information provided.
8554 			 * Default GTF, GTF Secondary curve and CVT are not
8555 			 * supported
8556 			 */
8557 			if (range->flags != 1)
8558 				continue;
8559 
8560 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8561 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8562 			amdgpu_dm_connector->pixel_clock_mhz =
8563 				range->pixel_clock_mhz * 10;
8564 			break;
8565 		}
8566 
8567 		if (amdgpu_dm_connector->max_vfreq -
8568 		    amdgpu_dm_connector->min_vfreq > 10) {
8569 
8570 			freesync_capable = true;
8571 		}
8572 	}
8573 
8574 update:
8575 	if (dm_con_state)
8576 		dm_con_state->freesync_capable = freesync_capable;
8577 
8578 	if (connector->vrr_capable_property)
8579 		drm_connector_set_vrr_capable_property(connector,
8580 						       freesync_capable);
8581 }
8582 
8583 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8584 {
8585 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8586 
8587 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8588 		return;
8589 	if (link->type == dc_connection_none)
8590 		return;
8591 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8592 					dpcd_data, sizeof(dpcd_data))) {
8593 		link->psr_feature_enabled = dpcd_data[0] ? true:false;
8594 		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
8595 	}
8596 }
8597 
8598 /*
8599  * amdgpu_dm_link_setup_psr() - configure psr link
8600  * @stream: stream state
8601  *
8602  * Return: true if success
8603  */
8604 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8605 {
8606 	struct dc_link *link = NULL;
8607 	struct psr_config psr_config = {0};
8608 	struct psr_context psr_context = {0};
8609 	struct dc *dc = NULL;
8610 	bool ret = false;
8611 
8612 	if (stream == NULL)
8613 		return false;
8614 
8615 	link = stream->link;
8616 	dc = link->ctx->dc;
8617 
8618 	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
8619 
8620 	if (psr_config.psr_version > 0) {
8621 		psr_config.psr_exit_link_training_required = 0x1;
8622 		psr_config.psr_frame_capture_indication_req = 0;
8623 		psr_config.psr_rfb_setup_time = 0x37;
8624 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8625 		psr_config.allow_smu_optimizations = 0x0;
8626 
8627 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8628 
8629 	}
8630 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_feature_enabled);
8631 
8632 	return ret;
8633 }
8634 
8635 /*
8636  * amdgpu_dm_psr_enable() - enable psr f/w
8637  * @stream: stream state
8638  *
8639  * Return: true if success
8640  */
8641 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8642 {
8643 	struct dc_link *link = stream->link;
8644 	unsigned int vsync_rate_hz = 0;
8645 	struct dc_static_screen_params params = {0};
8646 	/* Calculate number of static frames before generating interrupt to
8647 	 * enter PSR.
8648 	 */
8649 	// Init fail safe of 2 frames static
8650 	unsigned int num_frames_static = 2;
8651 
8652 	DRM_DEBUG_DRIVER("Enabling psr...\n");
8653 
8654 	vsync_rate_hz = div64_u64(div64_u64((
8655 			stream->timing.pix_clk_100hz * 100),
8656 			stream->timing.v_total),
8657 			stream->timing.h_total);
8658 
8659 	/* Round up
8660 	 * Calculate number of frames such that at least 30 ms of time has
8661 	 * passed.
8662 	 */
8663 	if (vsync_rate_hz != 0) {
8664 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8665 		num_frames_static = (30000 / frame_time_microsec) + 1;
8666 	}
8667 
8668 	params.triggers.cursor_update = true;
8669 	params.triggers.overlay_update = true;
8670 	params.triggers.surface_update = true;
8671 	params.num_frames = num_frames_static;
8672 
8673 	dc_stream_set_static_screen_params(link->ctx->dc,
8674 					   &stream, 1,
8675 					   &params);
8676 
8677 	return dc_link_set_psr_allow_active(link, true, false);
8678 }
8679 
8680 /*
8681  * amdgpu_dm_psr_disable() - disable psr f/w
8682  * @stream:  stream state
8683  *
8684  * Return: true if success
8685  */
8686 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8687 {
8688 
8689 	DRM_DEBUG_DRIVER("Disabling psr...\n");
8690 
8691 	return dc_link_set_psr_allow_active(stream->link, false, true);
8692 }
8693