1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38 
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50 
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58 
59 #include "ivsrcid/ivsrcid_vislands30.h"
60 
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136 	switch (link->dpcd_caps.dongle_type) {
137 	case DISPLAY_DONGLE_NONE:
138 		return DRM_MODE_SUBCONNECTOR_Native;
139 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 		return DRM_MODE_SUBCONNECTOR_VGA;
141 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 		return DRM_MODE_SUBCONNECTOR_DVID;
144 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 		return DRM_MODE_SUBCONNECTOR_HDMIA;
147 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148 	default:
149 		return DRM_MODE_SUBCONNECTOR_Unknown;
150 	}
151 }
152 
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155 	struct dc_link *link = aconnector->dc_link;
156 	struct drm_connector *connector = &aconnector->base;
157 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158 
159 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160 		return;
161 
162 	if (aconnector->dc_sink)
163 		subconnector = get_subconnector_type(link);
164 
165 	drm_object_property_set_value(&connector->base,
166 			connector->dev->mode_config.dp_subconnector_property,
167 			subconnector);
168 }
169 
170 /*
171  * initializes drm_device display related structures, based on the information
172  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
173  * drm_encoder, drm_mode_config
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180 
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 				struct drm_plane *plane,
183 				unsigned long possible_crtcs,
184 				const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 			       struct drm_plane *plane,
187 			       uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
190 				    uint32_t link_index,
191 				    struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 				  struct amdgpu_encoder *aencoder,
194 				  uint32_t link_index);
195 
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211 
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214 
215 /*
216  * dm_vblank_get_counter
217  *
218  * @brief
219  * Get counter for number of vertical blanks
220  *
221  * @param
222  * struct amdgpu_device *adev - [in] desired amdgpu device
223  * int disp_idx - [in] which CRTC to get the counter from
224  *
225  * @return
226  * Counter for vertical blanks
227  */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
229 {
230 	if (crtc >= adev->mode_info.num_crtc)
231 		return 0;
232 	else {
233 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
234 
235 		if (acrtc->dm_irq_params.stream == NULL) {
236 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237 				  crtc);
238 			return 0;
239 		}
240 
241 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242 	}
243 }
244 
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 				  u32 *vbl, u32 *position)
247 {
248 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
249 
250 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251 		return -EINVAL;
252 	else {
253 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254 
255 		if (acrtc->dm_irq_params.stream ==  NULL) {
256 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
257 				  crtc);
258 			return 0;
259 		}
260 
261 		/*
262 		 * TODO rework base driver to use values directly.
263 		 * for now parse it back into reg-format
264 		 */
265 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
266 					 &v_blank_start,
267 					 &v_blank_end,
268 					 &h_position,
269 					 &v_position);
270 
271 		*position = v_position | (h_position << 16);
272 		*vbl = v_blank_start | (v_blank_end << 16);
273 	}
274 
275 	return 0;
276 }
277 
278 static bool dm_is_idle(void *handle)
279 {
280 	/* XXX todo */
281 	return true;
282 }
283 
284 static int dm_wait_for_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return 0;
288 }
289 
290 static bool dm_check_soft_reset(void *handle)
291 {
292 	return false;
293 }
294 
295 static int dm_soft_reset(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
303 		     int otg_inst)
304 {
305 	struct drm_device *dev = adev_to_drm(adev);
306 	struct drm_crtc *crtc;
307 	struct amdgpu_crtc *amdgpu_crtc;
308 
309 	if (otg_inst == -1) {
310 		WARN_ON(1);
311 		return adev->mode_info.crtcs[0];
312 	}
313 
314 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 		amdgpu_crtc = to_amdgpu_crtc(crtc);
316 
317 		if (amdgpu_crtc->otg_inst == otg_inst)
318 			return amdgpu_crtc;
319 	}
320 
321 	return NULL;
322 }
323 
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
325 {
326 	return acrtc->dm_irq_params.freesync_config.state ==
327 		       VRR_STATE_ACTIVE_VARIABLE ||
328 	       acrtc->dm_irq_params.freesync_config.state ==
329 		       VRR_STATE_ACTIVE_FIXED;
330 }
331 
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
333 {
334 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 /**
339  * dm_pflip_high_irq() - Handle pageflip interrupt
340  * @interrupt_params: ignored
341  *
342  * Handles the pageflip interrupt by notifying all interested parties
343  * that the pageflip has been completed.
344  */
345 static void dm_pflip_high_irq(void *interrupt_params)
346 {
347 	struct amdgpu_crtc *amdgpu_crtc;
348 	struct common_irq_params *irq_params = interrupt_params;
349 	struct amdgpu_device *adev = irq_params->adev;
350 	unsigned long flags;
351 	struct drm_pending_vblank_event *e;
352 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
353 	bool vrr_active;
354 
355 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
356 
357 	/* IRQ could occur when in initial stage */
358 	/* TODO work and BO cleanup */
359 	if (amdgpu_crtc == NULL) {
360 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
361 		return;
362 	}
363 
364 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
365 
366 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
367 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
368 						 amdgpu_crtc->pflip_status,
369 						 AMDGPU_FLIP_SUBMITTED,
370 						 amdgpu_crtc->crtc_id,
371 						 amdgpu_crtc);
372 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
373 		return;
374 	}
375 
376 	/* page flip completed. */
377 	e = amdgpu_crtc->event;
378 	amdgpu_crtc->event = NULL;
379 
380 	if (!e)
381 		WARN_ON(1);
382 
383 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
384 
385 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
386 	if (!vrr_active ||
387 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 				      &v_blank_end, &hpos, &vpos) ||
389 	    (vpos < v_blank_start)) {
390 		/* Update to correct count and vblank timestamp if racing with
391 		 * vblank irq. This also updates to the correct vblank timestamp
392 		 * even in VRR mode, as scanout is past the front-porch atm.
393 		 */
394 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
395 
396 		/* Wake up userspace by sending the pageflip event with proper
397 		 * count and timestamp of vblank of flip completion.
398 		 */
399 		if (e) {
400 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
401 
402 			/* Event sent, so done with vblank for this flip */
403 			drm_crtc_vblank_put(&amdgpu_crtc->base);
404 		}
405 	} else if (e) {
406 		/* VRR active and inside front-porch: vblank count and
407 		 * timestamp for pageflip event will only be up to date after
408 		 * drm_crtc_handle_vblank() has been executed from late vblank
409 		 * irq handler after start of back-porch (vline 0). We queue the
410 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 		 * updated timestamp and count, once it runs after us.
412 		 *
413 		 * We need to open-code this instead of using the helper
414 		 * drm_crtc_arm_vblank_event(), as that helper would
415 		 * call drm_crtc_accurate_vblank_count(), which we must
416 		 * not call in VRR mode while we are in front-porch!
417 		 */
418 
419 		/* sequence will be replaced by real count during send-out. */
420 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 		e->pipe = amdgpu_crtc->crtc_id;
422 
423 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
424 		e = NULL;
425 	}
426 
427 	/* Keep track of vblank of this flip for flip throttling. We use the
428 	 * cooked hw counter, as that one incremented at start of this vblank
429 	 * of pageflip completion, so last_flip_vblank is the forbidden count
430 	 * for queueing new pageflips if vsync + VRR is enabled.
431 	 */
432 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
434 
435 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
437 
438 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 			 vrr_active, (int) !e);
441 }
442 
443 static void dm_vupdate_high_irq(void *interrupt_params)
444 {
445 	struct common_irq_params *irq_params = interrupt_params;
446 	struct amdgpu_device *adev = irq_params->adev;
447 	struct amdgpu_crtc *acrtc;
448 	unsigned long flags;
449 	int vrr_active;
450 
451 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
452 
453 	if (acrtc) {
454 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
455 
456 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
457 			      acrtc->crtc_id,
458 			      vrr_active);
459 
460 		/* Core vblank handling is done here after end of front-porch in
461 		 * vrr mode, as vblank timestamping will give valid results
462 		 * while now done after front-porch. This will also deliver
463 		 * page-flip completion events that have been queued to us
464 		 * if a pageflip happened inside front-porch.
465 		 */
466 		if (vrr_active) {
467 			drm_crtc_handle_vblank(&acrtc->base);
468 
469 			/* BTR processing for pre-DCE12 ASICs */
470 			if (acrtc->dm_irq_params.stream &&
471 			    adev->family < AMDGPU_FAMILY_AI) {
472 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 				mod_freesync_handle_v_update(
474 				    adev->dm.freesync_module,
475 				    acrtc->dm_irq_params.stream,
476 				    &acrtc->dm_irq_params.vrr_params);
477 
478 				dc_stream_adjust_vmin_vmax(
479 				    adev->dm.dc,
480 				    acrtc->dm_irq_params.stream,
481 				    &acrtc->dm_irq_params.vrr_params.adjust);
482 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
483 			}
484 		}
485 	}
486 }
487 
488 /**
489  * dm_crtc_high_irq() - Handles CRTC interrupt
490  * @interrupt_params: used for determining the CRTC instance
491  *
492  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
493  * event handler.
494  */
495 static void dm_crtc_high_irq(void *interrupt_params)
496 {
497 	struct common_irq_params *irq_params = interrupt_params;
498 	struct amdgpu_device *adev = irq_params->adev;
499 	struct amdgpu_crtc *acrtc;
500 	unsigned long flags;
501 	int vrr_active;
502 
503 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
504 	if (!acrtc)
505 		return;
506 
507 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
508 
509 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 		      vrr_active, acrtc->dm_irq_params.active_planes);
511 
512 	/**
513 	 * Core vblank handling at start of front-porch is only possible
514 	 * in non-vrr mode, as only there vblank timestamping will give
515 	 * valid results while done in front-porch. Otherwise defer it
516 	 * to dm_vupdate_high_irq after end of front-porch.
517 	 */
518 	if (!vrr_active)
519 		drm_crtc_handle_vblank(&acrtc->base);
520 
521 	/**
522 	 * Following stuff must happen at start of vblank, for crc
523 	 * computation and below-the-range btr support in vrr mode.
524 	 */
525 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
526 
527 	/* BTR updates need to happen before VUPDATE on Vega and above. */
528 	if (adev->family < AMDGPU_FAMILY_AI)
529 		return;
530 
531 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
532 
533 	if (acrtc->dm_irq_params.stream &&
534 	    acrtc->dm_irq_params.vrr_params.supported &&
535 	    acrtc->dm_irq_params.freesync_config.state ==
536 		    VRR_STATE_ACTIVE_VARIABLE) {
537 		mod_freesync_handle_v_update(adev->dm.freesync_module,
538 					     acrtc->dm_irq_params.stream,
539 					     &acrtc->dm_irq_params.vrr_params);
540 
541 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 					   &acrtc->dm_irq_params.vrr_params.adjust);
543 	}
544 
545 	/*
546 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
547 	 * In that case, pageflip completion interrupts won't fire and pageflip
548 	 * completion events won't get delivered. Prevent this by sending
549 	 * pending pageflip events from here if a flip is still pending.
550 	 *
551 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 	 * avoid race conditions between flip programming and completion,
553 	 * which could cause too early flip completion events.
554 	 */
555 	if (adev->family >= AMDGPU_FAMILY_RV &&
556 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 	    acrtc->dm_irq_params.active_planes == 0) {
558 		if (acrtc->event) {
559 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
560 			acrtc->event = NULL;
561 			drm_crtc_vblank_put(&acrtc->base);
562 		}
563 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
564 	}
565 
566 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
567 }
568 
569 static int dm_set_clockgating_state(void *handle,
570 		  enum amd_clockgating_state state)
571 {
572 	return 0;
573 }
574 
575 static int dm_set_powergating_state(void *handle,
576 		  enum amd_powergating_state state)
577 {
578 	return 0;
579 }
580 
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
583 
584 /* Allocate memory for FBC compressed data  */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
586 {
587 	struct drm_device *dev = connector->dev;
588 	struct amdgpu_device *adev = drm_to_adev(dev);
589 	struct dm_compressor_info *compressor = &adev->dm.compressor;
590 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 	struct drm_display_mode *mode;
592 	unsigned long max_size = 0;
593 
594 	if (adev->dm.dc->fbc_compressor == NULL)
595 		return;
596 
597 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
598 		return;
599 
600 	if (compressor->bo_ptr)
601 		return;
602 
603 
604 	list_for_each_entry(mode, &connector->modes, head) {
605 		if (max_size < mode->htotal * mode->vtotal)
606 			max_size = mode->htotal * mode->vtotal;
607 	}
608 
609 	if (max_size) {
610 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612 			    &compressor->gpu_addr, &compressor->cpu_addr);
613 
614 		if (r)
615 			DRM_ERROR("DM: Failed to initialize FBC\n");
616 		else {
617 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
619 		}
620 
621 	}
622 
623 }
624 
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 					  int pipe, bool *enabled,
627 					  unsigned char *buf, int max_bytes)
628 {
629 	struct drm_device *dev = dev_get_drvdata(kdev);
630 	struct amdgpu_device *adev = drm_to_adev(dev);
631 	struct drm_connector *connector;
632 	struct drm_connector_list_iter conn_iter;
633 	struct amdgpu_dm_connector *aconnector;
634 	int ret = 0;
635 
636 	*enabled = false;
637 
638 	mutex_lock(&adev->dm.audio_lock);
639 
640 	drm_connector_list_iter_begin(dev, &conn_iter);
641 	drm_for_each_connector_iter(connector, &conn_iter) {
642 		aconnector = to_amdgpu_dm_connector(connector);
643 		if (aconnector->audio_inst != port)
644 			continue;
645 
646 		*enabled = true;
647 		ret = drm_eld_size(connector->eld);
648 		memcpy(buf, connector->eld, min(max_bytes, ret));
649 
650 		break;
651 	}
652 	drm_connector_list_iter_end(&conn_iter);
653 
654 	mutex_unlock(&adev->dm.audio_lock);
655 
656 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
657 
658 	return ret;
659 }
660 
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 	.get_eld = amdgpu_dm_audio_component_get_eld,
663 };
664 
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 				       struct device *hda_kdev, void *data)
667 {
668 	struct drm_device *dev = dev_get_drvdata(kdev);
669 	struct amdgpu_device *adev = drm_to_adev(dev);
670 	struct drm_audio_component *acomp = data;
671 
672 	acomp->ops = &amdgpu_dm_audio_component_ops;
673 	acomp->dev = kdev;
674 	adev->dm.audio_component = acomp;
675 
676 	return 0;
677 }
678 
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 					  struct device *hda_kdev, void *data)
681 {
682 	struct drm_device *dev = dev_get_drvdata(kdev);
683 	struct amdgpu_device *adev = drm_to_adev(dev);
684 	struct drm_audio_component *acomp = data;
685 
686 	acomp->ops = NULL;
687 	acomp->dev = NULL;
688 	adev->dm.audio_component = NULL;
689 }
690 
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 	.bind	= amdgpu_dm_audio_component_bind,
693 	.unbind	= amdgpu_dm_audio_component_unbind,
694 };
695 
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
697 {
698 	int i, ret;
699 
700 	if (!amdgpu_audio)
701 		return 0;
702 
703 	adev->mode_info.audio.enabled = true;
704 
705 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
706 
707 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 		adev->mode_info.audio.pin[i].channels = -1;
709 		adev->mode_info.audio.pin[i].rate = -1;
710 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 		adev->mode_info.audio.pin[i].status_bits = 0;
712 		adev->mode_info.audio.pin[i].category_code = 0;
713 		adev->mode_info.audio.pin[i].connected = false;
714 		adev->mode_info.audio.pin[i].id =
715 			adev->dm.dc->res_pool->audios[i]->inst;
716 		adev->mode_info.audio.pin[i].offset = 0;
717 	}
718 
719 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720 	if (ret < 0)
721 		return ret;
722 
723 	adev->dm.audio_registered = true;
724 
725 	return 0;
726 }
727 
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
729 {
730 	if (!amdgpu_audio)
731 		return;
732 
733 	if (!adev->mode_info.audio.enabled)
734 		return;
735 
736 	if (adev->dm.audio_registered) {
737 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 		adev->dm.audio_registered = false;
739 	}
740 
741 	/* TODO: Disable audio? */
742 
743 	adev->mode_info.audio.enabled = false;
744 }
745 
746 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
747 {
748 	struct drm_audio_component *acomp = adev->dm.audio_component;
749 
750 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
752 
753 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
754 						 pin, -1);
755 	}
756 }
757 
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
759 {
760 	const struct dmcub_firmware_header_v1_0 *hdr;
761 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 	struct abm *abm = adev->dm.dc->res_pool->abm;
766 	struct dmub_srv_hw_params hw_params;
767 	enum dmub_status status;
768 	const unsigned char *fw_inst_const, *fw_bss_data;
769 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
770 	bool has_hw_support;
771 
772 	if (!dmub_srv)
773 		/* DMUB isn't supported on the ASIC. */
774 		return 0;
775 
776 	if (!fb_info) {
777 		DRM_ERROR("No framebuffer info for DMUB service.\n");
778 		return -EINVAL;
779 	}
780 
781 	if (!dmub_fw) {
782 		/* Firmware required for DMUB support. */
783 		DRM_ERROR("No firmware provided for DMUB.\n");
784 		return -EINVAL;
785 	}
786 
787 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 	if (status != DMUB_STATUS_OK) {
789 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
790 		return -EINVAL;
791 	}
792 
793 	if (!has_hw_support) {
794 		DRM_INFO("DMUB unsupported on ASIC\n");
795 		return 0;
796 	}
797 
798 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
799 
800 	fw_inst_const = dmub_fw->data +
801 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
802 			PSP_HEADER_BYTES;
803 
804 	fw_bss_data = dmub_fw->data +
805 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 		      le32_to_cpu(hdr->inst_const_bytes);
807 
808 	/* Copy firmware and bios info into FB memory. */
809 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
811 
812 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
813 
814 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 	 * amdgpu_ucode_init_single_fw will load dmub firmware
816 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 	 * will be done by dm_dmub_hw_init
818 	 */
819 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
821 				fw_inst_const_size);
822 	}
823 
824 	if (fw_bss_data_size)
825 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 		       fw_bss_data, fw_bss_data_size);
827 
828 	/* Copy firmware bios info into FB memory. */
829 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
830 	       adev->bios_size);
831 
832 	/* Reset regions that need to be reset. */
833 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
834 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
835 
836 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
838 
839 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
841 
842 	/* Initialize hardware. */
843 	memset(&hw_params, 0, sizeof(hw_params));
844 	hw_params.fb_base = adev->gmc.fb_start;
845 	hw_params.fb_offset = adev->gmc.aper_base;
846 
847 	/* backdoor load firmware and trigger dmub running */
848 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 		hw_params.load_inst_const = true;
850 
851 	if (dmcu)
852 		hw_params.psp_version = dmcu->psp_version;
853 
854 	for (i = 0; i < fb_info->num_fb; ++i)
855 		hw_params.fb[i] = &fb_info->fb[i];
856 
857 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 	if (status != DMUB_STATUS_OK) {
859 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
860 		return -EINVAL;
861 	}
862 
863 	/* Wait for firmware load to finish. */
864 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 	if (status != DMUB_STATUS_OK)
866 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
867 
868 	/* Init DMCU and ABM if available. */
869 	if (dmcu && abm) {
870 		dmcu->funcs->dmcu_init(dmcu);
871 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
872 	}
873 
874 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 	if (!adev->dm.dc->ctx->dmub_srv) {
876 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877 		return -ENOMEM;
878 	}
879 
880 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 		 adev->dm.dmcub_fw_version);
882 
883 	return 0;
884 }
885 
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
888 {
889 	uint64_t pt_base;
890 	uint32_t logical_addr_low;
891 	uint32_t logical_addr_high;
892 	uint32_t agp_base, agp_bot, agp_top;
893 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
894 
895 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
896 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
897 
898 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
899 		/*
900 		 * Raven2 has a HW issue that it is unable to use the vram which
901 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
902 		 * workaround that increase system aperture high address (add 1)
903 		 * to get rid of the VM fault and hardware hang.
904 		 */
905 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
906 	else
907 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
908 
909 	agp_base = 0;
910 	agp_bot = adev->gmc.agp_start >> 24;
911 	agp_top = adev->gmc.agp_end >> 24;
912 
913 
914 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 	page_table_base.low_part = lower_32_bits(pt_base);
920 
921 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
923 
924 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
925 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
927 
928 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
931 
932 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
935 
936 	pa_config->is_hvm_enabled = 0;
937 
938 }
939 #endif
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 static void event_mall_stutter(struct work_struct *work)
942 {
943 
944 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
945 	struct amdgpu_display_manager *dm = vblank_work->dm;
946 
947 	mutex_lock(&dm->dc_lock);
948 
949 	if (vblank_work->enable)
950 		dm->active_vblank_irq_count++;
951 	else
952 		dm->active_vblank_irq_count--;
953 
954 
955 	dc_allow_idle_optimizations(
956 		dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
957 
958 	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
959 
960 
961 	mutex_unlock(&dm->dc_lock);
962 }
963 
964 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
965 {
966 
967 	int max_caps = dc->caps.max_links;
968 	struct vblank_workqueue *vblank_work;
969 	int i = 0;
970 
971 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
972 	if (ZERO_OR_NULL_PTR(vblank_work)) {
973 		kfree(vblank_work);
974 		return NULL;
975 	}
976 
977 	for (i = 0; i < max_caps; i++)
978 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
979 
980 	return vblank_work;
981 }
982 #endif
983 static int amdgpu_dm_init(struct amdgpu_device *adev)
984 {
985 	struct dc_init_data init_data;
986 #ifdef CONFIG_DRM_AMD_DC_HDCP
987 	struct dc_callback_init init_params;
988 #endif
989 	int r;
990 
991 	adev->dm.ddev = adev_to_drm(adev);
992 	adev->dm.adev = adev;
993 
994 	/* Zero all the fields */
995 	memset(&init_data, 0, sizeof(init_data));
996 #ifdef CONFIG_DRM_AMD_DC_HDCP
997 	memset(&init_params, 0, sizeof(init_params));
998 #endif
999 
1000 	mutex_init(&adev->dm.dc_lock);
1001 	mutex_init(&adev->dm.audio_lock);
1002 #if defined(CONFIG_DRM_AMD_DC_DCN)
1003 	spin_lock_init(&adev->dm.vblank_lock);
1004 #endif
1005 
1006 	if(amdgpu_dm_irq_init(adev)) {
1007 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1008 		goto error;
1009 	}
1010 
1011 	init_data.asic_id.chip_family = adev->family;
1012 
1013 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1014 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1015 
1016 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1017 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1018 	init_data.asic_id.atombios_base_address =
1019 		adev->mode_info.atom_context->bios;
1020 
1021 	init_data.driver = adev;
1022 
1023 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1024 
1025 	if (!adev->dm.cgs_device) {
1026 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1027 		goto error;
1028 	}
1029 
1030 	init_data.cgs_device = adev->dm.cgs_device;
1031 
1032 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1033 
1034 	switch (adev->asic_type) {
1035 	case CHIP_CARRIZO:
1036 	case CHIP_STONEY:
1037 	case CHIP_RAVEN:
1038 	case CHIP_RENOIR:
1039 		init_data.flags.gpu_vm_support = true;
1040 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1041 			init_data.flags.disable_dmcu = true;
1042 		break;
1043 #if defined(CONFIG_DRM_AMD_DC_DCN)
1044 	case CHIP_VANGOGH:
1045 		init_data.flags.gpu_vm_support = true;
1046 		break;
1047 #endif
1048 	default:
1049 		break;
1050 	}
1051 
1052 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1053 		init_data.flags.fbc_support = true;
1054 
1055 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1056 		init_data.flags.multi_mon_pp_mclk_switch = true;
1057 
1058 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1059 		init_data.flags.disable_fractional_pwm = true;
1060 
1061 	init_data.flags.power_down_display_on_boot = true;
1062 
1063 	/* Display Core create. */
1064 	adev->dm.dc = dc_create(&init_data);
1065 
1066 	if (adev->dm.dc) {
1067 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1068 	} else {
1069 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1070 		goto error;
1071 	}
1072 
1073 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1074 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1075 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1076 	}
1077 
1078 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1079 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1080 
1081 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1082 		adev->dm.dc->debug.disable_stutter = true;
1083 
1084 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1085 		adev->dm.dc->debug.disable_dsc = true;
1086 
1087 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1088 		adev->dm.dc->debug.disable_clock_gate = true;
1089 
1090 	r = dm_dmub_hw_init(adev);
1091 	if (r) {
1092 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1093 		goto error;
1094 	}
1095 
1096 	dc_hardware_init(adev->dm.dc);
1097 
1098 #if defined(CONFIG_DRM_AMD_DC_DCN)
1099 	if (adev->apu_flags) {
1100 		struct dc_phy_addr_space_config pa_config;
1101 
1102 		mmhub_read_system_context(adev, &pa_config);
1103 
1104 		// Call the DC init_memory func
1105 		dc_setup_system_context(adev->dm.dc, &pa_config);
1106 	}
1107 #endif
1108 
1109 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1110 	if (!adev->dm.freesync_module) {
1111 		DRM_ERROR(
1112 		"amdgpu: failed to initialize freesync_module.\n");
1113 	} else
1114 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1115 				adev->dm.freesync_module);
1116 
1117 	amdgpu_dm_init_color_mod();
1118 
1119 #if defined(CONFIG_DRM_AMD_DC_DCN)
1120 	if (adev->dm.dc->caps.max_links > 0) {
1121 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1122 
1123 		if (!adev->dm.vblank_workqueue)
1124 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1125 		else
1126 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1127 	}
1128 #endif
1129 
1130 #ifdef CONFIG_DRM_AMD_DC_HDCP
1131 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1132 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1133 
1134 		if (!adev->dm.hdcp_workqueue)
1135 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1136 		else
1137 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1138 
1139 		dc_init_callbacks(adev->dm.dc, &init_params);
1140 	}
1141 #endif
1142 	if (amdgpu_dm_initialize_drm_device(adev)) {
1143 		DRM_ERROR(
1144 		"amdgpu: failed to initialize sw for display support.\n");
1145 		goto error;
1146 	}
1147 
1148 	/* create fake encoders for MST */
1149 	dm_dp_create_fake_mst_encoders(adev);
1150 
1151 	/* TODO: Add_display_info? */
1152 
1153 	/* TODO use dynamic cursor width */
1154 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1155 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1156 
1157 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1158 		DRM_ERROR(
1159 		"amdgpu: failed to initialize sw for display support.\n");
1160 		goto error;
1161 	}
1162 
1163 
1164 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1165 
1166 	return 0;
1167 error:
1168 	amdgpu_dm_fini(adev);
1169 
1170 	return -EINVAL;
1171 }
1172 
1173 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1174 {
1175 	int i;
1176 
1177 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1178 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1179 	}
1180 
1181 	amdgpu_dm_audio_fini(adev);
1182 
1183 	amdgpu_dm_destroy_drm_device(&adev->dm);
1184 
1185 #ifdef CONFIG_DRM_AMD_DC_HDCP
1186 	if (adev->dm.hdcp_workqueue) {
1187 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1188 		adev->dm.hdcp_workqueue = NULL;
1189 	}
1190 
1191 	if (adev->dm.dc)
1192 		dc_deinit_callbacks(adev->dm.dc);
1193 #endif
1194 	if (adev->dm.dc->ctx->dmub_srv) {
1195 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1196 		adev->dm.dc->ctx->dmub_srv = NULL;
1197 	}
1198 
1199 	if (adev->dm.dmub_bo)
1200 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1201 				      &adev->dm.dmub_bo_gpu_addr,
1202 				      &adev->dm.dmub_bo_cpu_addr);
1203 
1204 	/* DC Destroy TODO: Replace destroy DAL */
1205 	if (adev->dm.dc)
1206 		dc_destroy(&adev->dm.dc);
1207 	/*
1208 	 * TODO: pageflip, vlank interrupt
1209 	 *
1210 	 * amdgpu_dm_irq_fini(adev);
1211 	 */
1212 
1213 	if (adev->dm.cgs_device) {
1214 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1215 		adev->dm.cgs_device = NULL;
1216 	}
1217 	if (adev->dm.freesync_module) {
1218 		mod_freesync_destroy(adev->dm.freesync_module);
1219 		adev->dm.freesync_module = NULL;
1220 	}
1221 
1222 	mutex_destroy(&adev->dm.audio_lock);
1223 	mutex_destroy(&adev->dm.dc_lock);
1224 
1225 	return;
1226 }
1227 
1228 static int load_dmcu_fw(struct amdgpu_device *adev)
1229 {
1230 	const char *fw_name_dmcu = NULL;
1231 	int r;
1232 	const struct dmcu_firmware_header_v1_0 *hdr;
1233 
1234 	switch(adev->asic_type) {
1235 #if defined(CONFIG_DRM_AMD_DC_SI)
1236 	case CHIP_TAHITI:
1237 	case CHIP_PITCAIRN:
1238 	case CHIP_VERDE:
1239 	case CHIP_OLAND:
1240 #endif
1241 	case CHIP_BONAIRE:
1242 	case CHIP_HAWAII:
1243 	case CHIP_KAVERI:
1244 	case CHIP_KABINI:
1245 	case CHIP_MULLINS:
1246 	case CHIP_TONGA:
1247 	case CHIP_FIJI:
1248 	case CHIP_CARRIZO:
1249 	case CHIP_STONEY:
1250 	case CHIP_POLARIS11:
1251 	case CHIP_POLARIS10:
1252 	case CHIP_POLARIS12:
1253 	case CHIP_VEGAM:
1254 	case CHIP_VEGA10:
1255 	case CHIP_VEGA12:
1256 	case CHIP_VEGA20:
1257 	case CHIP_NAVI10:
1258 	case CHIP_NAVI14:
1259 	case CHIP_RENOIR:
1260 	case CHIP_SIENNA_CICHLID:
1261 	case CHIP_NAVY_FLOUNDER:
1262 	case CHIP_DIMGREY_CAVEFISH:
1263 	case CHIP_VANGOGH:
1264 		return 0;
1265 	case CHIP_NAVI12:
1266 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1267 		break;
1268 	case CHIP_RAVEN:
1269 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1270 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1271 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1272 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1273 		else
1274 			return 0;
1275 		break;
1276 	default:
1277 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1278 		return -EINVAL;
1279 	}
1280 
1281 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1282 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1283 		return 0;
1284 	}
1285 
1286 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1287 	if (r == -ENOENT) {
1288 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1289 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1290 		adev->dm.fw_dmcu = NULL;
1291 		return 0;
1292 	}
1293 	if (r) {
1294 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1295 			fw_name_dmcu);
1296 		return r;
1297 	}
1298 
1299 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1300 	if (r) {
1301 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1302 			fw_name_dmcu);
1303 		release_firmware(adev->dm.fw_dmcu);
1304 		adev->dm.fw_dmcu = NULL;
1305 		return r;
1306 	}
1307 
1308 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1309 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1310 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1311 	adev->firmware.fw_size +=
1312 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1313 
1314 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1315 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1316 	adev->firmware.fw_size +=
1317 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1318 
1319 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1320 
1321 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1322 
1323 	return 0;
1324 }
1325 
1326 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1327 {
1328 	struct amdgpu_device *adev = ctx;
1329 
1330 	return dm_read_reg(adev->dm.dc->ctx, address);
1331 }
1332 
1333 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1334 				     uint32_t value)
1335 {
1336 	struct amdgpu_device *adev = ctx;
1337 
1338 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1339 }
1340 
1341 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1342 {
1343 	struct dmub_srv_create_params create_params;
1344 	struct dmub_srv_region_params region_params;
1345 	struct dmub_srv_region_info region_info;
1346 	struct dmub_srv_fb_params fb_params;
1347 	struct dmub_srv_fb_info *fb_info;
1348 	struct dmub_srv *dmub_srv;
1349 	const struct dmcub_firmware_header_v1_0 *hdr;
1350 	const char *fw_name_dmub;
1351 	enum dmub_asic dmub_asic;
1352 	enum dmub_status status;
1353 	int r;
1354 
1355 	switch (adev->asic_type) {
1356 	case CHIP_RENOIR:
1357 		dmub_asic = DMUB_ASIC_DCN21;
1358 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1359 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1360 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1361 		break;
1362 	case CHIP_SIENNA_CICHLID:
1363 		dmub_asic = DMUB_ASIC_DCN30;
1364 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1365 		break;
1366 	case CHIP_NAVY_FLOUNDER:
1367 		dmub_asic = DMUB_ASIC_DCN30;
1368 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1369 		break;
1370 	case CHIP_VANGOGH:
1371 		dmub_asic = DMUB_ASIC_DCN301;
1372 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1373 		break;
1374 	case CHIP_DIMGREY_CAVEFISH:
1375 		dmub_asic = DMUB_ASIC_DCN302;
1376 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1377 		break;
1378 
1379 	default:
1380 		/* ASIC doesn't support DMUB. */
1381 		return 0;
1382 	}
1383 
1384 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1385 	if (r) {
1386 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1387 		return 0;
1388 	}
1389 
1390 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1391 	if (r) {
1392 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1393 		return 0;
1394 	}
1395 
1396 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1397 
1398 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1399 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1400 			AMDGPU_UCODE_ID_DMCUB;
1401 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1402 			adev->dm.dmub_fw;
1403 		adev->firmware.fw_size +=
1404 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1405 
1406 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1407 			 adev->dm.dmcub_fw_version);
1408 	}
1409 
1410 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1411 
1412 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1413 	dmub_srv = adev->dm.dmub_srv;
1414 
1415 	if (!dmub_srv) {
1416 		DRM_ERROR("Failed to allocate DMUB service!\n");
1417 		return -ENOMEM;
1418 	}
1419 
1420 	memset(&create_params, 0, sizeof(create_params));
1421 	create_params.user_ctx = adev;
1422 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1423 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1424 	create_params.asic = dmub_asic;
1425 
1426 	/* Create the DMUB service. */
1427 	status = dmub_srv_create(dmub_srv, &create_params);
1428 	if (status != DMUB_STATUS_OK) {
1429 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1430 		return -EINVAL;
1431 	}
1432 
1433 	/* Calculate the size of all the regions for the DMUB service. */
1434 	memset(&region_params, 0, sizeof(region_params));
1435 
1436 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1437 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1438 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1439 	region_params.vbios_size = adev->bios_size;
1440 	region_params.fw_bss_data = region_params.bss_data_size ?
1441 		adev->dm.dmub_fw->data +
1442 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1443 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1444 	region_params.fw_inst_const =
1445 		adev->dm.dmub_fw->data +
1446 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1447 		PSP_HEADER_BYTES;
1448 
1449 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1450 					   &region_info);
1451 
1452 	if (status != DMUB_STATUS_OK) {
1453 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1454 		return -EINVAL;
1455 	}
1456 
1457 	/*
1458 	 * Allocate a framebuffer based on the total size of all the regions.
1459 	 * TODO: Move this into GART.
1460 	 */
1461 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1462 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1463 				    &adev->dm.dmub_bo_gpu_addr,
1464 				    &adev->dm.dmub_bo_cpu_addr);
1465 	if (r)
1466 		return r;
1467 
1468 	/* Rebase the regions on the framebuffer address. */
1469 	memset(&fb_params, 0, sizeof(fb_params));
1470 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1471 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1472 	fb_params.region_info = &region_info;
1473 
1474 	adev->dm.dmub_fb_info =
1475 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1476 	fb_info = adev->dm.dmub_fb_info;
1477 
1478 	if (!fb_info) {
1479 		DRM_ERROR(
1480 			"Failed to allocate framebuffer info for DMUB service!\n");
1481 		return -ENOMEM;
1482 	}
1483 
1484 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1485 	if (status != DMUB_STATUS_OK) {
1486 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1487 		return -EINVAL;
1488 	}
1489 
1490 	return 0;
1491 }
1492 
1493 static int dm_sw_init(void *handle)
1494 {
1495 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1496 	int r;
1497 
1498 	r = dm_dmub_sw_init(adev);
1499 	if (r)
1500 		return r;
1501 
1502 	return load_dmcu_fw(adev);
1503 }
1504 
1505 static int dm_sw_fini(void *handle)
1506 {
1507 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1508 
1509 	kfree(adev->dm.dmub_fb_info);
1510 	adev->dm.dmub_fb_info = NULL;
1511 
1512 	if (adev->dm.dmub_srv) {
1513 		dmub_srv_destroy(adev->dm.dmub_srv);
1514 		adev->dm.dmub_srv = NULL;
1515 	}
1516 
1517 	release_firmware(adev->dm.dmub_fw);
1518 	adev->dm.dmub_fw = NULL;
1519 
1520 	release_firmware(adev->dm.fw_dmcu);
1521 	adev->dm.fw_dmcu = NULL;
1522 
1523 	return 0;
1524 }
1525 
1526 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1527 {
1528 	struct amdgpu_dm_connector *aconnector;
1529 	struct drm_connector *connector;
1530 	struct drm_connector_list_iter iter;
1531 	int ret = 0;
1532 
1533 	drm_connector_list_iter_begin(dev, &iter);
1534 	drm_for_each_connector_iter(connector, &iter) {
1535 		aconnector = to_amdgpu_dm_connector(connector);
1536 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1537 		    aconnector->mst_mgr.aux) {
1538 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1539 					 aconnector,
1540 					 aconnector->base.base.id);
1541 
1542 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1543 			if (ret < 0) {
1544 				DRM_ERROR("DM_MST: Failed to start MST\n");
1545 				aconnector->dc_link->type =
1546 					dc_connection_single;
1547 				break;
1548 			}
1549 		}
1550 	}
1551 	drm_connector_list_iter_end(&iter);
1552 
1553 	return ret;
1554 }
1555 
1556 static int dm_late_init(void *handle)
1557 {
1558 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1559 
1560 	struct dmcu_iram_parameters params;
1561 	unsigned int linear_lut[16];
1562 	int i;
1563 	struct dmcu *dmcu = NULL;
1564 	bool ret = true;
1565 
1566 	dmcu = adev->dm.dc->res_pool->dmcu;
1567 
1568 	for (i = 0; i < 16; i++)
1569 		linear_lut[i] = 0xFFFF * i / 15;
1570 
1571 	params.set = 0;
1572 	params.backlight_ramping_start = 0xCCCC;
1573 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1574 	params.backlight_lut_array_size = 16;
1575 	params.backlight_lut_array = linear_lut;
1576 
1577 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1578 	 * 0xFFFF x 0.01 = 0x28F
1579 	 */
1580 	params.min_abm_backlight = 0x28F;
1581 
1582 	/* In the case where abm is implemented on dmcub,
1583 	 * dmcu object will be null.
1584 	 * ABM 2.4 and up are implemented on dmcub.
1585 	 */
1586 	if (dmcu)
1587 		ret = dmcu_load_iram(dmcu, params);
1588 	else if (adev->dm.dc->ctx->dmub_srv)
1589 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1590 
1591 	if (!ret)
1592 		return -EINVAL;
1593 
1594 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1595 }
1596 
1597 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1598 {
1599 	struct amdgpu_dm_connector *aconnector;
1600 	struct drm_connector *connector;
1601 	struct drm_connector_list_iter iter;
1602 	struct drm_dp_mst_topology_mgr *mgr;
1603 	int ret;
1604 	bool need_hotplug = false;
1605 
1606 	drm_connector_list_iter_begin(dev, &iter);
1607 	drm_for_each_connector_iter(connector, &iter) {
1608 		aconnector = to_amdgpu_dm_connector(connector);
1609 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1610 		    aconnector->mst_port)
1611 			continue;
1612 
1613 		mgr = &aconnector->mst_mgr;
1614 
1615 		if (suspend) {
1616 			drm_dp_mst_topology_mgr_suspend(mgr);
1617 		} else {
1618 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1619 			if (ret < 0) {
1620 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1621 				need_hotplug = true;
1622 			}
1623 		}
1624 	}
1625 	drm_connector_list_iter_end(&iter);
1626 
1627 	if (need_hotplug)
1628 		drm_kms_helper_hotplug_event(dev);
1629 }
1630 
1631 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1632 {
1633 	struct smu_context *smu = &adev->smu;
1634 	int ret = 0;
1635 
1636 	if (!is_support_sw_smu(adev))
1637 		return 0;
1638 
1639 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1640 	 * on window driver dc implementation.
1641 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1642 	 * should be passed to smu during boot up and resume from s3.
1643 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1644 	 * dcn20_resource_construct
1645 	 * then call pplib functions below to pass the settings to smu:
1646 	 * smu_set_watermarks_for_clock_ranges
1647 	 * smu_set_watermarks_table
1648 	 * navi10_set_watermarks_table
1649 	 * smu_write_watermarks_table
1650 	 *
1651 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1652 	 * dc has implemented different flow for window driver:
1653 	 * dc_hardware_init / dc_set_power_state
1654 	 * dcn10_init_hw
1655 	 * notify_wm_ranges
1656 	 * set_wm_ranges
1657 	 * -- Linux
1658 	 * smu_set_watermarks_for_clock_ranges
1659 	 * renoir_set_watermarks_table
1660 	 * smu_write_watermarks_table
1661 	 *
1662 	 * For Linux,
1663 	 * dc_hardware_init -> amdgpu_dm_init
1664 	 * dc_set_power_state --> dm_resume
1665 	 *
1666 	 * therefore, this function apply to navi10/12/14 but not Renoir
1667 	 * *
1668 	 */
1669 	switch(adev->asic_type) {
1670 	case CHIP_NAVI10:
1671 	case CHIP_NAVI14:
1672 	case CHIP_NAVI12:
1673 		break;
1674 	default:
1675 		return 0;
1676 	}
1677 
1678 	ret = smu_write_watermarks_table(smu);
1679 	if (ret) {
1680 		DRM_ERROR("Failed to update WMTABLE!\n");
1681 		return ret;
1682 	}
1683 
1684 	return 0;
1685 }
1686 
1687 /**
1688  * dm_hw_init() - Initialize DC device
1689  * @handle: The base driver device containing the amdgpu_dm device.
1690  *
1691  * Initialize the &struct amdgpu_display_manager device. This involves calling
1692  * the initializers of each DM component, then populating the struct with them.
1693  *
1694  * Although the function implies hardware initialization, both hardware and
1695  * software are initialized here. Splitting them out to their relevant init
1696  * hooks is a future TODO item.
1697  *
1698  * Some notable things that are initialized here:
1699  *
1700  * - Display Core, both software and hardware
1701  * - DC modules that we need (freesync and color management)
1702  * - DRM software states
1703  * - Interrupt sources and handlers
1704  * - Vblank support
1705  * - Debug FS entries, if enabled
1706  */
1707 static int dm_hw_init(void *handle)
1708 {
1709 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710 	/* Create DAL display manager */
1711 	amdgpu_dm_init(adev);
1712 	amdgpu_dm_hpd_init(adev);
1713 
1714 	return 0;
1715 }
1716 
1717 /**
1718  * dm_hw_fini() - Teardown DC device
1719  * @handle: The base driver device containing the amdgpu_dm device.
1720  *
1721  * Teardown components within &struct amdgpu_display_manager that require
1722  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1723  * were loaded. Also flush IRQ workqueues and disable them.
1724  */
1725 static int dm_hw_fini(void *handle)
1726 {
1727 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1728 
1729 	amdgpu_dm_hpd_fini(adev);
1730 
1731 	amdgpu_dm_irq_fini(adev);
1732 	amdgpu_dm_fini(adev);
1733 	return 0;
1734 }
1735 
1736 
1737 static int dm_enable_vblank(struct drm_crtc *crtc);
1738 static void dm_disable_vblank(struct drm_crtc *crtc);
1739 
1740 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1741 				 struct dc_state *state, bool enable)
1742 {
1743 	enum dc_irq_source irq_source;
1744 	struct amdgpu_crtc *acrtc;
1745 	int rc = -EBUSY;
1746 	int i = 0;
1747 
1748 	for (i = 0; i < state->stream_count; i++) {
1749 		acrtc = get_crtc_by_otg_inst(
1750 				adev, state->stream_status[i].primary_otg_inst);
1751 
1752 		if (acrtc && state->stream_status[i].plane_count != 0) {
1753 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1754 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1755 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1756 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1757 			if (rc)
1758 				DRM_WARN("Failed to %s pflip interrupts\n",
1759 					 enable ? "enable" : "disable");
1760 
1761 			if (enable) {
1762 				rc = dm_enable_vblank(&acrtc->base);
1763 				if (rc)
1764 					DRM_WARN("Failed to enable vblank interrupts\n");
1765 			} else {
1766 				dm_disable_vblank(&acrtc->base);
1767 			}
1768 
1769 		}
1770 	}
1771 
1772 }
1773 
1774 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1775 {
1776 	struct dc_state *context = NULL;
1777 	enum dc_status res = DC_ERROR_UNEXPECTED;
1778 	int i;
1779 	struct dc_stream_state *del_streams[MAX_PIPES];
1780 	int del_streams_count = 0;
1781 
1782 	memset(del_streams, 0, sizeof(del_streams));
1783 
1784 	context = dc_create_state(dc);
1785 	if (context == NULL)
1786 		goto context_alloc_fail;
1787 
1788 	dc_resource_state_copy_construct_current(dc, context);
1789 
1790 	/* First remove from context all streams */
1791 	for (i = 0; i < context->stream_count; i++) {
1792 		struct dc_stream_state *stream = context->streams[i];
1793 
1794 		del_streams[del_streams_count++] = stream;
1795 	}
1796 
1797 	/* Remove all planes for removed streams and then remove the streams */
1798 	for (i = 0; i < del_streams_count; i++) {
1799 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1800 			res = DC_FAIL_DETACH_SURFACES;
1801 			goto fail;
1802 		}
1803 
1804 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1805 		if (res != DC_OK)
1806 			goto fail;
1807 	}
1808 
1809 
1810 	res = dc_validate_global_state(dc, context, false);
1811 
1812 	if (res != DC_OK) {
1813 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1814 		goto fail;
1815 	}
1816 
1817 	res = dc_commit_state(dc, context);
1818 
1819 fail:
1820 	dc_release_state(context);
1821 
1822 context_alloc_fail:
1823 	return res;
1824 }
1825 
1826 static int dm_suspend(void *handle)
1827 {
1828 	struct amdgpu_device *adev = handle;
1829 	struct amdgpu_display_manager *dm = &adev->dm;
1830 	int ret = 0;
1831 
1832 	if (amdgpu_in_reset(adev)) {
1833 		mutex_lock(&dm->dc_lock);
1834 
1835 #if defined(CONFIG_DRM_AMD_DC_DCN)
1836 		dc_allow_idle_optimizations(adev->dm.dc, false);
1837 #endif
1838 
1839 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1840 
1841 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1842 
1843 		amdgpu_dm_commit_zero_streams(dm->dc);
1844 
1845 		amdgpu_dm_irq_suspend(adev);
1846 
1847 		return ret;
1848 	}
1849 
1850 	WARN_ON(adev->dm.cached_state);
1851 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1852 
1853 	s3_handle_mst(adev_to_drm(adev), true);
1854 
1855 	amdgpu_dm_irq_suspend(adev);
1856 
1857 
1858 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1859 
1860 	return 0;
1861 }
1862 
1863 static struct amdgpu_dm_connector *
1864 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1865 					     struct drm_crtc *crtc)
1866 {
1867 	uint32_t i;
1868 	struct drm_connector_state *new_con_state;
1869 	struct drm_connector *connector;
1870 	struct drm_crtc *crtc_from_state;
1871 
1872 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1873 		crtc_from_state = new_con_state->crtc;
1874 
1875 		if (crtc_from_state == crtc)
1876 			return to_amdgpu_dm_connector(connector);
1877 	}
1878 
1879 	return NULL;
1880 }
1881 
1882 static void emulated_link_detect(struct dc_link *link)
1883 {
1884 	struct dc_sink_init_data sink_init_data = { 0 };
1885 	struct display_sink_capability sink_caps = { 0 };
1886 	enum dc_edid_status edid_status;
1887 	struct dc_context *dc_ctx = link->ctx;
1888 	struct dc_sink *sink = NULL;
1889 	struct dc_sink *prev_sink = NULL;
1890 
1891 	link->type = dc_connection_none;
1892 	prev_sink = link->local_sink;
1893 
1894 	if (prev_sink)
1895 		dc_sink_release(prev_sink);
1896 
1897 	switch (link->connector_signal) {
1898 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1899 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1900 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1901 		break;
1902 	}
1903 
1904 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1905 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1906 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1907 		break;
1908 	}
1909 
1910 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1911 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1912 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1913 		break;
1914 	}
1915 
1916 	case SIGNAL_TYPE_LVDS: {
1917 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1918 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1919 		break;
1920 	}
1921 
1922 	case SIGNAL_TYPE_EDP: {
1923 		sink_caps.transaction_type =
1924 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1925 		sink_caps.signal = SIGNAL_TYPE_EDP;
1926 		break;
1927 	}
1928 
1929 	case SIGNAL_TYPE_DISPLAY_PORT: {
1930 		sink_caps.transaction_type =
1931 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1932 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1933 		break;
1934 	}
1935 
1936 	default:
1937 		DC_ERROR("Invalid connector type! signal:%d\n",
1938 			link->connector_signal);
1939 		return;
1940 	}
1941 
1942 	sink_init_data.link = link;
1943 	sink_init_data.sink_signal = sink_caps.signal;
1944 
1945 	sink = dc_sink_create(&sink_init_data);
1946 	if (!sink) {
1947 		DC_ERROR("Failed to create sink!\n");
1948 		return;
1949 	}
1950 
1951 	/* dc_sink_create returns a new reference */
1952 	link->local_sink = sink;
1953 
1954 	edid_status = dm_helpers_read_local_edid(
1955 			link->ctx,
1956 			link,
1957 			sink);
1958 
1959 	if (edid_status != EDID_OK)
1960 		DC_ERROR("Failed to read EDID");
1961 
1962 }
1963 
1964 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1965 				     struct amdgpu_display_manager *dm)
1966 {
1967 	struct {
1968 		struct dc_surface_update surface_updates[MAX_SURFACES];
1969 		struct dc_plane_info plane_infos[MAX_SURFACES];
1970 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1971 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1972 		struct dc_stream_update stream_update;
1973 	} * bundle;
1974 	int k, m;
1975 
1976 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1977 
1978 	if (!bundle) {
1979 		dm_error("Failed to allocate update bundle\n");
1980 		goto cleanup;
1981 	}
1982 
1983 	for (k = 0; k < dc_state->stream_count; k++) {
1984 		bundle->stream_update.stream = dc_state->streams[k];
1985 
1986 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1987 			bundle->surface_updates[m].surface =
1988 				dc_state->stream_status->plane_states[m];
1989 			bundle->surface_updates[m].surface->force_full_update =
1990 				true;
1991 		}
1992 		dc_commit_updates_for_stream(
1993 			dm->dc, bundle->surface_updates,
1994 			dc_state->stream_status->plane_count,
1995 			dc_state->streams[k], &bundle->stream_update, dc_state);
1996 	}
1997 
1998 cleanup:
1999 	kfree(bundle);
2000 
2001 	return;
2002 }
2003 
2004 static void dm_set_dpms_off(struct dc_link *link)
2005 {
2006 	struct dc_stream_state *stream_state;
2007 	struct amdgpu_dm_connector *aconnector = link->priv;
2008 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2009 	struct dc_stream_update stream_update;
2010 	bool dpms_off = true;
2011 
2012 	memset(&stream_update, 0, sizeof(stream_update));
2013 	stream_update.dpms_off = &dpms_off;
2014 
2015 	mutex_lock(&adev->dm.dc_lock);
2016 	stream_state = dc_stream_find_from_link(link);
2017 
2018 	if (stream_state == NULL) {
2019 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2020 		mutex_unlock(&adev->dm.dc_lock);
2021 		return;
2022 	}
2023 
2024 	stream_update.stream = stream_state;
2025 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2026 				     stream_state, &stream_update,
2027 				     stream_state->ctx->dc->current_state);
2028 	mutex_unlock(&adev->dm.dc_lock);
2029 }
2030 
2031 static int dm_resume(void *handle)
2032 {
2033 	struct amdgpu_device *adev = handle;
2034 	struct drm_device *ddev = adev_to_drm(adev);
2035 	struct amdgpu_display_manager *dm = &adev->dm;
2036 	struct amdgpu_dm_connector *aconnector;
2037 	struct drm_connector *connector;
2038 	struct drm_connector_list_iter iter;
2039 	struct drm_crtc *crtc;
2040 	struct drm_crtc_state *new_crtc_state;
2041 	struct dm_crtc_state *dm_new_crtc_state;
2042 	struct drm_plane *plane;
2043 	struct drm_plane_state *new_plane_state;
2044 	struct dm_plane_state *dm_new_plane_state;
2045 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2046 	enum dc_connection_type new_connection_type = dc_connection_none;
2047 	struct dc_state *dc_state;
2048 	int i, r, j;
2049 
2050 	if (amdgpu_in_reset(adev)) {
2051 		dc_state = dm->cached_dc_state;
2052 
2053 		r = dm_dmub_hw_init(adev);
2054 		if (r)
2055 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2056 
2057 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2058 		dc_resume(dm->dc);
2059 
2060 		amdgpu_dm_irq_resume_early(adev);
2061 
2062 		for (i = 0; i < dc_state->stream_count; i++) {
2063 			dc_state->streams[i]->mode_changed = true;
2064 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2065 				dc_state->stream_status->plane_states[j]->update_flags.raw
2066 					= 0xffffffff;
2067 			}
2068 		}
2069 
2070 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2071 
2072 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2073 
2074 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2075 
2076 		dc_release_state(dm->cached_dc_state);
2077 		dm->cached_dc_state = NULL;
2078 
2079 		amdgpu_dm_irq_resume_late(adev);
2080 
2081 		mutex_unlock(&dm->dc_lock);
2082 
2083 		return 0;
2084 	}
2085 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2086 	dc_release_state(dm_state->context);
2087 	dm_state->context = dc_create_state(dm->dc);
2088 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2089 	dc_resource_state_construct(dm->dc, dm_state->context);
2090 
2091 	/* Before powering on DC we need to re-initialize DMUB. */
2092 	r = dm_dmub_hw_init(adev);
2093 	if (r)
2094 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2095 
2096 	/* power on hardware */
2097 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2098 
2099 	/* program HPD filter */
2100 	dc_resume(dm->dc);
2101 
2102 	/*
2103 	 * early enable HPD Rx IRQ, should be done before set mode as short
2104 	 * pulse interrupts are used for MST
2105 	 */
2106 	amdgpu_dm_irq_resume_early(adev);
2107 
2108 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2109 	s3_handle_mst(ddev, false);
2110 
2111 	/* Do detection*/
2112 	drm_connector_list_iter_begin(ddev, &iter);
2113 	drm_for_each_connector_iter(connector, &iter) {
2114 		aconnector = to_amdgpu_dm_connector(connector);
2115 
2116 		/*
2117 		 * this is the case when traversing through already created
2118 		 * MST connectors, should be skipped
2119 		 */
2120 		if (aconnector->mst_port)
2121 			continue;
2122 
2123 		mutex_lock(&aconnector->hpd_lock);
2124 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2125 			DRM_ERROR("KMS: Failed to detect connector\n");
2126 
2127 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2128 			emulated_link_detect(aconnector->dc_link);
2129 		else
2130 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2131 
2132 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2133 			aconnector->fake_enable = false;
2134 
2135 		if (aconnector->dc_sink)
2136 			dc_sink_release(aconnector->dc_sink);
2137 		aconnector->dc_sink = NULL;
2138 		amdgpu_dm_update_connector_after_detect(aconnector);
2139 		mutex_unlock(&aconnector->hpd_lock);
2140 	}
2141 	drm_connector_list_iter_end(&iter);
2142 
2143 	/* Force mode set in atomic commit */
2144 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2145 		new_crtc_state->active_changed = true;
2146 
2147 	/*
2148 	 * atomic_check is expected to create the dc states. We need to release
2149 	 * them here, since they were duplicated as part of the suspend
2150 	 * procedure.
2151 	 */
2152 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2153 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2154 		if (dm_new_crtc_state->stream) {
2155 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2156 			dc_stream_release(dm_new_crtc_state->stream);
2157 			dm_new_crtc_state->stream = NULL;
2158 		}
2159 	}
2160 
2161 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2162 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2163 		if (dm_new_plane_state->dc_state) {
2164 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2165 			dc_plane_state_release(dm_new_plane_state->dc_state);
2166 			dm_new_plane_state->dc_state = NULL;
2167 		}
2168 	}
2169 
2170 	drm_atomic_helper_resume(ddev, dm->cached_state);
2171 
2172 	dm->cached_state = NULL;
2173 
2174 	amdgpu_dm_irq_resume_late(adev);
2175 
2176 	amdgpu_dm_smu_write_watermarks_table(adev);
2177 
2178 	return 0;
2179 }
2180 
2181 /**
2182  * DOC: DM Lifecycle
2183  *
2184  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2185  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2186  * the base driver's device list to be initialized and torn down accordingly.
2187  *
2188  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2189  */
2190 
2191 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2192 	.name = "dm",
2193 	.early_init = dm_early_init,
2194 	.late_init = dm_late_init,
2195 	.sw_init = dm_sw_init,
2196 	.sw_fini = dm_sw_fini,
2197 	.hw_init = dm_hw_init,
2198 	.hw_fini = dm_hw_fini,
2199 	.suspend = dm_suspend,
2200 	.resume = dm_resume,
2201 	.is_idle = dm_is_idle,
2202 	.wait_for_idle = dm_wait_for_idle,
2203 	.check_soft_reset = dm_check_soft_reset,
2204 	.soft_reset = dm_soft_reset,
2205 	.set_clockgating_state = dm_set_clockgating_state,
2206 	.set_powergating_state = dm_set_powergating_state,
2207 };
2208 
2209 const struct amdgpu_ip_block_version dm_ip_block =
2210 {
2211 	.type = AMD_IP_BLOCK_TYPE_DCE,
2212 	.major = 1,
2213 	.minor = 0,
2214 	.rev = 0,
2215 	.funcs = &amdgpu_dm_funcs,
2216 };
2217 
2218 
2219 /**
2220  * DOC: atomic
2221  *
2222  * *WIP*
2223  */
2224 
2225 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2226 	.fb_create = amdgpu_display_user_framebuffer_create,
2227 	.get_format_info = amd_get_format_info,
2228 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2229 	.atomic_check = amdgpu_dm_atomic_check,
2230 	.atomic_commit = drm_atomic_helper_commit,
2231 };
2232 
2233 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2234 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2235 };
2236 
2237 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2238 {
2239 	u32 max_cll, min_cll, max, min, q, r;
2240 	struct amdgpu_dm_backlight_caps *caps;
2241 	struct amdgpu_display_manager *dm;
2242 	struct drm_connector *conn_base;
2243 	struct amdgpu_device *adev;
2244 	struct dc_link *link = NULL;
2245 	static const u8 pre_computed_values[] = {
2246 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2247 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2248 
2249 	if (!aconnector || !aconnector->dc_link)
2250 		return;
2251 
2252 	link = aconnector->dc_link;
2253 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2254 		return;
2255 
2256 	conn_base = &aconnector->base;
2257 	adev = drm_to_adev(conn_base->dev);
2258 	dm = &adev->dm;
2259 	caps = &dm->backlight_caps;
2260 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2261 	caps->aux_support = false;
2262 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2263 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2264 
2265 	if (caps->ext_caps->bits.oled == 1 ||
2266 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2267 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2268 		caps->aux_support = true;
2269 
2270 	if (amdgpu_backlight == 0)
2271 		caps->aux_support = false;
2272 	else if (amdgpu_backlight == 1)
2273 		caps->aux_support = true;
2274 
2275 	/* From the specification (CTA-861-G), for calculating the maximum
2276 	 * luminance we need to use:
2277 	 *	Luminance = 50*2**(CV/32)
2278 	 * Where CV is a one-byte value.
2279 	 * For calculating this expression we may need float point precision;
2280 	 * to avoid this complexity level, we take advantage that CV is divided
2281 	 * by a constant. From the Euclids division algorithm, we know that CV
2282 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2283 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2284 	 * need to pre-compute the value of r/32. For pre-computing the values
2285 	 * We just used the following Ruby line:
2286 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2287 	 * The results of the above expressions can be verified at
2288 	 * pre_computed_values.
2289 	 */
2290 	q = max_cll >> 5;
2291 	r = max_cll % 32;
2292 	max = (1 << q) * pre_computed_values[r];
2293 
2294 	// min luminance: maxLum * (CV/255)^2 / 100
2295 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2296 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2297 
2298 	caps->aux_max_input_signal = max;
2299 	caps->aux_min_input_signal = min;
2300 }
2301 
2302 void amdgpu_dm_update_connector_after_detect(
2303 		struct amdgpu_dm_connector *aconnector)
2304 {
2305 	struct drm_connector *connector = &aconnector->base;
2306 	struct drm_device *dev = connector->dev;
2307 	struct dc_sink *sink;
2308 
2309 	/* MST handled by drm_mst framework */
2310 	if (aconnector->mst_mgr.mst_state == true)
2311 		return;
2312 
2313 	sink = aconnector->dc_link->local_sink;
2314 	if (sink)
2315 		dc_sink_retain(sink);
2316 
2317 	/*
2318 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2319 	 * the connector sink is set to either fake or physical sink depends on link status.
2320 	 * Skip if already done during boot.
2321 	 */
2322 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2323 			&& aconnector->dc_em_sink) {
2324 
2325 		/*
2326 		 * For S3 resume with headless use eml_sink to fake stream
2327 		 * because on resume connector->sink is set to NULL
2328 		 */
2329 		mutex_lock(&dev->mode_config.mutex);
2330 
2331 		if (sink) {
2332 			if (aconnector->dc_sink) {
2333 				amdgpu_dm_update_freesync_caps(connector, NULL);
2334 				/*
2335 				 * retain and release below are used to
2336 				 * bump up refcount for sink because the link doesn't point
2337 				 * to it anymore after disconnect, so on next crtc to connector
2338 				 * reshuffle by UMD we will get into unwanted dc_sink release
2339 				 */
2340 				dc_sink_release(aconnector->dc_sink);
2341 			}
2342 			aconnector->dc_sink = sink;
2343 			dc_sink_retain(aconnector->dc_sink);
2344 			amdgpu_dm_update_freesync_caps(connector,
2345 					aconnector->edid);
2346 		} else {
2347 			amdgpu_dm_update_freesync_caps(connector, NULL);
2348 			if (!aconnector->dc_sink) {
2349 				aconnector->dc_sink = aconnector->dc_em_sink;
2350 				dc_sink_retain(aconnector->dc_sink);
2351 			}
2352 		}
2353 
2354 		mutex_unlock(&dev->mode_config.mutex);
2355 
2356 		if (sink)
2357 			dc_sink_release(sink);
2358 		return;
2359 	}
2360 
2361 	/*
2362 	 * TODO: temporary guard to look for proper fix
2363 	 * if this sink is MST sink, we should not do anything
2364 	 */
2365 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2366 		dc_sink_release(sink);
2367 		return;
2368 	}
2369 
2370 	if (aconnector->dc_sink == sink) {
2371 		/*
2372 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2373 		 * Do nothing!!
2374 		 */
2375 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2376 				aconnector->connector_id);
2377 		if (sink)
2378 			dc_sink_release(sink);
2379 		return;
2380 	}
2381 
2382 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2383 		aconnector->connector_id, aconnector->dc_sink, sink);
2384 
2385 	mutex_lock(&dev->mode_config.mutex);
2386 
2387 	/*
2388 	 * 1. Update status of the drm connector
2389 	 * 2. Send an event and let userspace tell us what to do
2390 	 */
2391 	if (sink) {
2392 		/*
2393 		 * TODO: check if we still need the S3 mode update workaround.
2394 		 * If yes, put it here.
2395 		 */
2396 		if (aconnector->dc_sink) {
2397 			amdgpu_dm_update_freesync_caps(connector, NULL);
2398 			dc_sink_release(aconnector->dc_sink);
2399 		}
2400 
2401 		aconnector->dc_sink = sink;
2402 		dc_sink_retain(aconnector->dc_sink);
2403 		if (sink->dc_edid.length == 0) {
2404 			aconnector->edid = NULL;
2405 			if (aconnector->dc_link->aux_mode) {
2406 				drm_dp_cec_unset_edid(
2407 					&aconnector->dm_dp_aux.aux);
2408 			}
2409 		} else {
2410 			aconnector->edid =
2411 				(struct edid *)sink->dc_edid.raw_edid;
2412 
2413 			drm_connector_update_edid_property(connector,
2414 							   aconnector->edid);
2415 			if (aconnector->dc_link->aux_mode)
2416 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2417 						    aconnector->edid);
2418 		}
2419 
2420 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2421 		update_connector_ext_caps(aconnector);
2422 	} else {
2423 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2424 		amdgpu_dm_update_freesync_caps(connector, NULL);
2425 		drm_connector_update_edid_property(connector, NULL);
2426 		aconnector->num_modes = 0;
2427 		dc_sink_release(aconnector->dc_sink);
2428 		aconnector->dc_sink = NULL;
2429 		aconnector->edid = NULL;
2430 #ifdef CONFIG_DRM_AMD_DC_HDCP
2431 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2432 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2433 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2434 #endif
2435 	}
2436 
2437 	mutex_unlock(&dev->mode_config.mutex);
2438 
2439 	update_subconnector_property(aconnector);
2440 
2441 	if (sink)
2442 		dc_sink_release(sink);
2443 }
2444 
2445 static void handle_hpd_irq(void *param)
2446 {
2447 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2448 	struct drm_connector *connector = &aconnector->base;
2449 	struct drm_device *dev = connector->dev;
2450 	enum dc_connection_type new_connection_type = dc_connection_none;
2451 #ifdef CONFIG_DRM_AMD_DC_HDCP
2452 	struct amdgpu_device *adev = drm_to_adev(dev);
2453 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2454 #endif
2455 
2456 	/*
2457 	 * In case of failure or MST no need to update connector status or notify the OS
2458 	 * since (for MST case) MST does this in its own context.
2459 	 */
2460 	mutex_lock(&aconnector->hpd_lock);
2461 
2462 #ifdef CONFIG_DRM_AMD_DC_HDCP
2463 	if (adev->dm.hdcp_workqueue) {
2464 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2465 		dm_con_state->update_hdcp = true;
2466 	}
2467 #endif
2468 	if (aconnector->fake_enable)
2469 		aconnector->fake_enable = false;
2470 
2471 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2472 		DRM_ERROR("KMS: Failed to detect connector\n");
2473 
2474 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2475 		emulated_link_detect(aconnector->dc_link);
2476 
2477 
2478 		drm_modeset_lock_all(dev);
2479 		dm_restore_drm_connector_state(dev, connector);
2480 		drm_modeset_unlock_all(dev);
2481 
2482 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2483 			drm_kms_helper_hotplug_event(dev);
2484 
2485 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2486 		if (new_connection_type == dc_connection_none &&
2487 		    aconnector->dc_link->type == dc_connection_none)
2488 			dm_set_dpms_off(aconnector->dc_link);
2489 
2490 		amdgpu_dm_update_connector_after_detect(aconnector);
2491 
2492 		drm_modeset_lock_all(dev);
2493 		dm_restore_drm_connector_state(dev, connector);
2494 		drm_modeset_unlock_all(dev);
2495 
2496 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2497 			drm_kms_helper_hotplug_event(dev);
2498 	}
2499 	mutex_unlock(&aconnector->hpd_lock);
2500 
2501 }
2502 
2503 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2504 {
2505 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2506 	uint8_t dret;
2507 	bool new_irq_handled = false;
2508 	int dpcd_addr;
2509 	int dpcd_bytes_to_read;
2510 
2511 	const int max_process_count = 30;
2512 	int process_count = 0;
2513 
2514 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2515 
2516 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2517 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2518 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2519 		dpcd_addr = DP_SINK_COUNT;
2520 	} else {
2521 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2522 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2523 		dpcd_addr = DP_SINK_COUNT_ESI;
2524 	}
2525 
2526 	dret = drm_dp_dpcd_read(
2527 		&aconnector->dm_dp_aux.aux,
2528 		dpcd_addr,
2529 		esi,
2530 		dpcd_bytes_to_read);
2531 
2532 	while (dret == dpcd_bytes_to_read &&
2533 		process_count < max_process_count) {
2534 		uint8_t retry;
2535 		dret = 0;
2536 
2537 		process_count++;
2538 
2539 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2540 		/* handle HPD short pulse irq */
2541 		if (aconnector->mst_mgr.mst_state)
2542 			drm_dp_mst_hpd_irq(
2543 				&aconnector->mst_mgr,
2544 				esi,
2545 				&new_irq_handled);
2546 
2547 		if (new_irq_handled) {
2548 			/* ACK at DPCD to notify down stream */
2549 			const int ack_dpcd_bytes_to_write =
2550 				dpcd_bytes_to_read - 1;
2551 
2552 			for (retry = 0; retry < 3; retry++) {
2553 				uint8_t wret;
2554 
2555 				wret = drm_dp_dpcd_write(
2556 					&aconnector->dm_dp_aux.aux,
2557 					dpcd_addr + 1,
2558 					&esi[1],
2559 					ack_dpcd_bytes_to_write);
2560 				if (wret == ack_dpcd_bytes_to_write)
2561 					break;
2562 			}
2563 
2564 			/* check if there is new irq to be handled */
2565 			dret = drm_dp_dpcd_read(
2566 				&aconnector->dm_dp_aux.aux,
2567 				dpcd_addr,
2568 				esi,
2569 				dpcd_bytes_to_read);
2570 
2571 			new_irq_handled = false;
2572 		} else {
2573 			break;
2574 		}
2575 	}
2576 
2577 	if (process_count == max_process_count)
2578 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2579 }
2580 
2581 static void handle_hpd_rx_irq(void *param)
2582 {
2583 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2584 	struct drm_connector *connector = &aconnector->base;
2585 	struct drm_device *dev = connector->dev;
2586 	struct dc_link *dc_link = aconnector->dc_link;
2587 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2588 	bool result = false;
2589 	enum dc_connection_type new_connection_type = dc_connection_none;
2590 	struct amdgpu_device *adev = drm_to_adev(dev);
2591 	union hpd_irq_data hpd_irq_data;
2592 
2593 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2594 
2595 	/*
2596 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2597 	 * conflict, after implement i2c helper, this mutex should be
2598 	 * retired.
2599 	 */
2600 	if (dc_link->type != dc_connection_mst_branch)
2601 		mutex_lock(&aconnector->hpd_lock);
2602 
2603 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2604 
2605 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2606 		(dc_link->type == dc_connection_mst_branch)) {
2607 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2608 			result = true;
2609 			dm_handle_hpd_rx_irq(aconnector);
2610 			goto out;
2611 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2612 			result = false;
2613 			dm_handle_hpd_rx_irq(aconnector);
2614 			goto out;
2615 		}
2616 	}
2617 
2618 	mutex_lock(&adev->dm.dc_lock);
2619 #ifdef CONFIG_DRM_AMD_DC_HDCP
2620 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2621 #else
2622 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2623 #endif
2624 	mutex_unlock(&adev->dm.dc_lock);
2625 
2626 out:
2627 	if (result && !is_mst_root_connector) {
2628 		/* Downstream Port status changed. */
2629 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2630 			DRM_ERROR("KMS: Failed to detect connector\n");
2631 
2632 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2633 			emulated_link_detect(dc_link);
2634 
2635 			if (aconnector->fake_enable)
2636 				aconnector->fake_enable = false;
2637 
2638 			amdgpu_dm_update_connector_after_detect(aconnector);
2639 
2640 
2641 			drm_modeset_lock_all(dev);
2642 			dm_restore_drm_connector_state(dev, connector);
2643 			drm_modeset_unlock_all(dev);
2644 
2645 			drm_kms_helper_hotplug_event(dev);
2646 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2647 
2648 			if (aconnector->fake_enable)
2649 				aconnector->fake_enable = false;
2650 
2651 			amdgpu_dm_update_connector_after_detect(aconnector);
2652 
2653 
2654 			drm_modeset_lock_all(dev);
2655 			dm_restore_drm_connector_state(dev, connector);
2656 			drm_modeset_unlock_all(dev);
2657 
2658 			drm_kms_helper_hotplug_event(dev);
2659 		}
2660 	}
2661 #ifdef CONFIG_DRM_AMD_DC_HDCP
2662 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2663 		if (adev->dm.hdcp_workqueue)
2664 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2665 	}
2666 #endif
2667 
2668 	if (dc_link->type != dc_connection_mst_branch) {
2669 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2670 		mutex_unlock(&aconnector->hpd_lock);
2671 	}
2672 }
2673 
2674 static void register_hpd_handlers(struct amdgpu_device *adev)
2675 {
2676 	struct drm_device *dev = adev_to_drm(adev);
2677 	struct drm_connector *connector;
2678 	struct amdgpu_dm_connector *aconnector;
2679 	const struct dc_link *dc_link;
2680 	struct dc_interrupt_params int_params = {0};
2681 
2682 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2683 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2684 
2685 	list_for_each_entry(connector,
2686 			&dev->mode_config.connector_list, head)	{
2687 
2688 		aconnector = to_amdgpu_dm_connector(connector);
2689 		dc_link = aconnector->dc_link;
2690 
2691 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2692 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2693 			int_params.irq_source = dc_link->irq_source_hpd;
2694 
2695 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2696 					handle_hpd_irq,
2697 					(void *) aconnector);
2698 		}
2699 
2700 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2701 
2702 			/* Also register for DP short pulse (hpd_rx). */
2703 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2704 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2705 
2706 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2707 					handle_hpd_rx_irq,
2708 					(void *) aconnector);
2709 		}
2710 	}
2711 }
2712 
2713 #if defined(CONFIG_DRM_AMD_DC_SI)
2714 /* Register IRQ sources and initialize IRQ callbacks */
2715 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2716 {
2717 	struct dc *dc = adev->dm.dc;
2718 	struct common_irq_params *c_irq_params;
2719 	struct dc_interrupt_params int_params = {0};
2720 	int r;
2721 	int i;
2722 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2723 
2724 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2725 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2726 
2727 	/*
2728 	 * Actions of amdgpu_irq_add_id():
2729 	 * 1. Register a set() function with base driver.
2730 	 *    Base driver will call set() function to enable/disable an
2731 	 *    interrupt in DC hardware.
2732 	 * 2. Register amdgpu_dm_irq_handler().
2733 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2734 	 *    coming from DC hardware.
2735 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2736 	 *    for acknowledging and handling. */
2737 
2738 	/* Use VBLANK interrupt */
2739 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2740 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2741 		if (r) {
2742 			DRM_ERROR("Failed to add crtc irq id!\n");
2743 			return r;
2744 		}
2745 
2746 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2747 		int_params.irq_source =
2748 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2749 
2750 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2751 
2752 		c_irq_params->adev = adev;
2753 		c_irq_params->irq_src = int_params.irq_source;
2754 
2755 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2756 				dm_crtc_high_irq, c_irq_params);
2757 	}
2758 
2759 	/* Use GRPH_PFLIP interrupt */
2760 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2761 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2762 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2763 		if (r) {
2764 			DRM_ERROR("Failed to add page flip irq id!\n");
2765 			return r;
2766 		}
2767 
2768 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2769 		int_params.irq_source =
2770 			dc_interrupt_to_irq_source(dc, i, 0);
2771 
2772 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2773 
2774 		c_irq_params->adev = adev;
2775 		c_irq_params->irq_src = int_params.irq_source;
2776 
2777 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2778 				dm_pflip_high_irq, c_irq_params);
2779 
2780 	}
2781 
2782 	/* HPD */
2783 	r = amdgpu_irq_add_id(adev, client_id,
2784 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2785 	if (r) {
2786 		DRM_ERROR("Failed to add hpd irq id!\n");
2787 		return r;
2788 	}
2789 
2790 	register_hpd_handlers(adev);
2791 
2792 	return 0;
2793 }
2794 #endif
2795 
2796 /* Register IRQ sources and initialize IRQ callbacks */
2797 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2798 {
2799 	struct dc *dc = adev->dm.dc;
2800 	struct common_irq_params *c_irq_params;
2801 	struct dc_interrupt_params int_params = {0};
2802 	int r;
2803 	int i;
2804 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2805 
2806 	if (adev->asic_type >= CHIP_VEGA10)
2807 		client_id = SOC15_IH_CLIENTID_DCE;
2808 
2809 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2810 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2811 
2812 	/*
2813 	 * Actions of amdgpu_irq_add_id():
2814 	 * 1. Register a set() function with base driver.
2815 	 *    Base driver will call set() function to enable/disable an
2816 	 *    interrupt in DC hardware.
2817 	 * 2. Register amdgpu_dm_irq_handler().
2818 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2819 	 *    coming from DC hardware.
2820 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2821 	 *    for acknowledging and handling. */
2822 
2823 	/* Use VBLANK interrupt */
2824 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2825 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2826 		if (r) {
2827 			DRM_ERROR("Failed to add crtc irq id!\n");
2828 			return r;
2829 		}
2830 
2831 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2832 		int_params.irq_source =
2833 			dc_interrupt_to_irq_source(dc, i, 0);
2834 
2835 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2836 
2837 		c_irq_params->adev = adev;
2838 		c_irq_params->irq_src = int_params.irq_source;
2839 
2840 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2841 				dm_crtc_high_irq, c_irq_params);
2842 	}
2843 
2844 	/* Use VUPDATE interrupt */
2845 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2846 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2847 		if (r) {
2848 			DRM_ERROR("Failed to add vupdate irq id!\n");
2849 			return r;
2850 		}
2851 
2852 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2853 		int_params.irq_source =
2854 			dc_interrupt_to_irq_source(dc, i, 0);
2855 
2856 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2857 
2858 		c_irq_params->adev = adev;
2859 		c_irq_params->irq_src = int_params.irq_source;
2860 
2861 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2862 				dm_vupdate_high_irq, c_irq_params);
2863 	}
2864 
2865 	/* Use GRPH_PFLIP interrupt */
2866 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2867 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2868 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2869 		if (r) {
2870 			DRM_ERROR("Failed to add page flip irq id!\n");
2871 			return r;
2872 		}
2873 
2874 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2875 		int_params.irq_source =
2876 			dc_interrupt_to_irq_source(dc, i, 0);
2877 
2878 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2879 
2880 		c_irq_params->adev = adev;
2881 		c_irq_params->irq_src = int_params.irq_source;
2882 
2883 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2884 				dm_pflip_high_irq, c_irq_params);
2885 
2886 	}
2887 
2888 	/* HPD */
2889 	r = amdgpu_irq_add_id(adev, client_id,
2890 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2891 	if (r) {
2892 		DRM_ERROR("Failed to add hpd irq id!\n");
2893 		return r;
2894 	}
2895 
2896 	register_hpd_handlers(adev);
2897 
2898 	return 0;
2899 }
2900 
2901 #if defined(CONFIG_DRM_AMD_DC_DCN)
2902 /* Register IRQ sources and initialize IRQ callbacks */
2903 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2904 {
2905 	struct dc *dc = adev->dm.dc;
2906 	struct common_irq_params *c_irq_params;
2907 	struct dc_interrupt_params int_params = {0};
2908 	int r;
2909 	int i;
2910 
2911 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2912 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2913 
2914 	/*
2915 	 * Actions of amdgpu_irq_add_id():
2916 	 * 1. Register a set() function with base driver.
2917 	 *    Base driver will call set() function to enable/disable an
2918 	 *    interrupt in DC hardware.
2919 	 * 2. Register amdgpu_dm_irq_handler().
2920 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2921 	 *    coming from DC hardware.
2922 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2923 	 *    for acknowledging and handling.
2924 	 */
2925 
2926 	/* Use VSTARTUP interrupt */
2927 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2928 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2929 			i++) {
2930 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2931 
2932 		if (r) {
2933 			DRM_ERROR("Failed to add crtc irq id!\n");
2934 			return r;
2935 		}
2936 
2937 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2938 		int_params.irq_source =
2939 			dc_interrupt_to_irq_source(dc, i, 0);
2940 
2941 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2942 
2943 		c_irq_params->adev = adev;
2944 		c_irq_params->irq_src = int_params.irq_source;
2945 
2946 		amdgpu_dm_irq_register_interrupt(
2947 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2948 	}
2949 
2950 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2951 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2952 	 * to trigger at end of each vblank, regardless of state of the lock,
2953 	 * matching DCE behaviour.
2954 	 */
2955 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2956 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2957 	     i++) {
2958 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2959 
2960 		if (r) {
2961 			DRM_ERROR("Failed to add vupdate irq id!\n");
2962 			return r;
2963 		}
2964 
2965 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2966 		int_params.irq_source =
2967 			dc_interrupt_to_irq_source(dc, i, 0);
2968 
2969 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2970 
2971 		c_irq_params->adev = adev;
2972 		c_irq_params->irq_src = int_params.irq_source;
2973 
2974 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2975 				dm_vupdate_high_irq, c_irq_params);
2976 	}
2977 
2978 	/* Use GRPH_PFLIP interrupt */
2979 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2980 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2981 			i++) {
2982 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2983 		if (r) {
2984 			DRM_ERROR("Failed to add page flip irq id!\n");
2985 			return r;
2986 		}
2987 
2988 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2989 		int_params.irq_source =
2990 			dc_interrupt_to_irq_source(dc, i, 0);
2991 
2992 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2993 
2994 		c_irq_params->adev = adev;
2995 		c_irq_params->irq_src = int_params.irq_source;
2996 
2997 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2998 				dm_pflip_high_irq, c_irq_params);
2999 
3000 	}
3001 
3002 	/* HPD */
3003 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3004 			&adev->hpd_irq);
3005 	if (r) {
3006 		DRM_ERROR("Failed to add hpd irq id!\n");
3007 		return r;
3008 	}
3009 
3010 	register_hpd_handlers(adev);
3011 
3012 	return 0;
3013 }
3014 #endif
3015 
3016 /*
3017  * Acquires the lock for the atomic state object and returns
3018  * the new atomic state.
3019  *
3020  * This should only be called during atomic check.
3021  */
3022 static int dm_atomic_get_state(struct drm_atomic_state *state,
3023 			       struct dm_atomic_state **dm_state)
3024 {
3025 	struct drm_device *dev = state->dev;
3026 	struct amdgpu_device *adev = drm_to_adev(dev);
3027 	struct amdgpu_display_manager *dm = &adev->dm;
3028 	struct drm_private_state *priv_state;
3029 
3030 	if (*dm_state)
3031 		return 0;
3032 
3033 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3034 	if (IS_ERR(priv_state))
3035 		return PTR_ERR(priv_state);
3036 
3037 	*dm_state = to_dm_atomic_state(priv_state);
3038 
3039 	return 0;
3040 }
3041 
3042 static struct dm_atomic_state *
3043 dm_atomic_get_new_state(struct drm_atomic_state *state)
3044 {
3045 	struct drm_device *dev = state->dev;
3046 	struct amdgpu_device *adev = drm_to_adev(dev);
3047 	struct amdgpu_display_manager *dm = &adev->dm;
3048 	struct drm_private_obj *obj;
3049 	struct drm_private_state *new_obj_state;
3050 	int i;
3051 
3052 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3053 		if (obj->funcs == dm->atomic_obj.funcs)
3054 			return to_dm_atomic_state(new_obj_state);
3055 	}
3056 
3057 	return NULL;
3058 }
3059 
3060 static struct drm_private_state *
3061 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3062 {
3063 	struct dm_atomic_state *old_state, *new_state;
3064 
3065 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3066 	if (!new_state)
3067 		return NULL;
3068 
3069 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3070 
3071 	old_state = to_dm_atomic_state(obj->state);
3072 
3073 	if (old_state && old_state->context)
3074 		new_state->context = dc_copy_state(old_state->context);
3075 
3076 	if (!new_state->context) {
3077 		kfree(new_state);
3078 		return NULL;
3079 	}
3080 
3081 	return &new_state->base;
3082 }
3083 
3084 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3085 				    struct drm_private_state *state)
3086 {
3087 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3088 
3089 	if (dm_state && dm_state->context)
3090 		dc_release_state(dm_state->context);
3091 
3092 	kfree(dm_state);
3093 }
3094 
3095 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3096 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3097 	.atomic_destroy_state = dm_atomic_destroy_state,
3098 };
3099 
3100 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3101 {
3102 	struct dm_atomic_state *state;
3103 	int r;
3104 
3105 	adev->mode_info.mode_config_initialized = true;
3106 
3107 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3108 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3109 
3110 	adev_to_drm(adev)->mode_config.max_width = 16384;
3111 	adev_to_drm(adev)->mode_config.max_height = 16384;
3112 
3113 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3114 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3115 	/* indicates support for immediate flip */
3116 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3117 
3118 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3119 
3120 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3121 	if (!state)
3122 		return -ENOMEM;
3123 
3124 	state->context = dc_create_state(adev->dm.dc);
3125 	if (!state->context) {
3126 		kfree(state);
3127 		return -ENOMEM;
3128 	}
3129 
3130 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3131 
3132 	drm_atomic_private_obj_init(adev_to_drm(adev),
3133 				    &adev->dm.atomic_obj,
3134 				    &state->base,
3135 				    &dm_atomic_state_funcs);
3136 
3137 	r = amdgpu_display_modeset_create_props(adev);
3138 	if (r) {
3139 		dc_release_state(state->context);
3140 		kfree(state);
3141 		return r;
3142 	}
3143 
3144 	r = amdgpu_dm_audio_init(adev);
3145 	if (r) {
3146 		dc_release_state(state->context);
3147 		kfree(state);
3148 		return r;
3149 	}
3150 
3151 	return 0;
3152 }
3153 
3154 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3155 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3156 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3157 
3158 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3159 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3160 
3161 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3162 {
3163 #if defined(CONFIG_ACPI)
3164 	struct amdgpu_dm_backlight_caps caps;
3165 
3166 	memset(&caps, 0, sizeof(caps));
3167 
3168 	if (dm->backlight_caps.caps_valid)
3169 		return;
3170 
3171 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3172 	if (caps.caps_valid) {
3173 		dm->backlight_caps.caps_valid = true;
3174 		if (caps.aux_support)
3175 			return;
3176 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3177 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3178 	} else {
3179 		dm->backlight_caps.min_input_signal =
3180 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3181 		dm->backlight_caps.max_input_signal =
3182 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3183 	}
3184 #else
3185 	if (dm->backlight_caps.aux_support)
3186 		return;
3187 
3188 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3189 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3190 #endif
3191 }
3192 
3193 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3194 				unsigned *min, unsigned *max)
3195 {
3196 	if (!caps)
3197 		return 0;
3198 
3199 	if (caps->aux_support) {
3200 		// Firmware limits are in nits, DC API wants millinits.
3201 		*max = 1000 * caps->aux_max_input_signal;
3202 		*min = 1000 * caps->aux_min_input_signal;
3203 	} else {
3204 		// Firmware limits are 8-bit, PWM control is 16-bit.
3205 		*max = 0x101 * caps->max_input_signal;
3206 		*min = 0x101 * caps->min_input_signal;
3207 	}
3208 	return 1;
3209 }
3210 
3211 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3212 					uint32_t brightness)
3213 {
3214 	unsigned min, max;
3215 
3216 	if (!get_brightness_range(caps, &min, &max))
3217 		return brightness;
3218 
3219 	// Rescale 0..255 to min..max
3220 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3221 				       AMDGPU_MAX_BL_LEVEL);
3222 }
3223 
3224 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3225 				      uint32_t brightness)
3226 {
3227 	unsigned min, max;
3228 
3229 	if (!get_brightness_range(caps, &min, &max))
3230 		return brightness;
3231 
3232 	if (brightness < min)
3233 		return 0;
3234 	// Rescale min..max to 0..255
3235 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3236 				 max - min);
3237 }
3238 
3239 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3240 {
3241 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3242 	struct amdgpu_dm_backlight_caps caps;
3243 	struct dc_link *link = NULL;
3244 	u32 brightness;
3245 	bool rc;
3246 
3247 	amdgpu_dm_update_backlight_caps(dm);
3248 	caps = dm->backlight_caps;
3249 
3250 	link = (struct dc_link *)dm->backlight_link;
3251 
3252 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3253 	// Change brightness based on AUX property
3254 	if (caps.aux_support)
3255 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3256 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3257 	else
3258 		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3259 
3260 	return rc ? 0 : 1;
3261 }
3262 
3263 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3264 {
3265 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3266 	struct amdgpu_dm_backlight_caps caps;
3267 
3268 	amdgpu_dm_update_backlight_caps(dm);
3269 	caps = dm->backlight_caps;
3270 
3271 	if (caps.aux_support) {
3272 		struct dc_link *link = (struct dc_link *)dm->backlight_link;
3273 		u32 avg, peak;
3274 		bool rc;
3275 
3276 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3277 		if (!rc)
3278 			return bd->props.brightness;
3279 		return convert_brightness_to_user(&caps, avg);
3280 	} else {
3281 		int ret = dc_link_get_backlight_level(dm->backlight_link);
3282 
3283 		if (ret == DC_ERROR_UNEXPECTED)
3284 			return bd->props.brightness;
3285 		return convert_brightness_to_user(&caps, ret);
3286 	}
3287 }
3288 
3289 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3290 	.options = BL_CORE_SUSPENDRESUME,
3291 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3292 	.update_status	= amdgpu_dm_backlight_update_status,
3293 };
3294 
3295 static void
3296 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3297 {
3298 	char bl_name[16];
3299 	struct backlight_properties props = { 0 };
3300 
3301 	amdgpu_dm_update_backlight_caps(dm);
3302 
3303 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3304 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3305 	props.type = BACKLIGHT_RAW;
3306 
3307 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3308 		 adev_to_drm(dm->adev)->primary->index);
3309 
3310 	dm->backlight_dev = backlight_device_register(bl_name,
3311 						      adev_to_drm(dm->adev)->dev,
3312 						      dm,
3313 						      &amdgpu_dm_backlight_ops,
3314 						      &props);
3315 
3316 	if (IS_ERR(dm->backlight_dev))
3317 		DRM_ERROR("DM: Backlight registration failed!\n");
3318 	else
3319 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3320 }
3321 
3322 #endif
3323 
3324 static int initialize_plane(struct amdgpu_display_manager *dm,
3325 			    struct amdgpu_mode_info *mode_info, int plane_id,
3326 			    enum drm_plane_type plane_type,
3327 			    const struct dc_plane_cap *plane_cap)
3328 {
3329 	struct drm_plane *plane;
3330 	unsigned long possible_crtcs;
3331 	int ret = 0;
3332 
3333 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3334 	if (!plane) {
3335 		DRM_ERROR("KMS: Failed to allocate plane\n");
3336 		return -ENOMEM;
3337 	}
3338 	plane->type = plane_type;
3339 
3340 	/*
3341 	 * HACK: IGT tests expect that the primary plane for a CRTC
3342 	 * can only have one possible CRTC. Only expose support for
3343 	 * any CRTC if they're not going to be used as a primary plane
3344 	 * for a CRTC - like overlay or underlay planes.
3345 	 */
3346 	possible_crtcs = 1 << plane_id;
3347 	if (plane_id >= dm->dc->caps.max_streams)
3348 		possible_crtcs = 0xff;
3349 
3350 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3351 
3352 	if (ret) {
3353 		DRM_ERROR("KMS: Failed to initialize plane\n");
3354 		kfree(plane);
3355 		return ret;
3356 	}
3357 
3358 	if (mode_info)
3359 		mode_info->planes[plane_id] = plane;
3360 
3361 	return ret;
3362 }
3363 
3364 
3365 static void register_backlight_device(struct amdgpu_display_manager *dm,
3366 				      struct dc_link *link)
3367 {
3368 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3369 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3370 
3371 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3372 	    link->type != dc_connection_none) {
3373 		/*
3374 		 * Event if registration failed, we should continue with
3375 		 * DM initialization because not having a backlight control
3376 		 * is better then a black screen.
3377 		 */
3378 		amdgpu_dm_register_backlight_device(dm);
3379 
3380 		if (dm->backlight_dev)
3381 			dm->backlight_link = link;
3382 	}
3383 #endif
3384 }
3385 
3386 
3387 /*
3388  * In this architecture, the association
3389  * connector -> encoder -> crtc
3390  * id not really requried. The crtc and connector will hold the
3391  * display_index as an abstraction to use with DAL component
3392  *
3393  * Returns 0 on success
3394  */
3395 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3396 {
3397 	struct amdgpu_display_manager *dm = &adev->dm;
3398 	int32_t i;
3399 	struct amdgpu_dm_connector *aconnector = NULL;
3400 	struct amdgpu_encoder *aencoder = NULL;
3401 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3402 	uint32_t link_cnt;
3403 	int32_t primary_planes;
3404 	enum dc_connection_type new_connection_type = dc_connection_none;
3405 	const struct dc_plane_cap *plane;
3406 
3407 	dm->display_indexes_num = dm->dc->caps.max_streams;
3408 	/* Update the actual used number of crtc */
3409 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3410 
3411 	link_cnt = dm->dc->caps.max_links;
3412 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3413 		DRM_ERROR("DM: Failed to initialize mode config\n");
3414 		return -EINVAL;
3415 	}
3416 
3417 	/* There is one primary plane per CRTC */
3418 	primary_planes = dm->dc->caps.max_streams;
3419 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3420 
3421 	/*
3422 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3423 	 * Order is reversed to match iteration order in atomic check.
3424 	 */
3425 	for (i = (primary_planes - 1); i >= 0; i--) {
3426 		plane = &dm->dc->caps.planes[i];
3427 
3428 		if (initialize_plane(dm, mode_info, i,
3429 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3430 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3431 			goto fail;
3432 		}
3433 	}
3434 
3435 	/*
3436 	 * Initialize overlay planes, index starting after primary planes.
3437 	 * These planes have a higher DRM index than the primary planes since
3438 	 * they should be considered as having a higher z-order.
3439 	 * Order is reversed to match iteration order in atomic check.
3440 	 *
3441 	 * Only support DCN for now, and only expose one so we don't encourage
3442 	 * userspace to use up all the pipes.
3443 	 */
3444 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3445 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3446 
3447 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3448 			continue;
3449 
3450 		if (!plane->blends_with_above || !plane->blends_with_below)
3451 			continue;
3452 
3453 		if (!plane->pixel_format_support.argb8888)
3454 			continue;
3455 
3456 		if (initialize_plane(dm, NULL, primary_planes + i,
3457 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3458 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3459 			goto fail;
3460 		}
3461 
3462 		/* Only create one overlay plane. */
3463 		break;
3464 	}
3465 
3466 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3467 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3468 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3469 			goto fail;
3470 		}
3471 
3472 	/* loops over all connectors on the board */
3473 	for (i = 0; i < link_cnt; i++) {
3474 		struct dc_link *link = NULL;
3475 
3476 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3477 			DRM_ERROR(
3478 				"KMS: Cannot support more than %d display indexes\n",
3479 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3480 			continue;
3481 		}
3482 
3483 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3484 		if (!aconnector)
3485 			goto fail;
3486 
3487 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3488 		if (!aencoder)
3489 			goto fail;
3490 
3491 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3492 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3493 			goto fail;
3494 		}
3495 
3496 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3497 			DRM_ERROR("KMS: Failed to initialize connector\n");
3498 			goto fail;
3499 		}
3500 
3501 		link = dc_get_link_at_index(dm->dc, i);
3502 
3503 		if (!dc_link_detect_sink(link, &new_connection_type))
3504 			DRM_ERROR("KMS: Failed to detect connector\n");
3505 
3506 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3507 			emulated_link_detect(link);
3508 			amdgpu_dm_update_connector_after_detect(aconnector);
3509 
3510 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3511 			amdgpu_dm_update_connector_after_detect(aconnector);
3512 			register_backlight_device(dm, link);
3513 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3514 				amdgpu_dm_set_psr_caps(link);
3515 		}
3516 
3517 
3518 	}
3519 
3520 	/* Software is initialized. Now we can register interrupt handlers. */
3521 	switch (adev->asic_type) {
3522 #if defined(CONFIG_DRM_AMD_DC_SI)
3523 	case CHIP_TAHITI:
3524 	case CHIP_PITCAIRN:
3525 	case CHIP_VERDE:
3526 	case CHIP_OLAND:
3527 		if (dce60_register_irq_handlers(dm->adev)) {
3528 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3529 			goto fail;
3530 		}
3531 		break;
3532 #endif
3533 	case CHIP_BONAIRE:
3534 	case CHIP_HAWAII:
3535 	case CHIP_KAVERI:
3536 	case CHIP_KABINI:
3537 	case CHIP_MULLINS:
3538 	case CHIP_TONGA:
3539 	case CHIP_FIJI:
3540 	case CHIP_CARRIZO:
3541 	case CHIP_STONEY:
3542 	case CHIP_POLARIS11:
3543 	case CHIP_POLARIS10:
3544 	case CHIP_POLARIS12:
3545 	case CHIP_VEGAM:
3546 	case CHIP_VEGA10:
3547 	case CHIP_VEGA12:
3548 	case CHIP_VEGA20:
3549 		if (dce110_register_irq_handlers(dm->adev)) {
3550 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3551 			goto fail;
3552 		}
3553 		break;
3554 #if defined(CONFIG_DRM_AMD_DC_DCN)
3555 	case CHIP_RAVEN:
3556 	case CHIP_NAVI12:
3557 	case CHIP_NAVI10:
3558 	case CHIP_NAVI14:
3559 	case CHIP_RENOIR:
3560 	case CHIP_SIENNA_CICHLID:
3561 	case CHIP_NAVY_FLOUNDER:
3562 	case CHIP_DIMGREY_CAVEFISH:
3563 	case CHIP_VANGOGH:
3564 		if (dcn10_register_irq_handlers(dm->adev)) {
3565 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3566 			goto fail;
3567 		}
3568 		break;
3569 #endif
3570 	default:
3571 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3572 		goto fail;
3573 	}
3574 
3575 	return 0;
3576 fail:
3577 	kfree(aencoder);
3578 	kfree(aconnector);
3579 
3580 	return -EINVAL;
3581 }
3582 
3583 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3584 {
3585 	drm_mode_config_cleanup(dm->ddev);
3586 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3587 	return;
3588 }
3589 
3590 /******************************************************************************
3591  * amdgpu_display_funcs functions
3592  *****************************************************************************/
3593 
3594 /*
3595  * dm_bandwidth_update - program display watermarks
3596  *
3597  * @adev: amdgpu_device pointer
3598  *
3599  * Calculate and program the display watermarks and line buffer allocation.
3600  */
3601 static void dm_bandwidth_update(struct amdgpu_device *adev)
3602 {
3603 	/* TODO: implement later */
3604 }
3605 
3606 static const struct amdgpu_display_funcs dm_display_funcs = {
3607 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3608 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3609 	.backlight_set_level = NULL, /* never called for DC */
3610 	.backlight_get_level = NULL, /* never called for DC */
3611 	.hpd_sense = NULL,/* called unconditionally */
3612 	.hpd_set_polarity = NULL, /* called unconditionally */
3613 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3614 	.page_flip_get_scanoutpos =
3615 		dm_crtc_get_scanoutpos,/* called unconditionally */
3616 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3617 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3618 };
3619 
3620 #if defined(CONFIG_DEBUG_KERNEL_DC)
3621 
3622 static ssize_t s3_debug_store(struct device *device,
3623 			      struct device_attribute *attr,
3624 			      const char *buf,
3625 			      size_t count)
3626 {
3627 	int ret;
3628 	int s3_state;
3629 	struct drm_device *drm_dev = dev_get_drvdata(device);
3630 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3631 
3632 	ret = kstrtoint(buf, 0, &s3_state);
3633 
3634 	if (ret == 0) {
3635 		if (s3_state) {
3636 			dm_resume(adev);
3637 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3638 		} else
3639 			dm_suspend(adev);
3640 	}
3641 
3642 	return ret == 0 ? count : 0;
3643 }
3644 
3645 DEVICE_ATTR_WO(s3_debug);
3646 
3647 #endif
3648 
3649 static int dm_early_init(void *handle)
3650 {
3651 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3652 
3653 	switch (adev->asic_type) {
3654 #if defined(CONFIG_DRM_AMD_DC_SI)
3655 	case CHIP_TAHITI:
3656 	case CHIP_PITCAIRN:
3657 	case CHIP_VERDE:
3658 		adev->mode_info.num_crtc = 6;
3659 		adev->mode_info.num_hpd = 6;
3660 		adev->mode_info.num_dig = 6;
3661 		break;
3662 	case CHIP_OLAND:
3663 		adev->mode_info.num_crtc = 2;
3664 		adev->mode_info.num_hpd = 2;
3665 		adev->mode_info.num_dig = 2;
3666 		break;
3667 #endif
3668 	case CHIP_BONAIRE:
3669 	case CHIP_HAWAII:
3670 		adev->mode_info.num_crtc = 6;
3671 		adev->mode_info.num_hpd = 6;
3672 		adev->mode_info.num_dig = 6;
3673 		break;
3674 	case CHIP_KAVERI:
3675 		adev->mode_info.num_crtc = 4;
3676 		adev->mode_info.num_hpd = 6;
3677 		adev->mode_info.num_dig = 7;
3678 		break;
3679 	case CHIP_KABINI:
3680 	case CHIP_MULLINS:
3681 		adev->mode_info.num_crtc = 2;
3682 		adev->mode_info.num_hpd = 6;
3683 		adev->mode_info.num_dig = 6;
3684 		break;
3685 	case CHIP_FIJI:
3686 	case CHIP_TONGA:
3687 		adev->mode_info.num_crtc = 6;
3688 		adev->mode_info.num_hpd = 6;
3689 		adev->mode_info.num_dig = 7;
3690 		break;
3691 	case CHIP_CARRIZO:
3692 		adev->mode_info.num_crtc = 3;
3693 		adev->mode_info.num_hpd = 6;
3694 		adev->mode_info.num_dig = 9;
3695 		break;
3696 	case CHIP_STONEY:
3697 		adev->mode_info.num_crtc = 2;
3698 		adev->mode_info.num_hpd = 6;
3699 		adev->mode_info.num_dig = 9;
3700 		break;
3701 	case CHIP_POLARIS11:
3702 	case CHIP_POLARIS12:
3703 		adev->mode_info.num_crtc = 5;
3704 		adev->mode_info.num_hpd = 5;
3705 		adev->mode_info.num_dig = 5;
3706 		break;
3707 	case CHIP_POLARIS10:
3708 	case CHIP_VEGAM:
3709 		adev->mode_info.num_crtc = 6;
3710 		adev->mode_info.num_hpd = 6;
3711 		adev->mode_info.num_dig = 6;
3712 		break;
3713 	case CHIP_VEGA10:
3714 	case CHIP_VEGA12:
3715 	case CHIP_VEGA20:
3716 		adev->mode_info.num_crtc = 6;
3717 		adev->mode_info.num_hpd = 6;
3718 		adev->mode_info.num_dig = 6;
3719 		break;
3720 #if defined(CONFIG_DRM_AMD_DC_DCN)
3721 	case CHIP_RAVEN:
3722 	case CHIP_RENOIR:
3723 	case CHIP_VANGOGH:
3724 		adev->mode_info.num_crtc = 4;
3725 		adev->mode_info.num_hpd = 4;
3726 		adev->mode_info.num_dig = 4;
3727 		break;
3728 	case CHIP_NAVI10:
3729 	case CHIP_NAVI12:
3730 	case CHIP_SIENNA_CICHLID:
3731 	case CHIP_NAVY_FLOUNDER:
3732 		adev->mode_info.num_crtc = 6;
3733 		adev->mode_info.num_hpd = 6;
3734 		adev->mode_info.num_dig = 6;
3735 		break;
3736 	case CHIP_NAVI14:
3737 	case CHIP_DIMGREY_CAVEFISH:
3738 		adev->mode_info.num_crtc = 5;
3739 		adev->mode_info.num_hpd = 5;
3740 		adev->mode_info.num_dig = 5;
3741 		break;
3742 #endif
3743 	default:
3744 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3745 		return -EINVAL;
3746 	}
3747 
3748 	amdgpu_dm_set_irq_funcs(adev);
3749 
3750 	if (adev->mode_info.funcs == NULL)
3751 		adev->mode_info.funcs = &dm_display_funcs;
3752 
3753 	/*
3754 	 * Note: Do NOT change adev->audio_endpt_rreg and
3755 	 * adev->audio_endpt_wreg because they are initialised in
3756 	 * amdgpu_device_init()
3757 	 */
3758 #if defined(CONFIG_DEBUG_KERNEL_DC)
3759 	device_create_file(
3760 		adev_to_drm(adev)->dev,
3761 		&dev_attr_s3_debug);
3762 #endif
3763 
3764 	return 0;
3765 }
3766 
3767 static bool modeset_required(struct drm_crtc_state *crtc_state,
3768 			     struct dc_stream_state *new_stream,
3769 			     struct dc_stream_state *old_stream)
3770 {
3771 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3772 }
3773 
3774 static bool modereset_required(struct drm_crtc_state *crtc_state)
3775 {
3776 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3777 }
3778 
3779 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3780 {
3781 	drm_encoder_cleanup(encoder);
3782 	kfree(encoder);
3783 }
3784 
3785 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3786 	.destroy = amdgpu_dm_encoder_destroy,
3787 };
3788 
3789 
3790 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3791 					 struct drm_framebuffer *fb,
3792 					 int *min_downscale, int *max_upscale)
3793 {
3794 	struct amdgpu_device *adev = drm_to_adev(dev);
3795 	struct dc *dc = adev->dm.dc;
3796 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3797 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3798 
3799 	switch (fb->format->format) {
3800 	case DRM_FORMAT_P010:
3801 	case DRM_FORMAT_NV12:
3802 	case DRM_FORMAT_NV21:
3803 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3804 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3805 		break;
3806 
3807 	case DRM_FORMAT_XRGB16161616F:
3808 	case DRM_FORMAT_ARGB16161616F:
3809 	case DRM_FORMAT_XBGR16161616F:
3810 	case DRM_FORMAT_ABGR16161616F:
3811 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3812 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3813 		break;
3814 
3815 	default:
3816 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3817 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3818 		break;
3819 	}
3820 
3821 	/*
3822 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
3823 	 * scaling factor of 1.0 == 1000 units.
3824 	 */
3825 	if (*max_upscale == 1)
3826 		*max_upscale = 1000;
3827 
3828 	if (*min_downscale == 1)
3829 		*min_downscale = 1000;
3830 }
3831 
3832 
3833 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3834 				struct dc_scaling_info *scaling_info)
3835 {
3836 	int scale_w, scale_h, min_downscale, max_upscale;
3837 
3838 	memset(scaling_info, 0, sizeof(*scaling_info));
3839 
3840 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3841 	scaling_info->src_rect.x = state->src_x >> 16;
3842 	scaling_info->src_rect.y = state->src_y >> 16;
3843 
3844 	scaling_info->src_rect.width = state->src_w >> 16;
3845 	if (scaling_info->src_rect.width == 0)
3846 		return -EINVAL;
3847 
3848 	scaling_info->src_rect.height = state->src_h >> 16;
3849 	if (scaling_info->src_rect.height == 0)
3850 		return -EINVAL;
3851 
3852 	scaling_info->dst_rect.x = state->crtc_x;
3853 	scaling_info->dst_rect.y = state->crtc_y;
3854 
3855 	if (state->crtc_w == 0)
3856 		return -EINVAL;
3857 
3858 	scaling_info->dst_rect.width = state->crtc_w;
3859 
3860 	if (state->crtc_h == 0)
3861 		return -EINVAL;
3862 
3863 	scaling_info->dst_rect.height = state->crtc_h;
3864 
3865 	/* DRM doesn't specify clipping on destination output. */
3866 	scaling_info->clip_rect = scaling_info->dst_rect;
3867 
3868 	/* Validate scaling per-format with DC plane caps */
3869 	if (state->plane && state->plane->dev && state->fb) {
3870 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3871 					     &min_downscale, &max_upscale);
3872 	} else {
3873 		min_downscale = 250;
3874 		max_upscale = 16000;
3875 	}
3876 
3877 	scale_w = scaling_info->dst_rect.width * 1000 /
3878 		  scaling_info->src_rect.width;
3879 
3880 	if (scale_w < min_downscale || scale_w > max_upscale)
3881 		return -EINVAL;
3882 
3883 	scale_h = scaling_info->dst_rect.height * 1000 /
3884 		  scaling_info->src_rect.height;
3885 
3886 	if (scale_h < min_downscale || scale_h > max_upscale)
3887 		return -EINVAL;
3888 
3889 	/*
3890 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3891 	 * assume reasonable defaults based on the format.
3892 	 */
3893 
3894 	return 0;
3895 }
3896 
3897 static void
3898 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3899 				 uint64_t tiling_flags)
3900 {
3901 	/* Fill GFX8 params */
3902 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3903 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3904 
3905 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3906 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3907 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3908 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3909 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3910 
3911 		/* XXX fix me for VI */
3912 		tiling_info->gfx8.num_banks = num_banks;
3913 		tiling_info->gfx8.array_mode =
3914 				DC_ARRAY_2D_TILED_THIN1;
3915 		tiling_info->gfx8.tile_split = tile_split;
3916 		tiling_info->gfx8.bank_width = bankw;
3917 		tiling_info->gfx8.bank_height = bankh;
3918 		tiling_info->gfx8.tile_aspect = mtaspect;
3919 		tiling_info->gfx8.tile_mode =
3920 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3921 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3922 			== DC_ARRAY_1D_TILED_THIN1) {
3923 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3924 	}
3925 
3926 	tiling_info->gfx8.pipe_config =
3927 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3928 }
3929 
3930 static void
3931 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3932 				  union dc_tiling_info *tiling_info)
3933 {
3934 	tiling_info->gfx9.num_pipes =
3935 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3936 	tiling_info->gfx9.num_banks =
3937 		adev->gfx.config.gb_addr_config_fields.num_banks;
3938 	tiling_info->gfx9.pipe_interleave =
3939 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3940 	tiling_info->gfx9.num_shader_engines =
3941 		adev->gfx.config.gb_addr_config_fields.num_se;
3942 	tiling_info->gfx9.max_compressed_frags =
3943 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3944 	tiling_info->gfx9.num_rb_per_se =
3945 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3946 	tiling_info->gfx9.shaderEnable = 1;
3947 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3948 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3949 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3950 	    adev->asic_type == CHIP_VANGOGH)
3951 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3952 }
3953 
3954 static int
3955 validate_dcc(struct amdgpu_device *adev,
3956 	     const enum surface_pixel_format format,
3957 	     const enum dc_rotation_angle rotation,
3958 	     const union dc_tiling_info *tiling_info,
3959 	     const struct dc_plane_dcc_param *dcc,
3960 	     const struct dc_plane_address *address,
3961 	     const struct plane_size *plane_size)
3962 {
3963 	struct dc *dc = adev->dm.dc;
3964 	struct dc_dcc_surface_param input;
3965 	struct dc_surface_dcc_cap output;
3966 
3967 	memset(&input, 0, sizeof(input));
3968 	memset(&output, 0, sizeof(output));
3969 
3970 	if (!dcc->enable)
3971 		return 0;
3972 
3973 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3974 	    !dc->cap_funcs.get_dcc_compression_cap)
3975 		return -EINVAL;
3976 
3977 	input.format = format;
3978 	input.surface_size.width = plane_size->surface_size.width;
3979 	input.surface_size.height = plane_size->surface_size.height;
3980 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3981 
3982 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3983 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3984 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3985 		input.scan = SCAN_DIRECTION_VERTICAL;
3986 
3987 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3988 		return -EINVAL;
3989 
3990 	if (!output.capable)
3991 		return -EINVAL;
3992 
3993 	if (dcc->independent_64b_blks == 0 &&
3994 	    output.grph.rgb.independent_64b_blks != 0)
3995 		return -EINVAL;
3996 
3997 	return 0;
3998 }
3999 
4000 static bool
4001 modifier_has_dcc(uint64_t modifier)
4002 {
4003 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4004 }
4005 
4006 static unsigned
4007 modifier_gfx9_swizzle_mode(uint64_t modifier)
4008 {
4009 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4010 		return 0;
4011 
4012 	return AMD_FMT_MOD_GET(TILE, modifier);
4013 }
4014 
4015 static const struct drm_format_info *
4016 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4017 {
4018 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4019 }
4020 
4021 static void
4022 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4023 				    union dc_tiling_info *tiling_info,
4024 				    uint64_t modifier)
4025 {
4026 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4027 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4028 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4029 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4030 
4031 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4032 
4033 	if (!IS_AMD_FMT_MOD(modifier))
4034 		return;
4035 
4036 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4037 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4038 
4039 	if (adev->family >= AMDGPU_FAMILY_NV) {
4040 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4041 	} else {
4042 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4043 
4044 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4045 	}
4046 }
4047 
4048 enum dm_micro_swizzle {
4049 	MICRO_SWIZZLE_Z = 0,
4050 	MICRO_SWIZZLE_S = 1,
4051 	MICRO_SWIZZLE_D = 2,
4052 	MICRO_SWIZZLE_R = 3
4053 };
4054 
4055 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4056 					  uint32_t format,
4057 					  uint64_t modifier)
4058 {
4059 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4060 	const struct drm_format_info *info = drm_format_info(format);
4061 
4062 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4063 
4064 	if (!info)
4065 		return false;
4066 
4067 	/*
4068 	 * We always have to allow this modifier, because core DRM still
4069 	 * checks LINEAR support if userspace does not provide modifers.
4070 	 */
4071 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4072 		return true;
4073 
4074 	/*
4075 	 * The arbitrary tiling support for multiplane formats has not been hooked
4076 	 * up.
4077 	 */
4078 	if (info->num_planes > 1)
4079 		return false;
4080 
4081 	/*
4082 	 * For D swizzle the canonical modifier depends on the bpp, so check
4083 	 * it here.
4084 	 */
4085 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4086 	    adev->family >= AMDGPU_FAMILY_NV) {
4087 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4088 			return false;
4089 	}
4090 
4091 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4092 	    info->cpp[0] < 8)
4093 		return false;
4094 
4095 	if (modifier_has_dcc(modifier)) {
4096 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4097 		if (info->cpp[0] != 4)
4098 			return false;
4099 	}
4100 
4101 	return true;
4102 }
4103 
4104 static void
4105 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4106 {
4107 	if (!*mods)
4108 		return;
4109 
4110 	if (*cap - *size < 1) {
4111 		uint64_t new_cap = *cap * 2;
4112 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4113 
4114 		if (!new_mods) {
4115 			kfree(*mods);
4116 			*mods = NULL;
4117 			return;
4118 		}
4119 
4120 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4121 		kfree(*mods);
4122 		*mods = new_mods;
4123 		*cap = new_cap;
4124 	}
4125 
4126 	(*mods)[*size] = mod;
4127 	*size += 1;
4128 }
4129 
4130 static void
4131 add_gfx9_modifiers(const struct amdgpu_device *adev,
4132 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4133 {
4134 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4135 	int pipe_xor_bits = min(8, pipes +
4136 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4137 	int bank_xor_bits = min(8 - pipe_xor_bits,
4138 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4139 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4140 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4141 
4142 
4143 	if (adev->family == AMDGPU_FAMILY_RV) {
4144 		/* Raven2 and later */
4145 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4146 
4147 		/*
4148 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4149 		 * doesn't support _D on DCN
4150 		 */
4151 
4152 		if (has_constant_encode) {
4153 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4154 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4155 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4156 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4157 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4158 				    AMD_FMT_MOD_SET(DCC, 1) |
4159 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4160 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4161 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4162 		}
4163 
4164 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4165 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4166 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4167 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4168 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4169 			    AMD_FMT_MOD_SET(DCC, 1) |
4170 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4171 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4172 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4173 
4174 		if (has_constant_encode) {
4175 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4176 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4177 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4178 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4179 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4180 				    AMD_FMT_MOD_SET(DCC, 1) |
4181 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4182 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4183 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4184 
4185 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4186 				    AMD_FMT_MOD_SET(RB, rb) |
4187 				    AMD_FMT_MOD_SET(PIPE, pipes));
4188 		}
4189 
4190 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4191 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4192 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4193 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4194 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4195 			    AMD_FMT_MOD_SET(DCC, 1) |
4196 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4197 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4198 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4199 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4200 			    AMD_FMT_MOD_SET(RB, rb) |
4201 			    AMD_FMT_MOD_SET(PIPE, pipes));
4202 	}
4203 
4204 	/*
4205 	 * Only supported for 64bpp on Raven, will be filtered on format in
4206 	 * dm_plane_format_mod_supported.
4207 	 */
4208 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4210 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4211 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4212 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4213 
4214 	if (adev->family == AMDGPU_FAMILY_RV) {
4215 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4216 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4217 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4218 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4219 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4220 	}
4221 
4222 	/*
4223 	 * Only supported for 64bpp on Raven, will be filtered on format in
4224 	 * dm_plane_format_mod_supported.
4225 	 */
4226 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4227 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4228 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4229 
4230 	if (adev->family == AMDGPU_FAMILY_RV) {
4231 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4232 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4233 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4234 	}
4235 }
4236 
4237 static void
4238 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4239 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4240 {
4241 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4242 
4243 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4244 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4245 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4246 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4247 		    AMD_FMT_MOD_SET(DCC, 1) |
4248 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4249 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4250 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4251 
4252 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4253 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4254 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4255 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4256 		    AMD_FMT_MOD_SET(DCC, 1) |
4257 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4258 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4259 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4260 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4261 
4262 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4263 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4264 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4265 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4266 
4267 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4268 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4269 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4270 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4271 
4272 
4273 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4274 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4275 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4276 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4277 
4278 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4279 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4280 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4281 }
4282 
4283 static void
4284 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4285 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4286 {
4287 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4288 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4289 
4290 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4291 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4292 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4293 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4294 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4295 		    AMD_FMT_MOD_SET(DCC, 1) |
4296 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4297 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4298 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4299 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4300 
4301 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4302 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4303 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4304 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4305 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4306 		    AMD_FMT_MOD_SET(DCC, 1) |
4307 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4308 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4309 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4310 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4311 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4312 
4313 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4314 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4315 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4316 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4317 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4318 
4319 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4320 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4321 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4322 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4323 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4324 
4325 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4326 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4327 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4328 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4329 
4330 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4331 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4332 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4333 }
4334 
4335 static int
4336 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4337 {
4338 	uint64_t size = 0, capacity = 128;
4339 	*mods = NULL;
4340 
4341 	/* We have not hooked up any pre-GFX9 modifiers. */
4342 	if (adev->family < AMDGPU_FAMILY_AI)
4343 		return 0;
4344 
4345 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4346 
4347 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4348 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4349 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4350 		return *mods ? 0 : -ENOMEM;
4351 	}
4352 
4353 	switch (adev->family) {
4354 	case AMDGPU_FAMILY_AI:
4355 	case AMDGPU_FAMILY_RV:
4356 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4357 		break;
4358 	case AMDGPU_FAMILY_NV:
4359 	case AMDGPU_FAMILY_VGH:
4360 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4361 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4362 		else
4363 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4364 		break;
4365 	}
4366 
4367 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4368 
4369 	/* INVALID marks the end of the list. */
4370 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4371 
4372 	if (!*mods)
4373 		return -ENOMEM;
4374 
4375 	return 0;
4376 }
4377 
4378 static int
4379 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4380 					  const struct amdgpu_framebuffer *afb,
4381 					  const enum surface_pixel_format format,
4382 					  const enum dc_rotation_angle rotation,
4383 					  const struct plane_size *plane_size,
4384 					  union dc_tiling_info *tiling_info,
4385 					  struct dc_plane_dcc_param *dcc,
4386 					  struct dc_plane_address *address,
4387 					  const bool force_disable_dcc)
4388 {
4389 	const uint64_t modifier = afb->base.modifier;
4390 	int ret;
4391 
4392 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4393 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4394 
4395 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4396 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4397 
4398 		dcc->enable = 1;
4399 		dcc->meta_pitch = afb->base.pitches[1];
4400 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4401 
4402 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4403 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4404 	}
4405 
4406 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4407 	if (ret)
4408 		return ret;
4409 
4410 	return 0;
4411 }
4412 
4413 static int
4414 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4415 			     const struct amdgpu_framebuffer *afb,
4416 			     const enum surface_pixel_format format,
4417 			     const enum dc_rotation_angle rotation,
4418 			     const uint64_t tiling_flags,
4419 			     union dc_tiling_info *tiling_info,
4420 			     struct plane_size *plane_size,
4421 			     struct dc_plane_dcc_param *dcc,
4422 			     struct dc_plane_address *address,
4423 			     bool tmz_surface,
4424 			     bool force_disable_dcc)
4425 {
4426 	const struct drm_framebuffer *fb = &afb->base;
4427 	int ret;
4428 
4429 	memset(tiling_info, 0, sizeof(*tiling_info));
4430 	memset(plane_size, 0, sizeof(*plane_size));
4431 	memset(dcc, 0, sizeof(*dcc));
4432 	memset(address, 0, sizeof(*address));
4433 
4434 	address->tmz_surface = tmz_surface;
4435 
4436 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4437 		uint64_t addr = afb->address + fb->offsets[0];
4438 
4439 		plane_size->surface_size.x = 0;
4440 		plane_size->surface_size.y = 0;
4441 		plane_size->surface_size.width = fb->width;
4442 		plane_size->surface_size.height = fb->height;
4443 		plane_size->surface_pitch =
4444 			fb->pitches[0] / fb->format->cpp[0];
4445 
4446 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4447 		address->grph.addr.low_part = lower_32_bits(addr);
4448 		address->grph.addr.high_part = upper_32_bits(addr);
4449 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4450 		uint64_t luma_addr = afb->address + fb->offsets[0];
4451 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4452 
4453 		plane_size->surface_size.x = 0;
4454 		plane_size->surface_size.y = 0;
4455 		plane_size->surface_size.width = fb->width;
4456 		plane_size->surface_size.height = fb->height;
4457 		plane_size->surface_pitch =
4458 			fb->pitches[0] / fb->format->cpp[0];
4459 
4460 		plane_size->chroma_size.x = 0;
4461 		plane_size->chroma_size.y = 0;
4462 		/* TODO: set these based on surface format */
4463 		plane_size->chroma_size.width = fb->width / 2;
4464 		plane_size->chroma_size.height = fb->height / 2;
4465 
4466 		plane_size->chroma_pitch =
4467 			fb->pitches[1] / fb->format->cpp[1];
4468 
4469 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4470 		address->video_progressive.luma_addr.low_part =
4471 			lower_32_bits(luma_addr);
4472 		address->video_progressive.luma_addr.high_part =
4473 			upper_32_bits(luma_addr);
4474 		address->video_progressive.chroma_addr.low_part =
4475 			lower_32_bits(chroma_addr);
4476 		address->video_progressive.chroma_addr.high_part =
4477 			upper_32_bits(chroma_addr);
4478 	}
4479 
4480 	if (adev->family >= AMDGPU_FAMILY_AI) {
4481 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4482 								rotation, plane_size,
4483 								tiling_info, dcc,
4484 								address,
4485 								force_disable_dcc);
4486 		if (ret)
4487 			return ret;
4488 	} else {
4489 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4490 	}
4491 
4492 	return 0;
4493 }
4494 
4495 static void
4496 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4497 			       bool *per_pixel_alpha, bool *global_alpha,
4498 			       int *global_alpha_value)
4499 {
4500 	*per_pixel_alpha = false;
4501 	*global_alpha = false;
4502 	*global_alpha_value = 0xff;
4503 
4504 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4505 		return;
4506 
4507 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4508 		static const uint32_t alpha_formats[] = {
4509 			DRM_FORMAT_ARGB8888,
4510 			DRM_FORMAT_RGBA8888,
4511 			DRM_FORMAT_ABGR8888,
4512 		};
4513 		uint32_t format = plane_state->fb->format->format;
4514 		unsigned int i;
4515 
4516 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4517 			if (format == alpha_formats[i]) {
4518 				*per_pixel_alpha = true;
4519 				break;
4520 			}
4521 		}
4522 	}
4523 
4524 	if (plane_state->alpha < 0xffff) {
4525 		*global_alpha = true;
4526 		*global_alpha_value = plane_state->alpha >> 8;
4527 	}
4528 }
4529 
4530 static int
4531 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4532 			    const enum surface_pixel_format format,
4533 			    enum dc_color_space *color_space)
4534 {
4535 	bool full_range;
4536 
4537 	*color_space = COLOR_SPACE_SRGB;
4538 
4539 	/* DRM color properties only affect non-RGB formats. */
4540 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4541 		return 0;
4542 
4543 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4544 
4545 	switch (plane_state->color_encoding) {
4546 	case DRM_COLOR_YCBCR_BT601:
4547 		if (full_range)
4548 			*color_space = COLOR_SPACE_YCBCR601;
4549 		else
4550 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4551 		break;
4552 
4553 	case DRM_COLOR_YCBCR_BT709:
4554 		if (full_range)
4555 			*color_space = COLOR_SPACE_YCBCR709;
4556 		else
4557 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4558 		break;
4559 
4560 	case DRM_COLOR_YCBCR_BT2020:
4561 		if (full_range)
4562 			*color_space = COLOR_SPACE_2020_YCBCR;
4563 		else
4564 			return -EINVAL;
4565 		break;
4566 
4567 	default:
4568 		return -EINVAL;
4569 	}
4570 
4571 	return 0;
4572 }
4573 
4574 static int
4575 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4576 			    const struct drm_plane_state *plane_state,
4577 			    const uint64_t tiling_flags,
4578 			    struct dc_plane_info *plane_info,
4579 			    struct dc_plane_address *address,
4580 			    bool tmz_surface,
4581 			    bool force_disable_dcc)
4582 {
4583 	const struct drm_framebuffer *fb = plane_state->fb;
4584 	const struct amdgpu_framebuffer *afb =
4585 		to_amdgpu_framebuffer(plane_state->fb);
4586 	int ret;
4587 
4588 	memset(plane_info, 0, sizeof(*plane_info));
4589 
4590 	switch (fb->format->format) {
4591 	case DRM_FORMAT_C8:
4592 		plane_info->format =
4593 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4594 		break;
4595 	case DRM_FORMAT_RGB565:
4596 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4597 		break;
4598 	case DRM_FORMAT_XRGB8888:
4599 	case DRM_FORMAT_ARGB8888:
4600 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4601 		break;
4602 	case DRM_FORMAT_XRGB2101010:
4603 	case DRM_FORMAT_ARGB2101010:
4604 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4605 		break;
4606 	case DRM_FORMAT_XBGR2101010:
4607 	case DRM_FORMAT_ABGR2101010:
4608 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4609 		break;
4610 	case DRM_FORMAT_XBGR8888:
4611 	case DRM_FORMAT_ABGR8888:
4612 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4613 		break;
4614 	case DRM_FORMAT_NV21:
4615 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4616 		break;
4617 	case DRM_FORMAT_NV12:
4618 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4619 		break;
4620 	case DRM_FORMAT_P010:
4621 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4622 		break;
4623 	case DRM_FORMAT_XRGB16161616F:
4624 	case DRM_FORMAT_ARGB16161616F:
4625 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4626 		break;
4627 	case DRM_FORMAT_XBGR16161616F:
4628 	case DRM_FORMAT_ABGR16161616F:
4629 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4630 		break;
4631 	default:
4632 		DRM_ERROR(
4633 			"Unsupported screen format %p4cc\n",
4634 			&fb->format->format);
4635 		return -EINVAL;
4636 	}
4637 
4638 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4639 	case DRM_MODE_ROTATE_0:
4640 		plane_info->rotation = ROTATION_ANGLE_0;
4641 		break;
4642 	case DRM_MODE_ROTATE_90:
4643 		plane_info->rotation = ROTATION_ANGLE_90;
4644 		break;
4645 	case DRM_MODE_ROTATE_180:
4646 		plane_info->rotation = ROTATION_ANGLE_180;
4647 		break;
4648 	case DRM_MODE_ROTATE_270:
4649 		plane_info->rotation = ROTATION_ANGLE_270;
4650 		break;
4651 	default:
4652 		plane_info->rotation = ROTATION_ANGLE_0;
4653 		break;
4654 	}
4655 
4656 	plane_info->visible = true;
4657 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4658 
4659 	plane_info->layer_index = 0;
4660 
4661 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4662 					  &plane_info->color_space);
4663 	if (ret)
4664 		return ret;
4665 
4666 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4667 					   plane_info->rotation, tiling_flags,
4668 					   &plane_info->tiling_info,
4669 					   &plane_info->plane_size,
4670 					   &plane_info->dcc, address, tmz_surface,
4671 					   force_disable_dcc);
4672 	if (ret)
4673 		return ret;
4674 
4675 	fill_blending_from_plane_state(
4676 		plane_state, &plane_info->per_pixel_alpha,
4677 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4678 
4679 	return 0;
4680 }
4681 
4682 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4683 				    struct dc_plane_state *dc_plane_state,
4684 				    struct drm_plane_state *plane_state,
4685 				    struct drm_crtc_state *crtc_state)
4686 {
4687 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4688 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4689 	struct dc_scaling_info scaling_info;
4690 	struct dc_plane_info plane_info;
4691 	int ret;
4692 	bool force_disable_dcc = false;
4693 
4694 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4695 	if (ret)
4696 		return ret;
4697 
4698 	dc_plane_state->src_rect = scaling_info.src_rect;
4699 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4700 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4701 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4702 
4703 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4704 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4705 					  afb->tiling_flags,
4706 					  &plane_info,
4707 					  &dc_plane_state->address,
4708 					  afb->tmz_surface,
4709 					  force_disable_dcc);
4710 	if (ret)
4711 		return ret;
4712 
4713 	dc_plane_state->format = plane_info.format;
4714 	dc_plane_state->color_space = plane_info.color_space;
4715 	dc_plane_state->format = plane_info.format;
4716 	dc_plane_state->plane_size = plane_info.plane_size;
4717 	dc_plane_state->rotation = plane_info.rotation;
4718 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4719 	dc_plane_state->stereo_format = plane_info.stereo_format;
4720 	dc_plane_state->tiling_info = plane_info.tiling_info;
4721 	dc_plane_state->visible = plane_info.visible;
4722 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4723 	dc_plane_state->global_alpha = plane_info.global_alpha;
4724 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4725 	dc_plane_state->dcc = plane_info.dcc;
4726 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4727 	dc_plane_state->flip_int_enabled = true;
4728 
4729 	/*
4730 	 * Always set input transfer function, since plane state is refreshed
4731 	 * every time.
4732 	 */
4733 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4734 	if (ret)
4735 		return ret;
4736 
4737 	return 0;
4738 }
4739 
4740 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4741 					   const struct dm_connector_state *dm_state,
4742 					   struct dc_stream_state *stream)
4743 {
4744 	enum amdgpu_rmx_type rmx_type;
4745 
4746 	struct rect src = { 0 }; /* viewport in composition space*/
4747 	struct rect dst = { 0 }; /* stream addressable area */
4748 
4749 	/* no mode. nothing to be done */
4750 	if (!mode)
4751 		return;
4752 
4753 	/* Full screen scaling by default */
4754 	src.width = mode->hdisplay;
4755 	src.height = mode->vdisplay;
4756 	dst.width = stream->timing.h_addressable;
4757 	dst.height = stream->timing.v_addressable;
4758 
4759 	if (dm_state) {
4760 		rmx_type = dm_state->scaling;
4761 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4762 			if (src.width * dst.height <
4763 					src.height * dst.width) {
4764 				/* height needs less upscaling/more downscaling */
4765 				dst.width = src.width *
4766 						dst.height / src.height;
4767 			} else {
4768 				/* width needs less upscaling/more downscaling */
4769 				dst.height = src.height *
4770 						dst.width / src.width;
4771 			}
4772 		} else if (rmx_type == RMX_CENTER) {
4773 			dst = src;
4774 		}
4775 
4776 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4777 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4778 
4779 		if (dm_state->underscan_enable) {
4780 			dst.x += dm_state->underscan_hborder / 2;
4781 			dst.y += dm_state->underscan_vborder / 2;
4782 			dst.width -= dm_state->underscan_hborder;
4783 			dst.height -= dm_state->underscan_vborder;
4784 		}
4785 	}
4786 
4787 	stream->src = src;
4788 	stream->dst = dst;
4789 
4790 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4791 			dst.x, dst.y, dst.width, dst.height);
4792 
4793 }
4794 
4795 static enum dc_color_depth
4796 convert_color_depth_from_display_info(const struct drm_connector *connector,
4797 				      bool is_y420, int requested_bpc)
4798 {
4799 	uint8_t bpc;
4800 
4801 	if (is_y420) {
4802 		bpc = 8;
4803 
4804 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4805 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4806 			bpc = 16;
4807 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4808 			bpc = 12;
4809 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4810 			bpc = 10;
4811 	} else {
4812 		bpc = (uint8_t)connector->display_info.bpc;
4813 		/* Assume 8 bpc by default if no bpc is specified. */
4814 		bpc = bpc ? bpc : 8;
4815 	}
4816 
4817 	if (requested_bpc > 0) {
4818 		/*
4819 		 * Cap display bpc based on the user requested value.
4820 		 *
4821 		 * The value for state->max_bpc may not correctly updated
4822 		 * depending on when the connector gets added to the state
4823 		 * or if this was called outside of atomic check, so it
4824 		 * can't be used directly.
4825 		 */
4826 		bpc = min_t(u8, bpc, requested_bpc);
4827 
4828 		/* Round down to the nearest even number. */
4829 		bpc = bpc - (bpc & 1);
4830 	}
4831 
4832 	switch (bpc) {
4833 	case 0:
4834 		/*
4835 		 * Temporary Work around, DRM doesn't parse color depth for
4836 		 * EDID revision before 1.4
4837 		 * TODO: Fix edid parsing
4838 		 */
4839 		return COLOR_DEPTH_888;
4840 	case 6:
4841 		return COLOR_DEPTH_666;
4842 	case 8:
4843 		return COLOR_DEPTH_888;
4844 	case 10:
4845 		return COLOR_DEPTH_101010;
4846 	case 12:
4847 		return COLOR_DEPTH_121212;
4848 	case 14:
4849 		return COLOR_DEPTH_141414;
4850 	case 16:
4851 		return COLOR_DEPTH_161616;
4852 	default:
4853 		return COLOR_DEPTH_UNDEFINED;
4854 	}
4855 }
4856 
4857 static enum dc_aspect_ratio
4858 get_aspect_ratio(const struct drm_display_mode *mode_in)
4859 {
4860 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4861 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4862 }
4863 
4864 static enum dc_color_space
4865 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4866 {
4867 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4868 
4869 	switch (dc_crtc_timing->pixel_encoding)	{
4870 	case PIXEL_ENCODING_YCBCR422:
4871 	case PIXEL_ENCODING_YCBCR444:
4872 	case PIXEL_ENCODING_YCBCR420:
4873 	{
4874 		/*
4875 		 * 27030khz is the separation point between HDTV and SDTV
4876 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4877 		 * respectively
4878 		 */
4879 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4880 			if (dc_crtc_timing->flags.Y_ONLY)
4881 				color_space =
4882 					COLOR_SPACE_YCBCR709_LIMITED;
4883 			else
4884 				color_space = COLOR_SPACE_YCBCR709;
4885 		} else {
4886 			if (dc_crtc_timing->flags.Y_ONLY)
4887 				color_space =
4888 					COLOR_SPACE_YCBCR601_LIMITED;
4889 			else
4890 				color_space = COLOR_SPACE_YCBCR601;
4891 		}
4892 
4893 	}
4894 	break;
4895 	case PIXEL_ENCODING_RGB:
4896 		color_space = COLOR_SPACE_SRGB;
4897 		break;
4898 
4899 	default:
4900 		WARN_ON(1);
4901 		break;
4902 	}
4903 
4904 	return color_space;
4905 }
4906 
4907 static bool adjust_colour_depth_from_display_info(
4908 	struct dc_crtc_timing *timing_out,
4909 	const struct drm_display_info *info)
4910 {
4911 	enum dc_color_depth depth = timing_out->display_color_depth;
4912 	int normalized_clk;
4913 	do {
4914 		normalized_clk = timing_out->pix_clk_100hz / 10;
4915 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4916 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4917 			normalized_clk /= 2;
4918 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4919 		switch (depth) {
4920 		case COLOR_DEPTH_888:
4921 			break;
4922 		case COLOR_DEPTH_101010:
4923 			normalized_clk = (normalized_clk * 30) / 24;
4924 			break;
4925 		case COLOR_DEPTH_121212:
4926 			normalized_clk = (normalized_clk * 36) / 24;
4927 			break;
4928 		case COLOR_DEPTH_161616:
4929 			normalized_clk = (normalized_clk * 48) / 24;
4930 			break;
4931 		default:
4932 			/* The above depths are the only ones valid for HDMI. */
4933 			return false;
4934 		}
4935 		if (normalized_clk <= info->max_tmds_clock) {
4936 			timing_out->display_color_depth = depth;
4937 			return true;
4938 		}
4939 	} while (--depth > COLOR_DEPTH_666);
4940 	return false;
4941 }
4942 
4943 static void fill_stream_properties_from_drm_display_mode(
4944 	struct dc_stream_state *stream,
4945 	const struct drm_display_mode *mode_in,
4946 	const struct drm_connector *connector,
4947 	const struct drm_connector_state *connector_state,
4948 	const struct dc_stream_state *old_stream,
4949 	int requested_bpc)
4950 {
4951 	struct dc_crtc_timing *timing_out = &stream->timing;
4952 	const struct drm_display_info *info = &connector->display_info;
4953 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4954 	struct hdmi_vendor_infoframe hv_frame;
4955 	struct hdmi_avi_infoframe avi_frame;
4956 
4957 	memset(&hv_frame, 0, sizeof(hv_frame));
4958 	memset(&avi_frame, 0, sizeof(avi_frame));
4959 
4960 	timing_out->h_border_left = 0;
4961 	timing_out->h_border_right = 0;
4962 	timing_out->v_border_top = 0;
4963 	timing_out->v_border_bottom = 0;
4964 	/* TODO: un-hardcode */
4965 	if (drm_mode_is_420_only(info, mode_in)
4966 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4967 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4968 	else if (drm_mode_is_420_also(info, mode_in)
4969 			&& aconnector->force_yuv420_output)
4970 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4971 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4972 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4973 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4974 	else
4975 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4976 
4977 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4978 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4979 		connector,
4980 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4981 		requested_bpc);
4982 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4983 	timing_out->hdmi_vic = 0;
4984 
4985 	if(old_stream) {
4986 		timing_out->vic = old_stream->timing.vic;
4987 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4988 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4989 	} else {
4990 		timing_out->vic = drm_match_cea_mode(mode_in);
4991 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4992 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4993 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4994 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4995 	}
4996 
4997 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4998 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4999 		timing_out->vic = avi_frame.video_code;
5000 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5001 		timing_out->hdmi_vic = hv_frame.vic;
5002 	}
5003 
5004 	timing_out->h_addressable = mode_in->crtc_hdisplay;
5005 	timing_out->h_total = mode_in->crtc_htotal;
5006 	timing_out->h_sync_width =
5007 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5008 	timing_out->h_front_porch =
5009 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5010 	timing_out->v_total = mode_in->crtc_vtotal;
5011 	timing_out->v_addressable = mode_in->crtc_vdisplay;
5012 	timing_out->v_front_porch =
5013 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5014 	timing_out->v_sync_width =
5015 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5016 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5017 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5018 
5019 	stream->output_color_space = get_output_color_space(timing_out);
5020 
5021 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5022 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5023 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5024 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5025 		    drm_mode_is_420_also(info, mode_in) &&
5026 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5027 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5028 			adjust_colour_depth_from_display_info(timing_out, info);
5029 		}
5030 	}
5031 }
5032 
5033 static void fill_audio_info(struct audio_info *audio_info,
5034 			    const struct drm_connector *drm_connector,
5035 			    const struct dc_sink *dc_sink)
5036 {
5037 	int i = 0;
5038 	int cea_revision = 0;
5039 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5040 
5041 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5042 	audio_info->product_id = edid_caps->product_id;
5043 
5044 	cea_revision = drm_connector->display_info.cea_rev;
5045 
5046 	strscpy(audio_info->display_name,
5047 		edid_caps->display_name,
5048 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5049 
5050 	if (cea_revision >= 3) {
5051 		audio_info->mode_count = edid_caps->audio_mode_count;
5052 
5053 		for (i = 0; i < audio_info->mode_count; ++i) {
5054 			audio_info->modes[i].format_code =
5055 					(enum audio_format_code)
5056 					(edid_caps->audio_modes[i].format_code);
5057 			audio_info->modes[i].channel_count =
5058 					edid_caps->audio_modes[i].channel_count;
5059 			audio_info->modes[i].sample_rates.all =
5060 					edid_caps->audio_modes[i].sample_rate;
5061 			audio_info->modes[i].sample_size =
5062 					edid_caps->audio_modes[i].sample_size;
5063 		}
5064 	}
5065 
5066 	audio_info->flags.all = edid_caps->speaker_flags;
5067 
5068 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5069 	if (drm_connector->latency_present[0]) {
5070 		audio_info->video_latency = drm_connector->video_latency[0];
5071 		audio_info->audio_latency = drm_connector->audio_latency[0];
5072 	}
5073 
5074 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5075 
5076 }
5077 
5078 static void
5079 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5080 				      struct drm_display_mode *dst_mode)
5081 {
5082 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5083 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5084 	dst_mode->crtc_clock = src_mode->crtc_clock;
5085 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5086 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5087 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5088 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5089 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5090 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5091 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5092 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5093 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5094 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5095 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5096 }
5097 
5098 static void
5099 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5100 					const struct drm_display_mode *native_mode,
5101 					bool scale_enabled)
5102 {
5103 	if (scale_enabled) {
5104 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5105 	} else if (native_mode->clock == drm_mode->clock &&
5106 			native_mode->htotal == drm_mode->htotal &&
5107 			native_mode->vtotal == drm_mode->vtotal) {
5108 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5109 	} else {
5110 		/* no scaling nor amdgpu inserted, no need to patch */
5111 	}
5112 }
5113 
5114 static struct dc_sink *
5115 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5116 {
5117 	struct dc_sink_init_data sink_init_data = { 0 };
5118 	struct dc_sink *sink = NULL;
5119 	sink_init_data.link = aconnector->dc_link;
5120 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5121 
5122 	sink = dc_sink_create(&sink_init_data);
5123 	if (!sink) {
5124 		DRM_ERROR("Failed to create sink!\n");
5125 		return NULL;
5126 	}
5127 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5128 
5129 	return sink;
5130 }
5131 
5132 static void set_multisync_trigger_params(
5133 		struct dc_stream_state *stream)
5134 {
5135 	if (stream->triggered_crtc_reset.enabled) {
5136 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5137 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5138 	}
5139 }
5140 
5141 static void set_master_stream(struct dc_stream_state *stream_set[],
5142 			      int stream_count)
5143 {
5144 	int j, highest_rfr = 0, master_stream = 0;
5145 
5146 	for (j = 0;  j < stream_count; j++) {
5147 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5148 			int refresh_rate = 0;
5149 
5150 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5151 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5152 			if (refresh_rate > highest_rfr) {
5153 				highest_rfr = refresh_rate;
5154 				master_stream = j;
5155 			}
5156 		}
5157 	}
5158 	for (j = 0;  j < stream_count; j++) {
5159 		if (stream_set[j])
5160 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5161 	}
5162 }
5163 
5164 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5165 {
5166 	int i = 0;
5167 
5168 	if (context->stream_count < 2)
5169 		return;
5170 	for (i = 0; i < context->stream_count ; i++) {
5171 		if (!context->streams[i])
5172 			continue;
5173 		/*
5174 		 * TODO: add a function to read AMD VSDB bits and set
5175 		 * crtc_sync_master.multi_sync_enabled flag
5176 		 * For now it's set to false
5177 		 */
5178 		set_multisync_trigger_params(context->streams[i]);
5179 	}
5180 	set_master_stream(context->streams, context->stream_count);
5181 }
5182 
5183 static struct dc_stream_state *
5184 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5185 		       const struct drm_display_mode *drm_mode,
5186 		       const struct dm_connector_state *dm_state,
5187 		       const struct dc_stream_state *old_stream,
5188 		       int requested_bpc)
5189 {
5190 	struct drm_display_mode *preferred_mode = NULL;
5191 	struct drm_connector *drm_connector;
5192 	const struct drm_connector_state *con_state =
5193 		dm_state ? &dm_state->base : NULL;
5194 	struct dc_stream_state *stream = NULL;
5195 	struct drm_display_mode mode = *drm_mode;
5196 	bool native_mode_found = false;
5197 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5198 	int mode_refresh;
5199 	int preferred_refresh = 0;
5200 #if defined(CONFIG_DRM_AMD_DC_DCN)
5201 	struct dsc_dec_dpcd_caps dsc_caps;
5202 	uint32_t link_bandwidth_kbps;
5203 #endif
5204 	struct dc_sink *sink = NULL;
5205 	if (aconnector == NULL) {
5206 		DRM_ERROR("aconnector is NULL!\n");
5207 		return stream;
5208 	}
5209 
5210 	drm_connector = &aconnector->base;
5211 
5212 	if (!aconnector->dc_sink) {
5213 		sink = create_fake_sink(aconnector);
5214 		if (!sink)
5215 			return stream;
5216 	} else {
5217 		sink = aconnector->dc_sink;
5218 		dc_sink_retain(sink);
5219 	}
5220 
5221 	stream = dc_create_stream_for_sink(sink);
5222 
5223 	if (stream == NULL) {
5224 		DRM_ERROR("Failed to create stream for sink!\n");
5225 		goto finish;
5226 	}
5227 
5228 	stream->dm_stream_context = aconnector;
5229 
5230 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5231 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5232 
5233 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5234 		/* Search for preferred mode */
5235 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5236 			native_mode_found = true;
5237 			break;
5238 		}
5239 	}
5240 	if (!native_mode_found)
5241 		preferred_mode = list_first_entry_or_null(
5242 				&aconnector->base.modes,
5243 				struct drm_display_mode,
5244 				head);
5245 
5246 	mode_refresh = drm_mode_vrefresh(&mode);
5247 
5248 	if (preferred_mode == NULL) {
5249 		/*
5250 		 * This may not be an error, the use case is when we have no
5251 		 * usermode calls to reset and set mode upon hotplug. In this
5252 		 * case, we call set mode ourselves to restore the previous mode
5253 		 * and the modelist may not be filled in in time.
5254 		 */
5255 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5256 	} else {
5257 		decide_crtc_timing_for_drm_display_mode(
5258 				&mode, preferred_mode,
5259 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5260 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5261 	}
5262 
5263 	if (!dm_state)
5264 		drm_mode_set_crtcinfo(&mode, 0);
5265 
5266 	/*
5267 	* If scaling is enabled and refresh rate didn't change
5268 	* we copy the vic and polarities of the old timings
5269 	*/
5270 	if (!scale || mode_refresh != preferred_refresh)
5271 		fill_stream_properties_from_drm_display_mode(stream,
5272 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5273 	else
5274 		fill_stream_properties_from_drm_display_mode(stream,
5275 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5276 
5277 	stream->timing.flags.DSC = 0;
5278 
5279 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5280 #if defined(CONFIG_DRM_AMD_DC_DCN)
5281 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5282 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5283 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5284 				      &dsc_caps);
5285 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5286 							     dc_link_get_link_cap(aconnector->dc_link));
5287 
5288 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5289 			/* Set DSC policy according to dsc_clock_en */
5290 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5291 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5292 
5293 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5294 						  &dsc_caps,
5295 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5296 						  0,
5297 						  link_bandwidth_kbps,
5298 						  &stream->timing,
5299 						  &stream->timing.dsc_cfg))
5300 				stream->timing.flags.DSC = 1;
5301 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5302 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5303 				stream->timing.flags.DSC = 1;
5304 
5305 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5306 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5307 
5308 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5309 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5310 
5311 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5312 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5313 		}
5314 #endif
5315 	}
5316 
5317 	update_stream_scaling_settings(&mode, dm_state, stream);
5318 
5319 	fill_audio_info(
5320 		&stream->audio_info,
5321 		drm_connector,
5322 		sink);
5323 
5324 	update_stream_signal(stream, sink);
5325 
5326 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5327 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5328 
5329 	if (stream->link->psr_settings.psr_feature_enabled) {
5330 		//
5331 		// should decide stream support vsc sdp colorimetry capability
5332 		// before building vsc info packet
5333 		//
5334 		stream->use_vsc_sdp_for_colorimetry = false;
5335 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5336 			stream->use_vsc_sdp_for_colorimetry =
5337 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5338 		} else {
5339 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5340 				stream->use_vsc_sdp_for_colorimetry = true;
5341 		}
5342 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5343 	}
5344 finish:
5345 	dc_sink_release(sink);
5346 
5347 	return stream;
5348 }
5349 
5350 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5351 {
5352 	drm_crtc_cleanup(crtc);
5353 	kfree(crtc);
5354 }
5355 
5356 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5357 				  struct drm_crtc_state *state)
5358 {
5359 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5360 
5361 	/* TODO Destroy dc_stream objects are stream object is flattened */
5362 	if (cur->stream)
5363 		dc_stream_release(cur->stream);
5364 
5365 
5366 	__drm_atomic_helper_crtc_destroy_state(state);
5367 
5368 
5369 	kfree(state);
5370 }
5371 
5372 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5373 {
5374 	struct dm_crtc_state *state;
5375 
5376 	if (crtc->state)
5377 		dm_crtc_destroy_state(crtc, crtc->state);
5378 
5379 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5380 	if (WARN_ON(!state))
5381 		return;
5382 
5383 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5384 }
5385 
5386 static struct drm_crtc_state *
5387 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5388 {
5389 	struct dm_crtc_state *state, *cur;
5390 
5391 	cur = to_dm_crtc_state(crtc->state);
5392 
5393 	if (WARN_ON(!crtc->state))
5394 		return NULL;
5395 
5396 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5397 	if (!state)
5398 		return NULL;
5399 
5400 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5401 
5402 	if (cur->stream) {
5403 		state->stream = cur->stream;
5404 		dc_stream_retain(state->stream);
5405 	}
5406 
5407 	state->active_planes = cur->active_planes;
5408 	state->vrr_infopacket = cur->vrr_infopacket;
5409 	state->abm_level = cur->abm_level;
5410 	state->vrr_supported = cur->vrr_supported;
5411 	state->freesync_config = cur->freesync_config;
5412 	state->crc_src = cur->crc_src;
5413 	state->cm_has_degamma = cur->cm_has_degamma;
5414 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5415 
5416 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5417 
5418 	return &state->base;
5419 }
5420 
5421 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5422 {
5423 	enum dc_irq_source irq_source;
5424 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5425 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5426 	int rc;
5427 
5428 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5429 
5430 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5431 
5432 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5433 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5434 	return rc;
5435 }
5436 
5437 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5438 {
5439 	enum dc_irq_source irq_source;
5440 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5441 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5442 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5443 #if defined(CONFIG_DRM_AMD_DC_DCN)
5444 	struct amdgpu_display_manager *dm = &adev->dm;
5445 	unsigned long flags;
5446 #endif
5447 	int rc = 0;
5448 
5449 	if (enable) {
5450 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5451 		if (amdgpu_dm_vrr_active(acrtc_state))
5452 			rc = dm_set_vupdate_irq(crtc, true);
5453 	} else {
5454 		/* vblank irq off -> vupdate irq off */
5455 		rc = dm_set_vupdate_irq(crtc, false);
5456 	}
5457 
5458 	if (rc)
5459 		return rc;
5460 
5461 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5462 
5463 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5464 		return -EBUSY;
5465 
5466 	if (amdgpu_in_reset(adev))
5467 		return 0;
5468 
5469 #if defined(CONFIG_DRM_AMD_DC_DCN)
5470 	spin_lock_irqsave(&dm->vblank_lock, flags);
5471 	dm->vblank_workqueue->dm = dm;
5472 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5473 	dm->vblank_workqueue->enable = enable;
5474 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5475 	schedule_work(&dm->vblank_workqueue->mall_work);
5476 #endif
5477 
5478 	return 0;
5479 }
5480 
5481 static int dm_enable_vblank(struct drm_crtc *crtc)
5482 {
5483 	return dm_set_vblank(crtc, true);
5484 }
5485 
5486 static void dm_disable_vblank(struct drm_crtc *crtc)
5487 {
5488 	dm_set_vblank(crtc, false);
5489 }
5490 
5491 /* Implemented only the options currently availible for the driver */
5492 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5493 	.reset = dm_crtc_reset_state,
5494 	.destroy = amdgpu_dm_crtc_destroy,
5495 	.set_config = drm_atomic_helper_set_config,
5496 	.page_flip = drm_atomic_helper_page_flip,
5497 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5498 	.atomic_destroy_state = dm_crtc_destroy_state,
5499 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5500 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5501 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5502 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5503 	.enable_vblank = dm_enable_vblank,
5504 	.disable_vblank = dm_disable_vblank,
5505 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5506 };
5507 
5508 static enum drm_connector_status
5509 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5510 {
5511 	bool connected;
5512 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5513 
5514 	/*
5515 	 * Notes:
5516 	 * 1. This interface is NOT called in context of HPD irq.
5517 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5518 	 * makes it a bad place for *any* MST-related activity.
5519 	 */
5520 
5521 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5522 	    !aconnector->fake_enable)
5523 		connected = (aconnector->dc_sink != NULL);
5524 	else
5525 		connected = (aconnector->base.force == DRM_FORCE_ON);
5526 
5527 	update_subconnector_property(aconnector);
5528 
5529 	return (connected ? connector_status_connected :
5530 			connector_status_disconnected);
5531 }
5532 
5533 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5534 					    struct drm_connector_state *connector_state,
5535 					    struct drm_property *property,
5536 					    uint64_t val)
5537 {
5538 	struct drm_device *dev = connector->dev;
5539 	struct amdgpu_device *adev = drm_to_adev(dev);
5540 	struct dm_connector_state *dm_old_state =
5541 		to_dm_connector_state(connector->state);
5542 	struct dm_connector_state *dm_new_state =
5543 		to_dm_connector_state(connector_state);
5544 
5545 	int ret = -EINVAL;
5546 
5547 	if (property == dev->mode_config.scaling_mode_property) {
5548 		enum amdgpu_rmx_type rmx_type;
5549 
5550 		switch (val) {
5551 		case DRM_MODE_SCALE_CENTER:
5552 			rmx_type = RMX_CENTER;
5553 			break;
5554 		case DRM_MODE_SCALE_ASPECT:
5555 			rmx_type = RMX_ASPECT;
5556 			break;
5557 		case DRM_MODE_SCALE_FULLSCREEN:
5558 			rmx_type = RMX_FULL;
5559 			break;
5560 		case DRM_MODE_SCALE_NONE:
5561 		default:
5562 			rmx_type = RMX_OFF;
5563 			break;
5564 		}
5565 
5566 		if (dm_old_state->scaling == rmx_type)
5567 			return 0;
5568 
5569 		dm_new_state->scaling = rmx_type;
5570 		ret = 0;
5571 	} else if (property == adev->mode_info.underscan_hborder_property) {
5572 		dm_new_state->underscan_hborder = val;
5573 		ret = 0;
5574 	} else if (property == adev->mode_info.underscan_vborder_property) {
5575 		dm_new_state->underscan_vborder = val;
5576 		ret = 0;
5577 	} else if (property == adev->mode_info.underscan_property) {
5578 		dm_new_state->underscan_enable = val;
5579 		ret = 0;
5580 	} else if (property == adev->mode_info.abm_level_property) {
5581 		dm_new_state->abm_level = val;
5582 		ret = 0;
5583 	}
5584 
5585 	return ret;
5586 }
5587 
5588 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5589 					    const struct drm_connector_state *state,
5590 					    struct drm_property *property,
5591 					    uint64_t *val)
5592 {
5593 	struct drm_device *dev = connector->dev;
5594 	struct amdgpu_device *adev = drm_to_adev(dev);
5595 	struct dm_connector_state *dm_state =
5596 		to_dm_connector_state(state);
5597 	int ret = -EINVAL;
5598 
5599 	if (property == dev->mode_config.scaling_mode_property) {
5600 		switch (dm_state->scaling) {
5601 		case RMX_CENTER:
5602 			*val = DRM_MODE_SCALE_CENTER;
5603 			break;
5604 		case RMX_ASPECT:
5605 			*val = DRM_MODE_SCALE_ASPECT;
5606 			break;
5607 		case RMX_FULL:
5608 			*val = DRM_MODE_SCALE_FULLSCREEN;
5609 			break;
5610 		case RMX_OFF:
5611 		default:
5612 			*val = DRM_MODE_SCALE_NONE;
5613 			break;
5614 		}
5615 		ret = 0;
5616 	} else if (property == adev->mode_info.underscan_hborder_property) {
5617 		*val = dm_state->underscan_hborder;
5618 		ret = 0;
5619 	} else if (property == adev->mode_info.underscan_vborder_property) {
5620 		*val = dm_state->underscan_vborder;
5621 		ret = 0;
5622 	} else if (property == adev->mode_info.underscan_property) {
5623 		*val = dm_state->underscan_enable;
5624 		ret = 0;
5625 	} else if (property == adev->mode_info.abm_level_property) {
5626 		*val = dm_state->abm_level;
5627 		ret = 0;
5628 	}
5629 
5630 	return ret;
5631 }
5632 
5633 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5634 {
5635 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5636 
5637 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5638 }
5639 
5640 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5641 {
5642 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5643 	const struct dc_link *link = aconnector->dc_link;
5644 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5645 	struct amdgpu_display_manager *dm = &adev->dm;
5646 
5647 	/*
5648 	 * Call only if mst_mgr was iniitalized before since it's not done
5649 	 * for all connector types.
5650 	 */
5651 	if (aconnector->mst_mgr.dev)
5652 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5653 
5654 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5655 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5656 
5657 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5658 	    link->type != dc_connection_none &&
5659 	    dm->backlight_dev) {
5660 		backlight_device_unregister(dm->backlight_dev);
5661 		dm->backlight_dev = NULL;
5662 	}
5663 #endif
5664 
5665 	if (aconnector->dc_em_sink)
5666 		dc_sink_release(aconnector->dc_em_sink);
5667 	aconnector->dc_em_sink = NULL;
5668 	if (aconnector->dc_sink)
5669 		dc_sink_release(aconnector->dc_sink);
5670 	aconnector->dc_sink = NULL;
5671 
5672 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5673 	drm_connector_unregister(connector);
5674 	drm_connector_cleanup(connector);
5675 	if (aconnector->i2c) {
5676 		i2c_del_adapter(&aconnector->i2c->base);
5677 		kfree(aconnector->i2c);
5678 	}
5679 	kfree(aconnector->dm_dp_aux.aux.name);
5680 
5681 	kfree(connector);
5682 }
5683 
5684 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5685 {
5686 	struct dm_connector_state *state =
5687 		to_dm_connector_state(connector->state);
5688 
5689 	if (connector->state)
5690 		__drm_atomic_helper_connector_destroy_state(connector->state);
5691 
5692 	kfree(state);
5693 
5694 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5695 
5696 	if (state) {
5697 		state->scaling = RMX_OFF;
5698 		state->underscan_enable = false;
5699 		state->underscan_hborder = 0;
5700 		state->underscan_vborder = 0;
5701 		state->base.max_requested_bpc = 8;
5702 		state->vcpi_slots = 0;
5703 		state->pbn = 0;
5704 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5705 			state->abm_level = amdgpu_dm_abm_level;
5706 
5707 		__drm_atomic_helper_connector_reset(connector, &state->base);
5708 	}
5709 }
5710 
5711 struct drm_connector_state *
5712 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5713 {
5714 	struct dm_connector_state *state =
5715 		to_dm_connector_state(connector->state);
5716 
5717 	struct dm_connector_state *new_state =
5718 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5719 
5720 	if (!new_state)
5721 		return NULL;
5722 
5723 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5724 
5725 	new_state->freesync_capable = state->freesync_capable;
5726 	new_state->abm_level = state->abm_level;
5727 	new_state->scaling = state->scaling;
5728 	new_state->underscan_enable = state->underscan_enable;
5729 	new_state->underscan_hborder = state->underscan_hborder;
5730 	new_state->underscan_vborder = state->underscan_vborder;
5731 	new_state->vcpi_slots = state->vcpi_slots;
5732 	new_state->pbn = state->pbn;
5733 	return &new_state->base;
5734 }
5735 
5736 static int
5737 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5738 {
5739 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5740 		to_amdgpu_dm_connector(connector);
5741 	int r;
5742 
5743 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5744 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5745 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5746 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5747 		if (r)
5748 			return r;
5749 	}
5750 
5751 #if defined(CONFIG_DEBUG_FS)
5752 	connector_debugfs_init(amdgpu_dm_connector);
5753 #endif
5754 
5755 	return 0;
5756 }
5757 
5758 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5759 	.reset = amdgpu_dm_connector_funcs_reset,
5760 	.detect = amdgpu_dm_connector_detect,
5761 	.fill_modes = drm_helper_probe_single_connector_modes,
5762 	.destroy = amdgpu_dm_connector_destroy,
5763 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5764 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5765 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5766 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5767 	.late_register = amdgpu_dm_connector_late_register,
5768 	.early_unregister = amdgpu_dm_connector_unregister
5769 };
5770 
5771 static int get_modes(struct drm_connector *connector)
5772 {
5773 	return amdgpu_dm_connector_get_modes(connector);
5774 }
5775 
5776 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5777 {
5778 	struct dc_sink_init_data init_params = {
5779 			.link = aconnector->dc_link,
5780 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5781 	};
5782 	struct edid *edid;
5783 
5784 	if (!aconnector->base.edid_blob_ptr) {
5785 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5786 				aconnector->base.name);
5787 
5788 		aconnector->base.force = DRM_FORCE_OFF;
5789 		aconnector->base.override_edid = false;
5790 		return;
5791 	}
5792 
5793 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5794 
5795 	aconnector->edid = edid;
5796 
5797 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5798 		aconnector->dc_link,
5799 		(uint8_t *)edid,
5800 		(edid->extensions + 1) * EDID_LENGTH,
5801 		&init_params);
5802 
5803 	if (aconnector->base.force == DRM_FORCE_ON) {
5804 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5805 		aconnector->dc_link->local_sink :
5806 		aconnector->dc_em_sink;
5807 		dc_sink_retain(aconnector->dc_sink);
5808 	}
5809 }
5810 
5811 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5812 {
5813 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5814 
5815 	/*
5816 	 * In case of headless boot with force on for DP managed connector
5817 	 * Those settings have to be != 0 to get initial modeset
5818 	 */
5819 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5820 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5821 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5822 	}
5823 
5824 
5825 	aconnector->base.override_edid = true;
5826 	create_eml_sink(aconnector);
5827 }
5828 
5829 static struct dc_stream_state *
5830 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5831 				const struct drm_display_mode *drm_mode,
5832 				const struct dm_connector_state *dm_state,
5833 				const struct dc_stream_state *old_stream)
5834 {
5835 	struct drm_connector *connector = &aconnector->base;
5836 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5837 	struct dc_stream_state *stream;
5838 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5839 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5840 	enum dc_status dc_result = DC_OK;
5841 
5842 	do {
5843 		stream = create_stream_for_sink(aconnector, drm_mode,
5844 						dm_state, old_stream,
5845 						requested_bpc);
5846 		if (stream == NULL) {
5847 			DRM_ERROR("Failed to create stream for sink!\n");
5848 			break;
5849 		}
5850 
5851 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5852 
5853 		if (dc_result != DC_OK) {
5854 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5855 				      drm_mode->hdisplay,
5856 				      drm_mode->vdisplay,
5857 				      drm_mode->clock,
5858 				      dc_result,
5859 				      dc_status_to_str(dc_result));
5860 
5861 			dc_stream_release(stream);
5862 			stream = NULL;
5863 			requested_bpc -= 2; /* lower bpc to retry validation */
5864 		}
5865 
5866 	} while (stream == NULL && requested_bpc >= 6);
5867 
5868 	return stream;
5869 }
5870 
5871 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5872 				   struct drm_display_mode *mode)
5873 {
5874 	int result = MODE_ERROR;
5875 	struct dc_sink *dc_sink;
5876 	/* TODO: Unhardcode stream count */
5877 	struct dc_stream_state *stream;
5878 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5879 
5880 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5881 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5882 		return result;
5883 
5884 	/*
5885 	 * Only run this the first time mode_valid is called to initilialize
5886 	 * EDID mgmt
5887 	 */
5888 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5889 		!aconnector->dc_em_sink)
5890 		handle_edid_mgmt(aconnector);
5891 
5892 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5893 
5894 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5895 				aconnector->base.force != DRM_FORCE_ON) {
5896 		DRM_ERROR("dc_sink is NULL!\n");
5897 		goto fail;
5898 	}
5899 
5900 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5901 	if (stream) {
5902 		dc_stream_release(stream);
5903 		result = MODE_OK;
5904 	}
5905 
5906 fail:
5907 	/* TODO: error handling*/
5908 	return result;
5909 }
5910 
5911 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5912 				struct dc_info_packet *out)
5913 {
5914 	struct hdmi_drm_infoframe frame;
5915 	unsigned char buf[30]; /* 26 + 4 */
5916 	ssize_t len;
5917 	int ret, i;
5918 
5919 	memset(out, 0, sizeof(*out));
5920 
5921 	if (!state->hdr_output_metadata)
5922 		return 0;
5923 
5924 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5925 	if (ret)
5926 		return ret;
5927 
5928 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5929 	if (len < 0)
5930 		return (int)len;
5931 
5932 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5933 	if (len != 30)
5934 		return -EINVAL;
5935 
5936 	/* Prepare the infopacket for DC. */
5937 	switch (state->connector->connector_type) {
5938 	case DRM_MODE_CONNECTOR_HDMIA:
5939 		out->hb0 = 0x87; /* type */
5940 		out->hb1 = 0x01; /* version */
5941 		out->hb2 = 0x1A; /* length */
5942 		out->sb[0] = buf[3]; /* checksum */
5943 		i = 1;
5944 		break;
5945 
5946 	case DRM_MODE_CONNECTOR_DisplayPort:
5947 	case DRM_MODE_CONNECTOR_eDP:
5948 		out->hb0 = 0x00; /* sdp id, zero */
5949 		out->hb1 = 0x87; /* type */
5950 		out->hb2 = 0x1D; /* payload len - 1 */
5951 		out->hb3 = (0x13 << 2); /* sdp version */
5952 		out->sb[0] = 0x01; /* version */
5953 		out->sb[1] = 0x1A; /* length */
5954 		i = 2;
5955 		break;
5956 
5957 	default:
5958 		return -EINVAL;
5959 	}
5960 
5961 	memcpy(&out->sb[i], &buf[4], 26);
5962 	out->valid = true;
5963 
5964 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5965 		       sizeof(out->sb), false);
5966 
5967 	return 0;
5968 }
5969 
5970 static bool
5971 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5972 			  const struct drm_connector_state *new_state)
5973 {
5974 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5975 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5976 
5977 	if (old_blob != new_blob) {
5978 		if (old_blob && new_blob &&
5979 		    old_blob->length == new_blob->length)
5980 			return memcmp(old_blob->data, new_blob->data,
5981 				      old_blob->length);
5982 
5983 		return true;
5984 	}
5985 
5986 	return false;
5987 }
5988 
5989 static int
5990 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5991 				 struct drm_atomic_state *state)
5992 {
5993 	struct drm_connector_state *new_con_state =
5994 		drm_atomic_get_new_connector_state(state, conn);
5995 	struct drm_connector_state *old_con_state =
5996 		drm_atomic_get_old_connector_state(state, conn);
5997 	struct drm_crtc *crtc = new_con_state->crtc;
5998 	struct drm_crtc_state *new_crtc_state;
5999 	int ret;
6000 
6001 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6002 
6003 	if (!crtc)
6004 		return 0;
6005 
6006 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6007 		struct dc_info_packet hdr_infopacket;
6008 
6009 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6010 		if (ret)
6011 			return ret;
6012 
6013 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6014 		if (IS_ERR(new_crtc_state))
6015 			return PTR_ERR(new_crtc_state);
6016 
6017 		/*
6018 		 * DC considers the stream backends changed if the
6019 		 * static metadata changes. Forcing the modeset also
6020 		 * gives a simple way for userspace to switch from
6021 		 * 8bpc to 10bpc when setting the metadata to enter
6022 		 * or exit HDR.
6023 		 *
6024 		 * Changing the static metadata after it's been
6025 		 * set is permissible, however. So only force a
6026 		 * modeset if we're entering or exiting HDR.
6027 		 */
6028 		new_crtc_state->mode_changed =
6029 			!old_con_state->hdr_output_metadata ||
6030 			!new_con_state->hdr_output_metadata;
6031 	}
6032 
6033 	return 0;
6034 }
6035 
6036 static const struct drm_connector_helper_funcs
6037 amdgpu_dm_connector_helper_funcs = {
6038 	/*
6039 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6040 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6041 	 * are missing after user start lightdm. So we need to renew modes list.
6042 	 * in get_modes call back, not just return the modes count
6043 	 */
6044 	.get_modes = get_modes,
6045 	.mode_valid = amdgpu_dm_connector_mode_valid,
6046 	.atomic_check = amdgpu_dm_connector_atomic_check,
6047 };
6048 
6049 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6050 {
6051 }
6052 
6053 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6054 {
6055 	struct drm_atomic_state *state = new_crtc_state->state;
6056 	struct drm_plane *plane;
6057 	int num_active = 0;
6058 
6059 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6060 		struct drm_plane_state *new_plane_state;
6061 
6062 		/* Cursor planes are "fake". */
6063 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6064 			continue;
6065 
6066 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6067 
6068 		if (!new_plane_state) {
6069 			/*
6070 			 * The plane is enable on the CRTC and hasn't changed
6071 			 * state. This means that it previously passed
6072 			 * validation and is therefore enabled.
6073 			 */
6074 			num_active += 1;
6075 			continue;
6076 		}
6077 
6078 		/* We need a framebuffer to be considered enabled. */
6079 		num_active += (new_plane_state->fb != NULL);
6080 	}
6081 
6082 	return num_active;
6083 }
6084 
6085 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6086 					 struct drm_crtc_state *new_crtc_state)
6087 {
6088 	struct dm_crtc_state *dm_new_crtc_state =
6089 		to_dm_crtc_state(new_crtc_state);
6090 
6091 	dm_new_crtc_state->active_planes = 0;
6092 
6093 	if (!dm_new_crtc_state->stream)
6094 		return;
6095 
6096 	dm_new_crtc_state->active_planes =
6097 		count_crtc_active_planes(new_crtc_state);
6098 }
6099 
6100 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6101 				       struct drm_atomic_state *state)
6102 {
6103 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6104 									  crtc);
6105 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6106 	struct dc *dc = adev->dm.dc;
6107 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6108 	int ret = -EINVAL;
6109 
6110 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6111 
6112 	dm_update_crtc_active_planes(crtc, crtc_state);
6113 
6114 	if (unlikely(!dm_crtc_state->stream &&
6115 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6116 		WARN_ON(1);
6117 		return ret;
6118 	}
6119 
6120 	/*
6121 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6122 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6123 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6124 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6125 	 */
6126 	if (crtc_state->enable &&
6127 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6128 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6129 		return -EINVAL;
6130 	}
6131 
6132 	/* In some use cases, like reset, no stream is attached */
6133 	if (!dm_crtc_state->stream)
6134 		return 0;
6135 
6136 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6137 		return 0;
6138 
6139 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6140 	return ret;
6141 }
6142 
6143 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6144 				      const struct drm_display_mode *mode,
6145 				      struct drm_display_mode *adjusted_mode)
6146 {
6147 	return true;
6148 }
6149 
6150 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6151 	.disable = dm_crtc_helper_disable,
6152 	.atomic_check = dm_crtc_helper_atomic_check,
6153 	.mode_fixup = dm_crtc_helper_mode_fixup,
6154 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6155 };
6156 
6157 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6158 {
6159 
6160 }
6161 
6162 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6163 {
6164 	switch (display_color_depth) {
6165 		case COLOR_DEPTH_666:
6166 			return 6;
6167 		case COLOR_DEPTH_888:
6168 			return 8;
6169 		case COLOR_DEPTH_101010:
6170 			return 10;
6171 		case COLOR_DEPTH_121212:
6172 			return 12;
6173 		case COLOR_DEPTH_141414:
6174 			return 14;
6175 		case COLOR_DEPTH_161616:
6176 			return 16;
6177 		default:
6178 			break;
6179 		}
6180 	return 0;
6181 }
6182 
6183 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6184 					  struct drm_crtc_state *crtc_state,
6185 					  struct drm_connector_state *conn_state)
6186 {
6187 	struct drm_atomic_state *state = crtc_state->state;
6188 	struct drm_connector *connector = conn_state->connector;
6189 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6190 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6191 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6192 	struct drm_dp_mst_topology_mgr *mst_mgr;
6193 	struct drm_dp_mst_port *mst_port;
6194 	enum dc_color_depth color_depth;
6195 	int clock, bpp = 0;
6196 	bool is_y420 = false;
6197 
6198 	if (!aconnector->port || !aconnector->dc_sink)
6199 		return 0;
6200 
6201 	mst_port = aconnector->port;
6202 	mst_mgr = &aconnector->mst_port->mst_mgr;
6203 
6204 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6205 		return 0;
6206 
6207 	if (!state->duplicated) {
6208 		int max_bpc = conn_state->max_requested_bpc;
6209 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6210 				aconnector->force_yuv420_output;
6211 		color_depth = convert_color_depth_from_display_info(connector,
6212 								    is_y420,
6213 								    max_bpc);
6214 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6215 		clock = adjusted_mode->clock;
6216 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6217 	}
6218 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6219 									   mst_mgr,
6220 									   mst_port,
6221 									   dm_new_connector_state->pbn,
6222 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6223 	if (dm_new_connector_state->vcpi_slots < 0) {
6224 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6225 		return dm_new_connector_state->vcpi_slots;
6226 	}
6227 	return 0;
6228 }
6229 
6230 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6231 	.disable = dm_encoder_helper_disable,
6232 	.atomic_check = dm_encoder_helper_atomic_check
6233 };
6234 
6235 #if defined(CONFIG_DRM_AMD_DC_DCN)
6236 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6237 					    struct dc_state *dc_state)
6238 {
6239 	struct dc_stream_state *stream = NULL;
6240 	struct drm_connector *connector;
6241 	struct drm_connector_state *new_con_state, *old_con_state;
6242 	struct amdgpu_dm_connector *aconnector;
6243 	struct dm_connector_state *dm_conn_state;
6244 	int i, j, clock, bpp;
6245 	int vcpi, pbn_div, pbn = 0;
6246 
6247 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6248 
6249 		aconnector = to_amdgpu_dm_connector(connector);
6250 
6251 		if (!aconnector->port)
6252 			continue;
6253 
6254 		if (!new_con_state || !new_con_state->crtc)
6255 			continue;
6256 
6257 		dm_conn_state = to_dm_connector_state(new_con_state);
6258 
6259 		for (j = 0; j < dc_state->stream_count; j++) {
6260 			stream = dc_state->streams[j];
6261 			if (!stream)
6262 				continue;
6263 
6264 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6265 				break;
6266 
6267 			stream = NULL;
6268 		}
6269 
6270 		if (!stream)
6271 			continue;
6272 
6273 		if (stream->timing.flags.DSC != 1) {
6274 			drm_dp_mst_atomic_enable_dsc(state,
6275 						     aconnector->port,
6276 						     dm_conn_state->pbn,
6277 						     0,
6278 						     false);
6279 			continue;
6280 		}
6281 
6282 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6283 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6284 		clock = stream->timing.pix_clk_100hz / 10;
6285 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6286 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6287 						    aconnector->port,
6288 						    pbn, pbn_div,
6289 						    true);
6290 		if (vcpi < 0)
6291 			return vcpi;
6292 
6293 		dm_conn_state->pbn = pbn;
6294 		dm_conn_state->vcpi_slots = vcpi;
6295 	}
6296 	return 0;
6297 }
6298 #endif
6299 
6300 static void dm_drm_plane_reset(struct drm_plane *plane)
6301 {
6302 	struct dm_plane_state *amdgpu_state = NULL;
6303 
6304 	if (plane->state)
6305 		plane->funcs->atomic_destroy_state(plane, plane->state);
6306 
6307 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6308 	WARN_ON(amdgpu_state == NULL);
6309 
6310 	if (amdgpu_state)
6311 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6312 }
6313 
6314 static struct drm_plane_state *
6315 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6316 {
6317 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6318 
6319 	old_dm_plane_state = to_dm_plane_state(plane->state);
6320 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6321 	if (!dm_plane_state)
6322 		return NULL;
6323 
6324 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6325 
6326 	if (old_dm_plane_state->dc_state) {
6327 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6328 		dc_plane_state_retain(dm_plane_state->dc_state);
6329 	}
6330 
6331 	return &dm_plane_state->base;
6332 }
6333 
6334 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6335 				struct drm_plane_state *state)
6336 {
6337 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6338 
6339 	if (dm_plane_state->dc_state)
6340 		dc_plane_state_release(dm_plane_state->dc_state);
6341 
6342 	drm_atomic_helper_plane_destroy_state(plane, state);
6343 }
6344 
6345 static const struct drm_plane_funcs dm_plane_funcs = {
6346 	.update_plane	= drm_atomic_helper_update_plane,
6347 	.disable_plane	= drm_atomic_helper_disable_plane,
6348 	.destroy	= drm_primary_helper_destroy,
6349 	.reset = dm_drm_plane_reset,
6350 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6351 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6352 	.format_mod_supported = dm_plane_format_mod_supported,
6353 };
6354 
6355 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6356 				      struct drm_plane_state *new_state)
6357 {
6358 	struct amdgpu_framebuffer *afb;
6359 	struct drm_gem_object *obj;
6360 	struct amdgpu_device *adev;
6361 	struct amdgpu_bo *rbo;
6362 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6363 	struct list_head list;
6364 	struct ttm_validate_buffer tv;
6365 	struct ww_acquire_ctx ticket;
6366 	uint32_t domain;
6367 	int r;
6368 
6369 	if (!new_state->fb) {
6370 		DRM_DEBUG_DRIVER("No FB bound\n");
6371 		return 0;
6372 	}
6373 
6374 	afb = to_amdgpu_framebuffer(new_state->fb);
6375 	obj = new_state->fb->obj[0];
6376 	rbo = gem_to_amdgpu_bo(obj);
6377 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6378 	INIT_LIST_HEAD(&list);
6379 
6380 	tv.bo = &rbo->tbo;
6381 	tv.num_shared = 1;
6382 	list_add(&tv.head, &list);
6383 
6384 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6385 	if (r) {
6386 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6387 		return r;
6388 	}
6389 
6390 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6391 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6392 	else
6393 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6394 
6395 	r = amdgpu_bo_pin(rbo, domain);
6396 	if (unlikely(r != 0)) {
6397 		if (r != -ERESTARTSYS)
6398 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6399 		ttm_eu_backoff_reservation(&ticket, &list);
6400 		return r;
6401 	}
6402 
6403 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6404 	if (unlikely(r != 0)) {
6405 		amdgpu_bo_unpin(rbo);
6406 		ttm_eu_backoff_reservation(&ticket, &list);
6407 		DRM_ERROR("%p bind failed\n", rbo);
6408 		return r;
6409 	}
6410 
6411 	ttm_eu_backoff_reservation(&ticket, &list);
6412 
6413 	afb->address = amdgpu_bo_gpu_offset(rbo);
6414 
6415 	amdgpu_bo_ref(rbo);
6416 
6417 	/**
6418 	 * We don't do surface updates on planes that have been newly created,
6419 	 * but we also don't have the afb->address during atomic check.
6420 	 *
6421 	 * Fill in buffer attributes depending on the address here, but only on
6422 	 * newly created planes since they're not being used by DC yet and this
6423 	 * won't modify global state.
6424 	 */
6425 	dm_plane_state_old = to_dm_plane_state(plane->state);
6426 	dm_plane_state_new = to_dm_plane_state(new_state);
6427 
6428 	if (dm_plane_state_new->dc_state &&
6429 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6430 		struct dc_plane_state *plane_state =
6431 			dm_plane_state_new->dc_state;
6432 		bool force_disable_dcc = !plane_state->dcc.enable;
6433 
6434 		fill_plane_buffer_attributes(
6435 			adev, afb, plane_state->format, plane_state->rotation,
6436 			afb->tiling_flags,
6437 			&plane_state->tiling_info, &plane_state->plane_size,
6438 			&plane_state->dcc, &plane_state->address,
6439 			afb->tmz_surface, force_disable_dcc);
6440 	}
6441 
6442 	return 0;
6443 }
6444 
6445 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6446 				       struct drm_plane_state *old_state)
6447 {
6448 	struct amdgpu_bo *rbo;
6449 	int r;
6450 
6451 	if (!old_state->fb)
6452 		return;
6453 
6454 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6455 	r = amdgpu_bo_reserve(rbo, false);
6456 	if (unlikely(r)) {
6457 		DRM_ERROR("failed to reserve rbo before unpin\n");
6458 		return;
6459 	}
6460 
6461 	amdgpu_bo_unpin(rbo);
6462 	amdgpu_bo_unreserve(rbo);
6463 	amdgpu_bo_unref(&rbo);
6464 }
6465 
6466 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6467 				       struct drm_crtc_state *new_crtc_state)
6468 {
6469 	struct drm_framebuffer *fb = state->fb;
6470 	int min_downscale, max_upscale;
6471 	int min_scale = 0;
6472 	int max_scale = INT_MAX;
6473 
6474 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6475 	if (fb && state->crtc) {
6476 		/* Validate viewport to cover the case when only the position changes */
6477 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6478 			int viewport_width = state->crtc_w;
6479 			int viewport_height = state->crtc_h;
6480 
6481 			if (state->crtc_x < 0)
6482 				viewport_width += state->crtc_x;
6483 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6484 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6485 
6486 			if (state->crtc_y < 0)
6487 				viewport_height += state->crtc_y;
6488 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6489 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6490 
6491 			/* If completely outside of screen, viewport_width and/or viewport_height will be negative,
6492 			 * which is still OK to satisfy the condition below, thereby also covering these cases
6493 			 * (when plane is completely outside of screen).
6494 			 * x2 for width is because of pipe-split.
6495 			 */
6496 			if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6497 				return -EINVAL;
6498 		}
6499 
6500 		/* Get min/max allowed scaling factors from plane caps. */
6501 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6502 					     &min_downscale, &max_upscale);
6503 		/*
6504 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6505 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6506 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6507 		 */
6508 		min_scale = (1000 << 16) / max_upscale;
6509 		max_scale = (1000 << 16) / min_downscale;
6510 	}
6511 
6512 	return drm_atomic_helper_check_plane_state(
6513 		state, new_crtc_state, min_scale, max_scale, true, true);
6514 }
6515 
6516 static int dm_plane_atomic_check(struct drm_plane *plane,
6517 				 struct drm_atomic_state *state)
6518 {
6519 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6520 										 plane);
6521 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6522 	struct dc *dc = adev->dm.dc;
6523 	struct dm_plane_state *dm_plane_state;
6524 	struct dc_scaling_info scaling_info;
6525 	struct drm_crtc_state *new_crtc_state;
6526 	int ret;
6527 
6528 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6529 
6530 	dm_plane_state = to_dm_plane_state(new_plane_state);
6531 
6532 	if (!dm_plane_state->dc_state)
6533 		return 0;
6534 
6535 	new_crtc_state =
6536 		drm_atomic_get_new_crtc_state(state,
6537 					      new_plane_state->crtc);
6538 	if (!new_crtc_state)
6539 		return -EINVAL;
6540 
6541 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6542 	if (ret)
6543 		return ret;
6544 
6545 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6546 	if (ret)
6547 		return ret;
6548 
6549 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6550 		return 0;
6551 
6552 	return -EINVAL;
6553 }
6554 
6555 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6556 				       struct drm_atomic_state *state)
6557 {
6558 	/* Only support async updates on cursor planes. */
6559 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6560 		return -EINVAL;
6561 
6562 	return 0;
6563 }
6564 
6565 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6566 					 struct drm_atomic_state *state)
6567 {
6568 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6569 									   plane);
6570 	struct drm_plane_state *old_state =
6571 		drm_atomic_get_old_plane_state(state, plane);
6572 
6573 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6574 
6575 	swap(plane->state->fb, new_state->fb);
6576 
6577 	plane->state->src_x = new_state->src_x;
6578 	plane->state->src_y = new_state->src_y;
6579 	plane->state->src_w = new_state->src_w;
6580 	plane->state->src_h = new_state->src_h;
6581 	plane->state->crtc_x = new_state->crtc_x;
6582 	plane->state->crtc_y = new_state->crtc_y;
6583 	plane->state->crtc_w = new_state->crtc_w;
6584 	plane->state->crtc_h = new_state->crtc_h;
6585 
6586 	handle_cursor_update(plane, old_state);
6587 }
6588 
6589 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6590 	.prepare_fb = dm_plane_helper_prepare_fb,
6591 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6592 	.atomic_check = dm_plane_atomic_check,
6593 	.atomic_async_check = dm_plane_atomic_async_check,
6594 	.atomic_async_update = dm_plane_atomic_async_update
6595 };
6596 
6597 /*
6598  * TODO: these are currently initialized to rgb formats only.
6599  * For future use cases we should either initialize them dynamically based on
6600  * plane capabilities, or initialize this array to all formats, so internal drm
6601  * check will succeed, and let DC implement proper check
6602  */
6603 static const uint32_t rgb_formats[] = {
6604 	DRM_FORMAT_XRGB8888,
6605 	DRM_FORMAT_ARGB8888,
6606 	DRM_FORMAT_RGBA8888,
6607 	DRM_FORMAT_XRGB2101010,
6608 	DRM_FORMAT_XBGR2101010,
6609 	DRM_FORMAT_ARGB2101010,
6610 	DRM_FORMAT_ABGR2101010,
6611 	DRM_FORMAT_XBGR8888,
6612 	DRM_FORMAT_ABGR8888,
6613 	DRM_FORMAT_RGB565,
6614 };
6615 
6616 static const uint32_t overlay_formats[] = {
6617 	DRM_FORMAT_XRGB8888,
6618 	DRM_FORMAT_ARGB8888,
6619 	DRM_FORMAT_RGBA8888,
6620 	DRM_FORMAT_XBGR8888,
6621 	DRM_FORMAT_ABGR8888,
6622 	DRM_FORMAT_RGB565
6623 };
6624 
6625 static const u32 cursor_formats[] = {
6626 	DRM_FORMAT_ARGB8888
6627 };
6628 
6629 static int get_plane_formats(const struct drm_plane *plane,
6630 			     const struct dc_plane_cap *plane_cap,
6631 			     uint32_t *formats, int max_formats)
6632 {
6633 	int i, num_formats = 0;
6634 
6635 	/*
6636 	 * TODO: Query support for each group of formats directly from
6637 	 * DC plane caps. This will require adding more formats to the
6638 	 * caps list.
6639 	 */
6640 
6641 	switch (plane->type) {
6642 	case DRM_PLANE_TYPE_PRIMARY:
6643 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6644 			if (num_formats >= max_formats)
6645 				break;
6646 
6647 			formats[num_formats++] = rgb_formats[i];
6648 		}
6649 
6650 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6651 			formats[num_formats++] = DRM_FORMAT_NV12;
6652 		if (plane_cap && plane_cap->pixel_format_support.p010)
6653 			formats[num_formats++] = DRM_FORMAT_P010;
6654 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6655 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6656 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6657 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6658 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6659 		}
6660 		break;
6661 
6662 	case DRM_PLANE_TYPE_OVERLAY:
6663 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6664 			if (num_formats >= max_formats)
6665 				break;
6666 
6667 			formats[num_formats++] = overlay_formats[i];
6668 		}
6669 		break;
6670 
6671 	case DRM_PLANE_TYPE_CURSOR:
6672 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6673 			if (num_formats >= max_formats)
6674 				break;
6675 
6676 			formats[num_formats++] = cursor_formats[i];
6677 		}
6678 		break;
6679 	}
6680 
6681 	return num_formats;
6682 }
6683 
6684 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6685 				struct drm_plane *plane,
6686 				unsigned long possible_crtcs,
6687 				const struct dc_plane_cap *plane_cap)
6688 {
6689 	uint32_t formats[32];
6690 	int num_formats;
6691 	int res = -EPERM;
6692 	unsigned int supported_rotations;
6693 	uint64_t *modifiers = NULL;
6694 
6695 	num_formats = get_plane_formats(plane, plane_cap, formats,
6696 					ARRAY_SIZE(formats));
6697 
6698 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6699 	if (res)
6700 		return res;
6701 
6702 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6703 				       &dm_plane_funcs, formats, num_formats,
6704 				       modifiers, plane->type, NULL);
6705 	kfree(modifiers);
6706 	if (res)
6707 		return res;
6708 
6709 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6710 	    plane_cap && plane_cap->per_pixel_alpha) {
6711 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6712 					  BIT(DRM_MODE_BLEND_PREMULTI);
6713 
6714 		drm_plane_create_alpha_property(plane);
6715 		drm_plane_create_blend_mode_property(plane, blend_caps);
6716 	}
6717 
6718 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6719 	    plane_cap &&
6720 	    (plane_cap->pixel_format_support.nv12 ||
6721 	     plane_cap->pixel_format_support.p010)) {
6722 		/* This only affects YUV formats. */
6723 		drm_plane_create_color_properties(
6724 			plane,
6725 			BIT(DRM_COLOR_YCBCR_BT601) |
6726 			BIT(DRM_COLOR_YCBCR_BT709) |
6727 			BIT(DRM_COLOR_YCBCR_BT2020),
6728 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6729 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6730 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6731 	}
6732 
6733 	supported_rotations =
6734 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6735 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6736 
6737 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6738 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6739 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6740 						   supported_rotations);
6741 
6742 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6743 
6744 	/* Create (reset) the plane state */
6745 	if (plane->funcs->reset)
6746 		plane->funcs->reset(plane);
6747 
6748 	return 0;
6749 }
6750 
6751 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6752 			       struct drm_plane *plane,
6753 			       uint32_t crtc_index)
6754 {
6755 	struct amdgpu_crtc *acrtc = NULL;
6756 	struct drm_plane *cursor_plane;
6757 
6758 	int res = -ENOMEM;
6759 
6760 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6761 	if (!cursor_plane)
6762 		goto fail;
6763 
6764 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6765 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6766 
6767 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6768 	if (!acrtc)
6769 		goto fail;
6770 
6771 	res = drm_crtc_init_with_planes(
6772 			dm->ddev,
6773 			&acrtc->base,
6774 			plane,
6775 			cursor_plane,
6776 			&amdgpu_dm_crtc_funcs, NULL);
6777 
6778 	if (res)
6779 		goto fail;
6780 
6781 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6782 
6783 	/* Create (reset) the plane state */
6784 	if (acrtc->base.funcs->reset)
6785 		acrtc->base.funcs->reset(&acrtc->base);
6786 
6787 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6788 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6789 
6790 	acrtc->crtc_id = crtc_index;
6791 	acrtc->base.enabled = false;
6792 	acrtc->otg_inst = -1;
6793 
6794 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6795 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6796 				   true, MAX_COLOR_LUT_ENTRIES);
6797 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6798 
6799 	return 0;
6800 
6801 fail:
6802 	kfree(acrtc);
6803 	kfree(cursor_plane);
6804 	return res;
6805 }
6806 
6807 
6808 static int to_drm_connector_type(enum signal_type st)
6809 {
6810 	switch (st) {
6811 	case SIGNAL_TYPE_HDMI_TYPE_A:
6812 		return DRM_MODE_CONNECTOR_HDMIA;
6813 	case SIGNAL_TYPE_EDP:
6814 		return DRM_MODE_CONNECTOR_eDP;
6815 	case SIGNAL_TYPE_LVDS:
6816 		return DRM_MODE_CONNECTOR_LVDS;
6817 	case SIGNAL_TYPE_RGB:
6818 		return DRM_MODE_CONNECTOR_VGA;
6819 	case SIGNAL_TYPE_DISPLAY_PORT:
6820 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6821 		return DRM_MODE_CONNECTOR_DisplayPort;
6822 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6823 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6824 		return DRM_MODE_CONNECTOR_DVID;
6825 	case SIGNAL_TYPE_VIRTUAL:
6826 		return DRM_MODE_CONNECTOR_VIRTUAL;
6827 
6828 	default:
6829 		return DRM_MODE_CONNECTOR_Unknown;
6830 	}
6831 }
6832 
6833 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6834 {
6835 	struct drm_encoder *encoder;
6836 
6837 	/* There is only one encoder per connector */
6838 	drm_connector_for_each_possible_encoder(connector, encoder)
6839 		return encoder;
6840 
6841 	return NULL;
6842 }
6843 
6844 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6845 {
6846 	struct drm_encoder *encoder;
6847 	struct amdgpu_encoder *amdgpu_encoder;
6848 
6849 	encoder = amdgpu_dm_connector_to_encoder(connector);
6850 
6851 	if (encoder == NULL)
6852 		return;
6853 
6854 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6855 
6856 	amdgpu_encoder->native_mode.clock = 0;
6857 
6858 	if (!list_empty(&connector->probed_modes)) {
6859 		struct drm_display_mode *preferred_mode = NULL;
6860 
6861 		list_for_each_entry(preferred_mode,
6862 				    &connector->probed_modes,
6863 				    head) {
6864 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6865 				amdgpu_encoder->native_mode = *preferred_mode;
6866 
6867 			break;
6868 		}
6869 
6870 	}
6871 }
6872 
6873 static struct drm_display_mode *
6874 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6875 			     char *name,
6876 			     int hdisplay, int vdisplay)
6877 {
6878 	struct drm_device *dev = encoder->dev;
6879 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6880 	struct drm_display_mode *mode = NULL;
6881 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6882 
6883 	mode = drm_mode_duplicate(dev, native_mode);
6884 
6885 	if (mode == NULL)
6886 		return NULL;
6887 
6888 	mode->hdisplay = hdisplay;
6889 	mode->vdisplay = vdisplay;
6890 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6891 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6892 
6893 	return mode;
6894 
6895 }
6896 
6897 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6898 						 struct drm_connector *connector)
6899 {
6900 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6901 	struct drm_display_mode *mode = NULL;
6902 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6903 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6904 				to_amdgpu_dm_connector(connector);
6905 	int i;
6906 	int n;
6907 	struct mode_size {
6908 		char name[DRM_DISPLAY_MODE_LEN];
6909 		int w;
6910 		int h;
6911 	} common_modes[] = {
6912 		{  "640x480",  640,  480},
6913 		{  "800x600",  800,  600},
6914 		{ "1024x768", 1024,  768},
6915 		{ "1280x720", 1280,  720},
6916 		{ "1280x800", 1280,  800},
6917 		{"1280x1024", 1280, 1024},
6918 		{ "1440x900", 1440,  900},
6919 		{"1680x1050", 1680, 1050},
6920 		{"1600x1200", 1600, 1200},
6921 		{"1920x1080", 1920, 1080},
6922 		{"1920x1200", 1920, 1200}
6923 	};
6924 
6925 	n = ARRAY_SIZE(common_modes);
6926 
6927 	for (i = 0; i < n; i++) {
6928 		struct drm_display_mode *curmode = NULL;
6929 		bool mode_existed = false;
6930 
6931 		if (common_modes[i].w > native_mode->hdisplay ||
6932 		    common_modes[i].h > native_mode->vdisplay ||
6933 		   (common_modes[i].w == native_mode->hdisplay &&
6934 		    common_modes[i].h == native_mode->vdisplay))
6935 			continue;
6936 
6937 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6938 			if (common_modes[i].w == curmode->hdisplay &&
6939 			    common_modes[i].h == curmode->vdisplay) {
6940 				mode_existed = true;
6941 				break;
6942 			}
6943 		}
6944 
6945 		if (mode_existed)
6946 			continue;
6947 
6948 		mode = amdgpu_dm_create_common_mode(encoder,
6949 				common_modes[i].name, common_modes[i].w,
6950 				common_modes[i].h);
6951 		drm_mode_probed_add(connector, mode);
6952 		amdgpu_dm_connector->num_modes++;
6953 	}
6954 }
6955 
6956 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6957 					      struct edid *edid)
6958 {
6959 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6960 			to_amdgpu_dm_connector(connector);
6961 
6962 	if (edid) {
6963 		/* empty probed_modes */
6964 		INIT_LIST_HEAD(&connector->probed_modes);
6965 		amdgpu_dm_connector->num_modes =
6966 				drm_add_edid_modes(connector, edid);
6967 
6968 		/* sorting the probed modes before calling function
6969 		 * amdgpu_dm_get_native_mode() since EDID can have
6970 		 * more than one preferred mode. The modes that are
6971 		 * later in the probed mode list could be of higher
6972 		 * and preferred resolution. For example, 3840x2160
6973 		 * resolution in base EDID preferred timing and 4096x2160
6974 		 * preferred resolution in DID extension block later.
6975 		 */
6976 		drm_mode_sort(&connector->probed_modes);
6977 		amdgpu_dm_get_native_mode(connector);
6978 	} else {
6979 		amdgpu_dm_connector->num_modes = 0;
6980 	}
6981 }
6982 
6983 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6984 {
6985 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6986 			to_amdgpu_dm_connector(connector);
6987 	struct drm_encoder *encoder;
6988 	struct edid *edid = amdgpu_dm_connector->edid;
6989 
6990 	encoder = amdgpu_dm_connector_to_encoder(connector);
6991 
6992 	if (!drm_edid_is_valid(edid)) {
6993 		amdgpu_dm_connector->num_modes =
6994 				drm_add_modes_noedid(connector, 640, 480);
6995 	} else {
6996 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6997 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6998 	}
6999 	amdgpu_dm_fbc_init(connector);
7000 
7001 	return amdgpu_dm_connector->num_modes;
7002 }
7003 
7004 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7005 				     struct amdgpu_dm_connector *aconnector,
7006 				     int connector_type,
7007 				     struct dc_link *link,
7008 				     int link_index)
7009 {
7010 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7011 
7012 	/*
7013 	 * Some of the properties below require access to state, like bpc.
7014 	 * Allocate some default initial connector state with our reset helper.
7015 	 */
7016 	if (aconnector->base.funcs->reset)
7017 		aconnector->base.funcs->reset(&aconnector->base);
7018 
7019 	aconnector->connector_id = link_index;
7020 	aconnector->dc_link = link;
7021 	aconnector->base.interlace_allowed = false;
7022 	aconnector->base.doublescan_allowed = false;
7023 	aconnector->base.stereo_allowed = false;
7024 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7025 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7026 	aconnector->audio_inst = -1;
7027 	mutex_init(&aconnector->hpd_lock);
7028 
7029 	/*
7030 	 * configure support HPD hot plug connector_>polled default value is 0
7031 	 * which means HPD hot plug not supported
7032 	 */
7033 	switch (connector_type) {
7034 	case DRM_MODE_CONNECTOR_HDMIA:
7035 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7036 		aconnector->base.ycbcr_420_allowed =
7037 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7038 		break;
7039 	case DRM_MODE_CONNECTOR_DisplayPort:
7040 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7041 		aconnector->base.ycbcr_420_allowed =
7042 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7043 		break;
7044 	case DRM_MODE_CONNECTOR_DVID:
7045 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7046 		break;
7047 	default:
7048 		break;
7049 	}
7050 
7051 	drm_object_attach_property(&aconnector->base.base,
7052 				dm->ddev->mode_config.scaling_mode_property,
7053 				DRM_MODE_SCALE_NONE);
7054 
7055 	drm_object_attach_property(&aconnector->base.base,
7056 				adev->mode_info.underscan_property,
7057 				UNDERSCAN_OFF);
7058 	drm_object_attach_property(&aconnector->base.base,
7059 				adev->mode_info.underscan_hborder_property,
7060 				0);
7061 	drm_object_attach_property(&aconnector->base.base,
7062 				adev->mode_info.underscan_vborder_property,
7063 				0);
7064 
7065 	if (!aconnector->mst_port)
7066 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7067 
7068 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7069 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7070 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7071 
7072 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7073 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7074 		drm_object_attach_property(&aconnector->base.base,
7075 				adev->mode_info.abm_level_property, 0);
7076 	}
7077 
7078 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7079 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7080 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7081 		drm_object_attach_property(
7082 			&aconnector->base.base,
7083 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7084 
7085 		if (!aconnector->mst_port)
7086 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7087 
7088 #ifdef CONFIG_DRM_AMD_DC_HDCP
7089 		if (adev->dm.hdcp_workqueue)
7090 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7091 #endif
7092 	}
7093 }
7094 
7095 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7096 			      struct i2c_msg *msgs, int num)
7097 {
7098 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7099 	struct ddc_service *ddc_service = i2c->ddc_service;
7100 	struct i2c_command cmd;
7101 	int i;
7102 	int result = -EIO;
7103 
7104 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7105 
7106 	if (!cmd.payloads)
7107 		return result;
7108 
7109 	cmd.number_of_payloads = num;
7110 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7111 	cmd.speed = 100;
7112 
7113 	for (i = 0; i < num; i++) {
7114 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7115 		cmd.payloads[i].address = msgs[i].addr;
7116 		cmd.payloads[i].length = msgs[i].len;
7117 		cmd.payloads[i].data = msgs[i].buf;
7118 	}
7119 
7120 	if (dc_submit_i2c(
7121 			ddc_service->ctx->dc,
7122 			ddc_service->ddc_pin->hw_info.ddc_channel,
7123 			&cmd))
7124 		result = num;
7125 
7126 	kfree(cmd.payloads);
7127 	return result;
7128 }
7129 
7130 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7131 {
7132 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7133 }
7134 
7135 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7136 	.master_xfer = amdgpu_dm_i2c_xfer,
7137 	.functionality = amdgpu_dm_i2c_func,
7138 };
7139 
7140 static struct amdgpu_i2c_adapter *
7141 create_i2c(struct ddc_service *ddc_service,
7142 	   int link_index,
7143 	   int *res)
7144 {
7145 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7146 	struct amdgpu_i2c_adapter *i2c;
7147 
7148 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7149 	if (!i2c)
7150 		return NULL;
7151 	i2c->base.owner = THIS_MODULE;
7152 	i2c->base.class = I2C_CLASS_DDC;
7153 	i2c->base.dev.parent = &adev->pdev->dev;
7154 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7155 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7156 	i2c_set_adapdata(&i2c->base, i2c);
7157 	i2c->ddc_service = ddc_service;
7158 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7159 
7160 	return i2c;
7161 }
7162 
7163 
7164 /*
7165  * Note: this function assumes that dc_link_detect() was called for the
7166  * dc_link which will be represented by this aconnector.
7167  */
7168 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7169 				    struct amdgpu_dm_connector *aconnector,
7170 				    uint32_t link_index,
7171 				    struct amdgpu_encoder *aencoder)
7172 {
7173 	int res = 0;
7174 	int connector_type;
7175 	struct dc *dc = dm->dc;
7176 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7177 	struct amdgpu_i2c_adapter *i2c;
7178 
7179 	link->priv = aconnector;
7180 
7181 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7182 
7183 	i2c = create_i2c(link->ddc, link->link_index, &res);
7184 	if (!i2c) {
7185 		DRM_ERROR("Failed to create i2c adapter data\n");
7186 		return -ENOMEM;
7187 	}
7188 
7189 	aconnector->i2c = i2c;
7190 	res = i2c_add_adapter(&i2c->base);
7191 
7192 	if (res) {
7193 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7194 		goto out_free;
7195 	}
7196 
7197 	connector_type = to_drm_connector_type(link->connector_signal);
7198 
7199 	res = drm_connector_init_with_ddc(
7200 			dm->ddev,
7201 			&aconnector->base,
7202 			&amdgpu_dm_connector_funcs,
7203 			connector_type,
7204 			&i2c->base);
7205 
7206 	if (res) {
7207 		DRM_ERROR("connector_init failed\n");
7208 		aconnector->connector_id = -1;
7209 		goto out_free;
7210 	}
7211 
7212 	drm_connector_helper_add(
7213 			&aconnector->base,
7214 			&amdgpu_dm_connector_helper_funcs);
7215 
7216 	amdgpu_dm_connector_init_helper(
7217 		dm,
7218 		aconnector,
7219 		connector_type,
7220 		link,
7221 		link_index);
7222 
7223 	drm_connector_attach_encoder(
7224 		&aconnector->base, &aencoder->base);
7225 
7226 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7227 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7228 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7229 
7230 out_free:
7231 	if (res) {
7232 		kfree(i2c);
7233 		aconnector->i2c = NULL;
7234 	}
7235 	return res;
7236 }
7237 
7238 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7239 {
7240 	switch (adev->mode_info.num_crtc) {
7241 	case 1:
7242 		return 0x1;
7243 	case 2:
7244 		return 0x3;
7245 	case 3:
7246 		return 0x7;
7247 	case 4:
7248 		return 0xf;
7249 	case 5:
7250 		return 0x1f;
7251 	case 6:
7252 	default:
7253 		return 0x3f;
7254 	}
7255 }
7256 
7257 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7258 				  struct amdgpu_encoder *aencoder,
7259 				  uint32_t link_index)
7260 {
7261 	struct amdgpu_device *adev = drm_to_adev(dev);
7262 
7263 	int res = drm_encoder_init(dev,
7264 				   &aencoder->base,
7265 				   &amdgpu_dm_encoder_funcs,
7266 				   DRM_MODE_ENCODER_TMDS,
7267 				   NULL);
7268 
7269 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7270 
7271 	if (!res)
7272 		aencoder->encoder_id = link_index;
7273 	else
7274 		aencoder->encoder_id = -1;
7275 
7276 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7277 
7278 	return res;
7279 }
7280 
7281 static void manage_dm_interrupts(struct amdgpu_device *adev,
7282 				 struct amdgpu_crtc *acrtc,
7283 				 bool enable)
7284 {
7285 	/*
7286 	 * We have no guarantee that the frontend index maps to the same
7287 	 * backend index - some even map to more than one.
7288 	 *
7289 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7290 	 */
7291 	int irq_type =
7292 		amdgpu_display_crtc_idx_to_irq_type(
7293 			adev,
7294 			acrtc->crtc_id);
7295 
7296 	if (enable) {
7297 		drm_crtc_vblank_on(&acrtc->base);
7298 		amdgpu_irq_get(
7299 			adev,
7300 			&adev->pageflip_irq,
7301 			irq_type);
7302 	} else {
7303 
7304 		amdgpu_irq_put(
7305 			adev,
7306 			&adev->pageflip_irq,
7307 			irq_type);
7308 		drm_crtc_vblank_off(&acrtc->base);
7309 	}
7310 }
7311 
7312 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7313 				      struct amdgpu_crtc *acrtc)
7314 {
7315 	int irq_type =
7316 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7317 
7318 	/**
7319 	 * This reads the current state for the IRQ and force reapplies
7320 	 * the setting to hardware.
7321 	 */
7322 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7323 }
7324 
7325 static bool
7326 is_scaling_state_different(const struct dm_connector_state *dm_state,
7327 			   const struct dm_connector_state *old_dm_state)
7328 {
7329 	if (dm_state->scaling != old_dm_state->scaling)
7330 		return true;
7331 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7332 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7333 			return true;
7334 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7335 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7336 			return true;
7337 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7338 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7339 		return true;
7340 	return false;
7341 }
7342 
7343 #ifdef CONFIG_DRM_AMD_DC_HDCP
7344 static bool is_content_protection_different(struct drm_connector_state *state,
7345 					    const struct drm_connector_state *old_state,
7346 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7347 {
7348 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7349 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7350 
7351 	/* Handle: Type0/1 change */
7352 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7353 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7354 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7355 		return true;
7356 	}
7357 
7358 	/* CP is being re enabled, ignore this
7359 	 *
7360 	 * Handles:	ENABLED -> DESIRED
7361 	 */
7362 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7363 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7364 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7365 		return false;
7366 	}
7367 
7368 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7369 	 *
7370 	 * Handles:	UNDESIRED -> ENABLED
7371 	 */
7372 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7373 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7374 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7375 
7376 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7377 	 * hot-plug, headless s3, dpms
7378 	 *
7379 	 * Handles:	DESIRED -> DESIRED (Special case)
7380 	 */
7381 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7382 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7383 		dm_con_state->update_hdcp = false;
7384 		return true;
7385 	}
7386 
7387 	/*
7388 	 * Handles:	UNDESIRED -> UNDESIRED
7389 	 *		DESIRED -> DESIRED
7390 	 *		ENABLED -> ENABLED
7391 	 */
7392 	if (old_state->content_protection == state->content_protection)
7393 		return false;
7394 
7395 	/*
7396 	 * Handles:	UNDESIRED -> DESIRED
7397 	 *		DESIRED -> UNDESIRED
7398 	 *		ENABLED -> UNDESIRED
7399 	 */
7400 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7401 		return true;
7402 
7403 	/*
7404 	 * Handles:	DESIRED -> ENABLED
7405 	 */
7406 	return false;
7407 }
7408 
7409 #endif
7410 static void remove_stream(struct amdgpu_device *adev,
7411 			  struct amdgpu_crtc *acrtc,
7412 			  struct dc_stream_state *stream)
7413 {
7414 	/* this is the update mode case */
7415 
7416 	acrtc->otg_inst = -1;
7417 	acrtc->enabled = false;
7418 }
7419 
7420 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7421 			       struct dc_cursor_position *position)
7422 {
7423 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7424 	int x, y;
7425 	int xorigin = 0, yorigin = 0;
7426 
7427 	position->enable = false;
7428 	position->x = 0;
7429 	position->y = 0;
7430 
7431 	if (!crtc || !plane->state->fb)
7432 		return 0;
7433 
7434 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7435 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7436 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7437 			  __func__,
7438 			  plane->state->crtc_w,
7439 			  plane->state->crtc_h);
7440 		return -EINVAL;
7441 	}
7442 
7443 	x = plane->state->crtc_x;
7444 	y = plane->state->crtc_y;
7445 
7446 	if (x <= -amdgpu_crtc->max_cursor_width ||
7447 	    y <= -amdgpu_crtc->max_cursor_height)
7448 		return 0;
7449 
7450 	if (x < 0) {
7451 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7452 		x = 0;
7453 	}
7454 	if (y < 0) {
7455 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7456 		y = 0;
7457 	}
7458 	position->enable = true;
7459 	position->translate_by_source = true;
7460 	position->x = x;
7461 	position->y = y;
7462 	position->x_hotspot = xorigin;
7463 	position->y_hotspot = yorigin;
7464 
7465 	return 0;
7466 }
7467 
7468 static void handle_cursor_update(struct drm_plane *plane,
7469 				 struct drm_plane_state *old_plane_state)
7470 {
7471 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7472 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7473 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7474 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7475 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7476 	uint64_t address = afb ? afb->address : 0;
7477 	struct dc_cursor_position position;
7478 	struct dc_cursor_attributes attributes;
7479 	int ret;
7480 
7481 	if (!plane->state->fb && !old_plane_state->fb)
7482 		return;
7483 
7484 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7485 			 __func__,
7486 			 amdgpu_crtc->crtc_id,
7487 			 plane->state->crtc_w,
7488 			 plane->state->crtc_h);
7489 
7490 	ret = get_cursor_position(plane, crtc, &position);
7491 	if (ret)
7492 		return;
7493 
7494 	if (!position.enable) {
7495 		/* turn off cursor */
7496 		if (crtc_state && crtc_state->stream) {
7497 			mutex_lock(&adev->dm.dc_lock);
7498 			dc_stream_set_cursor_position(crtc_state->stream,
7499 						      &position);
7500 			mutex_unlock(&adev->dm.dc_lock);
7501 		}
7502 		return;
7503 	}
7504 
7505 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7506 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7507 
7508 	memset(&attributes, 0, sizeof(attributes));
7509 	attributes.address.high_part = upper_32_bits(address);
7510 	attributes.address.low_part  = lower_32_bits(address);
7511 	attributes.width             = plane->state->crtc_w;
7512 	attributes.height            = plane->state->crtc_h;
7513 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7514 	attributes.rotation_angle    = 0;
7515 	attributes.attribute_flags.value = 0;
7516 
7517 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7518 
7519 	if (crtc_state->stream) {
7520 		mutex_lock(&adev->dm.dc_lock);
7521 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7522 							 &attributes))
7523 			DRM_ERROR("DC failed to set cursor attributes\n");
7524 
7525 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7526 						   &position))
7527 			DRM_ERROR("DC failed to set cursor position\n");
7528 		mutex_unlock(&adev->dm.dc_lock);
7529 	}
7530 }
7531 
7532 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7533 {
7534 
7535 	assert_spin_locked(&acrtc->base.dev->event_lock);
7536 	WARN_ON(acrtc->event);
7537 
7538 	acrtc->event = acrtc->base.state->event;
7539 
7540 	/* Set the flip status */
7541 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7542 
7543 	/* Mark this event as consumed */
7544 	acrtc->base.state->event = NULL;
7545 
7546 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7547 						 acrtc->crtc_id);
7548 }
7549 
7550 static void update_freesync_state_on_stream(
7551 	struct amdgpu_display_manager *dm,
7552 	struct dm_crtc_state *new_crtc_state,
7553 	struct dc_stream_state *new_stream,
7554 	struct dc_plane_state *surface,
7555 	u32 flip_timestamp_in_us)
7556 {
7557 	struct mod_vrr_params vrr_params;
7558 	struct dc_info_packet vrr_infopacket = {0};
7559 	struct amdgpu_device *adev = dm->adev;
7560 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7561 	unsigned long flags;
7562 
7563 	if (!new_stream)
7564 		return;
7565 
7566 	/*
7567 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7568 	 * For now it's sufficient to just guard against these conditions.
7569 	 */
7570 
7571 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7572 		return;
7573 
7574 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7575         vrr_params = acrtc->dm_irq_params.vrr_params;
7576 
7577 	if (surface) {
7578 		mod_freesync_handle_preflip(
7579 			dm->freesync_module,
7580 			surface,
7581 			new_stream,
7582 			flip_timestamp_in_us,
7583 			&vrr_params);
7584 
7585 		if (adev->family < AMDGPU_FAMILY_AI &&
7586 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7587 			mod_freesync_handle_v_update(dm->freesync_module,
7588 						     new_stream, &vrr_params);
7589 
7590 			/* Need to call this before the frame ends. */
7591 			dc_stream_adjust_vmin_vmax(dm->dc,
7592 						   new_crtc_state->stream,
7593 						   &vrr_params.adjust);
7594 		}
7595 	}
7596 
7597 	mod_freesync_build_vrr_infopacket(
7598 		dm->freesync_module,
7599 		new_stream,
7600 		&vrr_params,
7601 		PACKET_TYPE_VRR,
7602 		TRANSFER_FUNC_UNKNOWN,
7603 		&vrr_infopacket);
7604 
7605 	new_crtc_state->freesync_timing_changed |=
7606 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7607 			&vrr_params.adjust,
7608 			sizeof(vrr_params.adjust)) != 0);
7609 
7610 	new_crtc_state->freesync_vrr_info_changed |=
7611 		(memcmp(&new_crtc_state->vrr_infopacket,
7612 			&vrr_infopacket,
7613 			sizeof(vrr_infopacket)) != 0);
7614 
7615 	acrtc->dm_irq_params.vrr_params = vrr_params;
7616 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7617 
7618 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7619 	new_stream->vrr_infopacket = vrr_infopacket;
7620 
7621 	if (new_crtc_state->freesync_vrr_info_changed)
7622 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7623 			      new_crtc_state->base.crtc->base.id,
7624 			      (int)new_crtc_state->base.vrr_enabled,
7625 			      (int)vrr_params.state);
7626 
7627 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7628 }
7629 
7630 static void update_stream_irq_parameters(
7631 	struct amdgpu_display_manager *dm,
7632 	struct dm_crtc_state *new_crtc_state)
7633 {
7634 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7635 	struct mod_vrr_params vrr_params;
7636 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7637 	struct amdgpu_device *adev = dm->adev;
7638 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7639 	unsigned long flags;
7640 
7641 	if (!new_stream)
7642 		return;
7643 
7644 	/*
7645 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7646 	 * For now it's sufficient to just guard against these conditions.
7647 	 */
7648 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7649 		return;
7650 
7651 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7652 	vrr_params = acrtc->dm_irq_params.vrr_params;
7653 
7654 	if (new_crtc_state->vrr_supported &&
7655 	    config.min_refresh_in_uhz &&
7656 	    config.max_refresh_in_uhz) {
7657 		config.state = new_crtc_state->base.vrr_enabled ?
7658 			VRR_STATE_ACTIVE_VARIABLE :
7659 			VRR_STATE_INACTIVE;
7660 	} else {
7661 		config.state = VRR_STATE_UNSUPPORTED;
7662 	}
7663 
7664 	mod_freesync_build_vrr_params(dm->freesync_module,
7665 				      new_stream,
7666 				      &config, &vrr_params);
7667 
7668 	new_crtc_state->freesync_timing_changed |=
7669 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7670 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7671 
7672 	new_crtc_state->freesync_config = config;
7673 	/* Copy state for access from DM IRQ handler */
7674 	acrtc->dm_irq_params.freesync_config = config;
7675 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7676 	acrtc->dm_irq_params.vrr_params = vrr_params;
7677 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7678 }
7679 
7680 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7681 					    struct dm_crtc_state *new_state)
7682 {
7683 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7684 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7685 
7686 	if (!old_vrr_active && new_vrr_active) {
7687 		/* Transition VRR inactive -> active:
7688 		 * While VRR is active, we must not disable vblank irq, as a
7689 		 * reenable after disable would compute bogus vblank/pflip
7690 		 * timestamps if it likely happened inside display front-porch.
7691 		 *
7692 		 * We also need vupdate irq for the actual core vblank handling
7693 		 * at end of vblank.
7694 		 */
7695 		dm_set_vupdate_irq(new_state->base.crtc, true);
7696 		drm_crtc_vblank_get(new_state->base.crtc);
7697 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7698 				 __func__, new_state->base.crtc->base.id);
7699 	} else if (old_vrr_active && !new_vrr_active) {
7700 		/* Transition VRR active -> inactive:
7701 		 * Allow vblank irq disable again for fixed refresh rate.
7702 		 */
7703 		dm_set_vupdate_irq(new_state->base.crtc, false);
7704 		drm_crtc_vblank_put(new_state->base.crtc);
7705 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7706 				 __func__, new_state->base.crtc->base.id);
7707 	}
7708 }
7709 
7710 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7711 {
7712 	struct drm_plane *plane;
7713 	struct drm_plane_state *old_plane_state, *new_plane_state;
7714 	int i;
7715 
7716 	/*
7717 	 * TODO: Make this per-stream so we don't issue redundant updates for
7718 	 * commits with multiple streams.
7719 	 */
7720 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7721 				       new_plane_state, i)
7722 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7723 			handle_cursor_update(plane, old_plane_state);
7724 }
7725 
7726 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7727 				    struct dc_state *dc_state,
7728 				    struct drm_device *dev,
7729 				    struct amdgpu_display_manager *dm,
7730 				    struct drm_crtc *pcrtc,
7731 				    bool wait_for_vblank)
7732 {
7733 	uint32_t i;
7734 	uint64_t timestamp_ns;
7735 	struct drm_plane *plane;
7736 	struct drm_plane_state *old_plane_state, *new_plane_state;
7737 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7738 	struct drm_crtc_state *new_pcrtc_state =
7739 			drm_atomic_get_new_crtc_state(state, pcrtc);
7740 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7741 	struct dm_crtc_state *dm_old_crtc_state =
7742 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7743 	int planes_count = 0, vpos, hpos;
7744 	long r;
7745 	unsigned long flags;
7746 	struct amdgpu_bo *abo;
7747 	uint32_t target_vblank, last_flip_vblank;
7748 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7749 	bool pflip_present = false;
7750 	struct {
7751 		struct dc_surface_update surface_updates[MAX_SURFACES];
7752 		struct dc_plane_info plane_infos[MAX_SURFACES];
7753 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7754 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7755 		struct dc_stream_update stream_update;
7756 	} *bundle;
7757 
7758 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7759 
7760 	if (!bundle) {
7761 		dm_error("Failed to allocate update bundle\n");
7762 		goto cleanup;
7763 	}
7764 
7765 	/*
7766 	 * Disable the cursor first if we're disabling all the planes.
7767 	 * It'll remain on the screen after the planes are re-enabled
7768 	 * if we don't.
7769 	 */
7770 	if (acrtc_state->active_planes == 0)
7771 		amdgpu_dm_commit_cursors(state);
7772 
7773 	/* update planes when needed */
7774 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7775 		struct drm_crtc *crtc = new_plane_state->crtc;
7776 		struct drm_crtc_state *new_crtc_state;
7777 		struct drm_framebuffer *fb = new_plane_state->fb;
7778 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7779 		bool plane_needs_flip;
7780 		struct dc_plane_state *dc_plane;
7781 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7782 
7783 		/* Cursor plane is handled after stream updates */
7784 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7785 			continue;
7786 
7787 		if (!fb || !crtc || pcrtc != crtc)
7788 			continue;
7789 
7790 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7791 		if (!new_crtc_state->active)
7792 			continue;
7793 
7794 		dc_plane = dm_new_plane_state->dc_state;
7795 
7796 		bundle->surface_updates[planes_count].surface = dc_plane;
7797 		if (new_pcrtc_state->color_mgmt_changed) {
7798 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7799 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7800 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7801 		}
7802 
7803 		fill_dc_scaling_info(new_plane_state,
7804 				     &bundle->scaling_infos[planes_count]);
7805 
7806 		bundle->surface_updates[planes_count].scaling_info =
7807 			&bundle->scaling_infos[planes_count];
7808 
7809 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7810 
7811 		pflip_present = pflip_present || plane_needs_flip;
7812 
7813 		if (!plane_needs_flip) {
7814 			planes_count += 1;
7815 			continue;
7816 		}
7817 
7818 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7819 
7820 		/*
7821 		 * Wait for all fences on this FB. Do limited wait to avoid
7822 		 * deadlock during GPU reset when this fence will not signal
7823 		 * but we hold reservation lock for the BO.
7824 		 */
7825 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7826 							false,
7827 							msecs_to_jiffies(5000));
7828 		if (unlikely(r <= 0))
7829 			DRM_ERROR("Waiting for fences timed out!");
7830 
7831 		fill_dc_plane_info_and_addr(
7832 			dm->adev, new_plane_state,
7833 			afb->tiling_flags,
7834 			&bundle->plane_infos[planes_count],
7835 			&bundle->flip_addrs[planes_count].address,
7836 			afb->tmz_surface, false);
7837 
7838 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7839 				 new_plane_state->plane->index,
7840 				 bundle->plane_infos[planes_count].dcc.enable);
7841 
7842 		bundle->surface_updates[planes_count].plane_info =
7843 			&bundle->plane_infos[planes_count];
7844 
7845 		/*
7846 		 * Only allow immediate flips for fast updates that don't
7847 		 * change FB pitch, DCC state, rotation or mirroing.
7848 		 */
7849 		bundle->flip_addrs[planes_count].flip_immediate =
7850 			crtc->state->async_flip &&
7851 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7852 
7853 		timestamp_ns = ktime_get_ns();
7854 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7855 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7856 		bundle->surface_updates[planes_count].surface = dc_plane;
7857 
7858 		if (!bundle->surface_updates[planes_count].surface) {
7859 			DRM_ERROR("No surface for CRTC: id=%d\n",
7860 					acrtc_attach->crtc_id);
7861 			continue;
7862 		}
7863 
7864 		if (plane == pcrtc->primary)
7865 			update_freesync_state_on_stream(
7866 				dm,
7867 				acrtc_state,
7868 				acrtc_state->stream,
7869 				dc_plane,
7870 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7871 
7872 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7873 				 __func__,
7874 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7875 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7876 
7877 		planes_count += 1;
7878 
7879 	}
7880 
7881 	if (pflip_present) {
7882 		if (!vrr_active) {
7883 			/* Use old throttling in non-vrr fixed refresh rate mode
7884 			 * to keep flip scheduling based on target vblank counts
7885 			 * working in a backwards compatible way, e.g., for
7886 			 * clients using the GLX_OML_sync_control extension or
7887 			 * DRI3/Present extension with defined target_msc.
7888 			 */
7889 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7890 		}
7891 		else {
7892 			/* For variable refresh rate mode only:
7893 			 * Get vblank of last completed flip to avoid > 1 vrr
7894 			 * flips per video frame by use of throttling, but allow
7895 			 * flip programming anywhere in the possibly large
7896 			 * variable vrr vblank interval for fine-grained flip
7897 			 * timing control and more opportunity to avoid stutter
7898 			 * on late submission of flips.
7899 			 */
7900 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7901 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7902 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7903 		}
7904 
7905 		target_vblank = last_flip_vblank + wait_for_vblank;
7906 
7907 		/*
7908 		 * Wait until we're out of the vertical blank period before the one
7909 		 * targeted by the flip
7910 		 */
7911 		while ((acrtc_attach->enabled &&
7912 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7913 							    0, &vpos, &hpos, NULL,
7914 							    NULL, &pcrtc->hwmode)
7915 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7916 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7917 			(int)(target_vblank -
7918 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7919 			usleep_range(1000, 1100);
7920 		}
7921 
7922 		/**
7923 		 * Prepare the flip event for the pageflip interrupt to handle.
7924 		 *
7925 		 * This only works in the case where we've already turned on the
7926 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7927 		 * from 0 -> n planes we have to skip a hardware generated event
7928 		 * and rely on sending it from software.
7929 		 */
7930 		if (acrtc_attach->base.state->event &&
7931 		    acrtc_state->active_planes > 0) {
7932 			drm_crtc_vblank_get(pcrtc);
7933 
7934 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7935 
7936 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7937 			prepare_flip_isr(acrtc_attach);
7938 
7939 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7940 		}
7941 
7942 		if (acrtc_state->stream) {
7943 			if (acrtc_state->freesync_vrr_info_changed)
7944 				bundle->stream_update.vrr_infopacket =
7945 					&acrtc_state->stream->vrr_infopacket;
7946 		}
7947 	}
7948 
7949 	/* Update the planes if changed or disable if we don't have any. */
7950 	if ((planes_count || acrtc_state->active_planes == 0) &&
7951 		acrtc_state->stream) {
7952 		bundle->stream_update.stream = acrtc_state->stream;
7953 		if (new_pcrtc_state->mode_changed) {
7954 			bundle->stream_update.src = acrtc_state->stream->src;
7955 			bundle->stream_update.dst = acrtc_state->stream->dst;
7956 		}
7957 
7958 		if (new_pcrtc_state->color_mgmt_changed) {
7959 			/*
7960 			 * TODO: This isn't fully correct since we've actually
7961 			 * already modified the stream in place.
7962 			 */
7963 			bundle->stream_update.gamut_remap =
7964 				&acrtc_state->stream->gamut_remap_matrix;
7965 			bundle->stream_update.output_csc_transform =
7966 				&acrtc_state->stream->csc_color_matrix;
7967 			bundle->stream_update.out_transfer_func =
7968 				acrtc_state->stream->out_transfer_func;
7969 		}
7970 
7971 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7972 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7973 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7974 
7975 		/*
7976 		 * If FreeSync state on the stream has changed then we need to
7977 		 * re-adjust the min/max bounds now that DC doesn't handle this
7978 		 * as part of commit.
7979 		 */
7980 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7981 		    amdgpu_dm_vrr_active(acrtc_state)) {
7982 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7983 			dc_stream_adjust_vmin_vmax(
7984 				dm->dc, acrtc_state->stream,
7985 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7986 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7987 		}
7988 		mutex_lock(&dm->dc_lock);
7989 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7990 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7991 			amdgpu_dm_psr_disable(acrtc_state->stream);
7992 
7993 		dc_commit_updates_for_stream(dm->dc,
7994 						     bundle->surface_updates,
7995 						     planes_count,
7996 						     acrtc_state->stream,
7997 						     &bundle->stream_update,
7998 						     dc_state);
7999 
8000 		/**
8001 		 * Enable or disable the interrupts on the backend.
8002 		 *
8003 		 * Most pipes are put into power gating when unused.
8004 		 *
8005 		 * When power gating is enabled on a pipe we lose the
8006 		 * interrupt enablement state when power gating is disabled.
8007 		 *
8008 		 * So we need to update the IRQ control state in hardware
8009 		 * whenever the pipe turns on (since it could be previously
8010 		 * power gated) or off (since some pipes can't be power gated
8011 		 * on some ASICs).
8012 		 */
8013 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8014 			dm_update_pflip_irq_state(drm_to_adev(dev),
8015 						  acrtc_attach);
8016 
8017 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8018 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8019 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8020 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8021 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8022 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8023 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8024 			amdgpu_dm_psr_enable(acrtc_state->stream);
8025 		}
8026 
8027 		mutex_unlock(&dm->dc_lock);
8028 	}
8029 
8030 	/*
8031 	 * Update cursor state *after* programming all the planes.
8032 	 * This avoids redundant programming in the case where we're going
8033 	 * to be disabling a single plane - those pipes are being disabled.
8034 	 */
8035 	if (acrtc_state->active_planes)
8036 		amdgpu_dm_commit_cursors(state);
8037 
8038 cleanup:
8039 	kfree(bundle);
8040 }
8041 
8042 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8043 				   struct drm_atomic_state *state)
8044 {
8045 	struct amdgpu_device *adev = drm_to_adev(dev);
8046 	struct amdgpu_dm_connector *aconnector;
8047 	struct drm_connector *connector;
8048 	struct drm_connector_state *old_con_state, *new_con_state;
8049 	struct drm_crtc_state *new_crtc_state;
8050 	struct dm_crtc_state *new_dm_crtc_state;
8051 	const struct dc_stream_status *status;
8052 	int i, inst;
8053 
8054 	/* Notify device removals. */
8055 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8056 		if (old_con_state->crtc != new_con_state->crtc) {
8057 			/* CRTC changes require notification. */
8058 			goto notify;
8059 		}
8060 
8061 		if (!new_con_state->crtc)
8062 			continue;
8063 
8064 		new_crtc_state = drm_atomic_get_new_crtc_state(
8065 			state, new_con_state->crtc);
8066 
8067 		if (!new_crtc_state)
8068 			continue;
8069 
8070 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8071 			continue;
8072 
8073 	notify:
8074 		aconnector = to_amdgpu_dm_connector(connector);
8075 
8076 		mutex_lock(&adev->dm.audio_lock);
8077 		inst = aconnector->audio_inst;
8078 		aconnector->audio_inst = -1;
8079 		mutex_unlock(&adev->dm.audio_lock);
8080 
8081 		amdgpu_dm_audio_eld_notify(adev, inst);
8082 	}
8083 
8084 	/* Notify audio device additions. */
8085 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8086 		if (!new_con_state->crtc)
8087 			continue;
8088 
8089 		new_crtc_state = drm_atomic_get_new_crtc_state(
8090 			state, new_con_state->crtc);
8091 
8092 		if (!new_crtc_state)
8093 			continue;
8094 
8095 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8096 			continue;
8097 
8098 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8099 		if (!new_dm_crtc_state->stream)
8100 			continue;
8101 
8102 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8103 		if (!status)
8104 			continue;
8105 
8106 		aconnector = to_amdgpu_dm_connector(connector);
8107 
8108 		mutex_lock(&adev->dm.audio_lock);
8109 		inst = status->audio_inst;
8110 		aconnector->audio_inst = inst;
8111 		mutex_unlock(&adev->dm.audio_lock);
8112 
8113 		amdgpu_dm_audio_eld_notify(adev, inst);
8114 	}
8115 }
8116 
8117 /*
8118  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8119  * @crtc_state: the DRM CRTC state
8120  * @stream_state: the DC stream state.
8121  *
8122  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8123  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8124  */
8125 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8126 						struct dc_stream_state *stream_state)
8127 {
8128 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8129 }
8130 
8131 /**
8132  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8133  * @state: The atomic state to commit
8134  *
8135  * This will tell DC to commit the constructed DC state from atomic_check,
8136  * programming the hardware. Any failures here implies a hardware failure, since
8137  * atomic check should have filtered anything non-kosher.
8138  */
8139 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8140 {
8141 	struct drm_device *dev = state->dev;
8142 	struct amdgpu_device *adev = drm_to_adev(dev);
8143 	struct amdgpu_display_manager *dm = &adev->dm;
8144 	struct dm_atomic_state *dm_state;
8145 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8146 	uint32_t i, j;
8147 	struct drm_crtc *crtc;
8148 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8149 	unsigned long flags;
8150 	bool wait_for_vblank = true;
8151 	struct drm_connector *connector;
8152 	struct drm_connector_state *old_con_state, *new_con_state;
8153 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8154 	int crtc_disable_count = 0;
8155 	bool mode_set_reset_required = false;
8156 
8157 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8158 
8159 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8160 
8161 	dm_state = dm_atomic_get_new_state(state);
8162 	if (dm_state && dm_state->context) {
8163 		dc_state = dm_state->context;
8164 	} else {
8165 		/* No state changes, retain current state. */
8166 		dc_state_temp = dc_create_state(dm->dc);
8167 		ASSERT(dc_state_temp);
8168 		dc_state = dc_state_temp;
8169 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8170 	}
8171 
8172 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8173 				       new_crtc_state, i) {
8174 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8175 
8176 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8177 
8178 		if (old_crtc_state->active &&
8179 		    (!new_crtc_state->active ||
8180 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8181 			manage_dm_interrupts(adev, acrtc, false);
8182 			dc_stream_release(dm_old_crtc_state->stream);
8183 		}
8184 	}
8185 
8186 	drm_atomic_helper_calc_timestamping_constants(state);
8187 
8188 	/* update changed items */
8189 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8190 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8191 
8192 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8193 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8194 
8195 		DRM_DEBUG_DRIVER(
8196 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8197 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8198 			"connectors_changed:%d\n",
8199 			acrtc->crtc_id,
8200 			new_crtc_state->enable,
8201 			new_crtc_state->active,
8202 			new_crtc_state->planes_changed,
8203 			new_crtc_state->mode_changed,
8204 			new_crtc_state->active_changed,
8205 			new_crtc_state->connectors_changed);
8206 
8207 		/* Disable cursor if disabling crtc */
8208 		if (old_crtc_state->active && !new_crtc_state->active) {
8209 			struct dc_cursor_position position;
8210 
8211 			memset(&position, 0, sizeof(position));
8212 			mutex_lock(&dm->dc_lock);
8213 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8214 			mutex_unlock(&dm->dc_lock);
8215 		}
8216 
8217 		/* Copy all transient state flags into dc state */
8218 		if (dm_new_crtc_state->stream) {
8219 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8220 							    dm_new_crtc_state->stream);
8221 		}
8222 
8223 		/* handles headless hotplug case, updating new_state and
8224 		 * aconnector as needed
8225 		 */
8226 
8227 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8228 
8229 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8230 
8231 			if (!dm_new_crtc_state->stream) {
8232 				/*
8233 				 * this could happen because of issues with
8234 				 * userspace notifications delivery.
8235 				 * In this case userspace tries to set mode on
8236 				 * display which is disconnected in fact.
8237 				 * dc_sink is NULL in this case on aconnector.
8238 				 * We expect reset mode will come soon.
8239 				 *
8240 				 * This can also happen when unplug is done
8241 				 * during resume sequence ended
8242 				 *
8243 				 * In this case, we want to pretend we still
8244 				 * have a sink to keep the pipe running so that
8245 				 * hw state is consistent with the sw state
8246 				 */
8247 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8248 						__func__, acrtc->base.base.id);
8249 				continue;
8250 			}
8251 
8252 			if (dm_old_crtc_state->stream)
8253 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8254 
8255 			pm_runtime_get_noresume(dev->dev);
8256 
8257 			acrtc->enabled = true;
8258 			acrtc->hw_mode = new_crtc_state->mode;
8259 			crtc->hwmode = new_crtc_state->mode;
8260 			mode_set_reset_required = true;
8261 		} else if (modereset_required(new_crtc_state)) {
8262 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8263 			/* i.e. reset mode */
8264 			if (dm_old_crtc_state->stream)
8265 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8266 			mode_set_reset_required = true;
8267 		}
8268 	} /* for_each_crtc_in_state() */
8269 
8270 	if (dc_state) {
8271 		/* if there mode set or reset, disable eDP PSR */
8272 		if (mode_set_reset_required)
8273 			amdgpu_dm_psr_disable_all(dm);
8274 
8275 		dm_enable_per_frame_crtc_master_sync(dc_state);
8276 		mutex_lock(&dm->dc_lock);
8277 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8278 		mutex_unlock(&dm->dc_lock);
8279 	}
8280 
8281 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8282 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8283 
8284 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8285 
8286 		if (dm_new_crtc_state->stream != NULL) {
8287 			const struct dc_stream_status *status =
8288 					dc_stream_get_status(dm_new_crtc_state->stream);
8289 
8290 			if (!status)
8291 				status = dc_stream_get_status_from_state(dc_state,
8292 									 dm_new_crtc_state->stream);
8293 			if (!status)
8294 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8295 			else
8296 				acrtc->otg_inst = status->primary_otg_inst;
8297 		}
8298 	}
8299 #ifdef CONFIG_DRM_AMD_DC_HDCP
8300 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8301 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8302 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8303 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8304 
8305 		new_crtc_state = NULL;
8306 
8307 		if (acrtc)
8308 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8309 
8310 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8311 
8312 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8313 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8314 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8315 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8316 			dm_new_con_state->update_hdcp = true;
8317 			continue;
8318 		}
8319 
8320 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8321 			hdcp_update_display(
8322 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8323 				new_con_state->hdcp_content_type,
8324 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8325 													 : false);
8326 	}
8327 #endif
8328 
8329 	/* Handle connector state changes */
8330 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8331 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8332 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8333 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8334 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8335 		struct dc_stream_update stream_update;
8336 		struct dc_info_packet hdr_packet;
8337 		struct dc_stream_status *status = NULL;
8338 		bool abm_changed, hdr_changed, scaling_changed;
8339 
8340 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8341 		memset(&stream_update, 0, sizeof(stream_update));
8342 
8343 		if (acrtc) {
8344 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8345 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8346 		}
8347 
8348 		/* Skip any modesets/resets */
8349 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8350 			continue;
8351 
8352 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8353 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8354 
8355 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8356 							     dm_old_con_state);
8357 
8358 		abm_changed = dm_new_crtc_state->abm_level !=
8359 			      dm_old_crtc_state->abm_level;
8360 
8361 		hdr_changed =
8362 			is_hdr_metadata_different(old_con_state, new_con_state);
8363 
8364 		if (!scaling_changed && !abm_changed && !hdr_changed)
8365 			continue;
8366 
8367 		stream_update.stream = dm_new_crtc_state->stream;
8368 		if (scaling_changed) {
8369 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8370 					dm_new_con_state, dm_new_crtc_state->stream);
8371 
8372 			stream_update.src = dm_new_crtc_state->stream->src;
8373 			stream_update.dst = dm_new_crtc_state->stream->dst;
8374 		}
8375 
8376 		if (abm_changed) {
8377 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8378 
8379 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8380 		}
8381 
8382 		if (hdr_changed) {
8383 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8384 			stream_update.hdr_static_metadata = &hdr_packet;
8385 		}
8386 
8387 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8388 		WARN_ON(!status);
8389 		WARN_ON(!status->plane_count);
8390 
8391 		/*
8392 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8393 		 * Here we create an empty update on each plane.
8394 		 * To fix this, DC should permit updating only stream properties.
8395 		 */
8396 		for (j = 0; j < status->plane_count; j++)
8397 			dummy_updates[j].surface = status->plane_states[0];
8398 
8399 
8400 		mutex_lock(&dm->dc_lock);
8401 		dc_commit_updates_for_stream(dm->dc,
8402 						     dummy_updates,
8403 						     status->plane_count,
8404 						     dm_new_crtc_state->stream,
8405 						     &stream_update,
8406 						     dc_state);
8407 		mutex_unlock(&dm->dc_lock);
8408 	}
8409 
8410 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8411 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8412 				      new_crtc_state, i) {
8413 		if (old_crtc_state->active && !new_crtc_state->active)
8414 			crtc_disable_count++;
8415 
8416 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8417 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8418 
8419 		/* For freesync config update on crtc state and params for irq */
8420 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8421 
8422 		/* Handle vrr on->off / off->on transitions */
8423 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8424 						dm_new_crtc_state);
8425 	}
8426 
8427 	/**
8428 	 * Enable interrupts for CRTCs that are newly enabled or went through
8429 	 * a modeset. It was intentionally deferred until after the front end
8430 	 * state was modified to wait until the OTG was on and so the IRQ
8431 	 * handlers didn't access stale or invalid state.
8432 	 */
8433 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8434 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8435 
8436 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8437 
8438 		if (new_crtc_state->active &&
8439 		    (!old_crtc_state->active ||
8440 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8441 			dc_stream_retain(dm_new_crtc_state->stream);
8442 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8443 			manage_dm_interrupts(adev, acrtc, true);
8444 
8445 #ifdef CONFIG_DEBUG_FS
8446 			/**
8447 			 * Frontend may have changed so reapply the CRC capture
8448 			 * settings for the stream.
8449 			 */
8450 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8451 
8452 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8453 				amdgpu_dm_crtc_configure_crc_source(
8454 					crtc, dm_new_crtc_state,
8455 					dm_new_crtc_state->crc_src);
8456 			}
8457 #endif
8458 		}
8459 	}
8460 
8461 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8462 		if (new_crtc_state->async_flip)
8463 			wait_for_vblank = false;
8464 
8465 	/* update planes when needed per crtc*/
8466 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8467 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8468 
8469 		if (dm_new_crtc_state->stream)
8470 			amdgpu_dm_commit_planes(state, dc_state, dev,
8471 						dm, crtc, wait_for_vblank);
8472 	}
8473 
8474 	/* Update audio instances for each connector. */
8475 	amdgpu_dm_commit_audio(dev, state);
8476 
8477 	/*
8478 	 * send vblank event on all events not handled in flip and
8479 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8480 	 */
8481 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8482 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8483 
8484 		if (new_crtc_state->event)
8485 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8486 
8487 		new_crtc_state->event = NULL;
8488 	}
8489 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8490 
8491 	/* Signal HW programming completion */
8492 	drm_atomic_helper_commit_hw_done(state);
8493 
8494 	if (wait_for_vblank)
8495 		drm_atomic_helper_wait_for_flip_done(dev, state);
8496 
8497 	drm_atomic_helper_cleanup_planes(dev, state);
8498 
8499 	/* return the stolen vga memory back to VRAM */
8500 	if (!adev->mman.keep_stolen_vga_memory)
8501 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8502 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8503 
8504 	/*
8505 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8506 	 * so we can put the GPU into runtime suspend if we're not driving any
8507 	 * displays anymore
8508 	 */
8509 	for (i = 0; i < crtc_disable_count; i++)
8510 		pm_runtime_put_autosuspend(dev->dev);
8511 	pm_runtime_mark_last_busy(dev->dev);
8512 
8513 	if (dc_state_temp)
8514 		dc_release_state(dc_state_temp);
8515 }
8516 
8517 
8518 static int dm_force_atomic_commit(struct drm_connector *connector)
8519 {
8520 	int ret = 0;
8521 	struct drm_device *ddev = connector->dev;
8522 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8523 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8524 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8525 	struct drm_connector_state *conn_state;
8526 	struct drm_crtc_state *crtc_state;
8527 	struct drm_plane_state *plane_state;
8528 
8529 	if (!state)
8530 		return -ENOMEM;
8531 
8532 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8533 
8534 	/* Construct an atomic state to restore previous display setting */
8535 
8536 	/*
8537 	 * Attach connectors to drm_atomic_state
8538 	 */
8539 	conn_state = drm_atomic_get_connector_state(state, connector);
8540 
8541 	ret = PTR_ERR_OR_ZERO(conn_state);
8542 	if (ret)
8543 		goto out;
8544 
8545 	/* Attach crtc to drm_atomic_state*/
8546 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8547 
8548 	ret = PTR_ERR_OR_ZERO(crtc_state);
8549 	if (ret)
8550 		goto out;
8551 
8552 	/* force a restore */
8553 	crtc_state->mode_changed = true;
8554 
8555 	/* Attach plane to drm_atomic_state */
8556 	plane_state = drm_atomic_get_plane_state(state, plane);
8557 
8558 	ret = PTR_ERR_OR_ZERO(plane_state);
8559 	if (ret)
8560 		goto out;
8561 
8562 	/* Call commit internally with the state we just constructed */
8563 	ret = drm_atomic_commit(state);
8564 
8565 out:
8566 	drm_atomic_state_put(state);
8567 	if (ret)
8568 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8569 
8570 	return ret;
8571 }
8572 
8573 /*
8574  * This function handles all cases when set mode does not come upon hotplug.
8575  * This includes when a display is unplugged then plugged back into the
8576  * same port and when running without usermode desktop manager supprot
8577  */
8578 void dm_restore_drm_connector_state(struct drm_device *dev,
8579 				    struct drm_connector *connector)
8580 {
8581 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8582 	struct amdgpu_crtc *disconnected_acrtc;
8583 	struct dm_crtc_state *acrtc_state;
8584 
8585 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8586 		return;
8587 
8588 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8589 	if (!disconnected_acrtc)
8590 		return;
8591 
8592 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8593 	if (!acrtc_state->stream)
8594 		return;
8595 
8596 	/*
8597 	 * If the previous sink is not released and different from the current,
8598 	 * we deduce we are in a state where we can not rely on usermode call
8599 	 * to turn on the display, so we do it here
8600 	 */
8601 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8602 		dm_force_atomic_commit(&aconnector->base);
8603 }
8604 
8605 /*
8606  * Grabs all modesetting locks to serialize against any blocking commits,
8607  * Waits for completion of all non blocking commits.
8608  */
8609 static int do_aquire_global_lock(struct drm_device *dev,
8610 				 struct drm_atomic_state *state)
8611 {
8612 	struct drm_crtc *crtc;
8613 	struct drm_crtc_commit *commit;
8614 	long ret;
8615 
8616 	/*
8617 	 * Adding all modeset locks to aquire_ctx will
8618 	 * ensure that when the framework release it the
8619 	 * extra locks we are locking here will get released to
8620 	 */
8621 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8622 	if (ret)
8623 		return ret;
8624 
8625 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8626 		spin_lock(&crtc->commit_lock);
8627 		commit = list_first_entry_or_null(&crtc->commit_list,
8628 				struct drm_crtc_commit, commit_entry);
8629 		if (commit)
8630 			drm_crtc_commit_get(commit);
8631 		spin_unlock(&crtc->commit_lock);
8632 
8633 		if (!commit)
8634 			continue;
8635 
8636 		/*
8637 		 * Make sure all pending HW programming completed and
8638 		 * page flips done
8639 		 */
8640 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8641 
8642 		if (ret > 0)
8643 			ret = wait_for_completion_interruptible_timeout(
8644 					&commit->flip_done, 10*HZ);
8645 
8646 		if (ret == 0)
8647 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8648 				  "timed out\n", crtc->base.id, crtc->name);
8649 
8650 		drm_crtc_commit_put(commit);
8651 	}
8652 
8653 	return ret < 0 ? ret : 0;
8654 }
8655 
8656 static void get_freesync_config_for_crtc(
8657 	struct dm_crtc_state *new_crtc_state,
8658 	struct dm_connector_state *new_con_state)
8659 {
8660 	struct mod_freesync_config config = {0};
8661 	struct amdgpu_dm_connector *aconnector =
8662 			to_amdgpu_dm_connector(new_con_state->base.connector);
8663 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8664 	int vrefresh = drm_mode_vrefresh(mode);
8665 
8666 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8667 					vrefresh >= aconnector->min_vfreq &&
8668 					vrefresh <= aconnector->max_vfreq;
8669 
8670 	if (new_crtc_state->vrr_supported) {
8671 		new_crtc_state->stream->ignore_msa_timing_param = true;
8672 		config.state = new_crtc_state->base.vrr_enabled ?
8673 				VRR_STATE_ACTIVE_VARIABLE :
8674 				VRR_STATE_INACTIVE;
8675 		config.min_refresh_in_uhz =
8676 				aconnector->min_vfreq * 1000000;
8677 		config.max_refresh_in_uhz =
8678 				aconnector->max_vfreq * 1000000;
8679 		config.vsif_supported = true;
8680 		config.btr = true;
8681 	}
8682 
8683 	new_crtc_state->freesync_config = config;
8684 }
8685 
8686 static void reset_freesync_config_for_crtc(
8687 	struct dm_crtc_state *new_crtc_state)
8688 {
8689 	new_crtc_state->vrr_supported = false;
8690 
8691 	memset(&new_crtc_state->vrr_infopacket, 0,
8692 	       sizeof(new_crtc_state->vrr_infopacket));
8693 }
8694 
8695 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8696 				struct drm_atomic_state *state,
8697 				struct drm_crtc *crtc,
8698 				struct drm_crtc_state *old_crtc_state,
8699 				struct drm_crtc_state *new_crtc_state,
8700 				bool enable,
8701 				bool *lock_and_validation_needed)
8702 {
8703 	struct dm_atomic_state *dm_state = NULL;
8704 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8705 	struct dc_stream_state *new_stream;
8706 	int ret = 0;
8707 
8708 	/*
8709 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8710 	 * update changed items
8711 	 */
8712 	struct amdgpu_crtc *acrtc = NULL;
8713 	struct amdgpu_dm_connector *aconnector = NULL;
8714 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8715 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8716 
8717 	new_stream = NULL;
8718 
8719 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8720 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8721 	acrtc = to_amdgpu_crtc(crtc);
8722 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8723 
8724 	/* TODO This hack should go away */
8725 	if (aconnector && enable) {
8726 		/* Make sure fake sink is created in plug-in scenario */
8727 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8728 							    &aconnector->base);
8729 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8730 							    &aconnector->base);
8731 
8732 		if (IS_ERR(drm_new_conn_state)) {
8733 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8734 			goto fail;
8735 		}
8736 
8737 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8738 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8739 
8740 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8741 			goto skip_modeset;
8742 
8743 		new_stream = create_validate_stream_for_sink(aconnector,
8744 							     &new_crtc_state->mode,
8745 							     dm_new_conn_state,
8746 							     dm_old_crtc_state->stream);
8747 
8748 		/*
8749 		 * we can have no stream on ACTION_SET if a display
8750 		 * was disconnected during S3, in this case it is not an
8751 		 * error, the OS will be updated after detection, and
8752 		 * will do the right thing on next atomic commit
8753 		 */
8754 
8755 		if (!new_stream) {
8756 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8757 					__func__, acrtc->base.base.id);
8758 			ret = -ENOMEM;
8759 			goto fail;
8760 		}
8761 
8762 		/*
8763 		 * TODO: Check VSDB bits to decide whether this should
8764 		 * be enabled or not.
8765 		 */
8766 		new_stream->triggered_crtc_reset.enabled =
8767 			dm->force_timing_sync;
8768 
8769 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8770 
8771 		ret = fill_hdr_info_packet(drm_new_conn_state,
8772 					   &new_stream->hdr_static_metadata);
8773 		if (ret)
8774 			goto fail;
8775 
8776 		/*
8777 		 * If we already removed the old stream from the context
8778 		 * (and set the new stream to NULL) then we can't reuse
8779 		 * the old stream even if the stream and scaling are unchanged.
8780 		 * We'll hit the BUG_ON and black screen.
8781 		 *
8782 		 * TODO: Refactor this function to allow this check to work
8783 		 * in all conditions.
8784 		 */
8785 		if (dm_new_crtc_state->stream &&
8786 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8787 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8788 			new_crtc_state->mode_changed = false;
8789 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8790 					 new_crtc_state->mode_changed);
8791 		}
8792 	}
8793 
8794 	/* mode_changed flag may get updated above, need to check again */
8795 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8796 		goto skip_modeset;
8797 
8798 	DRM_DEBUG_DRIVER(
8799 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8800 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8801 		"connectors_changed:%d\n",
8802 		acrtc->crtc_id,
8803 		new_crtc_state->enable,
8804 		new_crtc_state->active,
8805 		new_crtc_state->planes_changed,
8806 		new_crtc_state->mode_changed,
8807 		new_crtc_state->active_changed,
8808 		new_crtc_state->connectors_changed);
8809 
8810 	/* Remove stream for any changed/disabled CRTC */
8811 	if (!enable) {
8812 
8813 		if (!dm_old_crtc_state->stream)
8814 			goto skip_modeset;
8815 
8816 		ret = dm_atomic_get_state(state, &dm_state);
8817 		if (ret)
8818 			goto fail;
8819 
8820 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8821 				crtc->base.id);
8822 
8823 		/* i.e. reset mode */
8824 		if (dc_remove_stream_from_ctx(
8825 				dm->dc,
8826 				dm_state->context,
8827 				dm_old_crtc_state->stream) != DC_OK) {
8828 			ret = -EINVAL;
8829 			goto fail;
8830 		}
8831 
8832 		dc_stream_release(dm_old_crtc_state->stream);
8833 		dm_new_crtc_state->stream = NULL;
8834 
8835 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8836 
8837 		*lock_and_validation_needed = true;
8838 
8839 	} else {/* Add stream for any updated/enabled CRTC */
8840 		/*
8841 		 * Quick fix to prevent NULL pointer on new_stream when
8842 		 * added MST connectors not found in existing crtc_state in the chained mode
8843 		 * TODO: need to dig out the root cause of that
8844 		 */
8845 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8846 			goto skip_modeset;
8847 
8848 		if (modereset_required(new_crtc_state))
8849 			goto skip_modeset;
8850 
8851 		if (modeset_required(new_crtc_state, new_stream,
8852 				     dm_old_crtc_state->stream)) {
8853 
8854 			WARN_ON(dm_new_crtc_state->stream);
8855 
8856 			ret = dm_atomic_get_state(state, &dm_state);
8857 			if (ret)
8858 				goto fail;
8859 
8860 			dm_new_crtc_state->stream = new_stream;
8861 
8862 			dc_stream_retain(new_stream);
8863 
8864 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8865 						crtc->base.id);
8866 
8867 			if (dc_add_stream_to_ctx(
8868 					dm->dc,
8869 					dm_state->context,
8870 					dm_new_crtc_state->stream) != DC_OK) {
8871 				ret = -EINVAL;
8872 				goto fail;
8873 			}
8874 
8875 			*lock_and_validation_needed = true;
8876 		}
8877 	}
8878 
8879 skip_modeset:
8880 	/* Release extra reference */
8881 	if (new_stream)
8882 		 dc_stream_release(new_stream);
8883 
8884 	/*
8885 	 * We want to do dc stream updates that do not require a
8886 	 * full modeset below.
8887 	 */
8888 	if (!(enable && aconnector && new_crtc_state->active))
8889 		return 0;
8890 	/*
8891 	 * Given above conditions, the dc state cannot be NULL because:
8892 	 * 1. We're in the process of enabling CRTCs (just been added
8893 	 *    to the dc context, or already is on the context)
8894 	 * 2. Has a valid connector attached, and
8895 	 * 3. Is currently active and enabled.
8896 	 * => The dc stream state currently exists.
8897 	 */
8898 	BUG_ON(dm_new_crtc_state->stream == NULL);
8899 
8900 	/* Scaling or underscan settings */
8901 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8902 		update_stream_scaling_settings(
8903 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8904 
8905 	/* ABM settings */
8906 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8907 
8908 	/*
8909 	 * Color management settings. We also update color properties
8910 	 * when a modeset is needed, to ensure it gets reprogrammed.
8911 	 */
8912 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8913 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8914 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8915 		if (ret)
8916 			goto fail;
8917 	}
8918 
8919 	/* Update Freesync settings. */
8920 	get_freesync_config_for_crtc(dm_new_crtc_state,
8921 				     dm_new_conn_state);
8922 
8923 	return ret;
8924 
8925 fail:
8926 	if (new_stream)
8927 		dc_stream_release(new_stream);
8928 	return ret;
8929 }
8930 
8931 static bool should_reset_plane(struct drm_atomic_state *state,
8932 			       struct drm_plane *plane,
8933 			       struct drm_plane_state *old_plane_state,
8934 			       struct drm_plane_state *new_plane_state)
8935 {
8936 	struct drm_plane *other;
8937 	struct drm_plane_state *old_other_state, *new_other_state;
8938 	struct drm_crtc_state *new_crtc_state;
8939 	int i;
8940 
8941 	/*
8942 	 * TODO: Remove this hack once the checks below are sufficient
8943 	 * enough to determine when we need to reset all the planes on
8944 	 * the stream.
8945 	 */
8946 	if (state->allow_modeset)
8947 		return true;
8948 
8949 	/* Exit early if we know that we're adding or removing the plane. */
8950 	if (old_plane_state->crtc != new_plane_state->crtc)
8951 		return true;
8952 
8953 	/* old crtc == new_crtc == NULL, plane not in context. */
8954 	if (!new_plane_state->crtc)
8955 		return false;
8956 
8957 	new_crtc_state =
8958 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8959 
8960 	if (!new_crtc_state)
8961 		return true;
8962 
8963 	/* CRTC Degamma changes currently require us to recreate planes. */
8964 	if (new_crtc_state->color_mgmt_changed)
8965 		return true;
8966 
8967 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8968 		return true;
8969 
8970 	/*
8971 	 * If there are any new primary or overlay planes being added or
8972 	 * removed then the z-order can potentially change. To ensure
8973 	 * correct z-order and pipe acquisition the current DC architecture
8974 	 * requires us to remove and recreate all existing planes.
8975 	 *
8976 	 * TODO: Come up with a more elegant solution for this.
8977 	 */
8978 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8979 		struct amdgpu_framebuffer *old_afb, *new_afb;
8980 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8981 			continue;
8982 
8983 		if (old_other_state->crtc != new_plane_state->crtc &&
8984 		    new_other_state->crtc != new_plane_state->crtc)
8985 			continue;
8986 
8987 		if (old_other_state->crtc != new_other_state->crtc)
8988 			return true;
8989 
8990 		/* Src/dst size and scaling updates. */
8991 		if (old_other_state->src_w != new_other_state->src_w ||
8992 		    old_other_state->src_h != new_other_state->src_h ||
8993 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8994 		    old_other_state->crtc_h != new_other_state->crtc_h)
8995 			return true;
8996 
8997 		/* Rotation / mirroring updates. */
8998 		if (old_other_state->rotation != new_other_state->rotation)
8999 			return true;
9000 
9001 		/* Blending updates. */
9002 		if (old_other_state->pixel_blend_mode !=
9003 		    new_other_state->pixel_blend_mode)
9004 			return true;
9005 
9006 		/* Alpha updates. */
9007 		if (old_other_state->alpha != new_other_state->alpha)
9008 			return true;
9009 
9010 		/* Colorspace changes. */
9011 		if (old_other_state->color_range != new_other_state->color_range ||
9012 		    old_other_state->color_encoding != new_other_state->color_encoding)
9013 			return true;
9014 
9015 		/* Framebuffer checks fall at the end. */
9016 		if (!old_other_state->fb || !new_other_state->fb)
9017 			continue;
9018 
9019 		/* Pixel format changes can require bandwidth updates. */
9020 		if (old_other_state->fb->format != new_other_state->fb->format)
9021 			return true;
9022 
9023 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9024 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9025 
9026 		/* Tiling and DCC changes also require bandwidth updates. */
9027 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9028 		    old_afb->base.modifier != new_afb->base.modifier)
9029 			return true;
9030 	}
9031 
9032 	return false;
9033 }
9034 
9035 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9036 			      struct drm_plane_state *new_plane_state,
9037 			      struct drm_framebuffer *fb)
9038 {
9039 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9040 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9041 	unsigned int pitch;
9042 	bool linear;
9043 
9044 	if (fb->width > new_acrtc->max_cursor_width ||
9045 	    fb->height > new_acrtc->max_cursor_height) {
9046 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9047 				 new_plane_state->fb->width,
9048 				 new_plane_state->fb->height);
9049 		return -EINVAL;
9050 	}
9051 	if (new_plane_state->src_w != fb->width << 16 ||
9052 	    new_plane_state->src_h != fb->height << 16) {
9053 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9054 		return -EINVAL;
9055 	}
9056 
9057 	/* Pitch in pixels */
9058 	pitch = fb->pitches[0] / fb->format->cpp[0];
9059 
9060 	if (fb->width != pitch) {
9061 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9062 				 fb->width, pitch);
9063 		return -EINVAL;
9064 	}
9065 
9066 	switch (pitch) {
9067 	case 64:
9068 	case 128:
9069 	case 256:
9070 		/* FB pitch is supported by cursor plane */
9071 		break;
9072 	default:
9073 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9074 		return -EINVAL;
9075 	}
9076 
9077 	/* Core DRM takes care of checking FB modifiers, so we only need to
9078 	 * check tiling flags when the FB doesn't have a modifier. */
9079 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9080 		if (adev->family < AMDGPU_FAMILY_AI) {
9081 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9082 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9083 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9084 		} else {
9085 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9086 		}
9087 		if (!linear) {
9088 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9089 			return -EINVAL;
9090 		}
9091 	}
9092 
9093 	return 0;
9094 }
9095 
9096 static int dm_update_plane_state(struct dc *dc,
9097 				 struct drm_atomic_state *state,
9098 				 struct drm_plane *plane,
9099 				 struct drm_plane_state *old_plane_state,
9100 				 struct drm_plane_state *new_plane_state,
9101 				 bool enable,
9102 				 bool *lock_and_validation_needed)
9103 {
9104 
9105 	struct dm_atomic_state *dm_state = NULL;
9106 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9107 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9108 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9109 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9110 	struct amdgpu_crtc *new_acrtc;
9111 	bool needs_reset;
9112 	int ret = 0;
9113 
9114 
9115 	new_plane_crtc = new_plane_state->crtc;
9116 	old_plane_crtc = old_plane_state->crtc;
9117 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9118 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9119 
9120 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9121 		if (!enable || !new_plane_crtc ||
9122 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9123 			return 0;
9124 
9125 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9126 
9127 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9128 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9129 			return -EINVAL;
9130 		}
9131 
9132 		if (new_plane_state->fb) {
9133 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9134 						 new_plane_state->fb);
9135 			if (ret)
9136 				return ret;
9137 		}
9138 
9139 		return 0;
9140 	}
9141 
9142 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9143 					 new_plane_state);
9144 
9145 	/* Remove any changed/removed planes */
9146 	if (!enable) {
9147 		if (!needs_reset)
9148 			return 0;
9149 
9150 		if (!old_plane_crtc)
9151 			return 0;
9152 
9153 		old_crtc_state = drm_atomic_get_old_crtc_state(
9154 				state, old_plane_crtc);
9155 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9156 
9157 		if (!dm_old_crtc_state->stream)
9158 			return 0;
9159 
9160 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9161 				plane->base.id, old_plane_crtc->base.id);
9162 
9163 		ret = dm_atomic_get_state(state, &dm_state);
9164 		if (ret)
9165 			return ret;
9166 
9167 		if (!dc_remove_plane_from_context(
9168 				dc,
9169 				dm_old_crtc_state->stream,
9170 				dm_old_plane_state->dc_state,
9171 				dm_state->context)) {
9172 
9173 			return -EINVAL;
9174 		}
9175 
9176 
9177 		dc_plane_state_release(dm_old_plane_state->dc_state);
9178 		dm_new_plane_state->dc_state = NULL;
9179 
9180 		*lock_and_validation_needed = true;
9181 
9182 	} else { /* Add new planes */
9183 		struct dc_plane_state *dc_new_plane_state;
9184 
9185 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9186 			return 0;
9187 
9188 		if (!new_plane_crtc)
9189 			return 0;
9190 
9191 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9192 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9193 
9194 		if (!dm_new_crtc_state->stream)
9195 			return 0;
9196 
9197 		if (!needs_reset)
9198 			return 0;
9199 
9200 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9201 		if (ret)
9202 			return ret;
9203 
9204 		WARN_ON(dm_new_plane_state->dc_state);
9205 
9206 		dc_new_plane_state = dc_create_plane_state(dc);
9207 		if (!dc_new_plane_state)
9208 			return -ENOMEM;
9209 
9210 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9211 				plane->base.id, new_plane_crtc->base.id);
9212 
9213 		ret = fill_dc_plane_attributes(
9214 			drm_to_adev(new_plane_crtc->dev),
9215 			dc_new_plane_state,
9216 			new_plane_state,
9217 			new_crtc_state);
9218 		if (ret) {
9219 			dc_plane_state_release(dc_new_plane_state);
9220 			return ret;
9221 		}
9222 
9223 		ret = dm_atomic_get_state(state, &dm_state);
9224 		if (ret) {
9225 			dc_plane_state_release(dc_new_plane_state);
9226 			return ret;
9227 		}
9228 
9229 		/*
9230 		 * Any atomic check errors that occur after this will
9231 		 * not need a release. The plane state will be attached
9232 		 * to the stream, and therefore part of the atomic
9233 		 * state. It'll be released when the atomic state is
9234 		 * cleaned.
9235 		 */
9236 		if (!dc_add_plane_to_context(
9237 				dc,
9238 				dm_new_crtc_state->stream,
9239 				dc_new_plane_state,
9240 				dm_state->context)) {
9241 
9242 			dc_plane_state_release(dc_new_plane_state);
9243 			return -EINVAL;
9244 		}
9245 
9246 		dm_new_plane_state->dc_state = dc_new_plane_state;
9247 
9248 		/* Tell DC to do a full surface update every time there
9249 		 * is a plane change. Inefficient, but works for now.
9250 		 */
9251 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9252 
9253 		*lock_and_validation_needed = true;
9254 	}
9255 
9256 
9257 	return ret;
9258 }
9259 
9260 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9261 				struct drm_crtc *crtc,
9262 				struct drm_crtc_state *new_crtc_state)
9263 {
9264 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9265 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9266 
9267 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9268 	 * cursor per pipe but it's going to inherit the scaling and
9269 	 * positioning from the underlying pipe. Check the cursor plane's
9270 	 * blending properties match the primary plane's. */
9271 
9272 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9273 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9274 	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9275 		return 0;
9276 	}
9277 
9278 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9279 			 (new_cursor_state->src_w >> 16);
9280 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9281 			 (new_cursor_state->src_h >> 16);
9282 
9283 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9284 			 (new_primary_state->src_w >> 16);
9285 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9286 			 (new_primary_state->src_h >> 16);
9287 
9288 	if (cursor_scale_w != primary_scale_w ||
9289 	    cursor_scale_h != primary_scale_h) {
9290 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9291 		return -EINVAL;
9292 	}
9293 
9294 	return 0;
9295 }
9296 
9297 #if defined(CONFIG_DRM_AMD_DC_DCN)
9298 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9299 {
9300 	struct drm_connector *connector;
9301 	struct drm_connector_state *conn_state;
9302 	struct amdgpu_dm_connector *aconnector = NULL;
9303 	int i;
9304 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9305 		if (conn_state->crtc != crtc)
9306 			continue;
9307 
9308 		aconnector = to_amdgpu_dm_connector(connector);
9309 		if (!aconnector->port || !aconnector->mst_port)
9310 			aconnector = NULL;
9311 		else
9312 			break;
9313 	}
9314 
9315 	if (!aconnector)
9316 		return 0;
9317 
9318 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9319 }
9320 #endif
9321 
9322 /**
9323  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9324  * @dev: The DRM device
9325  * @state: The atomic state to commit
9326  *
9327  * Validate that the given atomic state is programmable by DC into hardware.
9328  * This involves constructing a &struct dc_state reflecting the new hardware
9329  * state we wish to commit, then querying DC to see if it is programmable. It's
9330  * important not to modify the existing DC state. Otherwise, atomic_check
9331  * may unexpectedly commit hardware changes.
9332  *
9333  * When validating the DC state, it's important that the right locks are
9334  * acquired. For full updates case which removes/adds/updates streams on one
9335  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9336  * that any such full update commit will wait for completion of any outstanding
9337  * flip using DRMs synchronization events.
9338  *
9339  * Note that DM adds the affected connectors for all CRTCs in state, when that
9340  * might not seem necessary. This is because DC stream creation requires the
9341  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9342  * be possible but non-trivial - a possible TODO item.
9343  *
9344  * Return: -Error code if validation failed.
9345  */
9346 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9347 				  struct drm_atomic_state *state)
9348 {
9349 	struct amdgpu_device *adev = drm_to_adev(dev);
9350 	struct dm_atomic_state *dm_state = NULL;
9351 	struct dc *dc = adev->dm.dc;
9352 	struct drm_connector *connector;
9353 	struct drm_connector_state *old_con_state, *new_con_state;
9354 	struct drm_crtc *crtc;
9355 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9356 	struct drm_plane *plane;
9357 	struct drm_plane_state *old_plane_state, *new_plane_state;
9358 	enum dc_status status;
9359 	int ret, i;
9360 	bool lock_and_validation_needed = false;
9361 	struct dm_crtc_state *dm_old_crtc_state;
9362 
9363 	trace_amdgpu_dm_atomic_check_begin(state);
9364 
9365 	ret = drm_atomic_helper_check_modeset(dev, state);
9366 	if (ret)
9367 		goto fail;
9368 
9369 	/* Check connector changes */
9370 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9371 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9372 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9373 
9374 		/* Skip connectors that are disabled or part of modeset already. */
9375 		if (!old_con_state->crtc && !new_con_state->crtc)
9376 			continue;
9377 
9378 		if (!new_con_state->crtc)
9379 			continue;
9380 
9381 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9382 		if (IS_ERR(new_crtc_state)) {
9383 			ret = PTR_ERR(new_crtc_state);
9384 			goto fail;
9385 		}
9386 
9387 		if (dm_old_con_state->abm_level !=
9388 		    dm_new_con_state->abm_level)
9389 			new_crtc_state->connectors_changed = true;
9390 	}
9391 
9392 #if defined(CONFIG_DRM_AMD_DC_DCN)
9393 	if (adev->asic_type >= CHIP_NAVI10) {
9394 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9395 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9396 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9397 				if (ret)
9398 					goto fail;
9399 			}
9400 		}
9401 	}
9402 #endif
9403 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9404 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9405 
9406 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9407 		    !new_crtc_state->color_mgmt_changed &&
9408 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9409 			dm_old_crtc_state->dsc_force_changed == false)
9410 			continue;
9411 
9412 		if (!new_crtc_state->enable)
9413 			continue;
9414 
9415 		ret = drm_atomic_add_affected_connectors(state, crtc);
9416 		if (ret)
9417 			return ret;
9418 
9419 		ret = drm_atomic_add_affected_planes(state, crtc);
9420 		if (ret)
9421 			goto fail;
9422 
9423 		if (dm_old_crtc_state->dsc_force_changed)
9424 			new_crtc_state->mode_changed = true;
9425 	}
9426 
9427 	/*
9428 	 * Add all primary and overlay planes on the CRTC to the state
9429 	 * whenever a plane is enabled to maintain correct z-ordering
9430 	 * and to enable fast surface updates.
9431 	 */
9432 	drm_for_each_crtc(crtc, dev) {
9433 		bool modified = false;
9434 
9435 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9436 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9437 				continue;
9438 
9439 			if (new_plane_state->crtc == crtc ||
9440 			    old_plane_state->crtc == crtc) {
9441 				modified = true;
9442 				break;
9443 			}
9444 		}
9445 
9446 		if (!modified)
9447 			continue;
9448 
9449 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9450 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9451 				continue;
9452 
9453 			new_plane_state =
9454 				drm_atomic_get_plane_state(state, plane);
9455 
9456 			if (IS_ERR(new_plane_state)) {
9457 				ret = PTR_ERR(new_plane_state);
9458 				goto fail;
9459 			}
9460 		}
9461 	}
9462 
9463 	/* Remove exiting planes if they are modified */
9464 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9465 		ret = dm_update_plane_state(dc, state, plane,
9466 					    old_plane_state,
9467 					    new_plane_state,
9468 					    false,
9469 					    &lock_and_validation_needed);
9470 		if (ret)
9471 			goto fail;
9472 	}
9473 
9474 	/* Disable all crtcs which require disable */
9475 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9476 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9477 					   old_crtc_state,
9478 					   new_crtc_state,
9479 					   false,
9480 					   &lock_and_validation_needed);
9481 		if (ret)
9482 			goto fail;
9483 	}
9484 
9485 	/* Enable all crtcs which require enable */
9486 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9487 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9488 					   old_crtc_state,
9489 					   new_crtc_state,
9490 					   true,
9491 					   &lock_and_validation_needed);
9492 		if (ret)
9493 			goto fail;
9494 	}
9495 
9496 	/* Add new/modified planes */
9497 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9498 		ret = dm_update_plane_state(dc, state, plane,
9499 					    old_plane_state,
9500 					    new_plane_state,
9501 					    true,
9502 					    &lock_and_validation_needed);
9503 		if (ret)
9504 			goto fail;
9505 	}
9506 
9507 	/* Run this here since we want to validate the streams we created */
9508 	ret = drm_atomic_helper_check_planes(dev, state);
9509 	if (ret)
9510 		goto fail;
9511 
9512 	/* Check cursor planes scaling */
9513 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9514 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9515 		if (ret)
9516 			goto fail;
9517 	}
9518 
9519 	if (state->legacy_cursor_update) {
9520 		/*
9521 		 * This is a fast cursor update coming from the plane update
9522 		 * helper, check if it can be done asynchronously for better
9523 		 * performance.
9524 		 */
9525 		state->async_update =
9526 			!drm_atomic_helper_async_check(dev, state);
9527 
9528 		/*
9529 		 * Skip the remaining global validation if this is an async
9530 		 * update. Cursor updates can be done without affecting
9531 		 * state or bandwidth calcs and this avoids the performance
9532 		 * penalty of locking the private state object and
9533 		 * allocating a new dc_state.
9534 		 */
9535 		if (state->async_update)
9536 			return 0;
9537 	}
9538 
9539 	/* Check scaling and underscan changes*/
9540 	/* TODO Removed scaling changes validation due to inability to commit
9541 	 * new stream into context w\o causing full reset. Need to
9542 	 * decide how to handle.
9543 	 */
9544 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9545 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9546 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9547 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9548 
9549 		/* Skip any modesets/resets */
9550 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9551 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9552 			continue;
9553 
9554 		/* Skip any thing not scale or underscan changes */
9555 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9556 			continue;
9557 
9558 		lock_and_validation_needed = true;
9559 	}
9560 
9561 	/**
9562 	 * Streams and planes are reset when there are changes that affect
9563 	 * bandwidth. Anything that affects bandwidth needs to go through
9564 	 * DC global validation to ensure that the configuration can be applied
9565 	 * to hardware.
9566 	 *
9567 	 * We have to currently stall out here in atomic_check for outstanding
9568 	 * commits to finish in this case because our IRQ handlers reference
9569 	 * DRM state directly - we can end up disabling interrupts too early
9570 	 * if we don't.
9571 	 *
9572 	 * TODO: Remove this stall and drop DM state private objects.
9573 	 */
9574 	if (lock_and_validation_needed) {
9575 		ret = dm_atomic_get_state(state, &dm_state);
9576 		if (ret)
9577 			goto fail;
9578 
9579 		ret = do_aquire_global_lock(dev, state);
9580 		if (ret)
9581 			goto fail;
9582 
9583 #if defined(CONFIG_DRM_AMD_DC_DCN)
9584 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9585 			goto fail;
9586 
9587 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9588 		if (ret)
9589 			goto fail;
9590 #endif
9591 
9592 		/*
9593 		 * Perform validation of MST topology in the state:
9594 		 * We need to perform MST atomic check before calling
9595 		 * dc_validate_global_state(), or there is a chance
9596 		 * to get stuck in an infinite loop and hang eventually.
9597 		 */
9598 		ret = drm_dp_mst_atomic_check(state);
9599 		if (ret)
9600 			goto fail;
9601 		status = dc_validate_global_state(dc, dm_state->context, false);
9602 		if (status != DC_OK) {
9603 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9604 				       dc_status_to_str(status), status);
9605 			ret = -EINVAL;
9606 			goto fail;
9607 		}
9608 	} else {
9609 		/*
9610 		 * The commit is a fast update. Fast updates shouldn't change
9611 		 * the DC context, affect global validation, and can have their
9612 		 * commit work done in parallel with other commits not touching
9613 		 * the same resource. If we have a new DC context as part of
9614 		 * the DM atomic state from validation we need to free it and
9615 		 * retain the existing one instead.
9616 		 *
9617 		 * Furthermore, since the DM atomic state only contains the DC
9618 		 * context and can safely be annulled, we can free the state
9619 		 * and clear the associated private object now to free
9620 		 * some memory and avoid a possible use-after-free later.
9621 		 */
9622 
9623 		for (i = 0; i < state->num_private_objs; i++) {
9624 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9625 
9626 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9627 				int j = state->num_private_objs-1;
9628 
9629 				dm_atomic_destroy_state(obj,
9630 						state->private_objs[i].state);
9631 
9632 				/* If i is not at the end of the array then the
9633 				 * last element needs to be moved to where i was
9634 				 * before the array can safely be truncated.
9635 				 */
9636 				if (i != j)
9637 					state->private_objs[i] =
9638 						state->private_objs[j];
9639 
9640 				state->private_objs[j].ptr = NULL;
9641 				state->private_objs[j].state = NULL;
9642 				state->private_objs[j].old_state = NULL;
9643 				state->private_objs[j].new_state = NULL;
9644 
9645 				state->num_private_objs = j;
9646 				break;
9647 			}
9648 		}
9649 	}
9650 
9651 	/* Store the overall update type for use later in atomic check. */
9652 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9653 		struct dm_crtc_state *dm_new_crtc_state =
9654 			to_dm_crtc_state(new_crtc_state);
9655 
9656 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9657 							 UPDATE_TYPE_FULL :
9658 							 UPDATE_TYPE_FAST;
9659 	}
9660 
9661 	/* Must be success */
9662 	WARN_ON(ret);
9663 
9664 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9665 
9666 	return ret;
9667 
9668 fail:
9669 	if (ret == -EDEADLK)
9670 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9671 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9672 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9673 	else
9674 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9675 
9676 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9677 
9678 	return ret;
9679 }
9680 
9681 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9682 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9683 {
9684 	uint8_t dpcd_data;
9685 	bool capable = false;
9686 
9687 	if (amdgpu_dm_connector->dc_link &&
9688 		dm_helpers_dp_read_dpcd(
9689 				NULL,
9690 				amdgpu_dm_connector->dc_link,
9691 				DP_DOWN_STREAM_PORT_COUNT,
9692 				&dpcd_data,
9693 				sizeof(dpcd_data))) {
9694 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9695 	}
9696 
9697 	return capable;
9698 }
9699 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9700 					struct edid *edid)
9701 {
9702 	int i;
9703 	bool edid_check_required;
9704 	struct detailed_timing *timing;
9705 	struct detailed_non_pixel *data;
9706 	struct detailed_data_monitor_range *range;
9707 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9708 			to_amdgpu_dm_connector(connector);
9709 	struct dm_connector_state *dm_con_state = NULL;
9710 
9711 	struct drm_device *dev = connector->dev;
9712 	struct amdgpu_device *adev = drm_to_adev(dev);
9713 	bool freesync_capable = false;
9714 
9715 	if (!connector->state) {
9716 		DRM_ERROR("%s - Connector has no state", __func__);
9717 		goto update;
9718 	}
9719 
9720 	if (!edid) {
9721 		dm_con_state = to_dm_connector_state(connector->state);
9722 
9723 		amdgpu_dm_connector->min_vfreq = 0;
9724 		amdgpu_dm_connector->max_vfreq = 0;
9725 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9726 
9727 		goto update;
9728 	}
9729 
9730 	dm_con_state = to_dm_connector_state(connector->state);
9731 
9732 	edid_check_required = false;
9733 	if (!amdgpu_dm_connector->dc_sink) {
9734 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9735 		goto update;
9736 	}
9737 	if (!adev->dm.freesync_module)
9738 		goto update;
9739 	/*
9740 	 * if edid non zero restrict freesync only for dp and edp
9741 	 */
9742 	if (edid) {
9743 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9744 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9745 			edid_check_required = is_dp_capable_without_timing_msa(
9746 						adev->dm.dc,
9747 						amdgpu_dm_connector);
9748 		}
9749 	}
9750 	if (edid_check_required == true && (edid->version > 1 ||
9751 	   (edid->version == 1 && edid->revision > 1))) {
9752 		for (i = 0; i < 4; i++) {
9753 
9754 			timing	= &edid->detailed_timings[i];
9755 			data	= &timing->data.other_data;
9756 			range	= &data->data.range;
9757 			/*
9758 			 * Check if monitor has continuous frequency mode
9759 			 */
9760 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9761 				continue;
9762 			/*
9763 			 * Check for flag range limits only. If flag == 1 then
9764 			 * no additional timing information provided.
9765 			 * Default GTF, GTF Secondary curve and CVT are not
9766 			 * supported
9767 			 */
9768 			if (range->flags != 1)
9769 				continue;
9770 
9771 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9772 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9773 			amdgpu_dm_connector->pixel_clock_mhz =
9774 				range->pixel_clock_mhz * 10;
9775 
9776 			connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9777 			connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9778 
9779 			break;
9780 		}
9781 
9782 		if (amdgpu_dm_connector->max_vfreq -
9783 		    amdgpu_dm_connector->min_vfreq > 10) {
9784 
9785 			freesync_capable = true;
9786 		}
9787 	}
9788 
9789 update:
9790 	if (dm_con_state)
9791 		dm_con_state->freesync_capable = freesync_capable;
9792 
9793 	if (connector->vrr_capable_property)
9794 		drm_connector_set_vrr_capable_property(connector,
9795 						       freesync_capable);
9796 }
9797 
9798 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9799 {
9800 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9801 
9802 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9803 		return;
9804 	if (link->type == dc_connection_none)
9805 		return;
9806 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9807 					dpcd_data, sizeof(dpcd_data))) {
9808 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9809 
9810 		if (dpcd_data[0] == 0) {
9811 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9812 			link->psr_settings.psr_feature_enabled = false;
9813 		} else {
9814 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9815 			link->psr_settings.psr_feature_enabled = true;
9816 		}
9817 
9818 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9819 	}
9820 }
9821 
9822 /*
9823  * amdgpu_dm_link_setup_psr() - configure psr link
9824  * @stream: stream state
9825  *
9826  * Return: true if success
9827  */
9828 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9829 {
9830 	struct dc_link *link = NULL;
9831 	struct psr_config psr_config = {0};
9832 	struct psr_context psr_context = {0};
9833 	bool ret = false;
9834 
9835 	if (stream == NULL)
9836 		return false;
9837 
9838 	link = stream->link;
9839 
9840 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9841 
9842 	if (psr_config.psr_version > 0) {
9843 		psr_config.psr_exit_link_training_required = 0x1;
9844 		psr_config.psr_frame_capture_indication_req = 0;
9845 		psr_config.psr_rfb_setup_time = 0x37;
9846 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9847 		psr_config.allow_smu_optimizations = 0x0;
9848 
9849 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9850 
9851 	}
9852 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9853 
9854 	return ret;
9855 }
9856 
9857 /*
9858  * amdgpu_dm_psr_enable() - enable psr f/w
9859  * @stream: stream state
9860  *
9861  * Return: true if success
9862  */
9863 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9864 {
9865 	struct dc_link *link = stream->link;
9866 	unsigned int vsync_rate_hz = 0;
9867 	struct dc_static_screen_params params = {0};
9868 	/* Calculate number of static frames before generating interrupt to
9869 	 * enter PSR.
9870 	 */
9871 	// Init fail safe of 2 frames static
9872 	unsigned int num_frames_static = 2;
9873 
9874 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9875 
9876 	vsync_rate_hz = div64_u64(div64_u64((
9877 			stream->timing.pix_clk_100hz * 100),
9878 			stream->timing.v_total),
9879 			stream->timing.h_total);
9880 
9881 	/* Round up
9882 	 * Calculate number of frames such that at least 30 ms of time has
9883 	 * passed.
9884 	 */
9885 	if (vsync_rate_hz != 0) {
9886 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9887 		num_frames_static = (30000 / frame_time_microsec) + 1;
9888 	}
9889 
9890 	params.triggers.cursor_update = true;
9891 	params.triggers.overlay_update = true;
9892 	params.triggers.surface_update = true;
9893 	params.num_frames = num_frames_static;
9894 
9895 	dc_stream_set_static_screen_params(link->ctx->dc,
9896 					   &stream, 1,
9897 					   &params);
9898 
9899 	return dc_link_set_psr_allow_active(link, true, false, false);
9900 }
9901 
9902 /*
9903  * amdgpu_dm_psr_disable() - disable psr f/w
9904  * @stream:  stream state
9905  *
9906  * Return: true if success
9907  */
9908 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9909 {
9910 
9911 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9912 
9913 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9914 }
9915 
9916 /*
9917  * amdgpu_dm_psr_disable() - disable psr f/w
9918  * if psr is enabled on any stream
9919  *
9920  * Return: true if success
9921  */
9922 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9923 {
9924 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9925 	return dc_set_psr_allow_active(dm->dc, false);
9926 }
9927 
9928 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9929 {
9930 	struct amdgpu_device *adev = drm_to_adev(dev);
9931 	struct dc *dc = adev->dm.dc;
9932 	int i;
9933 
9934 	mutex_lock(&adev->dm.dc_lock);
9935 	if (dc->current_state) {
9936 		for (i = 0; i < dc->current_state->stream_count; ++i)
9937 			dc->current_state->streams[i]
9938 				->triggered_crtc_reset.enabled =
9939 				adev->dm.force_timing_sync;
9940 
9941 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9942 		dc_trigger_sync(dc, dc->current_state);
9943 	}
9944 	mutex_unlock(&adev->dm.dc_lock);
9945 }
9946 
9947 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9948 		       uint32_t value, const char *func_name)
9949 {
9950 #ifdef DM_CHECK_ADDR_0
9951 	if (address == 0) {
9952 		DC_ERR("invalid register write. address = 0");
9953 		return;
9954 	}
9955 #endif
9956 	cgs_write_register(ctx->cgs_device, address, value);
9957 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9958 }
9959 
9960 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9961 			  const char *func_name)
9962 {
9963 	uint32_t value;
9964 #ifdef DM_CHECK_ADDR_0
9965 	if (address == 0) {
9966 		DC_ERR("invalid register read; address = 0\n");
9967 		return 0;
9968 	}
9969 #endif
9970 
9971 	if (ctx->dmub_srv &&
9972 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9973 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9974 		ASSERT(false);
9975 		return 0;
9976 	}
9977 
9978 	value = cgs_read_register(ctx->cgs_device, address);
9979 
9980 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9981 
9982 	return value;
9983 }
9984