1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38 
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50 
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58 
59 #include "ivsrcid/ivsrcid_vislands30.h"
60 
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68 
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136 	switch (link->dpcd_caps.dongle_type) {
137 	case DISPLAY_DONGLE_NONE:
138 		return DRM_MODE_SUBCONNECTOR_Native;
139 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 		return DRM_MODE_SUBCONNECTOR_VGA;
141 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 		return DRM_MODE_SUBCONNECTOR_DVID;
144 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 		return DRM_MODE_SUBCONNECTOR_HDMIA;
147 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148 	default:
149 		return DRM_MODE_SUBCONNECTOR_Unknown;
150 	}
151 }
152 
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155 	struct dc_link *link = aconnector->dc_link;
156 	struct drm_connector *connector = &aconnector->base;
157 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158 
159 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160 		return;
161 
162 	if (aconnector->dc_sink)
163 		subconnector = get_subconnector_type(link);
164 
165 	drm_object_property_set_value(&connector->base,
166 			connector->dev->mode_config.dp_subconnector_property,
167 			subconnector);
168 }
169 
170 /*
171  * initializes drm_device display related structures, based on the information
172  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
173  * drm_encoder, drm_mode_config
174  *
175  * Returns 0 on success
176  */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180 
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 				struct drm_plane *plane,
183 				unsigned long possible_crtcs,
184 				const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 			       struct drm_plane *plane,
187 			       uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
190 				    uint32_t link_index,
191 				    struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 				  struct amdgpu_encoder *aencoder,
194 				  uint32_t link_index);
195 
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197 
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199 
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 				  struct drm_atomic_state *state);
202 
203 static void handle_cursor_update(struct drm_plane *plane,
204 				 struct drm_plane_state *old_plane_state);
205 
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211 
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214 
215 /*
216  * dm_vblank_get_counter
217  *
218  * @brief
219  * Get counter for number of vertical blanks
220  *
221  * @param
222  * struct amdgpu_device *adev - [in] desired amdgpu device
223  * int disp_idx - [in] which CRTC to get the counter from
224  *
225  * @return
226  * Counter for vertical blanks
227  */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
229 {
230 	if (crtc >= adev->mode_info.num_crtc)
231 		return 0;
232 	else {
233 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
234 
235 		if (acrtc->dm_irq_params.stream == NULL) {
236 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237 				  crtc);
238 			return 0;
239 		}
240 
241 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242 	}
243 }
244 
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 				  u32 *vbl, u32 *position)
247 {
248 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
249 
250 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251 		return -EINVAL;
252 	else {
253 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254 
255 		if (acrtc->dm_irq_params.stream ==  NULL) {
256 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
257 				  crtc);
258 			return 0;
259 		}
260 
261 		/*
262 		 * TODO rework base driver to use values directly.
263 		 * for now parse it back into reg-format
264 		 */
265 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
266 					 &v_blank_start,
267 					 &v_blank_end,
268 					 &h_position,
269 					 &v_position);
270 
271 		*position = v_position | (h_position << 16);
272 		*vbl = v_blank_start | (v_blank_end << 16);
273 	}
274 
275 	return 0;
276 }
277 
278 static bool dm_is_idle(void *handle)
279 {
280 	/* XXX todo */
281 	return true;
282 }
283 
284 static int dm_wait_for_idle(void *handle)
285 {
286 	/* XXX todo */
287 	return 0;
288 }
289 
290 static bool dm_check_soft_reset(void *handle)
291 {
292 	return false;
293 }
294 
295 static int dm_soft_reset(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
303 		     int otg_inst)
304 {
305 	struct drm_device *dev = adev_to_drm(adev);
306 	struct drm_crtc *crtc;
307 	struct amdgpu_crtc *amdgpu_crtc;
308 
309 	if (otg_inst == -1) {
310 		WARN_ON(1);
311 		return adev->mode_info.crtcs[0];
312 	}
313 
314 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 		amdgpu_crtc = to_amdgpu_crtc(crtc);
316 
317 		if (amdgpu_crtc->otg_inst == otg_inst)
318 			return amdgpu_crtc;
319 	}
320 
321 	return NULL;
322 }
323 
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
325 {
326 	return acrtc->dm_irq_params.freesync_config.state ==
327 		       VRR_STATE_ACTIVE_VARIABLE ||
328 	       acrtc->dm_irq_params.freesync_config.state ==
329 		       VRR_STATE_ACTIVE_FIXED;
330 }
331 
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
333 {
334 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 }
337 
338 /**
339  * dm_pflip_high_irq() - Handle pageflip interrupt
340  * @interrupt_params: ignored
341  *
342  * Handles the pageflip interrupt by notifying all interested parties
343  * that the pageflip has been completed.
344  */
345 static void dm_pflip_high_irq(void *interrupt_params)
346 {
347 	struct amdgpu_crtc *amdgpu_crtc;
348 	struct common_irq_params *irq_params = interrupt_params;
349 	struct amdgpu_device *adev = irq_params->adev;
350 	unsigned long flags;
351 	struct drm_pending_vblank_event *e;
352 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
353 	bool vrr_active;
354 
355 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
356 
357 	/* IRQ could occur when in initial stage */
358 	/* TODO work and BO cleanup */
359 	if (amdgpu_crtc == NULL) {
360 		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
361 		return;
362 	}
363 
364 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
365 
366 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
367 		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
368 						 amdgpu_crtc->pflip_status,
369 						 AMDGPU_FLIP_SUBMITTED,
370 						 amdgpu_crtc->crtc_id,
371 						 amdgpu_crtc);
372 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
373 		return;
374 	}
375 
376 	/* page flip completed. */
377 	e = amdgpu_crtc->event;
378 	amdgpu_crtc->event = NULL;
379 
380 	if (!e)
381 		WARN_ON(1);
382 
383 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
384 
385 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
386 	if (!vrr_active ||
387 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 				      &v_blank_end, &hpos, &vpos) ||
389 	    (vpos < v_blank_start)) {
390 		/* Update to correct count and vblank timestamp if racing with
391 		 * vblank irq. This also updates to the correct vblank timestamp
392 		 * even in VRR mode, as scanout is past the front-porch atm.
393 		 */
394 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
395 
396 		/* Wake up userspace by sending the pageflip event with proper
397 		 * count and timestamp of vblank of flip completion.
398 		 */
399 		if (e) {
400 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
401 
402 			/* Event sent, so done with vblank for this flip */
403 			drm_crtc_vblank_put(&amdgpu_crtc->base);
404 		}
405 	} else if (e) {
406 		/* VRR active and inside front-porch: vblank count and
407 		 * timestamp for pageflip event will only be up to date after
408 		 * drm_crtc_handle_vblank() has been executed from late vblank
409 		 * irq handler after start of back-porch (vline 0). We queue the
410 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 		 * updated timestamp and count, once it runs after us.
412 		 *
413 		 * We need to open-code this instead of using the helper
414 		 * drm_crtc_arm_vblank_event(), as that helper would
415 		 * call drm_crtc_accurate_vblank_count(), which we must
416 		 * not call in VRR mode while we are in front-porch!
417 		 */
418 
419 		/* sequence will be replaced by real count during send-out. */
420 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 		e->pipe = amdgpu_crtc->crtc_id;
422 
423 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
424 		e = NULL;
425 	}
426 
427 	/* Keep track of vblank of this flip for flip throttling. We use the
428 	 * cooked hw counter, as that one incremented at start of this vblank
429 	 * of pageflip completion, so last_flip_vblank is the forbidden count
430 	 * for queueing new pageflips if vsync + VRR is enabled.
431 	 */
432 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
434 
435 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
437 
438 	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 			 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 			 vrr_active, (int) !e);
441 }
442 
443 static void dm_vupdate_high_irq(void *interrupt_params)
444 {
445 	struct common_irq_params *irq_params = interrupt_params;
446 	struct amdgpu_device *adev = irq_params->adev;
447 	struct amdgpu_crtc *acrtc;
448 	unsigned long flags;
449 	int vrr_active;
450 
451 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
452 
453 	if (acrtc) {
454 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
455 
456 		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
457 			      acrtc->crtc_id,
458 			      vrr_active);
459 
460 		/* Core vblank handling is done here after end of front-porch in
461 		 * vrr mode, as vblank timestamping will give valid results
462 		 * while now done after front-porch. This will also deliver
463 		 * page-flip completion events that have been queued to us
464 		 * if a pageflip happened inside front-porch.
465 		 */
466 		if (vrr_active) {
467 			drm_crtc_handle_vblank(&acrtc->base);
468 
469 			/* BTR processing for pre-DCE12 ASICs */
470 			if (acrtc->dm_irq_params.stream &&
471 			    adev->family < AMDGPU_FAMILY_AI) {
472 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 				mod_freesync_handle_v_update(
474 				    adev->dm.freesync_module,
475 				    acrtc->dm_irq_params.stream,
476 				    &acrtc->dm_irq_params.vrr_params);
477 
478 				dc_stream_adjust_vmin_vmax(
479 				    adev->dm.dc,
480 				    acrtc->dm_irq_params.stream,
481 				    &acrtc->dm_irq_params.vrr_params.adjust);
482 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
483 			}
484 		}
485 	}
486 }
487 
488 /**
489  * dm_crtc_high_irq() - Handles CRTC interrupt
490  * @interrupt_params: used for determining the CRTC instance
491  *
492  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
493  * event handler.
494  */
495 static void dm_crtc_high_irq(void *interrupt_params)
496 {
497 	struct common_irq_params *irq_params = interrupt_params;
498 	struct amdgpu_device *adev = irq_params->adev;
499 	struct amdgpu_crtc *acrtc;
500 	unsigned long flags;
501 	int vrr_active;
502 
503 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
504 	if (!acrtc)
505 		return;
506 
507 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
508 
509 	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 		      vrr_active, acrtc->dm_irq_params.active_planes);
511 
512 	/**
513 	 * Core vblank handling at start of front-porch is only possible
514 	 * in non-vrr mode, as only there vblank timestamping will give
515 	 * valid results while done in front-porch. Otherwise defer it
516 	 * to dm_vupdate_high_irq after end of front-porch.
517 	 */
518 	if (!vrr_active)
519 		drm_crtc_handle_vblank(&acrtc->base);
520 
521 	/**
522 	 * Following stuff must happen at start of vblank, for crc
523 	 * computation and below-the-range btr support in vrr mode.
524 	 */
525 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
526 
527 	/* BTR updates need to happen before VUPDATE on Vega and above. */
528 	if (adev->family < AMDGPU_FAMILY_AI)
529 		return;
530 
531 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
532 
533 	if (acrtc->dm_irq_params.stream &&
534 	    acrtc->dm_irq_params.vrr_params.supported &&
535 	    acrtc->dm_irq_params.freesync_config.state ==
536 		    VRR_STATE_ACTIVE_VARIABLE) {
537 		mod_freesync_handle_v_update(adev->dm.freesync_module,
538 					     acrtc->dm_irq_params.stream,
539 					     &acrtc->dm_irq_params.vrr_params);
540 
541 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 					   &acrtc->dm_irq_params.vrr_params.adjust);
543 	}
544 
545 	/*
546 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
547 	 * In that case, pageflip completion interrupts won't fire and pageflip
548 	 * completion events won't get delivered. Prevent this by sending
549 	 * pending pageflip events from here if a flip is still pending.
550 	 *
551 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 	 * avoid race conditions between flip programming and completion,
553 	 * which could cause too early flip completion events.
554 	 */
555 	if (adev->family >= AMDGPU_FAMILY_RV &&
556 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 	    acrtc->dm_irq_params.active_planes == 0) {
558 		if (acrtc->event) {
559 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
560 			acrtc->event = NULL;
561 			drm_crtc_vblank_put(&acrtc->base);
562 		}
563 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
564 	}
565 
566 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
567 }
568 
569 static int dm_set_clockgating_state(void *handle,
570 		  enum amd_clockgating_state state)
571 {
572 	return 0;
573 }
574 
575 static int dm_set_powergating_state(void *handle,
576 		  enum amd_powergating_state state)
577 {
578 	return 0;
579 }
580 
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
583 
584 /* Allocate memory for FBC compressed data  */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
586 {
587 	struct drm_device *dev = connector->dev;
588 	struct amdgpu_device *adev = drm_to_adev(dev);
589 	struct dm_compressor_info *compressor = &adev->dm.compressor;
590 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 	struct drm_display_mode *mode;
592 	unsigned long max_size = 0;
593 
594 	if (adev->dm.dc->fbc_compressor == NULL)
595 		return;
596 
597 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
598 		return;
599 
600 	if (compressor->bo_ptr)
601 		return;
602 
603 
604 	list_for_each_entry(mode, &connector->modes, head) {
605 		if (max_size < mode->htotal * mode->vtotal)
606 			max_size = mode->htotal * mode->vtotal;
607 	}
608 
609 	if (max_size) {
610 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612 			    &compressor->gpu_addr, &compressor->cpu_addr);
613 
614 		if (r)
615 			DRM_ERROR("DM: Failed to initialize FBC\n");
616 		else {
617 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
619 		}
620 
621 	}
622 
623 }
624 
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 					  int pipe, bool *enabled,
627 					  unsigned char *buf, int max_bytes)
628 {
629 	struct drm_device *dev = dev_get_drvdata(kdev);
630 	struct amdgpu_device *adev = drm_to_adev(dev);
631 	struct drm_connector *connector;
632 	struct drm_connector_list_iter conn_iter;
633 	struct amdgpu_dm_connector *aconnector;
634 	int ret = 0;
635 
636 	*enabled = false;
637 
638 	mutex_lock(&adev->dm.audio_lock);
639 
640 	drm_connector_list_iter_begin(dev, &conn_iter);
641 	drm_for_each_connector_iter(connector, &conn_iter) {
642 		aconnector = to_amdgpu_dm_connector(connector);
643 		if (aconnector->audio_inst != port)
644 			continue;
645 
646 		*enabled = true;
647 		ret = drm_eld_size(connector->eld);
648 		memcpy(buf, connector->eld, min(max_bytes, ret));
649 
650 		break;
651 	}
652 	drm_connector_list_iter_end(&conn_iter);
653 
654 	mutex_unlock(&adev->dm.audio_lock);
655 
656 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
657 
658 	return ret;
659 }
660 
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 	.get_eld = amdgpu_dm_audio_component_get_eld,
663 };
664 
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 				       struct device *hda_kdev, void *data)
667 {
668 	struct drm_device *dev = dev_get_drvdata(kdev);
669 	struct amdgpu_device *adev = drm_to_adev(dev);
670 	struct drm_audio_component *acomp = data;
671 
672 	acomp->ops = &amdgpu_dm_audio_component_ops;
673 	acomp->dev = kdev;
674 	adev->dm.audio_component = acomp;
675 
676 	return 0;
677 }
678 
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 					  struct device *hda_kdev, void *data)
681 {
682 	struct drm_device *dev = dev_get_drvdata(kdev);
683 	struct amdgpu_device *adev = drm_to_adev(dev);
684 	struct drm_audio_component *acomp = data;
685 
686 	acomp->ops = NULL;
687 	acomp->dev = NULL;
688 	adev->dm.audio_component = NULL;
689 }
690 
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 	.bind	= amdgpu_dm_audio_component_bind,
693 	.unbind	= amdgpu_dm_audio_component_unbind,
694 };
695 
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
697 {
698 	int i, ret;
699 
700 	if (!amdgpu_audio)
701 		return 0;
702 
703 	adev->mode_info.audio.enabled = true;
704 
705 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
706 
707 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 		adev->mode_info.audio.pin[i].channels = -1;
709 		adev->mode_info.audio.pin[i].rate = -1;
710 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 		adev->mode_info.audio.pin[i].status_bits = 0;
712 		adev->mode_info.audio.pin[i].category_code = 0;
713 		adev->mode_info.audio.pin[i].connected = false;
714 		adev->mode_info.audio.pin[i].id =
715 			adev->dm.dc->res_pool->audios[i]->inst;
716 		adev->mode_info.audio.pin[i].offset = 0;
717 	}
718 
719 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720 	if (ret < 0)
721 		return ret;
722 
723 	adev->dm.audio_registered = true;
724 
725 	return 0;
726 }
727 
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
729 {
730 	if (!amdgpu_audio)
731 		return;
732 
733 	if (!adev->mode_info.audio.enabled)
734 		return;
735 
736 	if (adev->dm.audio_registered) {
737 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 		adev->dm.audio_registered = false;
739 	}
740 
741 	/* TODO: Disable audio? */
742 
743 	adev->mode_info.audio.enabled = false;
744 }
745 
746 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
747 {
748 	struct drm_audio_component *acomp = adev->dm.audio_component;
749 
750 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
752 
753 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
754 						 pin, -1);
755 	}
756 }
757 
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
759 {
760 	const struct dmcub_firmware_header_v1_0 *hdr;
761 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 	struct abm *abm = adev->dm.dc->res_pool->abm;
766 	struct dmub_srv_hw_params hw_params;
767 	enum dmub_status status;
768 	const unsigned char *fw_inst_const, *fw_bss_data;
769 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
770 	bool has_hw_support;
771 
772 	if (!dmub_srv)
773 		/* DMUB isn't supported on the ASIC. */
774 		return 0;
775 
776 	if (!fb_info) {
777 		DRM_ERROR("No framebuffer info for DMUB service.\n");
778 		return -EINVAL;
779 	}
780 
781 	if (!dmub_fw) {
782 		/* Firmware required for DMUB support. */
783 		DRM_ERROR("No firmware provided for DMUB.\n");
784 		return -EINVAL;
785 	}
786 
787 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 	if (status != DMUB_STATUS_OK) {
789 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
790 		return -EINVAL;
791 	}
792 
793 	if (!has_hw_support) {
794 		DRM_INFO("DMUB unsupported on ASIC\n");
795 		return 0;
796 	}
797 
798 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
799 
800 	fw_inst_const = dmub_fw->data +
801 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
802 			PSP_HEADER_BYTES;
803 
804 	fw_bss_data = dmub_fw->data +
805 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 		      le32_to_cpu(hdr->inst_const_bytes);
807 
808 	/* Copy firmware and bios info into FB memory. */
809 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
811 
812 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
813 
814 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 	 * amdgpu_ucode_init_single_fw will load dmub firmware
816 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 	 * will be done by dm_dmub_hw_init
818 	 */
819 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
821 				fw_inst_const_size);
822 	}
823 
824 	if (fw_bss_data_size)
825 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 		       fw_bss_data, fw_bss_data_size);
827 
828 	/* Copy firmware bios info into FB memory. */
829 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
830 	       adev->bios_size);
831 
832 	/* Reset regions that need to be reset. */
833 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
834 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
835 
836 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
838 
839 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
841 
842 	/* Initialize hardware. */
843 	memset(&hw_params, 0, sizeof(hw_params));
844 	hw_params.fb_base = adev->gmc.fb_start;
845 	hw_params.fb_offset = adev->gmc.aper_base;
846 
847 	/* backdoor load firmware and trigger dmub running */
848 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 		hw_params.load_inst_const = true;
850 
851 	if (dmcu)
852 		hw_params.psp_version = dmcu->psp_version;
853 
854 	for (i = 0; i < fb_info->num_fb; ++i)
855 		hw_params.fb[i] = &fb_info->fb[i];
856 
857 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 	if (status != DMUB_STATUS_OK) {
859 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
860 		return -EINVAL;
861 	}
862 
863 	/* Wait for firmware load to finish. */
864 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 	if (status != DMUB_STATUS_OK)
866 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
867 
868 	/* Init DMCU and ABM if available. */
869 	if (dmcu && abm) {
870 		dmcu->funcs->dmcu_init(dmcu);
871 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
872 	}
873 
874 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 	if (!adev->dm.dc->ctx->dmub_srv) {
876 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
877 		return -ENOMEM;
878 	}
879 
880 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 		 adev->dm.dmcub_fw_version);
882 
883 	return 0;
884 }
885 
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
888 {
889 	uint64_t pt_base;
890 	uint32_t logical_addr_low;
891 	uint32_t logical_addr_high;
892 	uint32_t agp_base, agp_bot, agp_top;
893 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
894 
895 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
896 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
897 
898 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
899 		/*
900 		 * Raven2 has a HW issue that it is unable to use the vram which
901 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
902 		 * workaround that increase system aperture high address (add 1)
903 		 * to get rid of the VM fault and hardware hang.
904 		 */
905 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
906 	else
907 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
908 
909 	agp_base = 0;
910 	agp_bot = adev->gmc.agp_start >> 24;
911 	agp_top = adev->gmc.agp_end >> 24;
912 
913 
914 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 	page_table_base.low_part = lower_32_bits(pt_base);
920 
921 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
923 
924 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
925 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
927 
928 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
931 
932 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
935 
936 	pa_config->is_hvm_enabled = 0;
937 
938 }
939 #endif
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 static void event_mall_stutter(struct work_struct *work)
942 {
943 
944 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
945 	struct amdgpu_display_manager *dm = vblank_work->dm;
946 
947 	mutex_lock(&dm->dc_lock);
948 
949 	if (vblank_work->enable)
950 		dm->active_vblank_irq_count++;
951 	else
952 		dm->active_vblank_irq_count--;
953 
954 
955 	dc_allow_idle_optimizations(
956 		dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
957 
958 	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
959 
960 
961 	mutex_unlock(&dm->dc_lock);
962 }
963 
964 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
965 {
966 
967 	int max_caps = dc->caps.max_links;
968 	struct vblank_workqueue *vblank_work;
969 	int i = 0;
970 
971 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
972 	if (ZERO_OR_NULL_PTR(vblank_work)) {
973 		kfree(vblank_work);
974 		return NULL;
975 	}
976 
977 	for (i = 0; i < max_caps; i++)
978 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
979 
980 	return vblank_work;
981 }
982 #endif
983 static int amdgpu_dm_init(struct amdgpu_device *adev)
984 {
985 	struct dc_init_data init_data;
986 #ifdef CONFIG_DRM_AMD_DC_HDCP
987 	struct dc_callback_init init_params;
988 #endif
989 	int r;
990 
991 	adev->dm.ddev = adev_to_drm(adev);
992 	adev->dm.adev = adev;
993 
994 	/* Zero all the fields */
995 	memset(&init_data, 0, sizeof(init_data));
996 #ifdef CONFIG_DRM_AMD_DC_HDCP
997 	memset(&init_params, 0, sizeof(init_params));
998 #endif
999 
1000 	mutex_init(&adev->dm.dc_lock);
1001 	mutex_init(&adev->dm.audio_lock);
1002 #if defined(CONFIG_DRM_AMD_DC_DCN)
1003 	spin_lock_init(&adev->dm.vblank_lock);
1004 #endif
1005 
1006 	if(amdgpu_dm_irq_init(adev)) {
1007 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1008 		goto error;
1009 	}
1010 
1011 	init_data.asic_id.chip_family = adev->family;
1012 
1013 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1014 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1015 
1016 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1017 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1018 	init_data.asic_id.atombios_base_address =
1019 		adev->mode_info.atom_context->bios;
1020 
1021 	init_data.driver = adev;
1022 
1023 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1024 
1025 	if (!adev->dm.cgs_device) {
1026 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1027 		goto error;
1028 	}
1029 
1030 	init_data.cgs_device = adev->dm.cgs_device;
1031 
1032 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1033 
1034 	switch (adev->asic_type) {
1035 	case CHIP_CARRIZO:
1036 	case CHIP_STONEY:
1037 	case CHIP_RAVEN:
1038 	case CHIP_RENOIR:
1039 		init_data.flags.gpu_vm_support = true;
1040 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1041 			init_data.flags.disable_dmcu = true;
1042 		break;
1043 #if defined(CONFIG_DRM_AMD_DC_DCN)
1044 	case CHIP_VANGOGH:
1045 		init_data.flags.gpu_vm_support = true;
1046 		break;
1047 #endif
1048 	default:
1049 		break;
1050 	}
1051 
1052 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1053 		init_data.flags.fbc_support = true;
1054 
1055 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1056 		init_data.flags.multi_mon_pp_mclk_switch = true;
1057 
1058 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1059 		init_data.flags.disable_fractional_pwm = true;
1060 
1061 	init_data.flags.power_down_display_on_boot = true;
1062 
1063 	/* Display Core create. */
1064 	adev->dm.dc = dc_create(&init_data);
1065 
1066 	if (adev->dm.dc) {
1067 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1068 	} else {
1069 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1070 		goto error;
1071 	}
1072 
1073 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1074 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1075 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1076 	}
1077 
1078 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1079 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1080 
1081 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1082 		adev->dm.dc->debug.disable_stutter = true;
1083 
1084 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1085 		adev->dm.dc->debug.disable_dsc = true;
1086 
1087 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1088 		adev->dm.dc->debug.disable_clock_gate = true;
1089 
1090 	r = dm_dmub_hw_init(adev);
1091 	if (r) {
1092 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1093 		goto error;
1094 	}
1095 
1096 	dc_hardware_init(adev->dm.dc);
1097 
1098 #if defined(CONFIG_DRM_AMD_DC_DCN)
1099 	if (adev->apu_flags) {
1100 		struct dc_phy_addr_space_config pa_config;
1101 
1102 		mmhub_read_system_context(adev, &pa_config);
1103 
1104 		// Call the DC init_memory func
1105 		dc_setup_system_context(adev->dm.dc, &pa_config);
1106 	}
1107 #endif
1108 
1109 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1110 	if (!adev->dm.freesync_module) {
1111 		DRM_ERROR(
1112 		"amdgpu: failed to initialize freesync_module.\n");
1113 	} else
1114 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1115 				adev->dm.freesync_module);
1116 
1117 	amdgpu_dm_init_color_mod();
1118 
1119 #if defined(CONFIG_DRM_AMD_DC_DCN)
1120 	if (adev->dm.dc->caps.max_links > 0) {
1121 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1122 
1123 		if (!adev->dm.vblank_workqueue)
1124 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1125 		else
1126 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1127 	}
1128 #endif
1129 
1130 #ifdef CONFIG_DRM_AMD_DC_HDCP
1131 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1132 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1133 
1134 		if (!adev->dm.hdcp_workqueue)
1135 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1136 		else
1137 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1138 
1139 		dc_init_callbacks(adev->dm.dc, &init_params);
1140 	}
1141 #endif
1142 	if (amdgpu_dm_initialize_drm_device(adev)) {
1143 		DRM_ERROR(
1144 		"amdgpu: failed to initialize sw for display support.\n");
1145 		goto error;
1146 	}
1147 
1148 	/* create fake encoders for MST */
1149 	dm_dp_create_fake_mst_encoders(adev);
1150 
1151 	/* TODO: Add_display_info? */
1152 
1153 	/* TODO use dynamic cursor width */
1154 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1155 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1156 
1157 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1158 		DRM_ERROR(
1159 		"amdgpu: failed to initialize sw for display support.\n");
1160 		goto error;
1161 	}
1162 
1163 
1164 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1165 
1166 	return 0;
1167 error:
1168 	amdgpu_dm_fini(adev);
1169 
1170 	return -EINVAL;
1171 }
1172 
1173 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1174 {
1175 	int i;
1176 
1177 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1178 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1179 	}
1180 
1181 	amdgpu_dm_audio_fini(adev);
1182 
1183 	amdgpu_dm_destroy_drm_device(&adev->dm);
1184 
1185 #ifdef CONFIG_DRM_AMD_DC_HDCP
1186 	if (adev->dm.hdcp_workqueue) {
1187 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1188 		adev->dm.hdcp_workqueue = NULL;
1189 	}
1190 
1191 	if (adev->dm.dc)
1192 		dc_deinit_callbacks(adev->dm.dc);
1193 #endif
1194 	if (adev->dm.dc->ctx->dmub_srv) {
1195 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1196 		adev->dm.dc->ctx->dmub_srv = NULL;
1197 	}
1198 
1199 	if (adev->dm.dmub_bo)
1200 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1201 				      &adev->dm.dmub_bo_gpu_addr,
1202 				      &adev->dm.dmub_bo_cpu_addr);
1203 
1204 	/* DC Destroy TODO: Replace destroy DAL */
1205 	if (adev->dm.dc)
1206 		dc_destroy(&adev->dm.dc);
1207 	/*
1208 	 * TODO: pageflip, vlank interrupt
1209 	 *
1210 	 * amdgpu_dm_irq_fini(adev);
1211 	 */
1212 
1213 	if (adev->dm.cgs_device) {
1214 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1215 		adev->dm.cgs_device = NULL;
1216 	}
1217 	if (adev->dm.freesync_module) {
1218 		mod_freesync_destroy(adev->dm.freesync_module);
1219 		adev->dm.freesync_module = NULL;
1220 	}
1221 
1222 	mutex_destroy(&adev->dm.audio_lock);
1223 	mutex_destroy(&adev->dm.dc_lock);
1224 
1225 	return;
1226 }
1227 
1228 static int load_dmcu_fw(struct amdgpu_device *adev)
1229 {
1230 	const char *fw_name_dmcu = NULL;
1231 	int r;
1232 	const struct dmcu_firmware_header_v1_0 *hdr;
1233 
1234 	switch(adev->asic_type) {
1235 #if defined(CONFIG_DRM_AMD_DC_SI)
1236 	case CHIP_TAHITI:
1237 	case CHIP_PITCAIRN:
1238 	case CHIP_VERDE:
1239 	case CHIP_OLAND:
1240 #endif
1241 	case CHIP_BONAIRE:
1242 	case CHIP_HAWAII:
1243 	case CHIP_KAVERI:
1244 	case CHIP_KABINI:
1245 	case CHIP_MULLINS:
1246 	case CHIP_TONGA:
1247 	case CHIP_FIJI:
1248 	case CHIP_CARRIZO:
1249 	case CHIP_STONEY:
1250 	case CHIP_POLARIS11:
1251 	case CHIP_POLARIS10:
1252 	case CHIP_POLARIS12:
1253 	case CHIP_VEGAM:
1254 	case CHIP_VEGA10:
1255 	case CHIP_VEGA12:
1256 	case CHIP_VEGA20:
1257 	case CHIP_NAVI10:
1258 	case CHIP_NAVI14:
1259 	case CHIP_RENOIR:
1260 	case CHIP_SIENNA_CICHLID:
1261 	case CHIP_NAVY_FLOUNDER:
1262 	case CHIP_DIMGREY_CAVEFISH:
1263 	case CHIP_VANGOGH:
1264 		return 0;
1265 	case CHIP_NAVI12:
1266 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1267 		break;
1268 	case CHIP_RAVEN:
1269 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1270 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1271 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1272 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1273 		else
1274 			return 0;
1275 		break;
1276 	default:
1277 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1278 		return -EINVAL;
1279 	}
1280 
1281 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1282 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1283 		return 0;
1284 	}
1285 
1286 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1287 	if (r == -ENOENT) {
1288 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1289 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1290 		adev->dm.fw_dmcu = NULL;
1291 		return 0;
1292 	}
1293 	if (r) {
1294 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1295 			fw_name_dmcu);
1296 		return r;
1297 	}
1298 
1299 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1300 	if (r) {
1301 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1302 			fw_name_dmcu);
1303 		release_firmware(adev->dm.fw_dmcu);
1304 		adev->dm.fw_dmcu = NULL;
1305 		return r;
1306 	}
1307 
1308 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1309 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1310 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1311 	adev->firmware.fw_size +=
1312 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1313 
1314 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1315 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1316 	adev->firmware.fw_size +=
1317 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1318 
1319 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1320 
1321 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1322 
1323 	return 0;
1324 }
1325 
1326 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1327 {
1328 	struct amdgpu_device *adev = ctx;
1329 
1330 	return dm_read_reg(adev->dm.dc->ctx, address);
1331 }
1332 
1333 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1334 				     uint32_t value)
1335 {
1336 	struct amdgpu_device *adev = ctx;
1337 
1338 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1339 }
1340 
1341 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1342 {
1343 	struct dmub_srv_create_params create_params;
1344 	struct dmub_srv_region_params region_params;
1345 	struct dmub_srv_region_info region_info;
1346 	struct dmub_srv_fb_params fb_params;
1347 	struct dmub_srv_fb_info *fb_info;
1348 	struct dmub_srv *dmub_srv;
1349 	const struct dmcub_firmware_header_v1_0 *hdr;
1350 	const char *fw_name_dmub;
1351 	enum dmub_asic dmub_asic;
1352 	enum dmub_status status;
1353 	int r;
1354 
1355 	switch (adev->asic_type) {
1356 	case CHIP_RENOIR:
1357 		dmub_asic = DMUB_ASIC_DCN21;
1358 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1359 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1360 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1361 		break;
1362 	case CHIP_SIENNA_CICHLID:
1363 		dmub_asic = DMUB_ASIC_DCN30;
1364 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1365 		break;
1366 	case CHIP_NAVY_FLOUNDER:
1367 		dmub_asic = DMUB_ASIC_DCN30;
1368 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1369 		break;
1370 	case CHIP_VANGOGH:
1371 		dmub_asic = DMUB_ASIC_DCN301;
1372 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1373 		break;
1374 	case CHIP_DIMGREY_CAVEFISH:
1375 		dmub_asic = DMUB_ASIC_DCN302;
1376 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1377 		break;
1378 
1379 	default:
1380 		/* ASIC doesn't support DMUB. */
1381 		return 0;
1382 	}
1383 
1384 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1385 	if (r) {
1386 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1387 		return 0;
1388 	}
1389 
1390 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1391 	if (r) {
1392 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1393 		return 0;
1394 	}
1395 
1396 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1397 
1398 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1399 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1400 			AMDGPU_UCODE_ID_DMCUB;
1401 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1402 			adev->dm.dmub_fw;
1403 		adev->firmware.fw_size +=
1404 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1405 
1406 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1407 			 adev->dm.dmcub_fw_version);
1408 	}
1409 
1410 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1411 
1412 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1413 	dmub_srv = adev->dm.dmub_srv;
1414 
1415 	if (!dmub_srv) {
1416 		DRM_ERROR("Failed to allocate DMUB service!\n");
1417 		return -ENOMEM;
1418 	}
1419 
1420 	memset(&create_params, 0, sizeof(create_params));
1421 	create_params.user_ctx = adev;
1422 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1423 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1424 	create_params.asic = dmub_asic;
1425 
1426 	/* Create the DMUB service. */
1427 	status = dmub_srv_create(dmub_srv, &create_params);
1428 	if (status != DMUB_STATUS_OK) {
1429 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1430 		return -EINVAL;
1431 	}
1432 
1433 	/* Calculate the size of all the regions for the DMUB service. */
1434 	memset(&region_params, 0, sizeof(region_params));
1435 
1436 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1437 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1438 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1439 	region_params.vbios_size = adev->bios_size;
1440 	region_params.fw_bss_data = region_params.bss_data_size ?
1441 		adev->dm.dmub_fw->data +
1442 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1443 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1444 	region_params.fw_inst_const =
1445 		adev->dm.dmub_fw->data +
1446 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1447 		PSP_HEADER_BYTES;
1448 
1449 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1450 					   &region_info);
1451 
1452 	if (status != DMUB_STATUS_OK) {
1453 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1454 		return -EINVAL;
1455 	}
1456 
1457 	/*
1458 	 * Allocate a framebuffer based on the total size of all the regions.
1459 	 * TODO: Move this into GART.
1460 	 */
1461 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1462 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1463 				    &adev->dm.dmub_bo_gpu_addr,
1464 				    &adev->dm.dmub_bo_cpu_addr);
1465 	if (r)
1466 		return r;
1467 
1468 	/* Rebase the regions on the framebuffer address. */
1469 	memset(&fb_params, 0, sizeof(fb_params));
1470 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1471 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1472 	fb_params.region_info = &region_info;
1473 
1474 	adev->dm.dmub_fb_info =
1475 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1476 	fb_info = adev->dm.dmub_fb_info;
1477 
1478 	if (!fb_info) {
1479 		DRM_ERROR(
1480 			"Failed to allocate framebuffer info for DMUB service!\n");
1481 		return -ENOMEM;
1482 	}
1483 
1484 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1485 	if (status != DMUB_STATUS_OK) {
1486 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1487 		return -EINVAL;
1488 	}
1489 
1490 	return 0;
1491 }
1492 
1493 static int dm_sw_init(void *handle)
1494 {
1495 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1496 	int r;
1497 
1498 	r = dm_dmub_sw_init(adev);
1499 	if (r)
1500 		return r;
1501 
1502 	return load_dmcu_fw(adev);
1503 }
1504 
1505 static int dm_sw_fini(void *handle)
1506 {
1507 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1508 
1509 	kfree(adev->dm.dmub_fb_info);
1510 	adev->dm.dmub_fb_info = NULL;
1511 
1512 	if (adev->dm.dmub_srv) {
1513 		dmub_srv_destroy(adev->dm.dmub_srv);
1514 		adev->dm.dmub_srv = NULL;
1515 	}
1516 
1517 	release_firmware(adev->dm.dmub_fw);
1518 	adev->dm.dmub_fw = NULL;
1519 
1520 	release_firmware(adev->dm.fw_dmcu);
1521 	adev->dm.fw_dmcu = NULL;
1522 
1523 	return 0;
1524 }
1525 
1526 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1527 {
1528 	struct amdgpu_dm_connector *aconnector;
1529 	struct drm_connector *connector;
1530 	struct drm_connector_list_iter iter;
1531 	int ret = 0;
1532 
1533 	drm_connector_list_iter_begin(dev, &iter);
1534 	drm_for_each_connector_iter(connector, &iter) {
1535 		aconnector = to_amdgpu_dm_connector(connector);
1536 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1537 		    aconnector->mst_mgr.aux) {
1538 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1539 					 aconnector,
1540 					 aconnector->base.base.id);
1541 
1542 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1543 			if (ret < 0) {
1544 				DRM_ERROR("DM_MST: Failed to start MST\n");
1545 				aconnector->dc_link->type =
1546 					dc_connection_single;
1547 				break;
1548 			}
1549 		}
1550 	}
1551 	drm_connector_list_iter_end(&iter);
1552 
1553 	return ret;
1554 }
1555 
1556 static int dm_late_init(void *handle)
1557 {
1558 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1559 
1560 	struct dmcu_iram_parameters params;
1561 	unsigned int linear_lut[16];
1562 	int i;
1563 	struct dmcu *dmcu = NULL;
1564 	bool ret = true;
1565 
1566 	dmcu = adev->dm.dc->res_pool->dmcu;
1567 
1568 	for (i = 0; i < 16; i++)
1569 		linear_lut[i] = 0xFFFF * i / 15;
1570 
1571 	params.set = 0;
1572 	params.backlight_ramping_start = 0xCCCC;
1573 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1574 	params.backlight_lut_array_size = 16;
1575 	params.backlight_lut_array = linear_lut;
1576 
1577 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1578 	 * 0xFFFF x 0.01 = 0x28F
1579 	 */
1580 	params.min_abm_backlight = 0x28F;
1581 
1582 	/* In the case where abm is implemented on dmcub,
1583 	 * dmcu object will be null.
1584 	 * ABM 2.4 and up are implemented on dmcub.
1585 	 */
1586 	if (dmcu)
1587 		ret = dmcu_load_iram(dmcu, params);
1588 	else if (adev->dm.dc->ctx->dmub_srv)
1589 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1590 
1591 	if (!ret)
1592 		return -EINVAL;
1593 
1594 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1595 }
1596 
1597 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1598 {
1599 	struct amdgpu_dm_connector *aconnector;
1600 	struct drm_connector *connector;
1601 	struct drm_connector_list_iter iter;
1602 	struct drm_dp_mst_topology_mgr *mgr;
1603 	int ret;
1604 	bool need_hotplug = false;
1605 
1606 	drm_connector_list_iter_begin(dev, &iter);
1607 	drm_for_each_connector_iter(connector, &iter) {
1608 		aconnector = to_amdgpu_dm_connector(connector);
1609 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1610 		    aconnector->mst_port)
1611 			continue;
1612 
1613 		mgr = &aconnector->mst_mgr;
1614 
1615 		if (suspend) {
1616 			drm_dp_mst_topology_mgr_suspend(mgr);
1617 		} else {
1618 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1619 			if (ret < 0) {
1620 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1621 				need_hotplug = true;
1622 			}
1623 		}
1624 	}
1625 	drm_connector_list_iter_end(&iter);
1626 
1627 	if (need_hotplug)
1628 		drm_kms_helper_hotplug_event(dev);
1629 }
1630 
1631 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1632 {
1633 	struct smu_context *smu = &adev->smu;
1634 	int ret = 0;
1635 
1636 	if (!is_support_sw_smu(adev))
1637 		return 0;
1638 
1639 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1640 	 * on window driver dc implementation.
1641 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1642 	 * should be passed to smu during boot up and resume from s3.
1643 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1644 	 * dcn20_resource_construct
1645 	 * then call pplib functions below to pass the settings to smu:
1646 	 * smu_set_watermarks_for_clock_ranges
1647 	 * smu_set_watermarks_table
1648 	 * navi10_set_watermarks_table
1649 	 * smu_write_watermarks_table
1650 	 *
1651 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1652 	 * dc has implemented different flow for window driver:
1653 	 * dc_hardware_init / dc_set_power_state
1654 	 * dcn10_init_hw
1655 	 * notify_wm_ranges
1656 	 * set_wm_ranges
1657 	 * -- Linux
1658 	 * smu_set_watermarks_for_clock_ranges
1659 	 * renoir_set_watermarks_table
1660 	 * smu_write_watermarks_table
1661 	 *
1662 	 * For Linux,
1663 	 * dc_hardware_init -> amdgpu_dm_init
1664 	 * dc_set_power_state --> dm_resume
1665 	 *
1666 	 * therefore, this function apply to navi10/12/14 but not Renoir
1667 	 * *
1668 	 */
1669 	switch(adev->asic_type) {
1670 	case CHIP_NAVI10:
1671 	case CHIP_NAVI14:
1672 	case CHIP_NAVI12:
1673 		break;
1674 	default:
1675 		return 0;
1676 	}
1677 
1678 	ret = smu_write_watermarks_table(smu);
1679 	if (ret) {
1680 		DRM_ERROR("Failed to update WMTABLE!\n");
1681 		return ret;
1682 	}
1683 
1684 	return 0;
1685 }
1686 
1687 /**
1688  * dm_hw_init() - Initialize DC device
1689  * @handle: The base driver device containing the amdgpu_dm device.
1690  *
1691  * Initialize the &struct amdgpu_display_manager device. This involves calling
1692  * the initializers of each DM component, then populating the struct with them.
1693  *
1694  * Although the function implies hardware initialization, both hardware and
1695  * software are initialized here. Splitting them out to their relevant init
1696  * hooks is a future TODO item.
1697  *
1698  * Some notable things that are initialized here:
1699  *
1700  * - Display Core, both software and hardware
1701  * - DC modules that we need (freesync and color management)
1702  * - DRM software states
1703  * - Interrupt sources and handlers
1704  * - Vblank support
1705  * - Debug FS entries, if enabled
1706  */
1707 static int dm_hw_init(void *handle)
1708 {
1709 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1710 	/* Create DAL display manager */
1711 	amdgpu_dm_init(adev);
1712 	amdgpu_dm_hpd_init(adev);
1713 
1714 	return 0;
1715 }
1716 
1717 /**
1718  * dm_hw_fini() - Teardown DC device
1719  * @handle: The base driver device containing the amdgpu_dm device.
1720  *
1721  * Teardown components within &struct amdgpu_display_manager that require
1722  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1723  * were loaded. Also flush IRQ workqueues and disable them.
1724  */
1725 static int dm_hw_fini(void *handle)
1726 {
1727 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1728 
1729 	amdgpu_dm_hpd_fini(adev);
1730 
1731 	amdgpu_dm_irq_fini(adev);
1732 	amdgpu_dm_fini(adev);
1733 	return 0;
1734 }
1735 
1736 
1737 static int dm_enable_vblank(struct drm_crtc *crtc);
1738 static void dm_disable_vblank(struct drm_crtc *crtc);
1739 
1740 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1741 				 struct dc_state *state, bool enable)
1742 {
1743 	enum dc_irq_source irq_source;
1744 	struct amdgpu_crtc *acrtc;
1745 	int rc = -EBUSY;
1746 	int i = 0;
1747 
1748 	for (i = 0; i < state->stream_count; i++) {
1749 		acrtc = get_crtc_by_otg_inst(
1750 				adev, state->stream_status[i].primary_otg_inst);
1751 
1752 		if (acrtc && state->stream_status[i].plane_count != 0) {
1753 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1754 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1755 			DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1756 				  acrtc->crtc_id, enable ? "en" : "dis", rc);
1757 			if (rc)
1758 				DRM_WARN("Failed to %s pflip interrupts\n",
1759 					 enable ? "enable" : "disable");
1760 
1761 			if (enable) {
1762 				rc = dm_enable_vblank(&acrtc->base);
1763 				if (rc)
1764 					DRM_WARN("Failed to enable vblank interrupts\n");
1765 			} else {
1766 				dm_disable_vblank(&acrtc->base);
1767 			}
1768 
1769 		}
1770 	}
1771 
1772 }
1773 
1774 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1775 {
1776 	struct dc_state *context = NULL;
1777 	enum dc_status res = DC_ERROR_UNEXPECTED;
1778 	int i;
1779 	struct dc_stream_state *del_streams[MAX_PIPES];
1780 	int del_streams_count = 0;
1781 
1782 	memset(del_streams, 0, sizeof(del_streams));
1783 
1784 	context = dc_create_state(dc);
1785 	if (context == NULL)
1786 		goto context_alloc_fail;
1787 
1788 	dc_resource_state_copy_construct_current(dc, context);
1789 
1790 	/* First remove from context all streams */
1791 	for (i = 0; i < context->stream_count; i++) {
1792 		struct dc_stream_state *stream = context->streams[i];
1793 
1794 		del_streams[del_streams_count++] = stream;
1795 	}
1796 
1797 	/* Remove all planes for removed streams and then remove the streams */
1798 	for (i = 0; i < del_streams_count; i++) {
1799 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1800 			res = DC_FAIL_DETACH_SURFACES;
1801 			goto fail;
1802 		}
1803 
1804 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1805 		if (res != DC_OK)
1806 			goto fail;
1807 	}
1808 
1809 
1810 	res = dc_validate_global_state(dc, context, false);
1811 
1812 	if (res != DC_OK) {
1813 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1814 		goto fail;
1815 	}
1816 
1817 	res = dc_commit_state(dc, context);
1818 
1819 fail:
1820 	dc_release_state(context);
1821 
1822 context_alloc_fail:
1823 	return res;
1824 }
1825 
1826 static int dm_suspend(void *handle)
1827 {
1828 	struct amdgpu_device *adev = handle;
1829 	struct amdgpu_display_manager *dm = &adev->dm;
1830 	int ret = 0;
1831 
1832 	if (amdgpu_in_reset(adev)) {
1833 		mutex_lock(&dm->dc_lock);
1834 
1835 #if defined(CONFIG_DRM_AMD_DC_DCN)
1836 		dc_allow_idle_optimizations(adev->dm.dc, false);
1837 #endif
1838 
1839 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1840 
1841 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1842 
1843 		amdgpu_dm_commit_zero_streams(dm->dc);
1844 
1845 		amdgpu_dm_irq_suspend(adev);
1846 
1847 		return ret;
1848 	}
1849 
1850 	WARN_ON(adev->dm.cached_state);
1851 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1852 
1853 	s3_handle_mst(adev_to_drm(adev), true);
1854 
1855 	amdgpu_dm_irq_suspend(adev);
1856 
1857 
1858 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1859 
1860 	return 0;
1861 }
1862 
1863 static struct amdgpu_dm_connector *
1864 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1865 					     struct drm_crtc *crtc)
1866 {
1867 	uint32_t i;
1868 	struct drm_connector_state *new_con_state;
1869 	struct drm_connector *connector;
1870 	struct drm_crtc *crtc_from_state;
1871 
1872 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1873 		crtc_from_state = new_con_state->crtc;
1874 
1875 		if (crtc_from_state == crtc)
1876 			return to_amdgpu_dm_connector(connector);
1877 	}
1878 
1879 	return NULL;
1880 }
1881 
1882 static void emulated_link_detect(struct dc_link *link)
1883 {
1884 	struct dc_sink_init_data sink_init_data = { 0 };
1885 	struct display_sink_capability sink_caps = { 0 };
1886 	enum dc_edid_status edid_status;
1887 	struct dc_context *dc_ctx = link->ctx;
1888 	struct dc_sink *sink = NULL;
1889 	struct dc_sink *prev_sink = NULL;
1890 
1891 	link->type = dc_connection_none;
1892 	prev_sink = link->local_sink;
1893 
1894 	if (prev_sink)
1895 		dc_sink_release(prev_sink);
1896 
1897 	switch (link->connector_signal) {
1898 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1899 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1900 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1901 		break;
1902 	}
1903 
1904 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1905 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1906 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1907 		break;
1908 	}
1909 
1910 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1911 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1912 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1913 		break;
1914 	}
1915 
1916 	case SIGNAL_TYPE_LVDS: {
1917 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1918 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1919 		break;
1920 	}
1921 
1922 	case SIGNAL_TYPE_EDP: {
1923 		sink_caps.transaction_type =
1924 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1925 		sink_caps.signal = SIGNAL_TYPE_EDP;
1926 		break;
1927 	}
1928 
1929 	case SIGNAL_TYPE_DISPLAY_PORT: {
1930 		sink_caps.transaction_type =
1931 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1932 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1933 		break;
1934 	}
1935 
1936 	default:
1937 		DC_ERROR("Invalid connector type! signal:%d\n",
1938 			link->connector_signal);
1939 		return;
1940 	}
1941 
1942 	sink_init_data.link = link;
1943 	sink_init_data.sink_signal = sink_caps.signal;
1944 
1945 	sink = dc_sink_create(&sink_init_data);
1946 	if (!sink) {
1947 		DC_ERROR("Failed to create sink!\n");
1948 		return;
1949 	}
1950 
1951 	/* dc_sink_create returns a new reference */
1952 	link->local_sink = sink;
1953 
1954 	edid_status = dm_helpers_read_local_edid(
1955 			link->ctx,
1956 			link,
1957 			sink);
1958 
1959 	if (edid_status != EDID_OK)
1960 		DC_ERROR("Failed to read EDID");
1961 
1962 }
1963 
1964 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1965 				     struct amdgpu_display_manager *dm)
1966 {
1967 	struct {
1968 		struct dc_surface_update surface_updates[MAX_SURFACES];
1969 		struct dc_plane_info plane_infos[MAX_SURFACES];
1970 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1971 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1972 		struct dc_stream_update stream_update;
1973 	} * bundle;
1974 	int k, m;
1975 
1976 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1977 
1978 	if (!bundle) {
1979 		dm_error("Failed to allocate update bundle\n");
1980 		goto cleanup;
1981 	}
1982 
1983 	for (k = 0; k < dc_state->stream_count; k++) {
1984 		bundle->stream_update.stream = dc_state->streams[k];
1985 
1986 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1987 			bundle->surface_updates[m].surface =
1988 				dc_state->stream_status->plane_states[m];
1989 			bundle->surface_updates[m].surface->force_full_update =
1990 				true;
1991 		}
1992 		dc_commit_updates_for_stream(
1993 			dm->dc, bundle->surface_updates,
1994 			dc_state->stream_status->plane_count,
1995 			dc_state->streams[k], &bundle->stream_update, dc_state);
1996 	}
1997 
1998 cleanup:
1999 	kfree(bundle);
2000 
2001 	return;
2002 }
2003 
2004 static void dm_set_dpms_off(struct dc_link *link)
2005 {
2006 	struct dc_stream_state *stream_state;
2007 	struct amdgpu_dm_connector *aconnector = link->priv;
2008 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2009 	struct dc_stream_update stream_update;
2010 	bool dpms_off = true;
2011 
2012 	memset(&stream_update, 0, sizeof(stream_update));
2013 	stream_update.dpms_off = &dpms_off;
2014 
2015 	mutex_lock(&adev->dm.dc_lock);
2016 	stream_state = dc_stream_find_from_link(link);
2017 
2018 	if (stream_state == NULL) {
2019 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2020 		mutex_unlock(&adev->dm.dc_lock);
2021 		return;
2022 	}
2023 
2024 	stream_update.stream = stream_state;
2025 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2026 				     stream_state, &stream_update,
2027 				     stream_state->ctx->dc->current_state);
2028 	mutex_unlock(&adev->dm.dc_lock);
2029 }
2030 
2031 static int dm_resume(void *handle)
2032 {
2033 	struct amdgpu_device *adev = handle;
2034 	struct drm_device *ddev = adev_to_drm(adev);
2035 	struct amdgpu_display_manager *dm = &adev->dm;
2036 	struct amdgpu_dm_connector *aconnector;
2037 	struct drm_connector *connector;
2038 	struct drm_connector_list_iter iter;
2039 	struct drm_crtc *crtc;
2040 	struct drm_crtc_state *new_crtc_state;
2041 	struct dm_crtc_state *dm_new_crtc_state;
2042 	struct drm_plane *plane;
2043 	struct drm_plane_state *new_plane_state;
2044 	struct dm_plane_state *dm_new_plane_state;
2045 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2046 	enum dc_connection_type new_connection_type = dc_connection_none;
2047 	struct dc_state *dc_state;
2048 	int i, r, j;
2049 
2050 	if (amdgpu_in_reset(adev)) {
2051 		dc_state = dm->cached_dc_state;
2052 
2053 		r = dm_dmub_hw_init(adev);
2054 		if (r)
2055 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2056 
2057 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2058 		dc_resume(dm->dc);
2059 
2060 		amdgpu_dm_irq_resume_early(adev);
2061 
2062 		for (i = 0; i < dc_state->stream_count; i++) {
2063 			dc_state->streams[i]->mode_changed = true;
2064 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2065 				dc_state->stream_status->plane_states[j]->update_flags.raw
2066 					= 0xffffffff;
2067 			}
2068 		}
2069 
2070 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2071 
2072 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2073 
2074 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2075 
2076 		dc_release_state(dm->cached_dc_state);
2077 		dm->cached_dc_state = NULL;
2078 
2079 		amdgpu_dm_irq_resume_late(adev);
2080 
2081 		mutex_unlock(&dm->dc_lock);
2082 
2083 		return 0;
2084 	}
2085 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2086 	dc_release_state(dm_state->context);
2087 	dm_state->context = dc_create_state(dm->dc);
2088 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2089 	dc_resource_state_construct(dm->dc, dm_state->context);
2090 
2091 	/* Before powering on DC we need to re-initialize DMUB. */
2092 	r = dm_dmub_hw_init(adev);
2093 	if (r)
2094 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2095 
2096 	/* power on hardware */
2097 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2098 
2099 	/* program HPD filter */
2100 	dc_resume(dm->dc);
2101 
2102 	/*
2103 	 * early enable HPD Rx IRQ, should be done before set mode as short
2104 	 * pulse interrupts are used for MST
2105 	 */
2106 	amdgpu_dm_irq_resume_early(adev);
2107 
2108 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2109 	s3_handle_mst(ddev, false);
2110 
2111 	/* Do detection*/
2112 	drm_connector_list_iter_begin(ddev, &iter);
2113 	drm_for_each_connector_iter(connector, &iter) {
2114 		aconnector = to_amdgpu_dm_connector(connector);
2115 
2116 		/*
2117 		 * this is the case when traversing through already created
2118 		 * MST connectors, should be skipped
2119 		 */
2120 		if (aconnector->mst_port)
2121 			continue;
2122 
2123 		mutex_lock(&aconnector->hpd_lock);
2124 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2125 			DRM_ERROR("KMS: Failed to detect connector\n");
2126 
2127 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2128 			emulated_link_detect(aconnector->dc_link);
2129 		else
2130 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2131 
2132 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2133 			aconnector->fake_enable = false;
2134 
2135 		if (aconnector->dc_sink)
2136 			dc_sink_release(aconnector->dc_sink);
2137 		aconnector->dc_sink = NULL;
2138 		amdgpu_dm_update_connector_after_detect(aconnector);
2139 		mutex_unlock(&aconnector->hpd_lock);
2140 	}
2141 	drm_connector_list_iter_end(&iter);
2142 
2143 	/* Force mode set in atomic commit */
2144 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2145 		new_crtc_state->active_changed = true;
2146 
2147 	/*
2148 	 * atomic_check is expected to create the dc states. We need to release
2149 	 * them here, since they were duplicated as part of the suspend
2150 	 * procedure.
2151 	 */
2152 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2153 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2154 		if (dm_new_crtc_state->stream) {
2155 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2156 			dc_stream_release(dm_new_crtc_state->stream);
2157 			dm_new_crtc_state->stream = NULL;
2158 		}
2159 	}
2160 
2161 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2162 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2163 		if (dm_new_plane_state->dc_state) {
2164 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2165 			dc_plane_state_release(dm_new_plane_state->dc_state);
2166 			dm_new_plane_state->dc_state = NULL;
2167 		}
2168 	}
2169 
2170 	drm_atomic_helper_resume(ddev, dm->cached_state);
2171 
2172 	dm->cached_state = NULL;
2173 
2174 	amdgpu_dm_irq_resume_late(adev);
2175 
2176 	amdgpu_dm_smu_write_watermarks_table(adev);
2177 
2178 	return 0;
2179 }
2180 
2181 /**
2182  * DOC: DM Lifecycle
2183  *
2184  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2185  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2186  * the base driver's device list to be initialized and torn down accordingly.
2187  *
2188  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2189  */
2190 
2191 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2192 	.name = "dm",
2193 	.early_init = dm_early_init,
2194 	.late_init = dm_late_init,
2195 	.sw_init = dm_sw_init,
2196 	.sw_fini = dm_sw_fini,
2197 	.hw_init = dm_hw_init,
2198 	.hw_fini = dm_hw_fini,
2199 	.suspend = dm_suspend,
2200 	.resume = dm_resume,
2201 	.is_idle = dm_is_idle,
2202 	.wait_for_idle = dm_wait_for_idle,
2203 	.check_soft_reset = dm_check_soft_reset,
2204 	.soft_reset = dm_soft_reset,
2205 	.set_clockgating_state = dm_set_clockgating_state,
2206 	.set_powergating_state = dm_set_powergating_state,
2207 };
2208 
2209 const struct amdgpu_ip_block_version dm_ip_block =
2210 {
2211 	.type = AMD_IP_BLOCK_TYPE_DCE,
2212 	.major = 1,
2213 	.minor = 0,
2214 	.rev = 0,
2215 	.funcs = &amdgpu_dm_funcs,
2216 };
2217 
2218 
2219 /**
2220  * DOC: atomic
2221  *
2222  * *WIP*
2223  */
2224 
2225 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2226 	.fb_create = amdgpu_display_user_framebuffer_create,
2227 	.get_format_info = amd_get_format_info,
2228 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2229 	.atomic_check = amdgpu_dm_atomic_check,
2230 	.atomic_commit = drm_atomic_helper_commit,
2231 };
2232 
2233 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2234 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2235 };
2236 
2237 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2238 {
2239 	u32 max_cll, min_cll, max, min, q, r;
2240 	struct amdgpu_dm_backlight_caps *caps;
2241 	struct amdgpu_display_manager *dm;
2242 	struct drm_connector *conn_base;
2243 	struct amdgpu_device *adev;
2244 	struct dc_link *link = NULL;
2245 	static const u8 pre_computed_values[] = {
2246 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2247 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2248 
2249 	if (!aconnector || !aconnector->dc_link)
2250 		return;
2251 
2252 	link = aconnector->dc_link;
2253 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2254 		return;
2255 
2256 	conn_base = &aconnector->base;
2257 	adev = drm_to_adev(conn_base->dev);
2258 	dm = &adev->dm;
2259 	caps = &dm->backlight_caps;
2260 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2261 	caps->aux_support = false;
2262 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2263 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2264 
2265 	if (caps->ext_caps->bits.oled == 1 ||
2266 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2267 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2268 		caps->aux_support = true;
2269 
2270 	/* From the specification (CTA-861-G), for calculating the maximum
2271 	 * luminance we need to use:
2272 	 *	Luminance = 50*2**(CV/32)
2273 	 * Where CV is a one-byte value.
2274 	 * For calculating this expression we may need float point precision;
2275 	 * to avoid this complexity level, we take advantage that CV is divided
2276 	 * by a constant. From the Euclids division algorithm, we know that CV
2277 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2278 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2279 	 * need to pre-compute the value of r/32. For pre-computing the values
2280 	 * We just used the following Ruby line:
2281 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2282 	 * The results of the above expressions can be verified at
2283 	 * pre_computed_values.
2284 	 */
2285 	q = max_cll >> 5;
2286 	r = max_cll % 32;
2287 	max = (1 << q) * pre_computed_values[r];
2288 
2289 	// min luminance: maxLum * (CV/255)^2 / 100
2290 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2291 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2292 
2293 	caps->aux_max_input_signal = max;
2294 	caps->aux_min_input_signal = min;
2295 }
2296 
2297 void amdgpu_dm_update_connector_after_detect(
2298 		struct amdgpu_dm_connector *aconnector)
2299 {
2300 	struct drm_connector *connector = &aconnector->base;
2301 	struct drm_device *dev = connector->dev;
2302 	struct dc_sink *sink;
2303 
2304 	/* MST handled by drm_mst framework */
2305 	if (aconnector->mst_mgr.mst_state == true)
2306 		return;
2307 
2308 	sink = aconnector->dc_link->local_sink;
2309 	if (sink)
2310 		dc_sink_retain(sink);
2311 
2312 	/*
2313 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2314 	 * the connector sink is set to either fake or physical sink depends on link status.
2315 	 * Skip if already done during boot.
2316 	 */
2317 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2318 			&& aconnector->dc_em_sink) {
2319 
2320 		/*
2321 		 * For S3 resume with headless use eml_sink to fake stream
2322 		 * because on resume connector->sink is set to NULL
2323 		 */
2324 		mutex_lock(&dev->mode_config.mutex);
2325 
2326 		if (sink) {
2327 			if (aconnector->dc_sink) {
2328 				amdgpu_dm_update_freesync_caps(connector, NULL);
2329 				/*
2330 				 * retain and release below are used to
2331 				 * bump up refcount for sink because the link doesn't point
2332 				 * to it anymore after disconnect, so on next crtc to connector
2333 				 * reshuffle by UMD we will get into unwanted dc_sink release
2334 				 */
2335 				dc_sink_release(aconnector->dc_sink);
2336 			}
2337 			aconnector->dc_sink = sink;
2338 			dc_sink_retain(aconnector->dc_sink);
2339 			amdgpu_dm_update_freesync_caps(connector,
2340 					aconnector->edid);
2341 		} else {
2342 			amdgpu_dm_update_freesync_caps(connector, NULL);
2343 			if (!aconnector->dc_sink) {
2344 				aconnector->dc_sink = aconnector->dc_em_sink;
2345 				dc_sink_retain(aconnector->dc_sink);
2346 			}
2347 		}
2348 
2349 		mutex_unlock(&dev->mode_config.mutex);
2350 
2351 		if (sink)
2352 			dc_sink_release(sink);
2353 		return;
2354 	}
2355 
2356 	/*
2357 	 * TODO: temporary guard to look for proper fix
2358 	 * if this sink is MST sink, we should not do anything
2359 	 */
2360 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2361 		dc_sink_release(sink);
2362 		return;
2363 	}
2364 
2365 	if (aconnector->dc_sink == sink) {
2366 		/*
2367 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2368 		 * Do nothing!!
2369 		 */
2370 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2371 				aconnector->connector_id);
2372 		if (sink)
2373 			dc_sink_release(sink);
2374 		return;
2375 	}
2376 
2377 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2378 		aconnector->connector_id, aconnector->dc_sink, sink);
2379 
2380 	mutex_lock(&dev->mode_config.mutex);
2381 
2382 	/*
2383 	 * 1. Update status of the drm connector
2384 	 * 2. Send an event and let userspace tell us what to do
2385 	 */
2386 	if (sink) {
2387 		/*
2388 		 * TODO: check if we still need the S3 mode update workaround.
2389 		 * If yes, put it here.
2390 		 */
2391 		if (aconnector->dc_sink) {
2392 			amdgpu_dm_update_freesync_caps(connector, NULL);
2393 			dc_sink_release(aconnector->dc_sink);
2394 		}
2395 
2396 		aconnector->dc_sink = sink;
2397 		dc_sink_retain(aconnector->dc_sink);
2398 		if (sink->dc_edid.length == 0) {
2399 			aconnector->edid = NULL;
2400 			if (aconnector->dc_link->aux_mode) {
2401 				drm_dp_cec_unset_edid(
2402 					&aconnector->dm_dp_aux.aux);
2403 			}
2404 		} else {
2405 			aconnector->edid =
2406 				(struct edid *)sink->dc_edid.raw_edid;
2407 
2408 			drm_connector_update_edid_property(connector,
2409 							   aconnector->edid);
2410 			if (aconnector->dc_link->aux_mode)
2411 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2412 						    aconnector->edid);
2413 		}
2414 
2415 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2416 		update_connector_ext_caps(aconnector);
2417 	} else {
2418 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2419 		amdgpu_dm_update_freesync_caps(connector, NULL);
2420 		drm_connector_update_edid_property(connector, NULL);
2421 		aconnector->num_modes = 0;
2422 		dc_sink_release(aconnector->dc_sink);
2423 		aconnector->dc_sink = NULL;
2424 		aconnector->edid = NULL;
2425 #ifdef CONFIG_DRM_AMD_DC_HDCP
2426 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2427 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2428 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2429 #endif
2430 	}
2431 
2432 	mutex_unlock(&dev->mode_config.mutex);
2433 
2434 	update_subconnector_property(aconnector);
2435 
2436 	if (sink)
2437 		dc_sink_release(sink);
2438 }
2439 
2440 static void handle_hpd_irq(void *param)
2441 {
2442 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2443 	struct drm_connector *connector = &aconnector->base;
2444 	struct drm_device *dev = connector->dev;
2445 	enum dc_connection_type new_connection_type = dc_connection_none;
2446 #ifdef CONFIG_DRM_AMD_DC_HDCP
2447 	struct amdgpu_device *adev = drm_to_adev(dev);
2448 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2449 #endif
2450 
2451 	/*
2452 	 * In case of failure or MST no need to update connector status or notify the OS
2453 	 * since (for MST case) MST does this in its own context.
2454 	 */
2455 	mutex_lock(&aconnector->hpd_lock);
2456 
2457 #ifdef CONFIG_DRM_AMD_DC_HDCP
2458 	if (adev->dm.hdcp_workqueue) {
2459 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2460 		dm_con_state->update_hdcp = true;
2461 	}
2462 #endif
2463 	if (aconnector->fake_enable)
2464 		aconnector->fake_enable = false;
2465 
2466 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2467 		DRM_ERROR("KMS: Failed to detect connector\n");
2468 
2469 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2470 		emulated_link_detect(aconnector->dc_link);
2471 
2472 
2473 		drm_modeset_lock_all(dev);
2474 		dm_restore_drm_connector_state(dev, connector);
2475 		drm_modeset_unlock_all(dev);
2476 
2477 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2478 			drm_kms_helper_hotplug_event(dev);
2479 
2480 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2481 		if (new_connection_type == dc_connection_none &&
2482 		    aconnector->dc_link->type == dc_connection_none)
2483 			dm_set_dpms_off(aconnector->dc_link);
2484 
2485 		amdgpu_dm_update_connector_after_detect(aconnector);
2486 
2487 		drm_modeset_lock_all(dev);
2488 		dm_restore_drm_connector_state(dev, connector);
2489 		drm_modeset_unlock_all(dev);
2490 
2491 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2492 			drm_kms_helper_hotplug_event(dev);
2493 	}
2494 	mutex_unlock(&aconnector->hpd_lock);
2495 
2496 }
2497 
2498 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2499 {
2500 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2501 	uint8_t dret;
2502 	bool new_irq_handled = false;
2503 	int dpcd_addr;
2504 	int dpcd_bytes_to_read;
2505 
2506 	const int max_process_count = 30;
2507 	int process_count = 0;
2508 
2509 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2510 
2511 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2512 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2513 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2514 		dpcd_addr = DP_SINK_COUNT;
2515 	} else {
2516 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2517 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2518 		dpcd_addr = DP_SINK_COUNT_ESI;
2519 	}
2520 
2521 	dret = drm_dp_dpcd_read(
2522 		&aconnector->dm_dp_aux.aux,
2523 		dpcd_addr,
2524 		esi,
2525 		dpcd_bytes_to_read);
2526 
2527 	while (dret == dpcd_bytes_to_read &&
2528 		process_count < max_process_count) {
2529 		uint8_t retry;
2530 		dret = 0;
2531 
2532 		process_count++;
2533 
2534 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2535 		/* handle HPD short pulse irq */
2536 		if (aconnector->mst_mgr.mst_state)
2537 			drm_dp_mst_hpd_irq(
2538 				&aconnector->mst_mgr,
2539 				esi,
2540 				&new_irq_handled);
2541 
2542 		if (new_irq_handled) {
2543 			/* ACK at DPCD to notify down stream */
2544 			const int ack_dpcd_bytes_to_write =
2545 				dpcd_bytes_to_read - 1;
2546 
2547 			for (retry = 0; retry < 3; retry++) {
2548 				uint8_t wret;
2549 
2550 				wret = drm_dp_dpcd_write(
2551 					&aconnector->dm_dp_aux.aux,
2552 					dpcd_addr + 1,
2553 					&esi[1],
2554 					ack_dpcd_bytes_to_write);
2555 				if (wret == ack_dpcd_bytes_to_write)
2556 					break;
2557 			}
2558 
2559 			/* check if there is new irq to be handled */
2560 			dret = drm_dp_dpcd_read(
2561 				&aconnector->dm_dp_aux.aux,
2562 				dpcd_addr,
2563 				esi,
2564 				dpcd_bytes_to_read);
2565 
2566 			new_irq_handled = false;
2567 		} else {
2568 			break;
2569 		}
2570 	}
2571 
2572 	if (process_count == max_process_count)
2573 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2574 }
2575 
2576 static void handle_hpd_rx_irq(void *param)
2577 {
2578 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2579 	struct drm_connector *connector = &aconnector->base;
2580 	struct drm_device *dev = connector->dev;
2581 	struct dc_link *dc_link = aconnector->dc_link;
2582 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2583 	bool result = false;
2584 	enum dc_connection_type new_connection_type = dc_connection_none;
2585 	struct amdgpu_device *adev = drm_to_adev(dev);
2586 	union hpd_irq_data hpd_irq_data;
2587 
2588 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2589 
2590 	/*
2591 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2592 	 * conflict, after implement i2c helper, this mutex should be
2593 	 * retired.
2594 	 */
2595 	if (dc_link->type != dc_connection_mst_branch)
2596 		mutex_lock(&aconnector->hpd_lock);
2597 
2598 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2599 
2600 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2601 		(dc_link->type == dc_connection_mst_branch)) {
2602 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2603 			result = true;
2604 			dm_handle_hpd_rx_irq(aconnector);
2605 			goto out;
2606 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2607 			result = false;
2608 			dm_handle_hpd_rx_irq(aconnector);
2609 			goto out;
2610 		}
2611 	}
2612 
2613 	mutex_lock(&adev->dm.dc_lock);
2614 #ifdef CONFIG_DRM_AMD_DC_HDCP
2615 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2616 #else
2617 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2618 #endif
2619 	mutex_unlock(&adev->dm.dc_lock);
2620 
2621 out:
2622 	if (result && !is_mst_root_connector) {
2623 		/* Downstream Port status changed. */
2624 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2625 			DRM_ERROR("KMS: Failed to detect connector\n");
2626 
2627 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2628 			emulated_link_detect(dc_link);
2629 
2630 			if (aconnector->fake_enable)
2631 				aconnector->fake_enable = false;
2632 
2633 			amdgpu_dm_update_connector_after_detect(aconnector);
2634 
2635 
2636 			drm_modeset_lock_all(dev);
2637 			dm_restore_drm_connector_state(dev, connector);
2638 			drm_modeset_unlock_all(dev);
2639 
2640 			drm_kms_helper_hotplug_event(dev);
2641 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2642 
2643 			if (aconnector->fake_enable)
2644 				aconnector->fake_enable = false;
2645 
2646 			amdgpu_dm_update_connector_after_detect(aconnector);
2647 
2648 
2649 			drm_modeset_lock_all(dev);
2650 			dm_restore_drm_connector_state(dev, connector);
2651 			drm_modeset_unlock_all(dev);
2652 
2653 			drm_kms_helper_hotplug_event(dev);
2654 		}
2655 	}
2656 #ifdef CONFIG_DRM_AMD_DC_HDCP
2657 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2658 		if (adev->dm.hdcp_workqueue)
2659 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2660 	}
2661 #endif
2662 
2663 	if (dc_link->type != dc_connection_mst_branch) {
2664 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2665 		mutex_unlock(&aconnector->hpd_lock);
2666 	}
2667 }
2668 
2669 static void register_hpd_handlers(struct amdgpu_device *adev)
2670 {
2671 	struct drm_device *dev = adev_to_drm(adev);
2672 	struct drm_connector *connector;
2673 	struct amdgpu_dm_connector *aconnector;
2674 	const struct dc_link *dc_link;
2675 	struct dc_interrupt_params int_params = {0};
2676 
2677 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2678 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2679 
2680 	list_for_each_entry(connector,
2681 			&dev->mode_config.connector_list, head)	{
2682 
2683 		aconnector = to_amdgpu_dm_connector(connector);
2684 		dc_link = aconnector->dc_link;
2685 
2686 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2687 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2688 			int_params.irq_source = dc_link->irq_source_hpd;
2689 
2690 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2691 					handle_hpd_irq,
2692 					(void *) aconnector);
2693 		}
2694 
2695 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2696 
2697 			/* Also register for DP short pulse (hpd_rx). */
2698 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2699 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2700 
2701 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2702 					handle_hpd_rx_irq,
2703 					(void *) aconnector);
2704 		}
2705 	}
2706 }
2707 
2708 #if defined(CONFIG_DRM_AMD_DC_SI)
2709 /* Register IRQ sources and initialize IRQ callbacks */
2710 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2711 {
2712 	struct dc *dc = adev->dm.dc;
2713 	struct common_irq_params *c_irq_params;
2714 	struct dc_interrupt_params int_params = {0};
2715 	int r;
2716 	int i;
2717 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2718 
2719 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2720 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2721 
2722 	/*
2723 	 * Actions of amdgpu_irq_add_id():
2724 	 * 1. Register a set() function with base driver.
2725 	 *    Base driver will call set() function to enable/disable an
2726 	 *    interrupt in DC hardware.
2727 	 * 2. Register amdgpu_dm_irq_handler().
2728 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2729 	 *    coming from DC hardware.
2730 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2731 	 *    for acknowledging and handling. */
2732 
2733 	/* Use VBLANK interrupt */
2734 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2735 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2736 		if (r) {
2737 			DRM_ERROR("Failed to add crtc irq id!\n");
2738 			return r;
2739 		}
2740 
2741 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2742 		int_params.irq_source =
2743 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2744 
2745 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2746 
2747 		c_irq_params->adev = adev;
2748 		c_irq_params->irq_src = int_params.irq_source;
2749 
2750 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2751 				dm_crtc_high_irq, c_irq_params);
2752 	}
2753 
2754 	/* Use GRPH_PFLIP interrupt */
2755 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2756 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2757 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2758 		if (r) {
2759 			DRM_ERROR("Failed to add page flip irq id!\n");
2760 			return r;
2761 		}
2762 
2763 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2764 		int_params.irq_source =
2765 			dc_interrupt_to_irq_source(dc, i, 0);
2766 
2767 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2768 
2769 		c_irq_params->adev = adev;
2770 		c_irq_params->irq_src = int_params.irq_source;
2771 
2772 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2773 				dm_pflip_high_irq, c_irq_params);
2774 
2775 	}
2776 
2777 	/* HPD */
2778 	r = amdgpu_irq_add_id(adev, client_id,
2779 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2780 	if (r) {
2781 		DRM_ERROR("Failed to add hpd irq id!\n");
2782 		return r;
2783 	}
2784 
2785 	register_hpd_handlers(adev);
2786 
2787 	return 0;
2788 }
2789 #endif
2790 
2791 /* Register IRQ sources and initialize IRQ callbacks */
2792 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2793 {
2794 	struct dc *dc = adev->dm.dc;
2795 	struct common_irq_params *c_irq_params;
2796 	struct dc_interrupt_params int_params = {0};
2797 	int r;
2798 	int i;
2799 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2800 
2801 	if (adev->asic_type >= CHIP_VEGA10)
2802 		client_id = SOC15_IH_CLIENTID_DCE;
2803 
2804 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2805 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2806 
2807 	/*
2808 	 * Actions of amdgpu_irq_add_id():
2809 	 * 1. Register a set() function with base driver.
2810 	 *    Base driver will call set() function to enable/disable an
2811 	 *    interrupt in DC hardware.
2812 	 * 2. Register amdgpu_dm_irq_handler().
2813 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2814 	 *    coming from DC hardware.
2815 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2816 	 *    for acknowledging and handling. */
2817 
2818 	/* Use VBLANK interrupt */
2819 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2820 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2821 		if (r) {
2822 			DRM_ERROR("Failed to add crtc irq id!\n");
2823 			return r;
2824 		}
2825 
2826 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2827 		int_params.irq_source =
2828 			dc_interrupt_to_irq_source(dc, i, 0);
2829 
2830 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2831 
2832 		c_irq_params->adev = adev;
2833 		c_irq_params->irq_src = int_params.irq_source;
2834 
2835 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2836 				dm_crtc_high_irq, c_irq_params);
2837 	}
2838 
2839 	/* Use VUPDATE interrupt */
2840 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2841 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2842 		if (r) {
2843 			DRM_ERROR("Failed to add vupdate irq id!\n");
2844 			return r;
2845 		}
2846 
2847 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2848 		int_params.irq_source =
2849 			dc_interrupt_to_irq_source(dc, i, 0);
2850 
2851 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2852 
2853 		c_irq_params->adev = adev;
2854 		c_irq_params->irq_src = int_params.irq_source;
2855 
2856 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2857 				dm_vupdate_high_irq, c_irq_params);
2858 	}
2859 
2860 	/* Use GRPH_PFLIP interrupt */
2861 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2862 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2863 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2864 		if (r) {
2865 			DRM_ERROR("Failed to add page flip irq id!\n");
2866 			return r;
2867 		}
2868 
2869 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2870 		int_params.irq_source =
2871 			dc_interrupt_to_irq_source(dc, i, 0);
2872 
2873 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2874 
2875 		c_irq_params->adev = adev;
2876 		c_irq_params->irq_src = int_params.irq_source;
2877 
2878 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2879 				dm_pflip_high_irq, c_irq_params);
2880 
2881 	}
2882 
2883 	/* HPD */
2884 	r = amdgpu_irq_add_id(adev, client_id,
2885 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2886 	if (r) {
2887 		DRM_ERROR("Failed to add hpd irq id!\n");
2888 		return r;
2889 	}
2890 
2891 	register_hpd_handlers(adev);
2892 
2893 	return 0;
2894 }
2895 
2896 #if defined(CONFIG_DRM_AMD_DC_DCN)
2897 /* Register IRQ sources and initialize IRQ callbacks */
2898 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2899 {
2900 	struct dc *dc = adev->dm.dc;
2901 	struct common_irq_params *c_irq_params;
2902 	struct dc_interrupt_params int_params = {0};
2903 	int r;
2904 	int i;
2905 
2906 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2907 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2908 
2909 	/*
2910 	 * Actions of amdgpu_irq_add_id():
2911 	 * 1. Register a set() function with base driver.
2912 	 *    Base driver will call set() function to enable/disable an
2913 	 *    interrupt in DC hardware.
2914 	 * 2. Register amdgpu_dm_irq_handler().
2915 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2916 	 *    coming from DC hardware.
2917 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2918 	 *    for acknowledging and handling.
2919 	 */
2920 
2921 	/* Use VSTARTUP interrupt */
2922 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2923 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2924 			i++) {
2925 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2926 
2927 		if (r) {
2928 			DRM_ERROR("Failed to add crtc irq id!\n");
2929 			return r;
2930 		}
2931 
2932 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2933 		int_params.irq_source =
2934 			dc_interrupt_to_irq_source(dc, i, 0);
2935 
2936 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2937 
2938 		c_irq_params->adev = adev;
2939 		c_irq_params->irq_src = int_params.irq_source;
2940 
2941 		amdgpu_dm_irq_register_interrupt(
2942 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2943 	}
2944 
2945 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2946 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2947 	 * to trigger at end of each vblank, regardless of state of the lock,
2948 	 * matching DCE behaviour.
2949 	 */
2950 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2951 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2952 	     i++) {
2953 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2954 
2955 		if (r) {
2956 			DRM_ERROR("Failed to add vupdate irq id!\n");
2957 			return r;
2958 		}
2959 
2960 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2961 		int_params.irq_source =
2962 			dc_interrupt_to_irq_source(dc, i, 0);
2963 
2964 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2965 
2966 		c_irq_params->adev = adev;
2967 		c_irq_params->irq_src = int_params.irq_source;
2968 
2969 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2970 				dm_vupdate_high_irq, c_irq_params);
2971 	}
2972 
2973 	/* Use GRPH_PFLIP interrupt */
2974 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2975 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2976 			i++) {
2977 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2978 		if (r) {
2979 			DRM_ERROR("Failed to add page flip irq id!\n");
2980 			return r;
2981 		}
2982 
2983 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2984 		int_params.irq_source =
2985 			dc_interrupt_to_irq_source(dc, i, 0);
2986 
2987 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2988 
2989 		c_irq_params->adev = adev;
2990 		c_irq_params->irq_src = int_params.irq_source;
2991 
2992 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2993 				dm_pflip_high_irq, c_irq_params);
2994 
2995 	}
2996 
2997 	/* HPD */
2998 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2999 			&adev->hpd_irq);
3000 	if (r) {
3001 		DRM_ERROR("Failed to add hpd irq id!\n");
3002 		return r;
3003 	}
3004 
3005 	register_hpd_handlers(adev);
3006 
3007 	return 0;
3008 }
3009 #endif
3010 
3011 /*
3012  * Acquires the lock for the atomic state object and returns
3013  * the new atomic state.
3014  *
3015  * This should only be called during atomic check.
3016  */
3017 static int dm_atomic_get_state(struct drm_atomic_state *state,
3018 			       struct dm_atomic_state **dm_state)
3019 {
3020 	struct drm_device *dev = state->dev;
3021 	struct amdgpu_device *adev = drm_to_adev(dev);
3022 	struct amdgpu_display_manager *dm = &adev->dm;
3023 	struct drm_private_state *priv_state;
3024 
3025 	if (*dm_state)
3026 		return 0;
3027 
3028 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3029 	if (IS_ERR(priv_state))
3030 		return PTR_ERR(priv_state);
3031 
3032 	*dm_state = to_dm_atomic_state(priv_state);
3033 
3034 	return 0;
3035 }
3036 
3037 static struct dm_atomic_state *
3038 dm_atomic_get_new_state(struct drm_atomic_state *state)
3039 {
3040 	struct drm_device *dev = state->dev;
3041 	struct amdgpu_device *adev = drm_to_adev(dev);
3042 	struct amdgpu_display_manager *dm = &adev->dm;
3043 	struct drm_private_obj *obj;
3044 	struct drm_private_state *new_obj_state;
3045 	int i;
3046 
3047 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3048 		if (obj->funcs == dm->atomic_obj.funcs)
3049 			return to_dm_atomic_state(new_obj_state);
3050 	}
3051 
3052 	return NULL;
3053 }
3054 
3055 static struct drm_private_state *
3056 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3057 {
3058 	struct dm_atomic_state *old_state, *new_state;
3059 
3060 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3061 	if (!new_state)
3062 		return NULL;
3063 
3064 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3065 
3066 	old_state = to_dm_atomic_state(obj->state);
3067 
3068 	if (old_state && old_state->context)
3069 		new_state->context = dc_copy_state(old_state->context);
3070 
3071 	if (!new_state->context) {
3072 		kfree(new_state);
3073 		return NULL;
3074 	}
3075 
3076 	return &new_state->base;
3077 }
3078 
3079 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3080 				    struct drm_private_state *state)
3081 {
3082 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3083 
3084 	if (dm_state && dm_state->context)
3085 		dc_release_state(dm_state->context);
3086 
3087 	kfree(dm_state);
3088 }
3089 
3090 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3091 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3092 	.atomic_destroy_state = dm_atomic_destroy_state,
3093 };
3094 
3095 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3096 {
3097 	struct dm_atomic_state *state;
3098 	int r;
3099 
3100 	adev->mode_info.mode_config_initialized = true;
3101 
3102 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3103 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3104 
3105 	adev_to_drm(adev)->mode_config.max_width = 16384;
3106 	adev_to_drm(adev)->mode_config.max_height = 16384;
3107 
3108 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3109 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3110 	/* indicates support for immediate flip */
3111 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3112 
3113 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3114 
3115 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3116 	if (!state)
3117 		return -ENOMEM;
3118 
3119 	state->context = dc_create_state(adev->dm.dc);
3120 	if (!state->context) {
3121 		kfree(state);
3122 		return -ENOMEM;
3123 	}
3124 
3125 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3126 
3127 	drm_atomic_private_obj_init(adev_to_drm(adev),
3128 				    &adev->dm.atomic_obj,
3129 				    &state->base,
3130 				    &dm_atomic_state_funcs);
3131 
3132 	r = amdgpu_display_modeset_create_props(adev);
3133 	if (r) {
3134 		dc_release_state(state->context);
3135 		kfree(state);
3136 		return r;
3137 	}
3138 
3139 	r = amdgpu_dm_audio_init(adev);
3140 	if (r) {
3141 		dc_release_state(state->context);
3142 		kfree(state);
3143 		return r;
3144 	}
3145 
3146 	return 0;
3147 }
3148 
3149 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3150 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3151 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3152 
3153 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3154 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3155 
3156 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3157 {
3158 #if defined(CONFIG_ACPI)
3159 	struct amdgpu_dm_backlight_caps caps;
3160 
3161 	memset(&caps, 0, sizeof(caps));
3162 
3163 	if (dm->backlight_caps.caps_valid)
3164 		return;
3165 
3166 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3167 	if (caps.caps_valid) {
3168 		dm->backlight_caps.caps_valid = true;
3169 		if (caps.aux_support)
3170 			return;
3171 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3172 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3173 	} else {
3174 		dm->backlight_caps.min_input_signal =
3175 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3176 		dm->backlight_caps.max_input_signal =
3177 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3178 	}
3179 #else
3180 	if (dm->backlight_caps.aux_support)
3181 		return;
3182 
3183 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3184 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3185 #endif
3186 }
3187 
3188 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3189 {
3190 	bool rc;
3191 
3192 	if (!link)
3193 		return 1;
3194 
3195 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3196 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3197 
3198 	return rc ? 0 : 1;
3199 }
3200 
3201 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3202 				unsigned *min, unsigned *max)
3203 {
3204 	if (!caps)
3205 		return 0;
3206 
3207 	if (caps->aux_support) {
3208 		// Firmware limits are in nits, DC API wants millinits.
3209 		*max = 1000 * caps->aux_max_input_signal;
3210 		*min = 1000 * caps->aux_min_input_signal;
3211 	} else {
3212 		// Firmware limits are 8-bit, PWM control is 16-bit.
3213 		*max = 0x101 * caps->max_input_signal;
3214 		*min = 0x101 * caps->min_input_signal;
3215 	}
3216 	return 1;
3217 }
3218 
3219 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3220 					uint32_t brightness)
3221 {
3222 	unsigned min, max;
3223 
3224 	if (!get_brightness_range(caps, &min, &max))
3225 		return brightness;
3226 
3227 	// Rescale 0..255 to min..max
3228 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3229 				       AMDGPU_MAX_BL_LEVEL);
3230 }
3231 
3232 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3233 				      uint32_t brightness)
3234 {
3235 	unsigned min, max;
3236 
3237 	if (!get_brightness_range(caps, &min, &max))
3238 		return brightness;
3239 
3240 	if (brightness < min)
3241 		return 0;
3242 	// Rescale min..max to 0..255
3243 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3244 				 max - min);
3245 }
3246 
3247 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3248 {
3249 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3250 	struct amdgpu_dm_backlight_caps caps;
3251 	struct dc_link *link = NULL;
3252 	u32 brightness;
3253 	bool rc;
3254 
3255 	amdgpu_dm_update_backlight_caps(dm);
3256 	caps = dm->backlight_caps;
3257 
3258 	link = (struct dc_link *)dm->backlight_link;
3259 
3260 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3261 	// Change brightness based on AUX property
3262 	if (caps.aux_support)
3263 		return set_backlight_via_aux(link, brightness);
3264 
3265 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3266 
3267 	return rc ? 0 : 1;
3268 }
3269 
3270 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3271 {
3272 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3273 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3274 
3275 	if (ret == DC_ERROR_UNEXPECTED)
3276 		return bd->props.brightness;
3277 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3278 }
3279 
3280 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3281 	.options = BL_CORE_SUSPENDRESUME,
3282 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3283 	.update_status	= amdgpu_dm_backlight_update_status,
3284 };
3285 
3286 static void
3287 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3288 {
3289 	char bl_name[16];
3290 	struct backlight_properties props = { 0 };
3291 
3292 	amdgpu_dm_update_backlight_caps(dm);
3293 
3294 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3295 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3296 	props.type = BACKLIGHT_RAW;
3297 
3298 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3299 		 adev_to_drm(dm->adev)->primary->index);
3300 
3301 	dm->backlight_dev = backlight_device_register(bl_name,
3302 						      adev_to_drm(dm->adev)->dev,
3303 						      dm,
3304 						      &amdgpu_dm_backlight_ops,
3305 						      &props);
3306 
3307 	if (IS_ERR(dm->backlight_dev))
3308 		DRM_ERROR("DM: Backlight registration failed!\n");
3309 	else
3310 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3311 }
3312 
3313 #endif
3314 
3315 static int initialize_plane(struct amdgpu_display_manager *dm,
3316 			    struct amdgpu_mode_info *mode_info, int plane_id,
3317 			    enum drm_plane_type plane_type,
3318 			    const struct dc_plane_cap *plane_cap)
3319 {
3320 	struct drm_plane *plane;
3321 	unsigned long possible_crtcs;
3322 	int ret = 0;
3323 
3324 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3325 	if (!plane) {
3326 		DRM_ERROR("KMS: Failed to allocate plane\n");
3327 		return -ENOMEM;
3328 	}
3329 	plane->type = plane_type;
3330 
3331 	/*
3332 	 * HACK: IGT tests expect that the primary plane for a CRTC
3333 	 * can only have one possible CRTC. Only expose support for
3334 	 * any CRTC if they're not going to be used as a primary plane
3335 	 * for a CRTC - like overlay or underlay planes.
3336 	 */
3337 	possible_crtcs = 1 << plane_id;
3338 	if (plane_id >= dm->dc->caps.max_streams)
3339 		possible_crtcs = 0xff;
3340 
3341 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3342 
3343 	if (ret) {
3344 		DRM_ERROR("KMS: Failed to initialize plane\n");
3345 		kfree(plane);
3346 		return ret;
3347 	}
3348 
3349 	if (mode_info)
3350 		mode_info->planes[plane_id] = plane;
3351 
3352 	return ret;
3353 }
3354 
3355 
3356 static void register_backlight_device(struct amdgpu_display_manager *dm,
3357 				      struct dc_link *link)
3358 {
3359 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3360 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3361 
3362 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3363 	    link->type != dc_connection_none) {
3364 		/*
3365 		 * Event if registration failed, we should continue with
3366 		 * DM initialization because not having a backlight control
3367 		 * is better then a black screen.
3368 		 */
3369 		amdgpu_dm_register_backlight_device(dm);
3370 
3371 		if (dm->backlight_dev)
3372 			dm->backlight_link = link;
3373 	}
3374 #endif
3375 }
3376 
3377 
3378 /*
3379  * In this architecture, the association
3380  * connector -> encoder -> crtc
3381  * id not really requried. The crtc and connector will hold the
3382  * display_index as an abstraction to use with DAL component
3383  *
3384  * Returns 0 on success
3385  */
3386 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3387 {
3388 	struct amdgpu_display_manager *dm = &adev->dm;
3389 	int32_t i;
3390 	struct amdgpu_dm_connector *aconnector = NULL;
3391 	struct amdgpu_encoder *aencoder = NULL;
3392 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3393 	uint32_t link_cnt;
3394 	int32_t primary_planes;
3395 	enum dc_connection_type new_connection_type = dc_connection_none;
3396 	const struct dc_plane_cap *plane;
3397 
3398 	dm->display_indexes_num = dm->dc->caps.max_streams;
3399 	/* Update the actual used number of crtc */
3400 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3401 
3402 	link_cnt = dm->dc->caps.max_links;
3403 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3404 		DRM_ERROR("DM: Failed to initialize mode config\n");
3405 		return -EINVAL;
3406 	}
3407 
3408 	/* There is one primary plane per CRTC */
3409 	primary_planes = dm->dc->caps.max_streams;
3410 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3411 
3412 	/*
3413 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3414 	 * Order is reversed to match iteration order in atomic check.
3415 	 */
3416 	for (i = (primary_planes - 1); i >= 0; i--) {
3417 		plane = &dm->dc->caps.planes[i];
3418 
3419 		if (initialize_plane(dm, mode_info, i,
3420 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3421 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3422 			goto fail;
3423 		}
3424 	}
3425 
3426 	/*
3427 	 * Initialize overlay planes, index starting after primary planes.
3428 	 * These planes have a higher DRM index than the primary planes since
3429 	 * they should be considered as having a higher z-order.
3430 	 * Order is reversed to match iteration order in atomic check.
3431 	 *
3432 	 * Only support DCN for now, and only expose one so we don't encourage
3433 	 * userspace to use up all the pipes.
3434 	 */
3435 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3436 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3437 
3438 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3439 			continue;
3440 
3441 		if (!plane->blends_with_above || !plane->blends_with_below)
3442 			continue;
3443 
3444 		if (!plane->pixel_format_support.argb8888)
3445 			continue;
3446 
3447 		if (initialize_plane(dm, NULL, primary_planes + i,
3448 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3449 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3450 			goto fail;
3451 		}
3452 
3453 		/* Only create one overlay plane. */
3454 		break;
3455 	}
3456 
3457 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3458 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3459 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3460 			goto fail;
3461 		}
3462 
3463 	/* loops over all connectors on the board */
3464 	for (i = 0; i < link_cnt; i++) {
3465 		struct dc_link *link = NULL;
3466 
3467 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3468 			DRM_ERROR(
3469 				"KMS: Cannot support more than %d display indexes\n",
3470 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3471 			continue;
3472 		}
3473 
3474 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3475 		if (!aconnector)
3476 			goto fail;
3477 
3478 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3479 		if (!aencoder)
3480 			goto fail;
3481 
3482 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3483 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3484 			goto fail;
3485 		}
3486 
3487 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3488 			DRM_ERROR("KMS: Failed to initialize connector\n");
3489 			goto fail;
3490 		}
3491 
3492 		link = dc_get_link_at_index(dm->dc, i);
3493 
3494 		if (!dc_link_detect_sink(link, &new_connection_type))
3495 			DRM_ERROR("KMS: Failed to detect connector\n");
3496 
3497 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3498 			emulated_link_detect(link);
3499 			amdgpu_dm_update_connector_after_detect(aconnector);
3500 
3501 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3502 			amdgpu_dm_update_connector_after_detect(aconnector);
3503 			register_backlight_device(dm, link);
3504 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3505 				amdgpu_dm_set_psr_caps(link);
3506 		}
3507 
3508 
3509 	}
3510 
3511 	/* Software is initialized. Now we can register interrupt handlers. */
3512 	switch (adev->asic_type) {
3513 #if defined(CONFIG_DRM_AMD_DC_SI)
3514 	case CHIP_TAHITI:
3515 	case CHIP_PITCAIRN:
3516 	case CHIP_VERDE:
3517 	case CHIP_OLAND:
3518 		if (dce60_register_irq_handlers(dm->adev)) {
3519 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3520 			goto fail;
3521 		}
3522 		break;
3523 #endif
3524 	case CHIP_BONAIRE:
3525 	case CHIP_HAWAII:
3526 	case CHIP_KAVERI:
3527 	case CHIP_KABINI:
3528 	case CHIP_MULLINS:
3529 	case CHIP_TONGA:
3530 	case CHIP_FIJI:
3531 	case CHIP_CARRIZO:
3532 	case CHIP_STONEY:
3533 	case CHIP_POLARIS11:
3534 	case CHIP_POLARIS10:
3535 	case CHIP_POLARIS12:
3536 	case CHIP_VEGAM:
3537 	case CHIP_VEGA10:
3538 	case CHIP_VEGA12:
3539 	case CHIP_VEGA20:
3540 		if (dce110_register_irq_handlers(dm->adev)) {
3541 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3542 			goto fail;
3543 		}
3544 		break;
3545 #if defined(CONFIG_DRM_AMD_DC_DCN)
3546 	case CHIP_RAVEN:
3547 	case CHIP_NAVI12:
3548 	case CHIP_NAVI10:
3549 	case CHIP_NAVI14:
3550 	case CHIP_RENOIR:
3551 	case CHIP_SIENNA_CICHLID:
3552 	case CHIP_NAVY_FLOUNDER:
3553 	case CHIP_DIMGREY_CAVEFISH:
3554 	case CHIP_VANGOGH:
3555 		if (dcn10_register_irq_handlers(dm->adev)) {
3556 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3557 			goto fail;
3558 		}
3559 		break;
3560 #endif
3561 	default:
3562 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3563 		goto fail;
3564 	}
3565 
3566 	return 0;
3567 fail:
3568 	kfree(aencoder);
3569 	kfree(aconnector);
3570 
3571 	return -EINVAL;
3572 }
3573 
3574 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3575 {
3576 	drm_mode_config_cleanup(dm->ddev);
3577 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3578 	return;
3579 }
3580 
3581 /******************************************************************************
3582  * amdgpu_display_funcs functions
3583  *****************************************************************************/
3584 
3585 /*
3586  * dm_bandwidth_update - program display watermarks
3587  *
3588  * @adev: amdgpu_device pointer
3589  *
3590  * Calculate and program the display watermarks and line buffer allocation.
3591  */
3592 static void dm_bandwidth_update(struct amdgpu_device *adev)
3593 {
3594 	/* TODO: implement later */
3595 }
3596 
3597 static const struct amdgpu_display_funcs dm_display_funcs = {
3598 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3599 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3600 	.backlight_set_level = NULL, /* never called for DC */
3601 	.backlight_get_level = NULL, /* never called for DC */
3602 	.hpd_sense = NULL,/* called unconditionally */
3603 	.hpd_set_polarity = NULL, /* called unconditionally */
3604 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3605 	.page_flip_get_scanoutpos =
3606 		dm_crtc_get_scanoutpos,/* called unconditionally */
3607 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3608 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3609 };
3610 
3611 #if defined(CONFIG_DEBUG_KERNEL_DC)
3612 
3613 static ssize_t s3_debug_store(struct device *device,
3614 			      struct device_attribute *attr,
3615 			      const char *buf,
3616 			      size_t count)
3617 {
3618 	int ret;
3619 	int s3_state;
3620 	struct drm_device *drm_dev = dev_get_drvdata(device);
3621 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3622 
3623 	ret = kstrtoint(buf, 0, &s3_state);
3624 
3625 	if (ret == 0) {
3626 		if (s3_state) {
3627 			dm_resume(adev);
3628 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3629 		} else
3630 			dm_suspend(adev);
3631 	}
3632 
3633 	return ret == 0 ? count : 0;
3634 }
3635 
3636 DEVICE_ATTR_WO(s3_debug);
3637 
3638 #endif
3639 
3640 static int dm_early_init(void *handle)
3641 {
3642 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3643 
3644 	switch (adev->asic_type) {
3645 #if defined(CONFIG_DRM_AMD_DC_SI)
3646 	case CHIP_TAHITI:
3647 	case CHIP_PITCAIRN:
3648 	case CHIP_VERDE:
3649 		adev->mode_info.num_crtc = 6;
3650 		adev->mode_info.num_hpd = 6;
3651 		adev->mode_info.num_dig = 6;
3652 		break;
3653 	case CHIP_OLAND:
3654 		adev->mode_info.num_crtc = 2;
3655 		adev->mode_info.num_hpd = 2;
3656 		adev->mode_info.num_dig = 2;
3657 		break;
3658 #endif
3659 	case CHIP_BONAIRE:
3660 	case CHIP_HAWAII:
3661 		adev->mode_info.num_crtc = 6;
3662 		adev->mode_info.num_hpd = 6;
3663 		adev->mode_info.num_dig = 6;
3664 		break;
3665 	case CHIP_KAVERI:
3666 		adev->mode_info.num_crtc = 4;
3667 		adev->mode_info.num_hpd = 6;
3668 		adev->mode_info.num_dig = 7;
3669 		break;
3670 	case CHIP_KABINI:
3671 	case CHIP_MULLINS:
3672 		adev->mode_info.num_crtc = 2;
3673 		adev->mode_info.num_hpd = 6;
3674 		adev->mode_info.num_dig = 6;
3675 		break;
3676 	case CHIP_FIJI:
3677 	case CHIP_TONGA:
3678 		adev->mode_info.num_crtc = 6;
3679 		adev->mode_info.num_hpd = 6;
3680 		adev->mode_info.num_dig = 7;
3681 		break;
3682 	case CHIP_CARRIZO:
3683 		adev->mode_info.num_crtc = 3;
3684 		adev->mode_info.num_hpd = 6;
3685 		adev->mode_info.num_dig = 9;
3686 		break;
3687 	case CHIP_STONEY:
3688 		adev->mode_info.num_crtc = 2;
3689 		adev->mode_info.num_hpd = 6;
3690 		adev->mode_info.num_dig = 9;
3691 		break;
3692 	case CHIP_POLARIS11:
3693 	case CHIP_POLARIS12:
3694 		adev->mode_info.num_crtc = 5;
3695 		adev->mode_info.num_hpd = 5;
3696 		adev->mode_info.num_dig = 5;
3697 		break;
3698 	case CHIP_POLARIS10:
3699 	case CHIP_VEGAM:
3700 		adev->mode_info.num_crtc = 6;
3701 		adev->mode_info.num_hpd = 6;
3702 		adev->mode_info.num_dig = 6;
3703 		break;
3704 	case CHIP_VEGA10:
3705 	case CHIP_VEGA12:
3706 	case CHIP_VEGA20:
3707 		adev->mode_info.num_crtc = 6;
3708 		adev->mode_info.num_hpd = 6;
3709 		adev->mode_info.num_dig = 6;
3710 		break;
3711 #if defined(CONFIG_DRM_AMD_DC_DCN)
3712 	case CHIP_RAVEN:
3713 	case CHIP_RENOIR:
3714 	case CHIP_VANGOGH:
3715 		adev->mode_info.num_crtc = 4;
3716 		adev->mode_info.num_hpd = 4;
3717 		adev->mode_info.num_dig = 4;
3718 		break;
3719 	case CHIP_NAVI10:
3720 	case CHIP_NAVI12:
3721 	case CHIP_SIENNA_CICHLID:
3722 	case CHIP_NAVY_FLOUNDER:
3723 		adev->mode_info.num_crtc = 6;
3724 		adev->mode_info.num_hpd = 6;
3725 		adev->mode_info.num_dig = 6;
3726 		break;
3727 	case CHIP_NAVI14:
3728 	case CHIP_DIMGREY_CAVEFISH:
3729 		adev->mode_info.num_crtc = 5;
3730 		adev->mode_info.num_hpd = 5;
3731 		adev->mode_info.num_dig = 5;
3732 		break;
3733 #endif
3734 	default:
3735 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3736 		return -EINVAL;
3737 	}
3738 
3739 	amdgpu_dm_set_irq_funcs(adev);
3740 
3741 	if (adev->mode_info.funcs == NULL)
3742 		adev->mode_info.funcs = &dm_display_funcs;
3743 
3744 	/*
3745 	 * Note: Do NOT change adev->audio_endpt_rreg and
3746 	 * adev->audio_endpt_wreg because they are initialised in
3747 	 * amdgpu_device_init()
3748 	 */
3749 #if defined(CONFIG_DEBUG_KERNEL_DC)
3750 	device_create_file(
3751 		adev_to_drm(adev)->dev,
3752 		&dev_attr_s3_debug);
3753 #endif
3754 
3755 	return 0;
3756 }
3757 
3758 static bool modeset_required(struct drm_crtc_state *crtc_state,
3759 			     struct dc_stream_state *new_stream,
3760 			     struct dc_stream_state *old_stream)
3761 {
3762 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3763 }
3764 
3765 static bool modereset_required(struct drm_crtc_state *crtc_state)
3766 {
3767 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3768 }
3769 
3770 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3771 {
3772 	drm_encoder_cleanup(encoder);
3773 	kfree(encoder);
3774 }
3775 
3776 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3777 	.destroy = amdgpu_dm_encoder_destroy,
3778 };
3779 
3780 
3781 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3782 					 struct drm_framebuffer *fb,
3783 					 int *min_downscale, int *max_upscale)
3784 {
3785 	struct amdgpu_device *adev = drm_to_adev(dev);
3786 	struct dc *dc = adev->dm.dc;
3787 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3788 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3789 
3790 	switch (fb->format->format) {
3791 	case DRM_FORMAT_P010:
3792 	case DRM_FORMAT_NV12:
3793 	case DRM_FORMAT_NV21:
3794 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3795 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3796 		break;
3797 
3798 	case DRM_FORMAT_XRGB16161616F:
3799 	case DRM_FORMAT_ARGB16161616F:
3800 	case DRM_FORMAT_XBGR16161616F:
3801 	case DRM_FORMAT_ABGR16161616F:
3802 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3803 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3804 		break;
3805 
3806 	default:
3807 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3808 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3809 		break;
3810 	}
3811 
3812 	/*
3813 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
3814 	 * scaling factor of 1.0 == 1000 units.
3815 	 */
3816 	if (*max_upscale == 1)
3817 		*max_upscale = 1000;
3818 
3819 	if (*min_downscale == 1)
3820 		*min_downscale = 1000;
3821 }
3822 
3823 
3824 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3825 				struct dc_scaling_info *scaling_info)
3826 {
3827 	int scale_w, scale_h, min_downscale, max_upscale;
3828 
3829 	memset(scaling_info, 0, sizeof(*scaling_info));
3830 
3831 	/* Source is fixed 16.16 but we ignore mantissa for now... */
3832 	scaling_info->src_rect.x = state->src_x >> 16;
3833 	scaling_info->src_rect.y = state->src_y >> 16;
3834 
3835 	scaling_info->src_rect.width = state->src_w >> 16;
3836 	if (scaling_info->src_rect.width == 0)
3837 		return -EINVAL;
3838 
3839 	scaling_info->src_rect.height = state->src_h >> 16;
3840 	if (scaling_info->src_rect.height == 0)
3841 		return -EINVAL;
3842 
3843 	scaling_info->dst_rect.x = state->crtc_x;
3844 	scaling_info->dst_rect.y = state->crtc_y;
3845 
3846 	if (state->crtc_w == 0)
3847 		return -EINVAL;
3848 
3849 	scaling_info->dst_rect.width = state->crtc_w;
3850 
3851 	if (state->crtc_h == 0)
3852 		return -EINVAL;
3853 
3854 	scaling_info->dst_rect.height = state->crtc_h;
3855 
3856 	/* DRM doesn't specify clipping on destination output. */
3857 	scaling_info->clip_rect = scaling_info->dst_rect;
3858 
3859 	/* Validate scaling per-format with DC plane caps */
3860 	if (state->plane && state->plane->dev && state->fb) {
3861 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3862 					     &min_downscale, &max_upscale);
3863 	} else {
3864 		min_downscale = 250;
3865 		max_upscale = 16000;
3866 	}
3867 
3868 	scale_w = scaling_info->dst_rect.width * 1000 /
3869 		  scaling_info->src_rect.width;
3870 
3871 	if (scale_w < min_downscale || scale_w > max_upscale)
3872 		return -EINVAL;
3873 
3874 	scale_h = scaling_info->dst_rect.height * 1000 /
3875 		  scaling_info->src_rect.height;
3876 
3877 	if (scale_h < min_downscale || scale_h > max_upscale)
3878 		return -EINVAL;
3879 
3880 	/*
3881 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3882 	 * assume reasonable defaults based on the format.
3883 	 */
3884 
3885 	return 0;
3886 }
3887 
3888 static void
3889 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3890 				 uint64_t tiling_flags)
3891 {
3892 	/* Fill GFX8 params */
3893 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3894 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3895 
3896 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3897 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3898 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3899 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3900 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3901 
3902 		/* XXX fix me for VI */
3903 		tiling_info->gfx8.num_banks = num_banks;
3904 		tiling_info->gfx8.array_mode =
3905 				DC_ARRAY_2D_TILED_THIN1;
3906 		tiling_info->gfx8.tile_split = tile_split;
3907 		tiling_info->gfx8.bank_width = bankw;
3908 		tiling_info->gfx8.bank_height = bankh;
3909 		tiling_info->gfx8.tile_aspect = mtaspect;
3910 		tiling_info->gfx8.tile_mode =
3911 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3912 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3913 			== DC_ARRAY_1D_TILED_THIN1) {
3914 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3915 	}
3916 
3917 	tiling_info->gfx8.pipe_config =
3918 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3919 }
3920 
3921 static void
3922 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3923 				  union dc_tiling_info *tiling_info)
3924 {
3925 	tiling_info->gfx9.num_pipes =
3926 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3927 	tiling_info->gfx9.num_banks =
3928 		adev->gfx.config.gb_addr_config_fields.num_banks;
3929 	tiling_info->gfx9.pipe_interleave =
3930 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3931 	tiling_info->gfx9.num_shader_engines =
3932 		adev->gfx.config.gb_addr_config_fields.num_se;
3933 	tiling_info->gfx9.max_compressed_frags =
3934 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3935 	tiling_info->gfx9.num_rb_per_se =
3936 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3937 	tiling_info->gfx9.shaderEnable = 1;
3938 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3939 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3940 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3941 	    adev->asic_type == CHIP_VANGOGH)
3942 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3943 }
3944 
3945 static int
3946 validate_dcc(struct amdgpu_device *adev,
3947 	     const enum surface_pixel_format format,
3948 	     const enum dc_rotation_angle rotation,
3949 	     const union dc_tiling_info *tiling_info,
3950 	     const struct dc_plane_dcc_param *dcc,
3951 	     const struct dc_plane_address *address,
3952 	     const struct plane_size *plane_size)
3953 {
3954 	struct dc *dc = adev->dm.dc;
3955 	struct dc_dcc_surface_param input;
3956 	struct dc_surface_dcc_cap output;
3957 
3958 	memset(&input, 0, sizeof(input));
3959 	memset(&output, 0, sizeof(output));
3960 
3961 	if (!dcc->enable)
3962 		return 0;
3963 
3964 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3965 	    !dc->cap_funcs.get_dcc_compression_cap)
3966 		return -EINVAL;
3967 
3968 	input.format = format;
3969 	input.surface_size.width = plane_size->surface_size.width;
3970 	input.surface_size.height = plane_size->surface_size.height;
3971 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3972 
3973 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3974 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3975 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3976 		input.scan = SCAN_DIRECTION_VERTICAL;
3977 
3978 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3979 		return -EINVAL;
3980 
3981 	if (!output.capable)
3982 		return -EINVAL;
3983 
3984 	if (dcc->independent_64b_blks == 0 &&
3985 	    output.grph.rgb.independent_64b_blks != 0)
3986 		return -EINVAL;
3987 
3988 	return 0;
3989 }
3990 
3991 static bool
3992 modifier_has_dcc(uint64_t modifier)
3993 {
3994 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3995 }
3996 
3997 static unsigned
3998 modifier_gfx9_swizzle_mode(uint64_t modifier)
3999 {
4000 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4001 		return 0;
4002 
4003 	return AMD_FMT_MOD_GET(TILE, modifier);
4004 }
4005 
4006 static const struct drm_format_info *
4007 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4008 {
4009 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4010 }
4011 
4012 static void
4013 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4014 				    union dc_tiling_info *tiling_info,
4015 				    uint64_t modifier)
4016 {
4017 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4018 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4019 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4020 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4021 
4022 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4023 
4024 	if (!IS_AMD_FMT_MOD(modifier))
4025 		return;
4026 
4027 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4028 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4029 
4030 	if (adev->family >= AMDGPU_FAMILY_NV) {
4031 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4032 	} else {
4033 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4034 
4035 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4036 	}
4037 }
4038 
4039 enum dm_micro_swizzle {
4040 	MICRO_SWIZZLE_Z = 0,
4041 	MICRO_SWIZZLE_S = 1,
4042 	MICRO_SWIZZLE_D = 2,
4043 	MICRO_SWIZZLE_R = 3
4044 };
4045 
4046 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4047 					  uint32_t format,
4048 					  uint64_t modifier)
4049 {
4050 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4051 	const struct drm_format_info *info = drm_format_info(format);
4052 
4053 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4054 
4055 	if (!info)
4056 		return false;
4057 
4058 	/*
4059 	 * We always have to allow this modifier, because core DRM still
4060 	 * checks LINEAR support if userspace does not provide modifers.
4061 	 */
4062 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4063 		return true;
4064 
4065 	/*
4066 	 * The arbitrary tiling support for multiplane formats has not been hooked
4067 	 * up.
4068 	 */
4069 	if (info->num_planes > 1)
4070 		return false;
4071 
4072 	/*
4073 	 * For D swizzle the canonical modifier depends on the bpp, so check
4074 	 * it here.
4075 	 */
4076 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4077 	    adev->family >= AMDGPU_FAMILY_NV) {
4078 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4079 			return false;
4080 	}
4081 
4082 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4083 	    info->cpp[0] < 8)
4084 		return false;
4085 
4086 	if (modifier_has_dcc(modifier)) {
4087 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4088 		if (info->cpp[0] != 4)
4089 			return false;
4090 	}
4091 
4092 	return true;
4093 }
4094 
4095 static void
4096 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4097 {
4098 	if (!*mods)
4099 		return;
4100 
4101 	if (*cap - *size < 1) {
4102 		uint64_t new_cap = *cap * 2;
4103 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4104 
4105 		if (!new_mods) {
4106 			kfree(*mods);
4107 			*mods = NULL;
4108 			return;
4109 		}
4110 
4111 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4112 		kfree(*mods);
4113 		*mods = new_mods;
4114 		*cap = new_cap;
4115 	}
4116 
4117 	(*mods)[*size] = mod;
4118 	*size += 1;
4119 }
4120 
4121 static void
4122 add_gfx9_modifiers(const struct amdgpu_device *adev,
4123 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4124 {
4125 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4126 	int pipe_xor_bits = min(8, pipes +
4127 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4128 	int bank_xor_bits = min(8 - pipe_xor_bits,
4129 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4130 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4131 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4132 
4133 
4134 	if (adev->family == AMDGPU_FAMILY_RV) {
4135 		/* Raven2 and later */
4136 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4137 
4138 		/*
4139 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4140 		 * doesn't support _D on DCN
4141 		 */
4142 
4143 		if (has_constant_encode) {
4144 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4145 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4146 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4147 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4148 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4149 				    AMD_FMT_MOD_SET(DCC, 1) |
4150 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4151 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4152 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4153 		}
4154 
4155 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4156 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4157 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4158 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4159 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4160 			    AMD_FMT_MOD_SET(DCC, 1) |
4161 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4162 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4163 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4164 
4165 		if (has_constant_encode) {
4166 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4167 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4168 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4169 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4170 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4171 				    AMD_FMT_MOD_SET(DCC, 1) |
4172 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4173 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4174 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4175 
4176 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4177 				    AMD_FMT_MOD_SET(RB, rb) |
4178 				    AMD_FMT_MOD_SET(PIPE, pipes));
4179 		}
4180 
4181 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4182 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4183 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4184 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4185 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4186 			    AMD_FMT_MOD_SET(DCC, 1) |
4187 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4188 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4189 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4190 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4191 			    AMD_FMT_MOD_SET(RB, rb) |
4192 			    AMD_FMT_MOD_SET(PIPE, pipes));
4193 	}
4194 
4195 	/*
4196 	 * Only supported for 64bpp on Raven, will be filtered on format in
4197 	 * dm_plane_format_mod_supported.
4198 	 */
4199 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4200 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4201 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4202 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4203 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4204 
4205 	if (adev->family == AMDGPU_FAMILY_RV) {
4206 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4207 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4208 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4209 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4210 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4211 	}
4212 
4213 	/*
4214 	 * Only supported for 64bpp on Raven, will be filtered on format in
4215 	 * dm_plane_format_mod_supported.
4216 	 */
4217 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4218 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4219 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4220 
4221 	if (adev->family == AMDGPU_FAMILY_RV) {
4222 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4223 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4224 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4225 	}
4226 }
4227 
4228 static void
4229 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4230 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4231 {
4232 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4233 
4234 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4235 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4236 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4237 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4238 		    AMD_FMT_MOD_SET(DCC, 1) |
4239 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4240 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4241 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4242 
4243 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4244 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4245 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4246 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4247 		    AMD_FMT_MOD_SET(DCC, 1) |
4248 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4249 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4250 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4251 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4252 
4253 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4254 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4255 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4256 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4257 
4258 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4259 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4260 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4261 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4262 
4263 
4264 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4265 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4266 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4267 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4268 
4269 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4270 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4271 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4272 }
4273 
4274 static void
4275 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4276 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4277 {
4278 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4279 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4280 
4281 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4282 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4283 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4284 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4285 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4286 		    AMD_FMT_MOD_SET(DCC, 1) |
4287 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4288 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4289 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4290 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4291 
4292 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4293 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4294 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4295 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4296 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4297 		    AMD_FMT_MOD_SET(DCC, 1) |
4298 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4299 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4300 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4301 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4302 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4303 
4304 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4305 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4306 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4307 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4308 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4309 
4310 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4311 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4312 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4313 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4314 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4315 
4316 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4317 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4318 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4319 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4320 
4321 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4322 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4323 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4324 }
4325 
4326 static int
4327 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4328 {
4329 	uint64_t size = 0, capacity = 128;
4330 	*mods = NULL;
4331 
4332 	/* We have not hooked up any pre-GFX9 modifiers. */
4333 	if (adev->family < AMDGPU_FAMILY_AI)
4334 		return 0;
4335 
4336 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4337 
4338 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4339 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4340 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4341 		return *mods ? 0 : -ENOMEM;
4342 	}
4343 
4344 	switch (adev->family) {
4345 	case AMDGPU_FAMILY_AI:
4346 	case AMDGPU_FAMILY_RV:
4347 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4348 		break;
4349 	case AMDGPU_FAMILY_NV:
4350 	case AMDGPU_FAMILY_VGH:
4351 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4352 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4353 		else
4354 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4355 		break;
4356 	}
4357 
4358 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4359 
4360 	/* INVALID marks the end of the list. */
4361 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4362 
4363 	if (!*mods)
4364 		return -ENOMEM;
4365 
4366 	return 0;
4367 }
4368 
4369 static int
4370 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4371 					  const struct amdgpu_framebuffer *afb,
4372 					  const enum surface_pixel_format format,
4373 					  const enum dc_rotation_angle rotation,
4374 					  const struct plane_size *plane_size,
4375 					  union dc_tiling_info *tiling_info,
4376 					  struct dc_plane_dcc_param *dcc,
4377 					  struct dc_plane_address *address,
4378 					  const bool force_disable_dcc)
4379 {
4380 	const uint64_t modifier = afb->base.modifier;
4381 	int ret;
4382 
4383 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4384 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4385 
4386 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4387 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4388 
4389 		dcc->enable = 1;
4390 		dcc->meta_pitch = afb->base.pitches[1];
4391 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4392 
4393 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4394 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4395 	}
4396 
4397 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4398 	if (ret)
4399 		return ret;
4400 
4401 	return 0;
4402 }
4403 
4404 static int
4405 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4406 			     const struct amdgpu_framebuffer *afb,
4407 			     const enum surface_pixel_format format,
4408 			     const enum dc_rotation_angle rotation,
4409 			     const uint64_t tiling_flags,
4410 			     union dc_tiling_info *tiling_info,
4411 			     struct plane_size *plane_size,
4412 			     struct dc_plane_dcc_param *dcc,
4413 			     struct dc_plane_address *address,
4414 			     bool tmz_surface,
4415 			     bool force_disable_dcc)
4416 {
4417 	const struct drm_framebuffer *fb = &afb->base;
4418 	int ret;
4419 
4420 	memset(tiling_info, 0, sizeof(*tiling_info));
4421 	memset(plane_size, 0, sizeof(*plane_size));
4422 	memset(dcc, 0, sizeof(*dcc));
4423 	memset(address, 0, sizeof(*address));
4424 
4425 	address->tmz_surface = tmz_surface;
4426 
4427 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4428 		uint64_t addr = afb->address + fb->offsets[0];
4429 
4430 		plane_size->surface_size.x = 0;
4431 		plane_size->surface_size.y = 0;
4432 		plane_size->surface_size.width = fb->width;
4433 		plane_size->surface_size.height = fb->height;
4434 		plane_size->surface_pitch =
4435 			fb->pitches[0] / fb->format->cpp[0];
4436 
4437 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4438 		address->grph.addr.low_part = lower_32_bits(addr);
4439 		address->grph.addr.high_part = upper_32_bits(addr);
4440 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4441 		uint64_t luma_addr = afb->address + fb->offsets[0];
4442 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4443 
4444 		plane_size->surface_size.x = 0;
4445 		plane_size->surface_size.y = 0;
4446 		plane_size->surface_size.width = fb->width;
4447 		plane_size->surface_size.height = fb->height;
4448 		plane_size->surface_pitch =
4449 			fb->pitches[0] / fb->format->cpp[0];
4450 
4451 		plane_size->chroma_size.x = 0;
4452 		plane_size->chroma_size.y = 0;
4453 		/* TODO: set these based on surface format */
4454 		plane_size->chroma_size.width = fb->width / 2;
4455 		plane_size->chroma_size.height = fb->height / 2;
4456 
4457 		plane_size->chroma_pitch =
4458 			fb->pitches[1] / fb->format->cpp[1];
4459 
4460 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4461 		address->video_progressive.luma_addr.low_part =
4462 			lower_32_bits(luma_addr);
4463 		address->video_progressive.luma_addr.high_part =
4464 			upper_32_bits(luma_addr);
4465 		address->video_progressive.chroma_addr.low_part =
4466 			lower_32_bits(chroma_addr);
4467 		address->video_progressive.chroma_addr.high_part =
4468 			upper_32_bits(chroma_addr);
4469 	}
4470 
4471 	if (adev->family >= AMDGPU_FAMILY_AI) {
4472 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4473 								rotation, plane_size,
4474 								tiling_info, dcc,
4475 								address,
4476 								force_disable_dcc);
4477 		if (ret)
4478 			return ret;
4479 	} else {
4480 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4481 	}
4482 
4483 	return 0;
4484 }
4485 
4486 static void
4487 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4488 			       bool *per_pixel_alpha, bool *global_alpha,
4489 			       int *global_alpha_value)
4490 {
4491 	*per_pixel_alpha = false;
4492 	*global_alpha = false;
4493 	*global_alpha_value = 0xff;
4494 
4495 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4496 		return;
4497 
4498 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4499 		static const uint32_t alpha_formats[] = {
4500 			DRM_FORMAT_ARGB8888,
4501 			DRM_FORMAT_RGBA8888,
4502 			DRM_FORMAT_ABGR8888,
4503 		};
4504 		uint32_t format = plane_state->fb->format->format;
4505 		unsigned int i;
4506 
4507 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4508 			if (format == alpha_formats[i]) {
4509 				*per_pixel_alpha = true;
4510 				break;
4511 			}
4512 		}
4513 	}
4514 
4515 	if (plane_state->alpha < 0xffff) {
4516 		*global_alpha = true;
4517 		*global_alpha_value = plane_state->alpha >> 8;
4518 	}
4519 }
4520 
4521 static int
4522 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4523 			    const enum surface_pixel_format format,
4524 			    enum dc_color_space *color_space)
4525 {
4526 	bool full_range;
4527 
4528 	*color_space = COLOR_SPACE_SRGB;
4529 
4530 	/* DRM color properties only affect non-RGB formats. */
4531 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4532 		return 0;
4533 
4534 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4535 
4536 	switch (plane_state->color_encoding) {
4537 	case DRM_COLOR_YCBCR_BT601:
4538 		if (full_range)
4539 			*color_space = COLOR_SPACE_YCBCR601;
4540 		else
4541 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4542 		break;
4543 
4544 	case DRM_COLOR_YCBCR_BT709:
4545 		if (full_range)
4546 			*color_space = COLOR_SPACE_YCBCR709;
4547 		else
4548 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4549 		break;
4550 
4551 	case DRM_COLOR_YCBCR_BT2020:
4552 		if (full_range)
4553 			*color_space = COLOR_SPACE_2020_YCBCR;
4554 		else
4555 			return -EINVAL;
4556 		break;
4557 
4558 	default:
4559 		return -EINVAL;
4560 	}
4561 
4562 	return 0;
4563 }
4564 
4565 static int
4566 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4567 			    const struct drm_plane_state *plane_state,
4568 			    const uint64_t tiling_flags,
4569 			    struct dc_plane_info *plane_info,
4570 			    struct dc_plane_address *address,
4571 			    bool tmz_surface,
4572 			    bool force_disable_dcc)
4573 {
4574 	const struct drm_framebuffer *fb = plane_state->fb;
4575 	const struct amdgpu_framebuffer *afb =
4576 		to_amdgpu_framebuffer(plane_state->fb);
4577 	struct drm_format_name_buf format_name;
4578 	int ret;
4579 
4580 	memset(plane_info, 0, sizeof(*plane_info));
4581 
4582 	switch (fb->format->format) {
4583 	case DRM_FORMAT_C8:
4584 		plane_info->format =
4585 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4586 		break;
4587 	case DRM_FORMAT_RGB565:
4588 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4589 		break;
4590 	case DRM_FORMAT_XRGB8888:
4591 	case DRM_FORMAT_ARGB8888:
4592 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4593 		break;
4594 	case DRM_FORMAT_XRGB2101010:
4595 	case DRM_FORMAT_ARGB2101010:
4596 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4597 		break;
4598 	case DRM_FORMAT_XBGR2101010:
4599 	case DRM_FORMAT_ABGR2101010:
4600 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4601 		break;
4602 	case DRM_FORMAT_XBGR8888:
4603 	case DRM_FORMAT_ABGR8888:
4604 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4605 		break;
4606 	case DRM_FORMAT_NV21:
4607 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4608 		break;
4609 	case DRM_FORMAT_NV12:
4610 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4611 		break;
4612 	case DRM_FORMAT_P010:
4613 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4614 		break;
4615 	case DRM_FORMAT_XRGB16161616F:
4616 	case DRM_FORMAT_ARGB16161616F:
4617 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4618 		break;
4619 	case DRM_FORMAT_XBGR16161616F:
4620 	case DRM_FORMAT_ABGR16161616F:
4621 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4622 		break;
4623 	default:
4624 		DRM_ERROR(
4625 			"Unsupported screen format %s\n",
4626 			drm_get_format_name(fb->format->format, &format_name));
4627 		return -EINVAL;
4628 	}
4629 
4630 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4631 	case DRM_MODE_ROTATE_0:
4632 		plane_info->rotation = ROTATION_ANGLE_0;
4633 		break;
4634 	case DRM_MODE_ROTATE_90:
4635 		plane_info->rotation = ROTATION_ANGLE_90;
4636 		break;
4637 	case DRM_MODE_ROTATE_180:
4638 		plane_info->rotation = ROTATION_ANGLE_180;
4639 		break;
4640 	case DRM_MODE_ROTATE_270:
4641 		plane_info->rotation = ROTATION_ANGLE_270;
4642 		break;
4643 	default:
4644 		plane_info->rotation = ROTATION_ANGLE_0;
4645 		break;
4646 	}
4647 
4648 	plane_info->visible = true;
4649 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4650 
4651 	plane_info->layer_index = 0;
4652 
4653 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4654 					  &plane_info->color_space);
4655 	if (ret)
4656 		return ret;
4657 
4658 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4659 					   plane_info->rotation, tiling_flags,
4660 					   &plane_info->tiling_info,
4661 					   &plane_info->plane_size,
4662 					   &plane_info->dcc, address, tmz_surface,
4663 					   force_disable_dcc);
4664 	if (ret)
4665 		return ret;
4666 
4667 	fill_blending_from_plane_state(
4668 		plane_state, &plane_info->per_pixel_alpha,
4669 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4670 
4671 	return 0;
4672 }
4673 
4674 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4675 				    struct dc_plane_state *dc_plane_state,
4676 				    struct drm_plane_state *plane_state,
4677 				    struct drm_crtc_state *crtc_state)
4678 {
4679 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4680 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4681 	struct dc_scaling_info scaling_info;
4682 	struct dc_plane_info plane_info;
4683 	int ret;
4684 	bool force_disable_dcc = false;
4685 
4686 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4687 	if (ret)
4688 		return ret;
4689 
4690 	dc_plane_state->src_rect = scaling_info.src_rect;
4691 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4692 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4693 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4694 
4695 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4696 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4697 					  afb->tiling_flags,
4698 					  &plane_info,
4699 					  &dc_plane_state->address,
4700 					  afb->tmz_surface,
4701 					  force_disable_dcc);
4702 	if (ret)
4703 		return ret;
4704 
4705 	dc_plane_state->format = plane_info.format;
4706 	dc_plane_state->color_space = plane_info.color_space;
4707 	dc_plane_state->format = plane_info.format;
4708 	dc_plane_state->plane_size = plane_info.plane_size;
4709 	dc_plane_state->rotation = plane_info.rotation;
4710 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4711 	dc_plane_state->stereo_format = plane_info.stereo_format;
4712 	dc_plane_state->tiling_info = plane_info.tiling_info;
4713 	dc_plane_state->visible = plane_info.visible;
4714 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4715 	dc_plane_state->global_alpha = plane_info.global_alpha;
4716 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4717 	dc_plane_state->dcc = plane_info.dcc;
4718 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4719 
4720 	/*
4721 	 * Always set input transfer function, since plane state is refreshed
4722 	 * every time.
4723 	 */
4724 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4725 	if (ret)
4726 		return ret;
4727 
4728 	return 0;
4729 }
4730 
4731 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4732 					   const struct dm_connector_state *dm_state,
4733 					   struct dc_stream_state *stream)
4734 {
4735 	enum amdgpu_rmx_type rmx_type;
4736 
4737 	struct rect src = { 0 }; /* viewport in composition space*/
4738 	struct rect dst = { 0 }; /* stream addressable area */
4739 
4740 	/* no mode. nothing to be done */
4741 	if (!mode)
4742 		return;
4743 
4744 	/* Full screen scaling by default */
4745 	src.width = mode->hdisplay;
4746 	src.height = mode->vdisplay;
4747 	dst.width = stream->timing.h_addressable;
4748 	dst.height = stream->timing.v_addressable;
4749 
4750 	if (dm_state) {
4751 		rmx_type = dm_state->scaling;
4752 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4753 			if (src.width * dst.height <
4754 					src.height * dst.width) {
4755 				/* height needs less upscaling/more downscaling */
4756 				dst.width = src.width *
4757 						dst.height / src.height;
4758 			} else {
4759 				/* width needs less upscaling/more downscaling */
4760 				dst.height = src.height *
4761 						dst.width / src.width;
4762 			}
4763 		} else if (rmx_type == RMX_CENTER) {
4764 			dst = src;
4765 		}
4766 
4767 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4768 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4769 
4770 		if (dm_state->underscan_enable) {
4771 			dst.x += dm_state->underscan_hborder / 2;
4772 			dst.y += dm_state->underscan_vborder / 2;
4773 			dst.width -= dm_state->underscan_hborder;
4774 			dst.height -= dm_state->underscan_vborder;
4775 		}
4776 	}
4777 
4778 	stream->src = src;
4779 	stream->dst = dst;
4780 
4781 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4782 			dst.x, dst.y, dst.width, dst.height);
4783 
4784 }
4785 
4786 static enum dc_color_depth
4787 convert_color_depth_from_display_info(const struct drm_connector *connector,
4788 				      bool is_y420, int requested_bpc)
4789 {
4790 	uint8_t bpc;
4791 
4792 	if (is_y420) {
4793 		bpc = 8;
4794 
4795 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4796 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4797 			bpc = 16;
4798 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4799 			bpc = 12;
4800 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4801 			bpc = 10;
4802 	} else {
4803 		bpc = (uint8_t)connector->display_info.bpc;
4804 		/* Assume 8 bpc by default if no bpc is specified. */
4805 		bpc = bpc ? bpc : 8;
4806 	}
4807 
4808 	if (requested_bpc > 0) {
4809 		/*
4810 		 * Cap display bpc based on the user requested value.
4811 		 *
4812 		 * The value for state->max_bpc may not correctly updated
4813 		 * depending on when the connector gets added to the state
4814 		 * or if this was called outside of atomic check, so it
4815 		 * can't be used directly.
4816 		 */
4817 		bpc = min_t(u8, bpc, requested_bpc);
4818 
4819 		/* Round down to the nearest even number. */
4820 		bpc = bpc - (bpc & 1);
4821 	}
4822 
4823 	switch (bpc) {
4824 	case 0:
4825 		/*
4826 		 * Temporary Work around, DRM doesn't parse color depth for
4827 		 * EDID revision before 1.4
4828 		 * TODO: Fix edid parsing
4829 		 */
4830 		return COLOR_DEPTH_888;
4831 	case 6:
4832 		return COLOR_DEPTH_666;
4833 	case 8:
4834 		return COLOR_DEPTH_888;
4835 	case 10:
4836 		return COLOR_DEPTH_101010;
4837 	case 12:
4838 		return COLOR_DEPTH_121212;
4839 	case 14:
4840 		return COLOR_DEPTH_141414;
4841 	case 16:
4842 		return COLOR_DEPTH_161616;
4843 	default:
4844 		return COLOR_DEPTH_UNDEFINED;
4845 	}
4846 }
4847 
4848 static enum dc_aspect_ratio
4849 get_aspect_ratio(const struct drm_display_mode *mode_in)
4850 {
4851 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4852 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4853 }
4854 
4855 static enum dc_color_space
4856 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4857 {
4858 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4859 
4860 	switch (dc_crtc_timing->pixel_encoding)	{
4861 	case PIXEL_ENCODING_YCBCR422:
4862 	case PIXEL_ENCODING_YCBCR444:
4863 	case PIXEL_ENCODING_YCBCR420:
4864 	{
4865 		/*
4866 		 * 27030khz is the separation point between HDTV and SDTV
4867 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
4868 		 * respectively
4869 		 */
4870 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4871 			if (dc_crtc_timing->flags.Y_ONLY)
4872 				color_space =
4873 					COLOR_SPACE_YCBCR709_LIMITED;
4874 			else
4875 				color_space = COLOR_SPACE_YCBCR709;
4876 		} else {
4877 			if (dc_crtc_timing->flags.Y_ONLY)
4878 				color_space =
4879 					COLOR_SPACE_YCBCR601_LIMITED;
4880 			else
4881 				color_space = COLOR_SPACE_YCBCR601;
4882 		}
4883 
4884 	}
4885 	break;
4886 	case PIXEL_ENCODING_RGB:
4887 		color_space = COLOR_SPACE_SRGB;
4888 		break;
4889 
4890 	default:
4891 		WARN_ON(1);
4892 		break;
4893 	}
4894 
4895 	return color_space;
4896 }
4897 
4898 static bool adjust_colour_depth_from_display_info(
4899 	struct dc_crtc_timing *timing_out,
4900 	const struct drm_display_info *info)
4901 {
4902 	enum dc_color_depth depth = timing_out->display_color_depth;
4903 	int normalized_clk;
4904 	do {
4905 		normalized_clk = timing_out->pix_clk_100hz / 10;
4906 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4907 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4908 			normalized_clk /= 2;
4909 		/* Adjusting pix clock following on HDMI spec based on colour depth */
4910 		switch (depth) {
4911 		case COLOR_DEPTH_888:
4912 			break;
4913 		case COLOR_DEPTH_101010:
4914 			normalized_clk = (normalized_clk * 30) / 24;
4915 			break;
4916 		case COLOR_DEPTH_121212:
4917 			normalized_clk = (normalized_clk * 36) / 24;
4918 			break;
4919 		case COLOR_DEPTH_161616:
4920 			normalized_clk = (normalized_clk * 48) / 24;
4921 			break;
4922 		default:
4923 			/* The above depths are the only ones valid for HDMI. */
4924 			return false;
4925 		}
4926 		if (normalized_clk <= info->max_tmds_clock) {
4927 			timing_out->display_color_depth = depth;
4928 			return true;
4929 		}
4930 	} while (--depth > COLOR_DEPTH_666);
4931 	return false;
4932 }
4933 
4934 static void fill_stream_properties_from_drm_display_mode(
4935 	struct dc_stream_state *stream,
4936 	const struct drm_display_mode *mode_in,
4937 	const struct drm_connector *connector,
4938 	const struct drm_connector_state *connector_state,
4939 	const struct dc_stream_state *old_stream,
4940 	int requested_bpc)
4941 {
4942 	struct dc_crtc_timing *timing_out = &stream->timing;
4943 	const struct drm_display_info *info = &connector->display_info;
4944 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4945 	struct hdmi_vendor_infoframe hv_frame;
4946 	struct hdmi_avi_infoframe avi_frame;
4947 
4948 	memset(&hv_frame, 0, sizeof(hv_frame));
4949 	memset(&avi_frame, 0, sizeof(avi_frame));
4950 
4951 	timing_out->h_border_left = 0;
4952 	timing_out->h_border_right = 0;
4953 	timing_out->v_border_top = 0;
4954 	timing_out->v_border_bottom = 0;
4955 	/* TODO: un-hardcode */
4956 	if (drm_mode_is_420_only(info, mode_in)
4957 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4958 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4959 	else if (drm_mode_is_420_also(info, mode_in)
4960 			&& aconnector->force_yuv420_output)
4961 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4962 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4963 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4964 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4965 	else
4966 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4967 
4968 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4969 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4970 		connector,
4971 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4972 		requested_bpc);
4973 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4974 	timing_out->hdmi_vic = 0;
4975 
4976 	if(old_stream) {
4977 		timing_out->vic = old_stream->timing.vic;
4978 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4979 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4980 	} else {
4981 		timing_out->vic = drm_match_cea_mode(mode_in);
4982 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4983 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4984 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4985 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4986 	}
4987 
4988 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4989 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4990 		timing_out->vic = avi_frame.video_code;
4991 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4992 		timing_out->hdmi_vic = hv_frame.vic;
4993 	}
4994 
4995 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4996 	timing_out->h_total = mode_in->crtc_htotal;
4997 	timing_out->h_sync_width =
4998 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4999 	timing_out->h_front_porch =
5000 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5001 	timing_out->v_total = mode_in->crtc_vtotal;
5002 	timing_out->v_addressable = mode_in->crtc_vdisplay;
5003 	timing_out->v_front_porch =
5004 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5005 	timing_out->v_sync_width =
5006 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5007 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5008 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5009 
5010 	stream->output_color_space = get_output_color_space(timing_out);
5011 
5012 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5013 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5014 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5015 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5016 		    drm_mode_is_420_also(info, mode_in) &&
5017 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5018 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5019 			adjust_colour_depth_from_display_info(timing_out, info);
5020 		}
5021 	}
5022 }
5023 
5024 static void fill_audio_info(struct audio_info *audio_info,
5025 			    const struct drm_connector *drm_connector,
5026 			    const struct dc_sink *dc_sink)
5027 {
5028 	int i = 0;
5029 	int cea_revision = 0;
5030 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5031 
5032 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5033 	audio_info->product_id = edid_caps->product_id;
5034 
5035 	cea_revision = drm_connector->display_info.cea_rev;
5036 
5037 	strscpy(audio_info->display_name,
5038 		edid_caps->display_name,
5039 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5040 
5041 	if (cea_revision >= 3) {
5042 		audio_info->mode_count = edid_caps->audio_mode_count;
5043 
5044 		for (i = 0; i < audio_info->mode_count; ++i) {
5045 			audio_info->modes[i].format_code =
5046 					(enum audio_format_code)
5047 					(edid_caps->audio_modes[i].format_code);
5048 			audio_info->modes[i].channel_count =
5049 					edid_caps->audio_modes[i].channel_count;
5050 			audio_info->modes[i].sample_rates.all =
5051 					edid_caps->audio_modes[i].sample_rate;
5052 			audio_info->modes[i].sample_size =
5053 					edid_caps->audio_modes[i].sample_size;
5054 		}
5055 	}
5056 
5057 	audio_info->flags.all = edid_caps->speaker_flags;
5058 
5059 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5060 	if (drm_connector->latency_present[0]) {
5061 		audio_info->video_latency = drm_connector->video_latency[0];
5062 		audio_info->audio_latency = drm_connector->audio_latency[0];
5063 	}
5064 
5065 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5066 
5067 }
5068 
5069 static void
5070 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5071 				      struct drm_display_mode *dst_mode)
5072 {
5073 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5074 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5075 	dst_mode->crtc_clock = src_mode->crtc_clock;
5076 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5077 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5078 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5079 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5080 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5081 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5082 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5083 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5084 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5085 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5086 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5087 }
5088 
5089 static void
5090 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5091 					const struct drm_display_mode *native_mode,
5092 					bool scale_enabled)
5093 {
5094 	if (scale_enabled) {
5095 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5096 	} else if (native_mode->clock == drm_mode->clock &&
5097 			native_mode->htotal == drm_mode->htotal &&
5098 			native_mode->vtotal == drm_mode->vtotal) {
5099 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5100 	} else {
5101 		/* no scaling nor amdgpu inserted, no need to patch */
5102 	}
5103 }
5104 
5105 static struct dc_sink *
5106 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5107 {
5108 	struct dc_sink_init_data sink_init_data = { 0 };
5109 	struct dc_sink *sink = NULL;
5110 	sink_init_data.link = aconnector->dc_link;
5111 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5112 
5113 	sink = dc_sink_create(&sink_init_data);
5114 	if (!sink) {
5115 		DRM_ERROR("Failed to create sink!\n");
5116 		return NULL;
5117 	}
5118 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5119 
5120 	return sink;
5121 }
5122 
5123 static void set_multisync_trigger_params(
5124 		struct dc_stream_state *stream)
5125 {
5126 	if (stream->triggered_crtc_reset.enabled) {
5127 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5128 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5129 	}
5130 }
5131 
5132 static void set_master_stream(struct dc_stream_state *stream_set[],
5133 			      int stream_count)
5134 {
5135 	int j, highest_rfr = 0, master_stream = 0;
5136 
5137 	for (j = 0;  j < stream_count; j++) {
5138 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5139 			int refresh_rate = 0;
5140 
5141 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5142 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5143 			if (refresh_rate > highest_rfr) {
5144 				highest_rfr = refresh_rate;
5145 				master_stream = j;
5146 			}
5147 		}
5148 	}
5149 	for (j = 0;  j < stream_count; j++) {
5150 		if (stream_set[j])
5151 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5152 	}
5153 }
5154 
5155 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5156 {
5157 	int i = 0;
5158 
5159 	if (context->stream_count < 2)
5160 		return;
5161 	for (i = 0; i < context->stream_count ; i++) {
5162 		if (!context->streams[i])
5163 			continue;
5164 		/*
5165 		 * TODO: add a function to read AMD VSDB bits and set
5166 		 * crtc_sync_master.multi_sync_enabled flag
5167 		 * For now it's set to false
5168 		 */
5169 		set_multisync_trigger_params(context->streams[i]);
5170 	}
5171 	set_master_stream(context->streams, context->stream_count);
5172 }
5173 
5174 static struct dc_stream_state *
5175 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5176 		       const struct drm_display_mode *drm_mode,
5177 		       const struct dm_connector_state *dm_state,
5178 		       const struct dc_stream_state *old_stream,
5179 		       int requested_bpc)
5180 {
5181 	struct drm_display_mode *preferred_mode = NULL;
5182 	struct drm_connector *drm_connector;
5183 	const struct drm_connector_state *con_state =
5184 		dm_state ? &dm_state->base : NULL;
5185 	struct dc_stream_state *stream = NULL;
5186 	struct drm_display_mode mode = *drm_mode;
5187 	bool native_mode_found = false;
5188 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5189 	int mode_refresh;
5190 	int preferred_refresh = 0;
5191 #if defined(CONFIG_DRM_AMD_DC_DCN)
5192 	struct dsc_dec_dpcd_caps dsc_caps;
5193 	uint32_t link_bandwidth_kbps;
5194 #endif
5195 	struct dc_sink *sink = NULL;
5196 	if (aconnector == NULL) {
5197 		DRM_ERROR("aconnector is NULL!\n");
5198 		return stream;
5199 	}
5200 
5201 	drm_connector = &aconnector->base;
5202 
5203 	if (!aconnector->dc_sink) {
5204 		sink = create_fake_sink(aconnector);
5205 		if (!sink)
5206 			return stream;
5207 	} else {
5208 		sink = aconnector->dc_sink;
5209 		dc_sink_retain(sink);
5210 	}
5211 
5212 	stream = dc_create_stream_for_sink(sink);
5213 
5214 	if (stream == NULL) {
5215 		DRM_ERROR("Failed to create stream for sink!\n");
5216 		goto finish;
5217 	}
5218 
5219 	stream->dm_stream_context = aconnector;
5220 
5221 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5222 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5223 
5224 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5225 		/* Search for preferred mode */
5226 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5227 			native_mode_found = true;
5228 			break;
5229 		}
5230 	}
5231 	if (!native_mode_found)
5232 		preferred_mode = list_first_entry_or_null(
5233 				&aconnector->base.modes,
5234 				struct drm_display_mode,
5235 				head);
5236 
5237 	mode_refresh = drm_mode_vrefresh(&mode);
5238 
5239 	if (preferred_mode == NULL) {
5240 		/*
5241 		 * This may not be an error, the use case is when we have no
5242 		 * usermode calls to reset and set mode upon hotplug. In this
5243 		 * case, we call set mode ourselves to restore the previous mode
5244 		 * and the modelist may not be filled in in time.
5245 		 */
5246 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5247 	} else {
5248 		decide_crtc_timing_for_drm_display_mode(
5249 				&mode, preferred_mode,
5250 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5251 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5252 	}
5253 
5254 	if (!dm_state)
5255 		drm_mode_set_crtcinfo(&mode, 0);
5256 
5257 	/*
5258 	* If scaling is enabled and refresh rate didn't change
5259 	* we copy the vic and polarities of the old timings
5260 	*/
5261 	if (!scale || mode_refresh != preferred_refresh)
5262 		fill_stream_properties_from_drm_display_mode(stream,
5263 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5264 	else
5265 		fill_stream_properties_from_drm_display_mode(stream,
5266 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5267 
5268 	stream->timing.flags.DSC = 0;
5269 
5270 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5271 #if defined(CONFIG_DRM_AMD_DC_DCN)
5272 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5273 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5274 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5275 				      &dsc_caps);
5276 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5277 							     dc_link_get_link_cap(aconnector->dc_link));
5278 
5279 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5280 			/* Set DSC policy according to dsc_clock_en */
5281 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5282 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5283 
5284 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5285 						  &dsc_caps,
5286 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5287 						  0,
5288 						  link_bandwidth_kbps,
5289 						  &stream->timing,
5290 						  &stream->timing.dsc_cfg))
5291 				stream->timing.flags.DSC = 1;
5292 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5293 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5294 				stream->timing.flags.DSC = 1;
5295 
5296 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5297 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5298 
5299 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5300 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5301 
5302 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5303 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5304 		}
5305 #endif
5306 	}
5307 
5308 	update_stream_scaling_settings(&mode, dm_state, stream);
5309 
5310 	fill_audio_info(
5311 		&stream->audio_info,
5312 		drm_connector,
5313 		sink);
5314 
5315 	update_stream_signal(stream, sink);
5316 
5317 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5318 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5319 
5320 	if (stream->link->psr_settings.psr_feature_enabled) {
5321 		//
5322 		// should decide stream support vsc sdp colorimetry capability
5323 		// before building vsc info packet
5324 		//
5325 		stream->use_vsc_sdp_for_colorimetry = false;
5326 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5327 			stream->use_vsc_sdp_for_colorimetry =
5328 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5329 		} else {
5330 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5331 				stream->use_vsc_sdp_for_colorimetry = true;
5332 		}
5333 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5334 	}
5335 finish:
5336 	dc_sink_release(sink);
5337 
5338 	return stream;
5339 }
5340 
5341 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5342 {
5343 	drm_crtc_cleanup(crtc);
5344 	kfree(crtc);
5345 }
5346 
5347 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5348 				  struct drm_crtc_state *state)
5349 {
5350 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5351 
5352 	/* TODO Destroy dc_stream objects are stream object is flattened */
5353 	if (cur->stream)
5354 		dc_stream_release(cur->stream);
5355 
5356 
5357 	__drm_atomic_helper_crtc_destroy_state(state);
5358 
5359 
5360 	kfree(state);
5361 }
5362 
5363 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5364 {
5365 	struct dm_crtc_state *state;
5366 
5367 	if (crtc->state)
5368 		dm_crtc_destroy_state(crtc, crtc->state);
5369 
5370 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5371 	if (WARN_ON(!state))
5372 		return;
5373 
5374 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5375 }
5376 
5377 static struct drm_crtc_state *
5378 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5379 {
5380 	struct dm_crtc_state *state, *cur;
5381 
5382 	cur = to_dm_crtc_state(crtc->state);
5383 
5384 	if (WARN_ON(!crtc->state))
5385 		return NULL;
5386 
5387 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5388 	if (!state)
5389 		return NULL;
5390 
5391 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5392 
5393 	if (cur->stream) {
5394 		state->stream = cur->stream;
5395 		dc_stream_retain(state->stream);
5396 	}
5397 
5398 	state->active_planes = cur->active_planes;
5399 	state->vrr_infopacket = cur->vrr_infopacket;
5400 	state->abm_level = cur->abm_level;
5401 	state->vrr_supported = cur->vrr_supported;
5402 	state->freesync_config = cur->freesync_config;
5403 	state->crc_src = cur->crc_src;
5404 	state->cm_has_degamma = cur->cm_has_degamma;
5405 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5406 
5407 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5408 
5409 	return &state->base;
5410 }
5411 
5412 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5413 {
5414 	enum dc_irq_source irq_source;
5415 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5416 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5417 	int rc;
5418 
5419 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5420 
5421 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5422 
5423 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5424 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5425 	return rc;
5426 }
5427 
5428 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5429 {
5430 	enum dc_irq_source irq_source;
5431 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5432 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5433 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5434 #if defined(CONFIG_DRM_AMD_DC_DCN)
5435 	struct amdgpu_display_manager *dm = &adev->dm;
5436 	unsigned long flags;
5437 #endif
5438 	int rc = 0;
5439 
5440 	if (enable) {
5441 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5442 		if (amdgpu_dm_vrr_active(acrtc_state))
5443 			rc = dm_set_vupdate_irq(crtc, true);
5444 	} else {
5445 		/* vblank irq off -> vupdate irq off */
5446 		rc = dm_set_vupdate_irq(crtc, false);
5447 	}
5448 
5449 	if (rc)
5450 		return rc;
5451 
5452 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5453 
5454 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5455 		return -EBUSY;
5456 
5457 	if (amdgpu_in_reset(adev))
5458 		return 0;
5459 
5460 #if defined(CONFIG_DRM_AMD_DC_DCN)
5461 	spin_lock_irqsave(&dm->vblank_lock, flags);
5462 	dm->vblank_workqueue->dm = dm;
5463 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5464 	dm->vblank_workqueue->enable = enable;
5465 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5466 	schedule_work(&dm->vblank_workqueue->mall_work);
5467 #endif
5468 
5469 	return 0;
5470 }
5471 
5472 static int dm_enable_vblank(struct drm_crtc *crtc)
5473 {
5474 	return dm_set_vblank(crtc, true);
5475 }
5476 
5477 static void dm_disable_vblank(struct drm_crtc *crtc)
5478 {
5479 	dm_set_vblank(crtc, false);
5480 }
5481 
5482 /* Implemented only the options currently availible for the driver */
5483 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5484 	.reset = dm_crtc_reset_state,
5485 	.destroy = amdgpu_dm_crtc_destroy,
5486 	.set_config = drm_atomic_helper_set_config,
5487 	.page_flip = drm_atomic_helper_page_flip,
5488 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5489 	.atomic_destroy_state = dm_crtc_destroy_state,
5490 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5491 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5492 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5493 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5494 	.enable_vblank = dm_enable_vblank,
5495 	.disable_vblank = dm_disable_vblank,
5496 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5497 };
5498 
5499 static enum drm_connector_status
5500 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5501 {
5502 	bool connected;
5503 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5504 
5505 	/*
5506 	 * Notes:
5507 	 * 1. This interface is NOT called in context of HPD irq.
5508 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5509 	 * makes it a bad place for *any* MST-related activity.
5510 	 */
5511 
5512 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5513 	    !aconnector->fake_enable)
5514 		connected = (aconnector->dc_sink != NULL);
5515 	else
5516 		connected = (aconnector->base.force == DRM_FORCE_ON);
5517 
5518 	update_subconnector_property(aconnector);
5519 
5520 	return (connected ? connector_status_connected :
5521 			connector_status_disconnected);
5522 }
5523 
5524 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5525 					    struct drm_connector_state *connector_state,
5526 					    struct drm_property *property,
5527 					    uint64_t val)
5528 {
5529 	struct drm_device *dev = connector->dev;
5530 	struct amdgpu_device *adev = drm_to_adev(dev);
5531 	struct dm_connector_state *dm_old_state =
5532 		to_dm_connector_state(connector->state);
5533 	struct dm_connector_state *dm_new_state =
5534 		to_dm_connector_state(connector_state);
5535 
5536 	int ret = -EINVAL;
5537 
5538 	if (property == dev->mode_config.scaling_mode_property) {
5539 		enum amdgpu_rmx_type rmx_type;
5540 
5541 		switch (val) {
5542 		case DRM_MODE_SCALE_CENTER:
5543 			rmx_type = RMX_CENTER;
5544 			break;
5545 		case DRM_MODE_SCALE_ASPECT:
5546 			rmx_type = RMX_ASPECT;
5547 			break;
5548 		case DRM_MODE_SCALE_FULLSCREEN:
5549 			rmx_type = RMX_FULL;
5550 			break;
5551 		case DRM_MODE_SCALE_NONE:
5552 		default:
5553 			rmx_type = RMX_OFF;
5554 			break;
5555 		}
5556 
5557 		if (dm_old_state->scaling == rmx_type)
5558 			return 0;
5559 
5560 		dm_new_state->scaling = rmx_type;
5561 		ret = 0;
5562 	} else if (property == adev->mode_info.underscan_hborder_property) {
5563 		dm_new_state->underscan_hborder = val;
5564 		ret = 0;
5565 	} else if (property == adev->mode_info.underscan_vborder_property) {
5566 		dm_new_state->underscan_vborder = val;
5567 		ret = 0;
5568 	} else if (property == adev->mode_info.underscan_property) {
5569 		dm_new_state->underscan_enable = val;
5570 		ret = 0;
5571 	} else if (property == adev->mode_info.abm_level_property) {
5572 		dm_new_state->abm_level = val;
5573 		ret = 0;
5574 	}
5575 
5576 	return ret;
5577 }
5578 
5579 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5580 					    const struct drm_connector_state *state,
5581 					    struct drm_property *property,
5582 					    uint64_t *val)
5583 {
5584 	struct drm_device *dev = connector->dev;
5585 	struct amdgpu_device *adev = drm_to_adev(dev);
5586 	struct dm_connector_state *dm_state =
5587 		to_dm_connector_state(state);
5588 	int ret = -EINVAL;
5589 
5590 	if (property == dev->mode_config.scaling_mode_property) {
5591 		switch (dm_state->scaling) {
5592 		case RMX_CENTER:
5593 			*val = DRM_MODE_SCALE_CENTER;
5594 			break;
5595 		case RMX_ASPECT:
5596 			*val = DRM_MODE_SCALE_ASPECT;
5597 			break;
5598 		case RMX_FULL:
5599 			*val = DRM_MODE_SCALE_FULLSCREEN;
5600 			break;
5601 		case RMX_OFF:
5602 		default:
5603 			*val = DRM_MODE_SCALE_NONE;
5604 			break;
5605 		}
5606 		ret = 0;
5607 	} else if (property == adev->mode_info.underscan_hborder_property) {
5608 		*val = dm_state->underscan_hborder;
5609 		ret = 0;
5610 	} else if (property == adev->mode_info.underscan_vborder_property) {
5611 		*val = dm_state->underscan_vborder;
5612 		ret = 0;
5613 	} else if (property == adev->mode_info.underscan_property) {
5614 		*val = dm_state->underscan_enable;
5615 		ret = 0;
5616 	} else if (property == adev->mode_info.abm_level_property) {
5617 		*val = dm_state->abm_level;
5618 		ret = 0;
5619 	}
5620 
5621 	return ret;
5622 }
5623 
5624 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5625 {
5626 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5627 
5628 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5629 }
5630 
5631 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5632 {
5633 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5634 	const struct dc_link *link = aconnector->dc_link;
5635 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5636 	struct amdgpu_display_manager *dm = &adev->dm;
5637 
5638 	/*
5639 	 * Call only if mst_mgr was iniitalized before since it's not done
5640 	 * for all connector types.
5641 	 */
5642 	if (aconnector->mst_mgr.dev)
5643 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5644 
5645 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5646 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5647 
5648 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5649 	    link->type != dc_connection_none &&
5650 	    dm->backlight_dev) {
5651 		backlight_device_unregister(dm->backlight_dev);
5652 		dm->backlight_dev = NULL;
5653 	}
5654 #endif
5655 
5656 	if (aconnector->dc_em_sink)
5657 		dc_sink_release(aconnector->dc_em_sink);
5658 	aconnector->dc_em_sink = NULL;
5659 	if (aconnector->dc_sink)
5660 		dc_sink_release(aconnector->dc_sink);
5661 	aconnector->dc_sink = NULL;
5662 
5663 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5664 	drm_connector_unregister(connector);
5665 	drm_connector_cleanup(connector);
5666 	if (aconnector->i2c) {
5667 		i2c_del_adapter(&aconnector->i2c->base);
5668 		kfree(aconnector->i2c);
5669 	}
5670 	kfree(aconnector->dm_dp_aux.aux.name);
5671 
5672 	kfree(connector);
5673 }
5674 
5675 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5676 {
5677 	struct dm_connector_state *state =
5678 		to_dm_connector_state(connector->state);
5679 
5680 	if (connector->state)
5681 		__drm_atomic_helper_connector_destroy_state(connector->state);
5682 
5683 	kfree(state);
5684 
5685 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5686 
5687 	if (state) {
5688 		state->scaling = RMX_OFF;
5689 		state->underscan_enable = false;
5690 		state->underscan_hborder = 0;
5691 		state->underscan_vborder = 0;
5692 		state->base.max_requested_bpc = 8;
5693 		state->vcpi_slots = 0;
5694 		state->pbn = 0;
5695 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5696 			state->abm_level = amdgpu_dm_abm_level;
5697 
5698 		__drm_atomic_helper_connector_reset(connector, &state->base);
5699 	}
5700 }
5701 
5702 struct drm_connector_state *
5703 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5704 {
5705 	struct dm_connector_state *state =
5706 		to_dm_connector_state(connector->state);
5707 
5708 	struct dm_connector_state *new_state =
5709 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5710 
5711 	if (!new_state)
5712 		return NULL;
5713 
5714 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5715 
5716 	new_state->freesync_capable = state->freesync_capable;
5717 	new_state->abm_level = state->abm_level;
5718 	new_state->scaling = state->scaling;
5719 	new_state->underscan_enable = state->underscan_enable;
5720 	new_state->underscan_hborder = state->underscan_hborder;
5721 	new_state->underscan_vborder = state->underscan_vborder;
5722 	new_state->vcpi_slots = state->vcpi_slots;
5723 	new_state->pbn = state->pbn;
5724 	return &new_state->base;
5725 }
5726 
5727 static int
5728 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5729 {
5730 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5731 		to_amdgpu_dm_connector(connector);
5732 	int r;
5733 
5734 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5735 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5736 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5737 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5738 		if (r)
5739 			return r;
5740 	}
5741 
5742 #if defined(CONFIG_DEBUG_FS)
5743 	connector_debugfs_init(amdgpu_dm_connector);
5744 #endif
5745 
5746 	return 0;
5747 }
5748 
5749 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5750 	.reset = amdgpu_dm_connector_funcs_reset,
5751 	.detect = amdgpu_dm_connector_detect,
5752 	.fill_modes = drm_helper_probe_single_connector_modes,
5753 	.destroy = amdgpu_dm_connector_destroy,
5754 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5755 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5756 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5757 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5758 	.late_register = amdgpu_dm_connector_late_register,
5759 	.early_unregister = amdgpu_dm_connector_unregister
5760 };
5761 
5762 static int get_modes(struct drm_connector *connector)
5763 {
5764 	return amdgpu_dm_connector_get_modes(connector);
5765 }
5766 
5767 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5768 {
5769 	struct dc_sink_init_data init_params = {
5770 			.link = aconnector->dc_link,
5771 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5772 	};
5773 	struct edid *edid;
5774 
5775 	if (!aconnector->base.edid_blob_ptr) {
5776 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5777 				aconnector->base.name);
5778 
5779 		aconnector->base.force = DRM_FORCE_OFF;
5780 		aconnector->base.override_edid = false;
5781 		return;
5782 	}
5783 
5784 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5785 
5786 	aconnector->edid = edid;
5787 
5788 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5789 		aconnector->dc_link,
5790 		(uint8_t *)edid,
5791 		(edid->extensions + 1) * EDID_LENGTH,
5792 		&init_params);
5793 
5794 	if (aconnector->base.force == DRM_FORCE_ON) {
5795 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5796 		aconnector->dc_link->local_sink :
5797 		aconnector->dc_em_sink;
5798 		dc_sink_retain(aconnector->dc_sink);
5799 	}
5800 }
5801 
5802 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5803 {
5804 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5805 
5806 	/*
5807 	 * In case of headless boot with force on for DP managed connector
5808 	 * Those settings have to be != 0 to get initial modeset
5809 	 */
5810 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5811 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5812 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5813 	}
5814 
5815 
5816 	aconnector->base.override_edid = true;
5817 	create_eml_sink(aconnector);
5818 }
5819 
5820 static struct dc_stream_state *
5821 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5822 				const struct drm_display_mode *drm_mode,
5823 				const struct dm_connector_state *dm_state,
5824 				const struct dc_stream_state *old_stream)
5825 {
5826 	struct drm_connector *connector = &aconnector->base;
5827 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5828 	struct dc_stream_state *stream;
5829 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5830 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5831 	enum dc_status dc_result = DC_OK;
5832 
5833 	do {
5834 		stream = create_stream_for_sink(aconnector, drm_mode,
5835 						dm_state, old_stream,
5836 						requested_bpc);
5837 		if (stream == NULL) {
5838 			DRM_ERROR("Failed to create stream for sink!\n");
5839 			break;
5840 		}
5841 
5842 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5843 
5844 		if (dc_result != DC_OK) {
5845 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5846 				      drm_mode->hdisplay,
5847 				      drm_mode->vdisplay,
5848 				      drm_mode->clock,
5849 				      dc_result,
5850 				      dc_status_to_str(dc_result));
5851 
5852 			dc_stream_release(stream);
5853 			stream = NULL;
5854 			requested_bpc -= 2; /* lower bpc to retry validation */
5855 		}
5856 
5857 	} while (stream == NULL && requested_bpc >= 6);
5858 
5859 	return stream;
5860 }
5861 
5862 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5863 				   struct drm_display_mode *mode)
5864 {
5865 	int result = MODE_ERROR;
5866 	struct dc_sink *dc_sink;
5867 	/* TODO: Unhardcode stream count */
5868 	struct dc_stream_state *stream;
5869 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5870 
5871 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5872 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5873 		return result;
5874 
5875 	/*
5876 	 * Only run this the first time mode_valid is called to initilialize
5877 	 * EDID mgmt
5878 	 */
5879 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5880 		!aconnector->dc_em_sink)
5881 		handle_edid_mgmt(aconnector);
5882 
5883 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5884 
5885 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5886 				aconnector->base.force != DRM_FORCE_ON) {
5887 		DRM_ERROR("dc_sink is NULL!\n");
5888 		goto fail;
5889 	}
5890 
5891 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5892 	if (stream) {
5893 		dc_stream_release(stream);
5894 		result = MODE_OK;
5895 	}
5896 
5897 fail:
5898 	/* TODO: error handling*/
5899 	return result;
5900 }
5901 
5902 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5903 				struct dc_info_packet *out)
5904 {
5905 	struct hdmi_drm_infoframe frame;
5906 	unsigned char buf[30]; /* 26 + 4 */
5907 	ssize_t len;
5908 	int ret, i;
5909 
5910 	memset(out, 0, sizeof(*out));
5911 
5912 	if (!state->hdr_output_metadata)
5913 		return 0;
5914 
5915 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5916 	if (ret)
5917 		return ret;
5918 
5919 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5920 	if (len < 0)
5921 		return (int)len;
5922 
5923 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5924 	if (len != 30)
5925 		return -EINVAL;
5926 
5927 	/* Prepare the infopacket for DC. */
5928 	switch (state->connector->connector_type) {
5929 	case DRM_MODE_CONNECTOR_HDMIA:
5930 		out->hb0 = 0x87; /* type */
5931 		out->hb1 = 0x01; /* version */
5932 		out->hb2 = 0x1A; /* length */
5933 		out->sb[0] = buf[3]; /* checksum */
5934 		i = 1;
5935 		break;
5936 
5937 	case DRM_MODE_CONNECTOR_DisplayPort:
5938 	case DRM_MODE_CONNECTOR_eDP:
5939 		out->hb0 = 0x00; /* sdp id, zero */
5940 		out->hb1 = 0x87; /* type */
5941 		out->hb2 = 0x1D; /* payload len - 1 */
5942 		out->hb3 = (0x13 << 2); /* sdp version */
5943 		out->sb[0] = 0x01; /* version */
5944 		out->sb[1] = 0x1A; /* length */
5945 		i = 2;
5946 		break;
5947 
5948 	default:
5949 		return -EINVAL;
5950 	}
5951 
5952 	memcpy(&out->sb[i], &buf[4], 26);
5953 	out->valid = true;
5954 
5955 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5956 		       sizeof(out->sb), false);
5957 
5958 	return 0;
5959 }
5960 
5961 static bool
5962 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5963 			  const struct drm_connector_state *new_state)
5964 {
5965 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5966 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5967 
5968 	if (old_blob != new_blob) {
5969 		if (old_blob && new_blob &&
5970 		    old_blob->length == new_blob->length)
5971 			return memcmp(old_blob->data, new_blob->data,
5972 				      old_blob->length);
5973 
5974 		return true;
5975 	}
5976 
5977 	return false;
5978 }
5979 
5980 static int
5981 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5982 				 struct drm_atomic_state *state)
5983 {
5984 	struct drm_connector_state *new_con_state =
5985 		drm_atomic_get_new_connector_state(state, conn);
5986 	struct drm_connector_state *old_con_state =
5987 		drm_atomic_get_old_connector_state(state, conn);
5988 	struct drm_crtc *crtc = new_con_state->crtc;
5989 	struct drm_crtc_state *new_crtc_state;
5990 	int ret;
5991 
5992 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5993 
5994 	if (!crtc)
5995 		return 0;
5996 
5997 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5998 		struct dc_info_packet hdr_infopacket;
5999 
6000 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6001 		if (ret)
6002 			return ret;
6003 
6004 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6005 		if (IS_ERR(new_crtc_state))
6006 			return PTR_ERR(new_crtc_state);
6007 
6008 		/*
6009 		 * DC considers the stream backends changed if the
6010 		 * static metadata changes. Forcing the modeset also
6011 		 * gives a simple way for userspace to switch from
6012 		 * 8bpc to 10bpc when setting the metadata to enter
6013 		 * or exit HDR.
6014 		 *
6015 		 * Changing the static metadata after it's been
6016 		 * set is permissible, however. So only force a
6017 		 * modeset if we're entering or exiting HDR.
6018 		 */
6019 		new_crtc_state->mode_changed =
6020 			!old_con_state->hdr_output_metadata ||
6021 			!new_con_state->hdr_output_metadata;
6022 	}
6023 
6024 	return 0;
6025 }
6026 
6027 static const struct drm_connector_helper_funcs
6028 amdgpu_dm_connector_helper_funcs = {
6029 	/*
6030 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6031 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6032 	 * are missing after user start lightdm. So we need to renew modes list.
6033 	 * in get_modes call back, not just return the modes count
6034 	 */
6035 	.get_modes = get_modes,
6036 	.mode_valid = amdgpu_dm_connector_mode_valid,
6037 	.atomic_check = amdgpu_dm_connector_atomic_check,
6038 };
6039 
6040 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6041 {
6042 }
6043 
6044 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6045 {
6046 	struct drm_atomic_state *state = new_crtc_state->state;
6047 	struct drm_plane *plane;
6048 	int num_active = 0;
6049 
6050 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6051 		struct drm_plane_state *new_plane_state;
6052 
6053 		/* Cursor planes are "fake". */
6054 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6055 			continue;
6056 
6057 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6058 
6059 		if (!new_plane_state) {
6060 			/*
6061 			 * The plane is enable on the CRTC and hasn't changed
6062 			 * state. This means that it previously passed
6063 			 * validation and is therefore enabled.
6064 			 */
6065 			num_active += 1;
6066 			continue;
6067 		}
6068 
6069 		/* We need a framebuffer to be considered enabled. */
6070 		num_active += (new_plane_state->fb != NULL);
6071 	}
6072 
6073 	return num_active;
6074 }
6075 
6076 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6077 					 struct drm_crtc_state *new_crtc_state)
6078 {
6079 	struct dm_crtc_state *dm_new_crtc_state =
6080 		to_dm_crtc_state(new_crtc_state);
6081 
6082 	dm_new_crtc_state->active_planes = 0;
6083 
6084 	if (!dm_new_crtc_state->stream)
6085 		return;
6086 
6087 	dm_new_crtc_state->active_planes =
6088 		count_crtc_active_planes(new_crtc_state);
6089 }
6090 
6091 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6092 				       struct drm_atomic_state *state)
6093 {
6094 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6095 									  crtc);
6096 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6097 	struct dc *dc = adev->dm.dc;
6098 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6099 	int ret = -EINVAL;
6100 
6101 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6102 
6103 	dm_update_crtc_active_planes(crtc, crtc_state);
6104 
6105 	if (unlikely(!dm_crtc_state->stream &&
6106 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6107 		WARN_ON(1);
6108 		return ret;
6109 	}
6110 
6111 	/*
6112 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6113 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6114 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6115 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6116 	 */
6117 	if (crtc_state->enable &&
6118 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6119 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6120 		return -EINVAL;
6121 	}
6122 
6123 	/* In some use cases, like reset, no stream is attached */
6124 	if (!dm_crtc_state->stream)
6125 		return 0;
6126 
6127 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6128 		return 0;
6129 
6130 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6131 	return ret;
6132 }
6133 
6134 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6135 				      const struct drm_display_mode *mode,
6136 				      struct drm_display_mode *adjusted_mode)
6137 {
6138 	return true;
6139 }
6140 
6141 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6142 	.disable = dm_crtc_helper_disable,
6143 	.atomic_check = dm_crtc_helper_atomic_check,
6144 	.mode_fixup = dm_crtc_helper_mode_fixup,
6145 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6146 };
6147 
6148 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6149 {
6150 
6151 }
6152 
6153 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6154 {
6155 	switch (display_color_depth) {
6156 		case COLOR_DEPTH_666:
6157 			return 6;
6158 		case COLOR_DEPTH_888:
6159 			return 8;
6160 		case COLOR_DEPTH_101010:
6161 			return 10;
6162 		case COLOR_DEPTH_121212:
6163 			return 12;
6164 		case COLOR_DEPTH_141414:
6165 			return 14;
6166 		case COLOR_DEPTH_161616:
6167 			return 16;
6168 		default:
6169 			break;
6170 		}
6171 	return 0;
6172 }
6173 
6174 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6175 					  struct drm_crtc_state *crtc_state,
6176 					  struct drm_connector_state *conn_state)
6177 {
6178 	struct drm_atomic_state *state = crtc_state->state;
6179 	struct drm_connector *connector = conn_state->connector;
6180 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6181 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6182 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6183 	struct drm_dp_mst_topology_mgr *mst_mgr;
6184 	struct drm_dp_mst_port *mst_port;
6185 	enum dc_color_depth color_depth;
6186 	int clock, bpp = 0;
6187 	bool is_y420 = false;
6188 
6189 	if (!aconnector->port || !aconnector->dc_sink)
6190 		return 0;
6191 
6192 	mst_port = aconnector->port;
6193 	mst_mgr = &aconnector->mst_port->mst_mgr;
6194 
6195 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6196 		return 0;
6197 
6198 	if (!state->duplicated) {
6199 		int max_bpc = conn_state->max_requested_bpc;
6200 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6201 				aconnector->force_yuv420_output;
6202 		color_depth = convert_color_depth_from_display_info(connector,
6203 								    is_y420,
6204 								    max_bpc);
6205 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6206 		clock = adjusted_mode->clock;
6207 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6208 	}
6209 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6210 									   mst_mgr,
6211 									   mst_port,
6212 									   dm_new_connector_state->pbn,
6213 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6214 	if (dm_new_connector_state->vcpi_slots < 0) {
6215 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6216 		return dm_new_connector_state->vcpi_slots;
6217 	}
6218 	return 0;
6219 }
6220 
6221 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6222 	.disable = dm_encoder_helper_disable,
6223 	.atomic_check = dm_encoder_helper_atomic_check
6224 };
6225 
6226 #if defined(CONFIG_DRM_AMD_DC_DCN)
6227 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6228 					    struct dc_state *dc_state)
6229 {
6230 	struct dc_stream_state *stream = NULL;
6231 	struct drm_connector *connector;
6232 	struct drm_connector_state *new_con_state, *old_con_state;
6233 	struct amdgpu_dm_connector *aconnector;
6234 	struct dm_connector_state *dm_conn_state;
6235 	int i, j, clock, bpp;
6236 	int vcpi, pbn_div, pbn = 0;
6237 
6238 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6239 
6240 		aconnector = to_amdgpu_dm_connector(connector);
6241 
6242 		if (!aconnector->port)
6243 			continue;
6244 
6245 		if (!new_con_state || !new_con_state->crtc)
6246 			continue;
6247 
6248 		dm_conn_state = to_dm_connector_state(new_con_state);
6249 
6250 		for (j = 0; j < dc_state->stream_count; j++) {
6251 			stream = dc_state->streams[j];
6252 			if (!stream)
6253 				continue;
6254 
6255 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6256 				break;
6257 
6258 			stream = NULL;
6259 		}
6260 
6261 		if (!stream)
6262 			continue;
6263 
6264 		if (stream->timing.flags.DSC != 1) {
6265 			drm_dp_mst_atomic_enable_dsc(state,
6266 						     aconnector->port,
6267 						     dm_conn_state->pbn,
6268 						     0,
6269 						     false);
6270 			continue;
6271 		}
6272 
6273 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6274 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6275 		clock = stream->timing.pix_clk_100hz / 10;
6276 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6277 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6278 						    aconnector->port,
6279 						    pbn, pbn_div,
6280 						    true);
6281 		if (vcpi < 0)
6282 			return vcpi;
6283 
6284 		dm_conn_state->pbn = pbn;
6285 		dm_conn_state->vcpi_slots = vcpi;
6286 	}
6287 	return 0;
6288 }
6289 #endif
6290 
6291 static void dm_drm_plane_reset(struct drm_plane *plane)
6292 {
6293 	struct dm_plane_state *amdgpu_state = NULL;
6294 
6295 	if (plane->state)
6296 		plane->funcs->atomic_destroy_state(plane, plane->state);
6297 
6298 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6299 	WARN_ON(amdgpu_state == NULL);
6300 
6301 	if (amdgpu_state)
6302 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6303 }
6304 
6305 static struct drm_plane_state *
6306 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6307 {
6308 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6309 
6310 	old_dm_plane_state = to_dm_plane_state(plane->state);
6311 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6312 	if (!dm_plane_state)
6313 		return NULL;
6314 
6315 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6316 
6317 	if (old_dm_plane_state->dc_state) {
6318 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6319 		dc_plane_state_retain(dm_plane_state->dc_state);
6320 	}
6321 
6322 	return &dm_plane_state->base;
6323 }
6324 
6325 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6326 				struct drm_plane_state *state)
6327 {
6328 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6329 
6330 	if (dm_plane_state->dc_state)
6331 		dc_plane_state_release(dm_plane_state->dc_state);
6332 
6333 	drm_atomic_helper_plane_destroy_state(plane, state);
6334 }
6335 
6336 static const struct drm_plane_funcs dm_plane_funcs = {
6337 	.update_plane	= drm_atomic_helper_update_plane,
6338 	.disable_plane	= drm_atomic_helper_disable_plane,
6339 	.destroy	= drm_primary_helper_destroy,
6340 	.reset = dm_drm_plane_reset,
6341 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6342 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6343 	.format_mod_supported = dm_plane_format_mod_supported,
6344 };
6345 
6346 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6347 				      struct drm_plane_state *new_state)
6348 {
6349 	struct amdgpu_framebuffer *afb;
6350 	struct drm_gem_object *obj;
6351 	struct amdgpu_device *adev;
6352 	struct amdgpu_bo *rbo;
6353 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6354 	struct list_head list;
6355 	struct ttm_validate_buffer tv;
6356 	struct ww_acquire_ctx ticket;
6357 	uint32_t domain;
6358 	int r;
6359 
6360 	if (!new_state->fb) {
6361 		DRM_DEBUG_DRIVER("No FB bound\n");
6362 		return 0;
6363 	}
6364 
6365 	afb = to_amdgpu_framebuffer(new_state->fb);
6366 	obj = new_state->fb->obj[0];
6367 	rbo = gem_to_amdgpu_bo(obj);
6368 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6369 	INIT_LIST_HEAD(&list);
6370 
6371 	tv.bo = &rbo->tbo;
6372 	tv.num_shared = 1;
6373 	list_add(&tv.head, &list);
6374 
6375 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6376 	if (r) {
6377 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6378 		return r;
6379 	}
6380 
6381 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6382 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6383 	else
6384 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6385 
6386 	r = amdgpu_bo_pin(rbo, domain);
6387 	if (unlikely(r != 0)) {
6388 		if (r != -ERESTARTSYS)
6389 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6390 		ttm_eu_backoff_reservation(&ticket, &list);
6391 		return r;
6392 	}
6393 
6394 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6395 	if (unlikely(r != 0)) {
6396 		amdgpu_bo_unpin(rbo);
6397 		ttm_eu_backoff_reservation(&ticket, &list);
6398 		DRM_ERROR("%p bind failed\n", rbo);
6399 		return r;
6400 	}
6401 
6402 	ttm_eu_backoff_reservation(&ticket, &list);
6403 
6404 	afb->address = amdgpu_bo_gpu_offset(rbo);
6405 
6406 	amdgpu_bo_ref(rbo);
6407 
6408 	/**
6409 	 * We don't do surface updates on planes that have been newly created,
6410 	 * but we also don't have the afb->address during atomic check.
6411 	 *
6412 	 * Fill in buffer attributes depending on the address here, but only on
6413 	 * newly created planes since they're not being used by DC yet and this
6414 	 * won't modify global state.
6415 	 */
6416 	dm_plane_state_old = to_dm_plane_state(plane->state);
6417 	dm_plane_state_new = to_dm_plane_state(new_state);
6418 
6419 	if (dm_plane_state_new->dc_state &&
6420 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6421 		struct dc_plane_state *plane_state =
6422 			dm_plane_state_new->dc_state;
6423 		bool force_disable_dcc = !plane_state->dcc.enable;
6424 
6425 		fill_plane_buffer_attributes(
6426 			adev, afb, plane_state->format, plane_state->rotation,
6427 			afb->tiling_flags,
6428 			&plane_state->tiling_info, &plane_state->plane_size,
6429 			&plane_state->dcc, &plane_state->address,
6430 			afb->tmz_surface, force_disable_dcc);
6431 	}
6432 
6433 	return 0;
6434 }
6435 
6436 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6437 				       struct drm_plane_state *old_state)
6438 {
6439 	struct amdgpu_bo *rbo;
6440 	int r;
6441 
6442 	if (!old_state->fb)
6443 		return;
6444 
6445 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6446 	r = amdgpu_bo_reserve(rbo, false);
6447 	if (unlikely(r)) {
6448 		DRM_ERROR("failed to reserve rbo before unpin\n");
6449 		return;
6450 	}
6451 
6452 	amdgpu_bo_unpin(rbo);
6453 	amdgpu_bo_unreserve(rbo);
6454 	amdgpu_bo_unref(&rbo);
6455 }
6456 
6457 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6458 				       struct drm_crtc_state *new_crtc_state)
6459 {
6460 	struct drm_framebuffer *fb = state->fb;
6461 	int min_downscale, max_upscale;
6462 	int min_scale = 0;
6463 	int max_scale = INT_MAX;
6464 
6465 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6466 	if (fb && state->crtc) {
6467 		/* Validate viewport to cover the case when only the position changes */
6468 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6469 			int viewport_width = state->crtc_w;
6470 			int viewport_height = state->crtc_h;
6471 
6472 			if (state->crtc_x < 0)
6473 				viewport_width += state->crtc_x;
6474 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6475 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6476 
6477 			if (state->crtc_y < 0)
6478 				viewport_height += state->crtc_y;
6479 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6480 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6481 
6482 			/* If completely outside of screen, viewport_width and/or viewport_height will be negative,
6483 			 * which is still OK to satisfy the condition below, thereby also covering these cases
6484 			 * (when plane is completely outside of screen).
6485 			 * x2 for width is because of pipe-split.
6486 			 */
6487 			if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE)
6488 				return -EINVAL;
6489 		}
6490 
6491 		/* Get min/max allowed scaling factors from plane caps. */
6492 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6493 					     &min_downscale, &max_upscale);
6494 		/*
6495 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6496 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6497 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6498 		 */
6499 		min_scale = (1000 << 16) / max_upscale;
6500 		max_scale = (1000 << 16) / min_downscale;
6501 	}
6502 
6503 	return drm_atomic_helper_check_plane_state(
6504 		state, new_crtc_state, min_scale, max_scale, true, true);
6505 }
6506 
6507 static int dm_plane_atomic_check(struct drm_plane *plane,
6508 				 struct drm_plane_state *state)
6509 {
6510 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6511 	struct dc *dc = adev->dm.dc;
6512 	struct dm_plane_state *dm_plane_state;
6513 	struct dc_scaling_info scaling_info;
6514 	struct drm_crtc_state *new_crtc_state;
6515 	int ret;
6516 
6517 	trace_amdgpu_dm_plane_atomic_check(state);
6518 
6519 	dm_plane_state = to_dm_plane_state(state);
6520 
6521 	if (!dm_plane_state->dc_state)
6522 		return 0;
6523 
6524 	new_crtc_state =
6525 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6526 	if (!new_crtc_state)
6527 		return -EINVAL;
6528 
6529 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6530 	if (ret)
6531 		return ret;
6532 
6533 	ret = fill_dc_scaling_info(state, &scaling_info);
6534 	if (ret)
6535 		return ret;
6536 
6537 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6538 		return 0;
6539 
6540 	return -EINVAL;
6541 }
6542 
6543 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6544 				       struct drm_plane_state *new_plane_state)
6545 {
6546 	/* Only support async updates on cursor planes. */
6547 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6548 		return -EINVAL;
6549 
6550 	return 0;
6551 }
6552 
6553 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6554 					 struct drm_plane_state *new_state)
6555 {
6556 	struct drm_plane_state *old_state =
6557 		drm_atomic_get_old_plane_state(new_state->state, plane);
6558 
6559 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6560 
6561 	swap(plane->state->fb, new_state->fb);
6562 
6563 	plane->state->src_x = new_state->src_x;
6564 	plane->state->src_y = new_state->src_y;
6565 	plane->state->src_w = new_state->src_w;
6566 	plane->state->src_h = new_state->src_h;
6567 	plane->state->crtc_x = new_state->crtc_x;
6568 	plane->state->crtc_y = new_state->crtc_y;
6569 	plane->state->crtc_w = new_state->crtc_w;
6570 	plane->state->crtc_h = new_state->crtc_h;
6571 
6572 	handle_cursor_update(plane, old_state);
6573 }
6574 
6575 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6576 	.prepare_fb = dm_plane_helper_prepare_fb,
6577 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6578 	.atomic_check = dm_plane_atomic_check,
6579 	.atomic_async_check = dm_plane_atomic_async_check,
6580 	.atomic_async_update = dm_plane_atomic_async_update
6581 };
6582 
6583 /*
6584  * TODO: these are currently initialized to rgb formats only.
6585  * For future use cases we should either initialize them dynamically based on
6586  * plane capabilities, or initialize this array to all formats, so internal drm
6587  * check will succeed, and let DC implement proper check
6588  */
6589 static const uint32_t rgb_formats[] = {
6590 	DRM_FORMAT_XRGB8888,
6591 	DRM_FORMAT_ARGB8888,
6592 	DRM_FORMAT_RGBA8888,
6593 	DRM_FORMAT_XRGB2101010,
6594 	DRM_FORMAT_XBGR2101010,
6595 	DRM_FORMAT_ARGB2101010,
6596 	DRM_FORMAT_ABGR2101010,
6597 	DRM_FORMAT_XBGR8888,
6598 	DRM_FORMAT_ABGR8888,
6599 	DRM_FORMAT_RGB565,
6600 };
6601 
6602 static const uint32_t overlay_formats[] = {
6603 	DRM_FORMAT_XRGB8888,
6604 	DRM_FORMAT_ARGB8888,
6605 	DRM_FORMAT_RGBA8888,
6606 	DRM_FORMAT_XBGR8888,
6607 	DRM_FORMAT_ABGR8888,
6608 	DRM_FORMAT_RGB565
6609 };
6610 
6611 static const u32 cursor_formats[] = {
6612 	DRM_FORMAT_ARGB8888
6613 };
6614 
6615 static int get_plane_formats(const struct drm_plane *plane,
6616 			     const struct dc_plane_cap *plane_cap,
6617 			     uint32_t *formats, int max_formats)
6618 {
6619 	int i, num_formats = 0;
6620 
6621 	/*
6622 	 * TODO: Query support for each group of formats directly from
6623 	 * DC plane caps. This will require adding more formats to the
6624 	 * caps list.
6625 	 */
6626 
6627 	switch (plane->type) {
6628 	case DRM_PLANE_TYPE_PRIMARY:
6629 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6630 			if (num_formats >= max_formats)
6631 				break;
6632 
6633 			formats[num_formats++] = rgb_formats[i];
6634 		}
6635 
6636 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6637 			formats[num_formats++] = DRM_FORMAT_NV12;
6638 		if (plane_cap && plane_cap->pixel_format_support.p010)
6639 			formats[num_formats++] = DRM_FORMAT_P010;
6640 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6641 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6642 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6643 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6644 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6645 		}
6646 		break;
6647 
6648 	case DRM_PLANE_TYPE_OVERLAY:
6649 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6650 			if (num_formats >= max_formats)
6651 				break;
6652 
6653 			formats[num_formats++] = overlay_formats[i];
6654 		}
6655 		break;
6656 
6657 	case DRM_PLANE_TYPE_CURSOR:
6658 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6659 			if (num_formats >= max_formats)
6660 				break;
6661 
6662 			formats[num_formats++] = cursor_formats[i];
6663 		}
6664 		break;
6665 	}
6666 
6667 	return num_formats;
6668 }
6669 
6670 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6671 				struct drm_plane *plane,
6672 				unsigned long possible_crtcs,
6673 				const struct dc_plane_cap *plane_cap)
6674 {
6675 	uint32_t formats[32];
6676 	int num_formats;
6677 	int res = -EPERM;
6678 	unsigned int supported_rotations;
6679 	uint64_t *modifiers = NULL;
6680 
6681 	num_formats = get_plane_formats(plane, plane_cap, formats,
6682 					ARRAY_SIZE(formats));
6683 
6684 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6685 	if (res)
6686 		return res;
6687 
6688 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6689 				       &dm_plane_funcs, formats, num_formats,
6690 				       modifiers, plane->type, NULL);
6691 	kfree(modifiers);
6692 	if (res)
6693 		return res;
6694 
6695 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6696 	    plane_cap && plane_cap->per_pixel_alpha) {
6697 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6698 					  BIT(DRM_MODE_BLEND_PREMULTI);
6699 
6700 		drm_plane_create_alpha_property(plane);
6701 		drm_plane_create_blend_mode_property(plane, blend_caps);
6702 	}
6703 
6704 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6705 	    plane_cap &&
6706 	    (plane_cap->pixel_format_support.nv12 ||
6707 	     plane_cap->pixel_format_support.p010)) {
6708 		/* This only affects YUV formats. */
6709 		drm_plane_create_color_properties(
6710 			plane,
6711 			BIT(DRM_COLOR_YCBCR_BT601) |
6712 			BIT(DRM_COLOR_YCBCR_BT709) |
6713 			BIT(DRM_COLOR_YCBCR_BT2020),
6714 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6715 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6716 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6717 	}
6718 
6719 	supported_rotations =
6720 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6721 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6722 
6723 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6724 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6725 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6726 						   supported_rotations);
6727 
6728 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6729 
6730 	/* Create (reset) the plane state */
6731 	if (plane->funcs->reset)
6732 		plane->funcs->reset(plane);
6733 
6734 	return 0;
6735 }
6736 
6737 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6738 			       struct drm_plane *plane,
6739 			       uint32_t crtc_index)
6740 {
6741 	struct amdgpu_crtc *acrtc = NULL;
6742 	struct drm_plane *cursor_plane;
6743 
6744 	int res = -ENOMEM;
6745 
6746 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6747 	if (!cursor_plane)
6748 		goto fail;
6749 
6750 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6751 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6752 
6753 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6754 	if (!acrtc)
6755 		goto fail;
6756 
6757 	res = drm_crtc_init_with_planes(
6758 			dm->ddev,
6759 			&acrtc->base,
6760 			plane,
6761 			cursor_plane,
6762 			&amdgpu_dm_crtc_funcs, NULL);
6763 
6764 	if (res)
6765 		goto fail;
6766 
6767 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6768 
6769 	/* Create (reset) the plane state */
6770 	if (acrtc->base.funcs->reset)
6771 		acrtc->base.funcs->reset(&acrtc->base);
6772 
6773 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6774 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6775 
6776 	acrtc->crtc_id = crtc_index;
6777 	acrtc->base.enabled = false;
6778 	acrtc->otg_inst = -1;
6779 
6780 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6781 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6782 				   true, MAX_COLOR_LUT_ENTRIES);
6783 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6784 
6785 	return 0;
6786 
6787 fail:
6788 	kfree(acrtc);
6789 	kfree(cursor_plane);
6790 	return res;
6791 }
6792 
6793 
6794 static int to_drm_connector_type(enum signal_type st)
6795 {
6796 	switch (st) {
6797 	case SIGNAL_TYPE_HDMI_TYPE_A:
6798 		return DRM_MODE_CONNECTOR_HDMIA;
6799 	case SIGNAL_TYPE_EDP:
6800 		return DRM_MODE_CONNECTOR_eDP;
6801 	case SIGNAL_TYPE_LVDS:
6802 		return DRM_MODE_CONNECTOR_LVDS;
6803 	case SIGNAL_TYPE_RGB:
6804 		return DRM_MODE_CONNECTOR_VGA;
6805 	case SIGNAL_TYPE_DISPLAY_PORT:
6806 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6807 		return DRM_MODE_CONNECTOR_DisplayPort;
6808 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6809 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6810 		return DRM_MODE_CONNECTOR_DVID;
6811 	case SIGNAL_TYPE_VIRTUAL:
6812 		return DRM_MODE_CONNECTOR_VIRTUAL;
6813 
6814 	default:
6815 		return DRM_MODE_CONNECTOR_Unknown;
6816 	}
6817 }
6818 
6819 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6820 {
6821 	struct drm_encoder *encoder;
6822 
6823 	/* There is only one encoder per connector */
6824 	drm_connector_for_each_possible_encoder(connector, encoder)
6825 		return encoder;
6826 
6827 	return NULL;
6828 }
6829 
6830 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6831 {
6832 	struct drm_encoder *encoder;
6833 	struct amdgpu_encoder *amdgpu_encoder;
6834 
6835 	encoder = amdgpu_dm_connector_to_encoder(connector);
6836 
6837 	if (encoder == NULL)
6838 		return;
6839 
6840 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6841 
6842 	amdgpu_encoder->native_mode.clock = 0;
6843 
6844 	if (!list_empty(&connector->probed_modes)) {
6845 		struct drm_display_mode *preferred_mode = NULL;
6846 
6847 		list_for_each_entry(preferred_mode,
6848 				    &connector->probed_modes,
6849 				    head) {
6850 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6851 				amdgpu_encoder->native_mode = *preferred_mode;
6852 
6853 			break;
6854 		}
6855 
6856 	}
6857 }
6858 
6859 static struct drm_display_mode *
6860 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6861 			     char *name,
6862 			     int hdisplay, int vdisplay)
6863 {
6864 	struct drm_device *dev = encoder->dev;
6865 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6866 	struct drm_display_mode *mode = NULL;
6867 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6868 
6869 	mode = drm_mode_duplicate(dev, native_mode);
6870 
6871 	if (mode == NULL)
6872 		return NULL;
6873 
6874 	mode->hdisplay = hdisplay;
6875 	mode->vdisplay = vdisplay;
6876 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6877 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6878 
6879 	return mode;
6880 
6881 }
6882 
6883 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6884 						 struct drm_connector *connector)
6885 {
6886 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6887 	struct drm_display_mode *mode = NULL;
6888 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6889 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6890 				to_amdgpu_dm_connector(connector);
6891 	int i;
6892 	int n;
6893 	struct mode_size {
6894 		char name[DRM_DISPLAY_MODE_LEN];
6895 		int w;
6896 		int h;
6897 	} common_modes[] = {
6898 		{  "640x480",  640,  480},
6899 		{  "800x600",  800,  600},
6900 		{ "1024x768", 1024,  768},
6901 		{ "1280x720", 1280,  720},
6902 		{ "1280x800", 1280,  800},
6903 		{"1280x1024", 1280, 1024},
6904 		{ "1440x900", 1440,  900},
6905 		{"1680x1050", 1680, 1050},
6906 		{"1600x1200", 1600, 1200},
6907 		{"1920x1080", 1920, 1080},
6908 		{"1920x1200", 1920, 1200}
6909 	};
6910 
6911 	n = ARRAY_SIZE(common_modes);
6912 
6913 	for (i = 0; i < n; i++) {
6914 		struct drm_display_mode *curmode = NULL;
6915 		bool mode_existed = false;
6916 
6917 		if (common_modes[i].w > native_mode->hdisplay ||
6918 		    common_modes[i].h > native_mode->vdisplay ||
6919 		   (common_modes[i].w == native_mode->hdisplay &&
6920 		    common_modes[i].h == native_mode->vdisplay))
6921 			continue;
6922 
6923 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6924 			if (common_modes[i].w == curmode->hdisplay &&
6925 			    common_modes[i].h == curmode->vdisplay) {
6926 				mode_existed = true;
6927 				break;
6928 			}
6929 		}
6930 
6931 		if (mode_existed)
6932 			continue;
6933 
6934 		mode = amdgpu_dm_create_common_mode(encoder,
6935 				common_modes[i].name, common_modes[i].w,
6936 				common_modes[i].h);
6937 		drm_mode_probed_add(connector, mode);
6938 		amdgpu_dm_connector->num_modes++;
6939 	}
6940 }
6941 
6942 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6943 					      struct edid *edid)
6944 {
6945 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6946 			to_amdgpu_dm_connector(connector);
6947 
6948 	if (edid) {
6949 		/* empty probed_modes */
6950 		INIT_LIST_HEAD(&connector->probed_modes);
6951 		amdgpu_dm_connector->num_modes =
6952 				drm_add_edid_modes(connector, edid);
6953 
6954 		/* sorting the probed modes before calling function
6955 		 * amdgpu_dm_get_native_mode() since EDID can have
6956 		 * more than one preferred mode. The modes that are
6957 		 * later in the probed mode list could be of higher
6958 		 * and preferred resolution. For example, 3840x2160
6959 		 * resolution in base EDID preferred timing and 4096x2160
6960 		 * preferred resolution in DID extension block later.
6961 		 */
6962 		drm_mode_sort(&connector->probed_modes);
6963 		amdgpu_dm_get_native_mode(connector);
6964 	} else {
6965 		amdgpu_dm_connector->num_modes = 0;
6966 	}
6967 }
6968 
6969 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6970 {
6971 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6972 			to_amdgpu_dm_connector(connector);
6973 	struct drm_encoder *encoder;
6974 	struct edid *edid = amdgpu_dm_connector->edid;
6975 
6976 	encoder = amdgpu_dm_connector_to_encoder(connector);
6977 
6978 	if (!drm_edid_is_valid(edid)) {
6979 		amdgpu_dm_connector->num_modes =
6980 				drm_add_modes_noedid(connector, 640, 480);
6981 	} else {
6982 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6983 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6984 	}
6985 	amdgpu_dm_fbc_init(connector);
6986 
6987 	return amdgpu_dm_connector->num_modes;
6988 }
6989 
6990 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6991 				     struct amdgpu_dm_connector *aconnector,
6992 				     int connector_type,
6993 				     struct dc_link *link,
6994 				     int link_index)
6995 {
6996 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6997 
6998 	/*
6999 	 * Some of the properties below require access to state, like bpc.
7000 	 * Allocate some default initial connector state with our reset helper.
7001 	 */
7002 	if (aconnector->base.funcs->reset)
7003 		aconnector->base.funcs->reset(&aconnector->base);
7004 
7005 	aconnector->connector_id = link_index;
7006 	aconnector->dc_link = link;
7007 	aconnector->base.interlace_allowed = false;
7008 	aconnector->base.doublescan_allowed = false;
7009 	aconnector->base.stereo_allowed = false;
7010 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7011 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7012 	aconnector->audio_inst = -1;
7013 	mutex_init(&aconnector->hpd_lock);
7014 
7015 	/*
7016 	 * configure support HPD hot plug connector_>polled default value is 0
7017 	 * which means HPD hot plug not supported
7018 	 */
7019 	switch (connector_type) {
7020 	case DRM_MODE_CONNECTOR_HDMIA:
7021 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7022 		aconnector->base.ycbcr_420_allowed =
7023 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7024 		break;
7025 	case DRM_MODE_CONNECTOR_DisplayPort:
7026 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7027 		aconnector->base.ycbcr_420_allowed =
7028 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7029 		break;
7030 	case DRM_MODE_CONNECTOR_DVID:
7031 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7032 		break;
7033 	default:
7034 		break;
7035 	}
7036 
7037 	drm_object_attach_property(&aconnector->base.base,
7038 				dm->ddev->mode_config.scaling_mode_property,
7039 				DRM_MODE_SCALE_NONE);
7040 
7041 	drm_object_attach_property(&aconnector->base.base,
7042 				adev->mode_info.underscan_property,
7043 				UNDERSCAN_OFF);
7044 	drm_object_attach_property(&aconnector->base.base,
7045 				adev->mode_info.underscan_hborder_property,
7046 				0);
7047 	drm_object_attach_property(&aconnector->base.base,
7048 				adev->mode_info.underscan_vborder_property,
7049 				0);
7050 
7051 	if (!aconnector->mst_port)
7052 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7053 
7054 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7055 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7056 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7057 
7058 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7059 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7060 		drm_object_attach_property(&aconnector->base.base,
7061 				adev->mode_info.abm_level_property, 0);
7062 	}
7063 
7064 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7065 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7066 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7067 		drm_object_attach_property(
7068 			&aconnector->base.base,
7069 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7070 
7071 		if (!aconnector->mst_port)
7072 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7073 
7074 #ifdef CONFIG_DRM_AMD_DC_HDCP
7075 		if (adev->dm.hdcp_workqueue)
7076 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7077 #endif
7078 	}
7079 }
7080 
7081 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7082 			      struct i2c_msg *msgs, int num)
7083 {
7084 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7085 	struct ddc_service *ddc_service = i2c->ddc_service;
7086 	struct i2c_command cmd;
7087 	int i;
7088 	int result = -EIO;
7089 
7090 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7091 
7092 	if (!cmd.payloads)
7093 		return result;
7094 
7095 	cmd.number_of_payloads = num;
7096 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7097 	cmd.speed = 100;
7098 
7099 	for (i = 0; i < num; i++) {
7100 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7101 		cmd.payloads[i].address = msgs[i].addr;
7102 		cmd.payloads[i].length = msgs[i].len;
7103 		cmd.payloads[i].data = msgs[i].buf;
7104 	}
7105 
7106 	if (dc_submit_i2c(
7107 			ddc_service->ctx->dc,
7108 			ddc_service->ddc_pin->hw_info.ddc_channel,
7109 			&cmd))
7110 		result = num;
7111 
7112 	kfree(cmd.payloads);
7113 	return result;
7114 }
7115 
7116 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7117 {
7118 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7119 }
7120 
7121 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7122 	.master_xfer = amdgpu_dm_i2c_xfer,
7123 	.functionality = amdgpu_dm_i2c_func,
7124 };
7125 
7126 static struct amdgpu_i2c_adapter *
7127 create_i2c(struct ddc_service *ddc_service,
7128 	   int link_index,
7129 	   int *res)
7130 {
7131 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7132 	struct amdgpu_i2c_adapter *i2c;
7133 
7134 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7135 	if (!i2c)
7136 		return NULL;
7137 	i2c->base.owner = THIS_MODULE;
7138 	i2c->base.class = I2C_CLASS_DDC;
7139 	i2c->base.dev.parent = &adev->pdev->dev;
7140 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7141 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7142 	i2c_set_adapdata(&i2c->base, i2c);
7143 	i2c->ddc_service = ddc_service;
7144 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7145 
7146 	return i2c;
7147 }
7148 
7149 
7150 /*
7151  * Note: this function assumes that dc_link_detect() was called for the
7152  * dc_link which will be represented by this aconnector.
7153  */
7154 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7155 				    struct amdgpu_dm_connector *aconnector,
7156 				    uint32_t link_index,
7157 				    struct amdgpu_encoder *aencoder)
7158 {
7159 	int res = 0;
7160 	int connector_type;
7161 	struct dc *dc = dm->dc;
7162 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7163 	struct amdgpu_i2c_adapter *i2c;
7164 
7165 	link->priv = aconnector;
7166 
7167 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7168 
7169 	i2c = create_i2c(link->ddc, link->link_index, &res);
7170 	if (!i2c) {
7171 		DRM_ERROR("Failed to create i2c adapter data\n");
7172 		return -ENOMEM;
7173 	}
7174 
7175 	aconnector->i2c = i2c;
7176 	res = i2c_add_adapter(&i2c->base);
7177 
7178 	if (res) {
7179 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7180 		goto out_free;
7181 	}
7182 
7183 	connector_type = to_drm_connector_type(link->connector_signal);
7184 
7185 	res = drm_connector_init_with_ddc(
7186 			dm->ddev,
7187 			&aconnector->base,
7188 			&amdgpu_dm_connector_funcs,
7189 			connector_type,
7190 			&i2c->base);
7191 
7192 	if (res) {
7193 		DRM_ERROR("connector_init failed\n");
7194 		aconnector->connector_id = -1;
7195 		goto out_free;
7196 	}
7197 
7198 	drm_connector_helper_add(
7199 			&aconnector->base,
7200 			&amdgpu_dm_connector_helper_funcs);
7201 
7202 	amdgpu_dm_connector_init_helper(
7203 		dm,
7204 		aconnector,
7205 		connector_type,
7206 		link,
7207 		link_index);
7208 
7209 	drm_connector_attach_encoder(
7210 		&aconnector->base, &aencoder->base);
7211 
7212 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7213 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7214 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7215 
7216 out_free:
7217 	if (res) {
7218 		kfree(i2c);
7219 		aconnector->i2c = NULL;
7220 	}
7221 	return res;
7222 }
7223 
7224 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7225 {
7226 	switch (adev->mode_info.num_crtc) {
7227 	case 1:
7228 		return 0x1;
7229 	case 2:
7230 		return 0x3;
7231 	case 3:
7232 		return 0x7;
7233 	case 4:
7234 		return 0xf;
7235 	case 5:
7236 		return 0x1f;
7237 	case 6:
7238 	default:
7239 		return 0x3f;
7240 	}
7241 }
7242 
7243 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7244 				  struct amdgpu_encoder *aencoder,
7245 				  uint32_t link_index)
7246 {
7247 	struct amdgpu_device *adev = drm_to_adev(dev);
7248 
7249 	int res = drm_encoder_init(dev,
7250 				   &aencoder->base,
7251 				   &amdgpu_dm_encoder_funcs,
7252 				   DRM_MODE_ENCODER_TMDS,
7253 				   NULL);
7254 
7255 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7256 
7257 	if (!res)
7258 		aencoder->encoder_id = link_index;
7259 	else
7260 		aencoder->encoder_id = -1;
7261 
7262 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7263 
7264 	return res;
7265 }
7266 
7267 static void manage_dm_interrupts(struct amdgpu_device *adev,
7268 				 struct amdgpu_crtc *acrtc,
7269 				 bool enable)
7270 {
7271 	/*
7272 	 * We have no guarantee that the frontend index maps to the same
7273 	 * backend index - some even map to more than one.
7274 	 *
7275 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7276 	 */
7277 	int irq_type =
7278 		amdgpu_display_crtc_idx_to_irq_type(
7279 			adev,
7280 			acrtc->crtc_id);
7281 
7282 	if (enable) {
7283 		drm_crtc_vblank_on(&acrtc->base);
7284 		amdgpu_irq_get(
7285 			adev,
7286 			&adev->pageflip_irq,
7287 			irq_type);
7288 	} else {
7289 
7290 		amdgpu_irq_put(
7291 			adev,
7292 			&adev->pageflip_irq,
7293 			irq_type);
7294 		drm_crtc_vblank_off(&acrtc->base);
7295 	}
7296 }
7297 
7298 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7299 				      struct amdgpu_crtc *acrtc)
7300 {
7301 	int irq_type =
7302 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7303 
7304 	/**
7305 	 * This reads the current state for the IRQ and force reapplies
7306 	 * the setting to hardware.
7307 	 */
7308 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7309 }
7310 
7311 static bool
7312 is_scaling_state_different(const struct dm_connector_state *dm_state,
7313 			   const struct dm_connector_state *old_dm_state)
7314 {
7315 	if (dm_state->scaling != old_dm_state->scaling)
7316 		return true;
7317 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7318 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7319 			return true;
7320 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7321 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7322 			return true;
7323 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7324 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7325 		return true;
7326 	return false;
7327 }
7328 
7329 #ifdef CONFIG_DRM_AMD_DC_HDCP
7330 static bool is_content_protection_different(struct drm_connector_state *state,
7331 					    const struct drm_connector_state *old_state,
7332 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7333 {
7334 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7335 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7336 
7337 	/* Handle: Type0/1 change */
7338 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7339 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7340 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7341 		return true;
7342 	}
7343 
7344 	/* CP is being re enabled, ignore this
7345 	 *
7346 	 * Handles:	ENABLED -> DESIRED
7347 	 */
7348 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7349 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7350 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7351 		return false;
7352 	}
7353 
7354 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7355 	 *
7356 	 * Handles:	UNDESIRED -> ENABLED
7357 	 */
7358 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7359 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7360 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7361 
7362 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7363 	 * hot-plug, headless s3, dpms
7364 	 *
7365 	 * Handles:	DESIRED -> DESIRED (Special case)
7366 	 */
7367 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7368 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7369 		dm_con_state->update_hdcp = false;
7370 		return true;
7371 	}
7372 
7373 	/*
7374 	 * Handles:	UNDESIRED -> UNDESIRED
7375 	 *		DESIRED -> DESIRED
7376 	 *		ENABLED -> ENABLED
7377 	 */
7378 	if (old_state->content_protection == state->content_protection)
7379 		return false;
7380 
7381 	/*
7382 	 * Handles:	UNDESIRED -> DESIRED
7383 	 *		DESIRED -> UNDESIRED
7384 	 *		ENABLED -> UNDESIRED
7385 	 */
7386 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7387 		return true;
7388 
7389 	/*
7390 	 * Handles:	DESIRED -> ENABLED
7391 	 */
7392 	return false;
7393 }
7394 
7395 #endif
7396 static void remove_stream(struct amdgpu_device *adev,
7397 			  struct amdgpu_crtc *acrtc,
7398 			  struct dc_stream_state *stream)
7399 {
7400 	/* this is the update mode case */
7401 
7402 	acrtc->otg_inst = -1;
7403 	acrtc->enabled = false;
7404 }
7405 
7406 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7407 			       struct dc_cursor_position *position)
7408 {
7409 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7410 	int x, y;
7411 	int xorigin = 0, yorigin = 0;
7412 
7413 	position->enable = false;
7414 	position->x = 0;
7415 	position->y = 0;
7416 
7417 	if (!crtc || !plane->state->fb)
7418 		return 0;
7419 
7420 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7421 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7422 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7423 			  __func__,
7424 			  plane->state->crtc_w,
7425 			  plane->state->crtc_h);
7426 		return -EINVAL;
7427 	}
7428 
7429 	x = plane->state->crtc_x;
7430 	y = plane->state->crtc_y;
7431 
7432 	if (x <= -amdgpu_crtc->max_cursor_width ||
7433 	    y <= -amdgpu_crtc->max_cursor_height)
7434 		return 0;
7435 
7436 	if (x < 0) {
7437 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7438 		x = 0;
7439 	}
7440 	if (y < 0) {
7441 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7442 		y = 0;
7443 	}
7444 	position->enable = true;
7445 	position->translate_by_source = true;
7446 	position->x = x;
7447 	position->y = y;
7448 	position->x_hotspot = xorigin;
7449 	position->y_hotspot = yorigin;
7450 
7451 	return 0;
7452 }
7453 
7454 static void handle_cursor_update(struct drm_plane *plane,
7455 				 struct drm_plane_state *old_plane_state)
7456 {
7457 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7458 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7459 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7460 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7461 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7462 	uint64_t address = afb ? afb->address : 0;
7463 	struct dc_cursor_position position;
7464 	struct dc_cursor_attributes attributes;
7465 	int ret;
7466 
7467 	if (!plane->state->fb && !old_plane_state->fb)
7468 		return;
7469 
7470 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7471 			 __func__,
7472 			 amdgpu_crtc->crtc_id,
7473 			 plane->state->crtc_w,
7474 			 plane->state->crtc_h);
7475 
7476 	ret = get_cursor_position(plane, crtc, &position);
7477 	if (ret)
7478 		return;
7479 
7480 	if (!position.enable) {
7481 		/* turn off cursor */
7482 		if (crtc_state && crtc_state->stream) {
7483 			mutex_lock(&adev->dm.dc_lock);
7484 			dc_stream_set_cursor_position(crtc_state->stream,
7485 						      &position);
7486 			mutex_unlock(&adev->dm.dc_lock);
7487 		}
7488 		return;
7489 	}
7490 
7491 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7492 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7493 
7494 	memset(&attributes, 0, sizeof(attributes));
7495 	attributes.address.high_part = upper_32_bits(address);
7496 	attributes.address.low_part  = lower_32_bits(address);
7497 	attributes.width             = plane->state->crtc_w;
7498 	attributes.height            = plane->state->crtc_h;
7499 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7500 	attributes.rotation_angle    = 0;
7501 	attributes.attribute_flags.value = 0;
7502 
7503 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7504 
7505 	if (crtc_state->stream) {
7506 		mutex_lock(&adev->dm.dc_lock);
7507 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7508 							 &attributes))
7509 			DRM_ERROR("DC failed to set cursor attributes\n");
7510 
7511 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7512 						   &position))
7513 			DRM_ERROR("DC failed to set cursor position\n");
7514 		mutex_unlock(&adev->dm.dc_lock);
7515 	}
7516 }
7517 
7518 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7519 {
7520 
7521 	assert_spin_locked(&acrtc->base.dev->event_lock);
7522 	WARN_ON(acrtc->event);
7523 
7524 	acrtc->event = acrtc->base.state->event;
7525 
7526 	/* Set the flip status */
7527 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7528 
7529 	/* Mark this event as consumed */
7530 	acrtc->base.state->event = NULL;
7531 
7532 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7533 						 acrtc->crtc_id);
7534 }
7535 
7536 static void update_freesync_state_on_stream(
7537 	struct amdgpu_display_manager *dm,
7538 	struct dm_crtc_state *new_crtc_state,
7539 	struct dc_stream_state *new_stream,
7540 	struct dc_plane_state *surface,
7541 	u32 flip_timestamp_in_us)
7542 {
7543 	struct mod_vrr_params vrr_params;
7544 	struct dc_info_packet vrr_infopacket = {0};
7545 	struct amdgpu_device *adev = dm->adev;
7546 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7547 	unsigned long flags;
7548 
7549 	if (!new_stream)
7550 		return;
7551 
7552 	/*
7553 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7554 	 * For now it's sufficient to just guard against these conditions.
7555 	 */
7556 
7557 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7558 		return;
7559 
7560 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7561         vrr_params = acrtc->dm_irq_params.vrr_params;
7562 
7563 	if (surface) {
7564 		mod_freesync_handle_preflip(
7565 			dm->freesync_module,
7566 			surface,
7567 			new_stream,
7568 			flip_timestamp_in_us,
7569 			&vrr_params);
7570 
7571 		if (adev->family < AMDGPU_FAMILY_AI &&
7572 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7573 			mod_freesync_handle_v_update(dm->freesync_module,
7574 						     new_stream, &vrr_params);
7575 
7576 			/* Need to call this before the frame ends. */
7577 			dc_stream_adjust_vmin_vmax(dm->dc,
7578 						   new_crtc_state->stream,
7579 						   &vrr_params.adjust);
7580 		}
7581 	}
7582 
7583 	mod_freesync_build_vrr_infopacket(
7584 		dm->freesync_module,
7585 		new_stream,
7586 		&vrr_params,
7587 		PACKET_TYPE_VRR,
7588 		TRANSFER_FUNC_UNKNOWN,
7589 		&vrr_infopacket);
7590 
7591 	new_crtc_state->freesync_timing_changed |=
7592 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7593 			&vrr_params.adjust,
7594 			sizeof(vrr_params.adjust)) != 0);
7595 
7596 	new_crtc_state->freesync_vrr_info_changed |=
7597 		(memcmp(&new_crtc_state->vrr_infopacket,
7598 			&vrr_infopacket,
7599 			sizeof(vrr_infopacket)) != 0);
7600 
7601 	acrtc->dm_irq_params.vrr_params = vrr_params;
7602 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7603 
7604 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7605 	new_stream->vrr_infopacket = vrr_infopacket;
7606 
7607 	if (new_crtc_state->freesync_vrr_info_changed)
7608 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7609 			      new_crtc_state->base.crtc->base.id,
7610 			      (int)new_crtc_state->base.vrr_enabled,
7611 			      (int)vrr_params.state);
7612 
7613 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7614 }
7615 
7616 static void update_stream_irq_parameters(
7617 	struct amdgpu_display_manager *dm,
7618 	struct dm_crtc_state *new_crtc_state)
7619 {
7620 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7621 	struct mod_vrr_params vrr_params;
7622 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7623 	struct amdgpu_device *adev = dm->adev;
7624 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7625 	unsigned long flags;
7626 
7627 	if (!new_stream)
7628 		return;
7629 
7630 	/*
7631 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7632 	 * For now it's sufficient to just guard against these conditions.
7633 	 */
7634 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7635 		return;
7636 
7637 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7638 	vrr_params = acrtc->dm_irq_params.vrr_params;
7639 
7640 	if (new_crtc_state->vrr_supported &&
7641 	    config.min_refresh_in_uhz &&
7642 	    config.max_refresh_in_uhz) {
7643 		config.state = new_crtc_state->base.vrr_enabled ?
7644 			VRR_STATE_ACTIVE_VARIABLE :
7645 			VRR_STATE_INACTIVE;
7646 	} else {
7647 		config.state = VRR_STATE_UNSUPPORTED;
7648 	}
7649 
7650 	mod_freesync_build_vrr_params(dm->freesync_module,
7651 				      new_stream,
7652 				      &config, &vrr_params);
7653 
7654 	new_crtc_state->freesync_timing_changed |=
7655 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7656 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7657 
7658 	new_crtc_state->freesync_config = config;
7659 	/* Copy state for access from DM IRQ handler */
7660 	acrtc->dm_irq_params.freesync_config = config;
7661 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7662 	acrtc->dm_irq_params.vrr_params = vrr_params;
7663 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7664 }
7665 
7666 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7667 					    struct dm_crtc_state *new_state)
7668 {
7669 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7670 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7671 
7672 	if (!old_vrr_active && new_vrr_active) {
7673 		/* Transition VRR inactive -> active:
7674 		 * While VRR is active, we must not disable vblank irq, as a
7675 		 * reenable after disable would compute bogus vblank/pflip
7676 		 * timestamps if it likely happened inside display front-porch.
7677 		 *
7678 		 * We also need vupdate irq for the actual core vblank handling
7679 		 * at end of vblank.
7680 		 */
7681 		dm_set_vupdate_irq(new_state->base.crtc, true);
7682 		drm_crtc_vblank_get(new_state->base.crtc);
7683 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7684 				 __func__, new_state->base.crtc->base.id);
7685 	} else if (old_vrr_active && !new_vrr_active) {
7686 		/* Transition VRR active -> inactive:
7687 		 * Allow vblank irq disable again for fixed refresh rate.
7688 		 */
7689 		dm_set_vupdate_irq(new_state->base.crtc, false);
7690 		drm_crtc_vblank_put(new_state->base.crtc);
7691 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7692 				 __func__, new_state->base.crtc->base.id);
7693 	}
7694 }
7695 
7696 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7697 {
7698 	struct drm_plane *plane;
7699 	struct drm_plane_state *old_plane_state, *new_plane_state;
7700 	int i;
7701 
7702 	/*
7703 	 * TODO: Make this per-stream so we don't issue redundant updates for
7704 	 * commits with multiple streams.
7705 	 */
7706 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7707 				       new_plane_state, i)
7708 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7709 			handle_cursor_update(plane, old_plane_state);
7710 }
7711 
7712 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7713 				    struct dc_state *dc_state,
7714 				    struct drm_device *dev,
7715 				    struct amdgpu_display_manager *dm,
7716 				    struct drm_crtc *pcrtc,
7717 				    bool wait_for_vblank)
7718 {
7719 	uint32_t i;
7720 	uint64_t timestamp_ns;
7721 	struct drm_plane *plane;
7722 	struct drm_plane_state *old_plane_state, *new_plane_state;
7723 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7724 	struct drm_crtc_state *new_pcrtc_state =
7725 			drm_atomic_get_new_crtc_state(state, pcrtc);
7726 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7727 	struct dm_crtc_state *dm_old_crtc_state =
7728 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7729 	int planes_count = 0, vpos, hpos;
7730 	long r;
7731 	unsigned long flags;
7732 	struct amdgpu_bo *abo;
7733 	uint32_t target_vblank, last_flip_vblank;
7734 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7735 	bool pflip_present = false;
7736 	struct {
7737 		struct dc_surface_update surface_updates[MAX_SURFACES];
7738 		struct dc_plane_info plane_infos[MAX_SURFACES];
7739 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7740 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7741 		struct dc_stream_update stream_update;
7742 	} *bundle;
7743 
7744 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7745 
7746 	if (!bundle) {
7747 		dm_error("Failed to allocate update bundle\n");
7748 		goto cleanup;
7749 	}
7750 
7751 	/*
7752 	 * Disable the cursor first if we're disabling all the planes.
7753 	 * It'll remain on the screen after the planes are re-enabled
7754 	 * if we don't.
7755 	 */
7756 	if (acrtc_state->active_planes == 0)
7757 		amdgpu_dm_commit_cursors(state);
7758 
7759 	/* update planes when needed */
7760 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7761 		struct drm_crtc *crtc = new_plane_state->crtc;
7762 		struct drm_crtc_state *new_crtc_state;
7763 		struct drm_framebuffer *fb = new_plane_state->fb;
7764 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7765 		bool plane_needs_flip;
7766 		struct dc_plane_state *dc_plane;
7767 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7768 
7769 		/* Cursor plane is handled after stream updates */
7770 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7771 			continue;
7772 
7773 		if (!fb || !crtc || pcrtc != crtc)
7774 			continue;
7775 
7776 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7777 		if (!new_crtc_state->active)
7778 			continue;
7779 
7780 		dc_plane = dm_new_plane_state->dc_state;
7781 
7782 		bundle->surface_updates[planes_count].surface = dc_plane;
7783 		if (new_pcrtc_state->color_mgmt_changed) {
7784 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7785 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7786 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7787 		}
7788 
7789 		fill_dc_scaling_info(new_plane_state,
7790 				     &bundle->scaling_infos[planes_count]);
7791 
7792 		bundle->surface_updates[planes_count].scaling_info =
7793 			&bundle->scaling_infos[planes_count];
7794 
7795 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7796 
7797 		pflip_present = pflip_present || plane_needs_flip;
7798 
7799 		if (!plane_needs_flip) {
7800 			planes_count += 1;
7801 			continue;
7802 		}
7803 
7804 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7805 
7806 		/*
7807 		 * Wait for all fences on this FB. Do limited wait to avoid
7808 		 * deadlock during GPU reset when this fence will not signal
7809 		 * but we hold reservation lock for the BO.
7810 		 */
7811 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7812 							false,
7813 							msecs_to_jiffies(5000));
7814 		if (unlikely(r <= 0))
7815 			DRM_ERROR("Waiting for fences timed out!");
7816 
7817 		fill_dc_plane_info_and_addr(
7818 			dm->adev, new_plane_state,
7819 			afb->tiling_flags,
7820 			&bundle->plane_infos[planes_count],
7821 			&bundle->flip_addrs[planes_count].address,
7822 			afb->tmz_surface, false);
7823 
7824 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7825 				 new_plane_state->plane->index,
7826 				 bundle->plane_infos[planes_count].dcc.enable);
7827 
7828 		bundle->surface_updates[planes_count].plane_info =
7829 			&bundle->plane_infos[planes_count];
7830 
7831 		/*
7832 		 * Only allow immediate flips for fast updates that don't
7833 		 * change FB pitch, DCC state, rotation or mirroing.
7834 		 */
7835 		bundle->flip_addrs[planes_count].flip_immediate =
7836 			crtc->state->async_flip &&
7837 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7838 
7839 		timestamp_ns = ktime_get_ns();
7840 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7841 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7842 		bundle->surface_updates[planes_count].surface = dc_plane;
7843 
7844 		if (!bundle->surface_updates[planes_count].surface) {
7845 			DRM_ERROR("No surface for CRTC: id=%d\n",
7846 					acrtc_attach->crtc_id);
7847 			continue;
7848 		}
7849 
7850 		if (plane == pcrtc->primary)
7851 			update_freesync_state_on_stream(
7852 				dm,
7853 				acrtc_state,
7854 				acrtc_state->stream,
7855 				dc_plane,
7856 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7857 
7858 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7859 				 __func__,
7860 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7861 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7862 
7863 		planes_count += 1;
7864 
7865 	}
7866 
7867 	if (pflip_present) {
7868 		if (!vrr_active) {
7869 			/* Use old throttling in non-vrr fixed refresh rate mode
7870 			 * to keep flip scheduling based on target vblank counts
7871 			 * working in a backwards compatible way, e.g., for
7872 			 * clients using the GLX_OML_sync_control extension or
7873 			 * DRI3/Present extension with defined target_msc.
7874 			 */
7875 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7876 		}
7877 		else {
7878 			/* For variable refresh rate mode only:
7879 			 * Get vblank of last completed flip to avoid > 1 vrr
7880 			 * flips per video frame by use of throttling, but allow
7881 			 * flip programming anywhere in the possibly large
7882 			 * variable vrr vblank interval for fine-grained flip
7883 			 * timing control and more opportunity to avoid stutter
7884 			 * on late submission of flips.
7885 			 */
7886 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7887 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7888 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7889 		}
7890 
7891 		target_vblank = last_flip_vblank + wait_for_vblank;
7892 
7893 		/*
7894 		 * Wait until we're out of the vertical blank period before the one
7895 		 * targeted by the flip
7896 		 */
7897 		while ((acrtc_attach->enabled &&
7898 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7899 							    0, &vpos, &hpos, NULL,
7900 							    NULL, &pcrtc->hwmode)
7901 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7902 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7903 			(int)(target_vblank -
7904 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7905 			usleep_range(1000, 1100);
7906 		}
7907 
7908 		/**
7909 		 * Prepare the flip event for the pageflip interrupt to handle.
7910 		 *
7911 		 * This only works in the case where we've already turned on the
7912 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7913 		 * from 0 -> n planes we have to skip a hardware generated event
7914 		 * and rely on sending it from software.
7915 		 */
7916 		if (acrtc_attach->base.state->event &&
7917 		    acrtc_state->active_planes > 0) {
7918 			drm_crtc_vblank_get(pcrtc);
7919 
7920 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7921 
7922 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7923 			prepare_flip_isr(acrtc_attach);
7924 
7925 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7926 		}
7927 
7928 		if (acrtc_state->stream) {
7929 			if (acrtc_state->freesync_vrr_info_changed)
7930 				bundle->stream_update.vrr_infopacket =
7931 					&acrtc_state->stream->vrr_infopacket;
7932 		}
7933 	}
7934 
7935 	/* Update the planes if changed or disable if we don't have any. */
7936 	if ((planes_count || acrtc_state->active_planes == 0) &&
7937 		acrtc_state->stream) {
7938 		bundle->stream_update.stream = acrtc_state->stream;
7939 		if (new_pcrtc_state->mode_changed) {
7940 			bundle->stream_update.src = acrtc_state->stream->src;
7941 			bundle->stream_update.dst = acrtc_state->stream->dst;
7942 		}
7943 
7944 		if (new_pcrtc_state->color_mgmt_changed) {
7945 			/*
7946 			 * TODO: This isn't fully correct since we've actually
7947 			 * already modified the stream in place.
7948 			 */
7949 			bundle->stream_update.gamut_remap =
7950 				&acrtc_state->stream->gamut_remap_matrix;
7951 			bundle->stream_update.output_csc_transform =
7952 				&acrtc_state->stream->csc_color_matrix;
7953 			bundle->stream_update.out_transfer_func =
7954 				acrtc_state->stream->out_transfer_func;
7955 		}
7956 
7957 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7958 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7959 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7960 
7961 		/*
7962 		 * If FreeSync state on the stream has changed then we need to
7963 		 * re-adjust the min/max bounds now that DC doesn't handle this
7964 		 * as part of commit.
7965 		 */
7966 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7967 		    amdgpu_dm_vrr_active(acrtc_state)) {
7968 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7969 			dc_stream_adjust_vmin_vmax(
7970 				dm->dc, acrtc_state->stream,
7971 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7972 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7973 		}
7974 		mutex_lock(&dm->dc_lock);
7975 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7976 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7977 			amdgpu_dm_psr_disable(acrtc_state->stream);
7978 
7979 		dc_commit_updates_for_stream(dm->dc,
7980 						     bundle->surface_updates,
7981 						     planes_count,
7982 						     acrtc_state->stream,
7983 						     &bundle->stream_update,
7984 						     dc_state);
7985 
7986 		/**
7987 		 * Enable or disable the interrupts on the backend.
7988 		 *
7989 		 * Most pipes are put into power gating when unused.
7990 		 *
7991 		 * When power gating is enabled on a pipe we lose the
7992 		 * interrupt enablement state when power gating is disabled.
7993 		 *
7994 		 * So we need to update the IRQ control state in hardware
7995 		 * whenever the pipe turns on (since it could be previously
7996 		 * power gated) or off (since some pipes can't be power gated
7997 		 * on some ASICs).
7998 		 */
7999 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8000 			dm_update_pflip_irq_state(drm_to_adev(dev),
8001 						  acrtc_attach);
8002 
8003 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8004 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8005 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8006 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8007 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8008 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8009 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8010 			amdgpu_dm_psr_enable(acrtc_state->stream);
8011 		}
8012 
8013 		mutex_unlock(&dm->dc_lock);
8014 	}
8015 
8016 	/*
8017 	 * Update cursor state *after* programming all the planes.
8018 	 * This avoids redundant programming in the case where we're going
8019 	 * to be disabling a single plane - those pipes are being disabled.
8020 	 */
8021 	if (acrtc_state->active_planes)
8022 		amdgpu_dm_commit_cursors(state);
8023 
8024 cleanup:
8025 	kfree(bundle);
8026 }
8027 
8028 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8029 				   struct drm_atomic_state *state)
8030 {
8031 	struct amdgpu_device *adev = drm_to_adev(dev);
8032 	struct amdgpu_dm_connector *aconnector;
8033 	struct drm_connector *connector;
8034 	struct drm_connector_state *old_con_state, *new_con_state;
8035 	struct drm_crtc_state *new_crtc_state;
8036 	struct dm_crtc_state *new_dm_crtc_state;
8037 	const struct dc_stream_status *status;
8038 	int i, inst;
8039 
8040 	/* Notify device removals. */
8041 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8042 		if (old_con_state->crtc != new_con_state->crtc) {
8043 			/* CRTC changes require notification. */
8044 			goto notify;
8045 		}
8046 
8047 		if (!new_con_state->crtc)
8048 			continue;
8049 
8050 		new_crtc_state = drm_atomic_get_new_crtc_state(
8051 			state, new_con_state->crtc);
8052 
8053 		if (!new_crtc_state)
8054 			continue;
8055 
8056 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8057 			continue;
8058 
8059 	notify:
8060 		aconnector = to_amdgpu_dm_connector(connector);
8061 
8062 		mutex_lock(&adev->dm.audio_lock);
8063 		inst = aconnector->audio_inst;
8064 		aconnector->audio_inst = -1;
8065 		mutex_unlock(&adev->dm.audio_lock);
8066 
8067 		amdgpu_dm_audio_eld_notify(adev, inst);
8068 	}
8069 
8070 	/* Notify audio device additions. */
8071 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8072 		if (!new_con_state->crtc)
8073 			continue;
8074 
8075 		new_crtc_state = drm_atomic_get_new_crtc_state(
8076 			state, new_con_state->crtc);
8077 
8078 		if (!new_crtc_state)
8079 			continue;
8080 
8081 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8082 			continue;
8083 
8084 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8085 		if (!new_dm_crtc_state->stream)
8086 			continue;
8087 
8088 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8089 		if (!status)
8090 			continue;
8091 
8092 		aconnector = to_amdgpu_dm_connector(connector);
8093 
8094 		mutex_lock(&adev->dm.audio_lock);
8095 		inst = status->audio_inst;
8096 		aconnector->audio_inst = inst;
8097 		mutex_unlock(&adev->dm.audio_lock);
8098 
8099 		amdgpu_dm_audio_eld_notify(adev, inst);
8100 	}
8101 }
8102 
8103 /*
8104  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8105  * @crtc_state: the DRM CRTC state
8106  * @stream_state: the DC stream state.
8107  *
8108  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8109  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8110  */
8111 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8112 						struct dc_stream_state *stream_state)
8113 {
8114 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8115 }
8116 
8117 /**
8118  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8119  * @state: The atomic state to commit
8120  *
8121  * This will tell DC to commit the constructed DC state from atomic_check,
8122  * programming the hardware. Any failures here implies a hardware failure, since
8123  * atomic check should have filtered anything non-kosher.
8124  */
8125 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8126 {
8127 	struct drm_device *dev = state->dev;
8128 	struct amdgpu_device *adev = drm_to_adev(dev);
8129 	struct amdgpu_display_manager *dm = &adev->dm;
8130 	struct dm_atomic_state *dm_state;
8131 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8132 	uint32_t i, j;
8133 	struct drm_crtc *crtc;
8134 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8135 	unsigned long flags;
8136 	bool wait_for_vblank = true;
8137 	struct drm_connector *connector;
8138 	struct drm_connector_state *old_con_state, *new_con_state;
8139 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8140 	int crtc_disable_count = 0;
8141 	bool mode_set_reset_required = false;
8142 
8143 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8144 
8145 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8146 
8147 	dm_state = dm_atomic_get_new_state(state);
8148 	if (dm_state && dm_state->context) {
8149 		dc_state = dm_state->context;
8150 	} else {
8151 		/* No state changes, retain current state. */
8152 		dc_state_temp = dc_create_state(dm->dc);
8153 		ASSERT(dc_state_temp);
8154 		dc_state = dc_state_temp;
8155 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8156 	}
8157 
8158 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8159 				       new_crtc_state, i) {
8160 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8161 
8162 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8163 
8164 		if (old_crtc_state->active &&
8165 		    (!new_crtc_state->active ||
8166 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8167 			manage_dm_interrupts(adev, acrtc, false);
8168 			dc_stream_release(dm_old_crtc_state->stream);
8169 		}
8170 	}
8171 
8172 	drm_atomic_helper_calc_timestamping_constants(state);
8173 
8174 	/* update changed items */
8175 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8176 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8177 
8178 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8179 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8180 
8181 		DRM_DEBUG_DRIVER(
8182 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8183 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8184 			"connectors_changed:%d\n",
8185 			acrtc->crtc_id,
8186 			new_crtc_state->enable,
8187 			new_crtc_state->active,
8188 			new_crtc_state->planes_changed,
8189 			new_crtc_state->mode_changed,
8190 			new_crtc_state->active_changed,
8191 			new_crtc_state->connectors_changed);
8192 
8193 		/* Disable cursor if disabling crtc */
8194 		if (old_crtc_state->active && !new_crtc_state->active) {
8195 			struct dc_cursor_position position;
8196 
8197 			memset(&position, 0, sizeof(position));
8198 			mutex_lock(&dm->dc_lock);
8199 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8200 			mutex_unlock(&dm->dc_lock);
8201 		}
8202 
8203 		/* Copy all transient state flags into dc state */
8204 		if (dm_new_crtc_state->stream) {
8205 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8206 							    dm_new_crtc_state->stream);
8207 		}
8208 
8209 		/* handles headless hotplug case, updating new_state and
8210 		 * aconnector as needed
8211 		 */
8212 
8213 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8214 
8215 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8216 
8217 			if (!dm_new_crtc_state->stream) {
8218 				/*
8219 				 * this could happen because of issues with
8220 				 * userspace notifications delivery.
8221 				 * In this case userspace tries to set mode on
8222 				 * display which is disconnected in fact.
8223 				 * dc_sink is NULL in this case on aconnector.
8224 				 * We expect reset mode will come soon.
8225 				 *
8226 				 * This can also happen when unplug is done
8227 				 * during resume sequence ended
8228 				 *
8229 				 * In this case, we want to pretend we still
8230 				 * have a sink to keep the pipe running so that
8231 				 * hw state is consistent with the sw state
8232 				 */
8233 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8234 						__func__, acrtc->base.base.id);
8235 				continue;
8236 			}
8237 
8238 			if (dm_old_crtc_state->stream)
8239 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8240 
8241 			pm_runtime_get_noresume(dev->dev);
8242 
8243 			acrtc->enabled = true;
8244 			acrtc->hw_mode = new_crtc_state->mode;
8245 			crtc->hwmode = new_crtc_state->mode;
8246 			mode_set_reset_required = true;
8247 		} else if (modereset_required(new_crtc_state)) {
8248 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8249 			/* i.e. reset mode */
8250 			if (dm_old_crtc_state->stream)
8251 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8252 			mode_set_reset_required = true;
8253 		}
8254 	} /* for_each_crtc_in_state() */
8255 
8256 	if (dc_state) {
8257 		/* if there mode set or reset, disable eDP PSR */
8258 		if (mode_set_reset_required)
8259 			amdgpu_dm_psr_disable_all(dm);
8260 
8261 		dm_enable_per_frame_crtc_master_sync(dc_state);
8262 		mutex_lock(&dm->dc_lock);
8263 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8264 		mutex_unlock(&dm->dc_lock);
8265 	}
8266 
8267 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8268 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8269 
8270 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8271 
8272 		if (dm_new_crtc_state->stream != NULL) {
8273 			const struct dc_stream_status *status =
8274 					dc_stream_get_status(dm_new_crtc_state->stream);
8275 
8276 			if (!status)
8277 				status = dc_stream_get_status_from_state(dc_state,
8278 									 dm_new_crtc_state->stream);
8279 			if (!status)
8280 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8281 			else
8282 				acrtc->otg_inst = status->primary_otg_inst;
8283 		}
8284 	}
8285 #ifdef CONFIG_DRM_AMD_DC_HDCP
8286 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8287 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8288 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8289 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8290 
8291 		new_crtc_state = NULL;
8292 
8293 		if (acrtc)
8294 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8295 
8296 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8297 
8298 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8299 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8300 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8301 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8302 			dm_new_con_state->update_hdcp = true;
8303 			continue;
8304 		}
8305 
8306 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8307 			hdcp_update_display(
8308 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8309 				new_con_state->hdcp_content_type,
8310 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8311 													 : false);
8312 	}
8313 #endif
8314 
8315 	/* Handle connector state changes */
8316 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8317 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8318 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8319 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8320 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8321 		struct dc_stream_update stream_update;
8322 		struct dc_info_packet hdr_packet;
8323 		struct dc_stream_status *status = NULL;
8324 		bool abm_changed, hdr_changed, scaling_changed;
8325 
8326 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8327 		memset(&stream_update, 0, sizeof(stream_update));
8328 
8329 		if (acrtc) {
8330 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8331 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8332 		}
8333 
8334 		/* Skip any modesets/resets */
8335 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8336 			continue;
8337 
8338 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8339 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8340 
8341 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8342 							     dm_old_con_state);
8343 
8344 		abm_changed = dm_new_crtc_state->abm_level !=
8345 			      dm_old_crtc_state->abm_level;
8346 
8347 		hdr_changed =
8348 			is_hdr_metadata_different(old_con_state, new_con_state);
8349 
8350 		if (!scaling_changed && !abm_changed && !hdr_changed)
8351 			continue;
8352 
8353 		stream_update.stream = dm_new_crtc_state->stream;
8354 		if (scaling_changed) {
8355 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8356 					dm_new_con_state, dm_new_crtc_state->stream);
8357 
8358 			stream_update.src = dm_new_crtc_state->stream->src;
8359 			stream_update.dst = dm_new_crtc_state->stream->dst;
8360 		}
8361 
8362 		if (abm_changed) {
8363 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8364 
8365 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8366 		}
8367 
8368 		if (hdr_changed) {
8369 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8370 			stream_update.hdr_static_metadata = &hdr_packet;
8371 		}
8372 
8373 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8374 		WARN_ON(!status);
8375 		WARN_ON(!status->plane_count);
8376 
8377 		/*
8378 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8379 		 * Here we create an empty update on each plane.
8380 		 * To fix this, DC should permit updating only stream properties.
8381 		 */
8382 		for (j = 0; j < status->plane_count; j++)
8383 			dummy_updates[j].surface = status->plane_states[0];
8384 
8385 
8386 		mutex_lock(&dm->dc_lock);
8387 		dc_commit_updates_for_stream(dm->dc,
8388 						     dummy_updates,
8389 						     status->plane_count,
8390 						     dm_new_crtc_state->stream,
8391 						     &stream_update,
8392 						     dc_state);
8393 		mutex_unlock(&dm->dc_lock);
8394 	}
8395 
8396 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8397 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8398 				      new_crtc_state, i) {
8399 		if (old_crtc_state->active && !new_crtc_state->active)
8400 			crtc_disable_count++;
8401 
8402 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8403 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8404 
8405 		/* For freesync config update on crtc state and params for irq */
8406 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8407 
8408 		/* Handle vrr on->off / off->on transitions */
8409 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8410 						dm_new_crtc_state);
8411 	}
8412 
8413 	/**
8414 	 * Enable interrupts for CRTCs that are newly enabled or went through
8415 	 * a modeset. It was intentionally deferred until after the front end
8416 	 * state was modified to wait until the OTG was on and so the IRQ
8417 	 * handlers didn't access stale or invalid state.
8418 	 */
8419 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8420 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8421 
8422 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8423 
8424 		if (new_crtc_state->active &&
8425 		    (!old_crtc_state->active ||
8426 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8427 			dc_stream_retain(dm_new_crtc_state->stream);
8428 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8429 			manage_dm_interrupts(adev, acrtc, true);
8430 
8431 #ifdef CONFIG_DEBUG_FS
8432 			/**
8433 			 * Frontend may have changed so reapply the CRC capture
8434 			 * settings for the stream.
8435 			 */
8436 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8437 
8438 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8439 				amdgpu_dm_crtc_configure_crc_source(
8440 					crtc, dm_new_crtc_state,
8441 					dm_new_crtc_state->crc_src);
8442 			}
8443 #endif
8444 		}
8445 	}
8446 
8447 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8448 		if (new_crtc_state->async_flip)
8449 			wait_for_vblank = false;
8450 
8451 	/* update planes when needed per crtc*/
8452 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8453 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8454 
8455 		if (dm_new_crtc_state->stream)
8456 			amdgpu_dm_commit_planes(state, dc_state, dev,
8457 						dm, crtc, wait_for_vblank);
8458 	}
8459 
8460 	/* Update audio instances for each connector. */
8461 	amdgpu_dm_commit_audio(dev, state);
8462 
8463 	/*
8464 	 * send vblank event on all events not handled in flip and
8465 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8466 	 */
8467 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8468 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8469 
8470 		if (new_crtc_state->event)
8471 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8472 
8473 		new_crtc_state->event = NULL;
8474 	}
8475 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8476 
8477 	/* Signal HW programming completion */
8478 	drm_atomic_helper_commit_hw_done(state);
8479 
8480 	if (wait_for_vblank)
8481 		drm_atomic_helper_wait_for_flip_done(dev, state);
8482 
8483 	drm_atomic_helper_cleanup_planes(dev, state);
8484 
8485 	/* return the stolen vga memory back to VRAM */
8486 	if (!adev->mman.keep_stolen_vga_memory)
8487 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8488 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8489 
8490 	/*
8491 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8492 	 * so we can put the GPU into runtime suspend if we're not driving any
8493 	 * displays anymore
8494 	 */
8495 	for (i = 0; i < crtc_disable_count; i++)
8496 		pm_runtime_put_autosuspend(dev->dev);
8497 	pm_runtime_mark_last_busy(dev->dev);
8498 
8499 	if (dc_state_temp)
8500 		dc_release_state(dc_state_temp);
8501 }
8502 
8503 
8504 static int dm_force_atomic_commit(struct drm_connector *connector)
8505 {
8506 	int ret = 0;
8507 	struct drm_device *ddev = connector->dev;
8508 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8509 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8510 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8511 	struct drm_connector_state *conn_state;
8512 	struct drm_crtc_state *crtc_state;
8513 	struct drm_plane_state *plane_state;
8514 
8515 	if (!state)
8516 		return -ENOMEM;
8517 
8518 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8519 
8520 	/* Construct an atomic state to restore previous display setting */
8521 
8522 	/*
8523 	 * Attach connectors to drm_atomic_state
8524 	 */
8525 	conn_state = drm_atomic_get_connector_state(state, connector);
8526 
8527 	ret = PTR_ERR_OR_ZERO(conn_state);
8528 	if (ret)
8529 		goto out;
8530 
8531 	/* Attach crtc to drm_atomic_state*/
8532 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8533 
8534 	ret = PTR_ERR_OR_ZERO(crtc_state);
8535 	if (ret)
8536 		goto out;
8537 
8538 	/* force a restore */
8539 	crtc_state->mode_changed = true;
8540 
8541 	/* Attach plane to drm_atomic_state */
8542 	plane_state = drm_atomic_get_plane_state(state, plane);
8543 
8544 	ret = PTR_ERR_OR_ZERO(plane_state);
8545 	if (ret)
8546 		goto out;
8547 
8548 	/* Call commit internally with the state we just constructed */
8549 	ret = drm_atomic_commit(state);
8550 
8551 out:
8552 	drm_atomic_state_put(state);
8553 	if (ret)
8554 		DRM_ERROR("Restoring old state failed with %i\n", ret);
8555 
8556 	return ret;
8557 }
8558 
8559 /*
8560  * This function handles all cases when set mode does not come upon hotplug.
8561  * This includes when a display is unplugged then plugged back into the
8562  * same port and when running without usermode desktop manager supprot
8563  */
8564 void dm_restore_drm_connector_state(struct drm_device *dev,
8565 				    struct drm_connector *connector)
8566 {
8567 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8568 	struct amdgpu_crtc *disconnected_acrtc;
8569 	struct dm_crtc_state *acrtc_state;
8570 
8571 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8572 		return;
8573 
8574 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8575 	if (!disconnected_acrtc)
8576 		return;
8577 
8578 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8579 	if (!acrtc_state->stream)
8580 		return;
8581 
8582 	/*
8583 	 * If the previous sink is not released and different from the current,
8584 	 * we deduce we are in a state where we can not rely on usermode call
8585 	 * to turn on the display, so we do it here
8586 	 */
8587 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8588 		dm_force_atomic_commit(&aconnector->base);
8589 }
8590 
8591 /*
8592  * Grabs all modesetting locks to serialize against any blocking commits,
8593  * Waits for completion of all non blocking commits.
8594  */
8595 static int do_aquire_global_lock(struct drm_device *dev,
8596 				 struct drm_atomic_state *state)
8597 {
8598 	struct drm_crtc *crtc;
8599 	struct drm_crtc_commit *commit;
8600 	long ret;
8601 
8602 	/*
8603 	 * Adding all modeset locks to aquire_ctx will
8604 	 * ensure that when the framework release it the
8605 	 * extra locks we are locking here will get released to
8606 	 */
8607 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8608 	if (ret)
8609 		return ret;
8610 
8611 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8612 		spin_lock(&crtc->commit_lock);
8613 		commit = list_first_entry_or_null(&crtc->commit_list,
8614 				struct drm_crtc_commit, commit_entry);
8615 		if (commit)
8616 			drm_crtc_commit_get(commit);
8617 		spin_unlock(&crtc->commit_lock);
8618 
8619 		if (!commit)
8620 			continue;
8621 
8622 		/*
8623 		 * Make sure all pending HW programming completed and
8624 		 * page flips done
8625 		 */
8626 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8627 
8628 		if (ret > 0)
8629 			ret = wait_for_completion_interruptible_timeout(
8630 					&commit->flip_done, 10*HZ);
8631 
8632 		if (ret == 0)
8633 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8634 				  "timed out\n", crtc->base.id, crtc->name);
8635 
8636 		drm_crtc_commit_put(commit);
8637 	}
8638 
8639 	return ret < 0 ? ret : 0;
8640 }
8641 
8642 static void get_freesync_config_for_crtc(
8643 	struct dm_crtc_state *new_crtc_state,
8644 	struct dm_connector_state *new_con_state)
8645 {
8646 	struct mod_freesync_config config = {0};
8647 	struct amdgpu_dm_connector *aconnector =
8648 			to_amdgpu_dm_connector(new_con_state->base.connector);
8649 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8650 	int vrefresh = drm_mode_vrefresh(mode);
8651 
8652 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8653 					vrefresh >= aconnector->min_vfreq &&
8654 					vrefresh <= aconnector->max_vfreq;
8655 
8656 	if (new_crtc_state->vrr_supported) {
8657 		new_crtc_state->stream->ignore_msa_timing_param = true;
8658 		config.state = new_crtc_state->base.vrr_enabled ?
8659 				VRR_STATE_ACTIVE_VARIABLE :
8660 				VRR_STATE_INACTIVE;
8661 		config.min_refresh_in_uhz =
8662 				aconnector->min_vfreq * 1000000;
8663 		config.max_refresh_in_uhz =
8664 				aconnector->max_vfreq * 1000000;
8665 		config.vsif_supported = true;
8666 		config.btr = true;
8667 	}
8668 
8669 	new_crtc_state->freesync_config = config;
8670 }
8671 
8672 static void reset_freesync_config_for_crtc(
8673 	struct dm_crtc_state *new_crtc_state)
8674 {
8675 	new_crtc_state->vrr_supported = false;
8676 
8677 	memset(&new_crtc_state->vrr_infopacket, 0,
8678 	       sizeof(new_crtc_state->vrr_infopacket));
8679 }
8680 
8681 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8682 				struct drm_atomic_state *state,
8683 				struct drm_crtc *crtc,
8684 				struct drm_crtc_state *old_crtc_state,
8685 				struct drm_crtc_state *new_crtc_state,
8686 				bool enable,
8687 				bool *lock_and_validation_needed)
8688 {
8689 	struct dm_atomic_state *dm_state = NULL;
8690 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8691 	struct dc_stream_state *new_stream;
8692 	int ret = 0;
8693 
8694 	/*
8695 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8696 	 * update changed items
8697 	 */
8698 	struct amdgpu_crtc *acrtc = NULL;
8699 	struct amdgpu_dm_connector *aconnector = NULL;
8700 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8701 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8702 
8703 	new_stream = NULL;
8704 
8705 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8706 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8707 	acrtc = to_amdgpu_crtc(crtc);
8708 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8709 
8710 	/* TODO This hack should go away */
8711 	if (aconnector && enable) {
8712 		/* Make sure fake sink is created in plug-in scenario */
8713 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8714 							    &aconnector->base);
8715 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8716 							    &aconnector->base);
8717 
8718 		if (IS_ERR(drm_new_conn_state)) {
8719 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8720 			goto fail;
8721 		}
8722 
8723 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8724 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8725 
8726 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8727 			goto skip_modeset;
8728 
8729 		new_stream = create_validate_stream_for_sink(aconnector,
8730 							     &new_crtc_state->mode,
8731 							     dm_new_conn_state,
8732 							     dm_old_crtc_state->stream);
8733 
8734 		/*
8735 		 * we can have no stream on ACTION_SET if a display
8736 		 * was disconnected during S3, in this case it is not an
8737 		 * error, the OS will be updated after detection, and
8738 		 * will do the right thing on next atomic commit
8739 		 */
8740 
8741 		if (!new_stream) {
8742 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8743 					__func__, acrtc->base.base.id);
8744 			ret = -ENOMEM;
8745 			goto fail;
8746 		}
8747 
8748 		/*
8749 		 * TODO: Check VSDB bits to decide whether this should
8750 		 * be enabled or not.
8751 		 */
8752 		new_stream->triggered_crtc_reset.enabled =
8753 			dm->force_timing_sync;
8754 
8755 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8756 
8757 		ret = fill_hdr_info_packet(drm_new_conn_state,
8758 					   &new_stream->hdr_static_metadata);
8759 		if (ret)
8760 			goto fail;
8761 
8762 		/*
8763 		 * If we already removed the old stream from the context
8764 		 * (and set the new stream to NULL) then we can't reuse
8765 		 * the old stream even if the stream and scaling are unchanged.
8766 		 * We'll hit the BUG_ON and black screen.
8767 		 *
8768 		 * TODO: Refactor this function to allow this check to work
8769 		 * in all conditions.
8770 		 */
8771 		if (dm_new_crtc_state->stream &&
8772 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8773 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8774 			new_crtc_state->mode_changed = false;
8775 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8776 					 new_crtc_state->mode_changed);
8777 		}
8778 	}
8779 
8780 	/* mode_changed flag may get updated above, need to check again */
8781 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8782 		goto skip_modeset;
8783 
8784 	DRM_DEBUG_DRIVER(
8785 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8786 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8787 		"connectors_changed:%d\n",
8788 		acrtc->crtc_id,
8789 		new_crtc_state->enable,
8790 		new_crtc_state->active,
8791 		new_crtc_state->planes_changed,
8792 		new_crtc_state->mode_changed,
8793 		new_crtc_state->active_changed,
8794 		new_crtc_state->connectors_changed);
8795 
8796 	/* Remove stream for any changed/disabled CRTC */
8797 	if (!enable) {
8798 
8799 		if (!dm_old_crtc_state->stream)
8800 			goto skip_modeset;
8801 
8802 		ret = dm_atomic_get_state(state, &dm_state);
8803 		if (ret)
8804 			goto fail;
8805 
8806 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8807 				crtc->base.id);
8808 
8809 		/* i.e. reset mode */
8810 		if (dc_remove_stream_from_ctx(
8811 				dm->dc,
8812 				dm_state->context,
8813 				dm_old_crtc_state->stream) != DC_OK) {
8814 			ret = -EINVAL;
8815 			goto fail;
8816 		}
8817 
8818 		dc_stream_release(dm_old_crtc_state->stream);
8819 		dm_new_crtc_state->stream = NULL;
8820 
8821 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8822 
8823 		*lock_and_validation_needed = true;
8824 
8825 	} else {/* Add stream for any updated/enabled CRTC */
8826 		/*
8827 		 * Quick fix to prevent NULL pointer on new_stream when
8828 		 * added MST connectors not found in existing crtc_state in the chained mode
8829 		 * TODO: need to dig out the root cause of that
8830 		 */
8831 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8832 			goto skip_modeset;
8833 
8834 		if (modereset_required(new_crtc_state))
8835 			goto skip_modeset;
8836 
8837 		if (modeset_required(new_crtc_state, new_stream,
8838 				     dm_old_crtc_state->stream)) {
8839 
8840 			WARN_ON(dm_new_crtc_state->stream);
8841 
8842 			ret = dm_atomic_get_state(state, &dm_state);
8843 			if (ret)
8844 				goto fail;
8845 
8846 			dm_new_crtc_state->stream = new_stream;
8847 
8848 			dc_stream_retain(new_stream);
8849 
8850 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8851 						crtc->base.id);
8852 
8853 			if (dc_add_stream_to_ctx(
8854 					dm->dc,
8855 					dm_state->context,
8856 					dm_new_crtc_state->stream) != DC_OK) {
8857 				ret = -EINVAL;
8858 				goto fail;
8859 			}
8860 
8861 			*lock_and_validation_needed = true;
8862 		}
8863 	}
8864 
8865 skip_modeset:
8866 	/* Release extra reference */
8867 	if (new_stream)
8868 		 dc_stream_release(new_stream);
8869 
8870 	/*
8871 	 * We want to do dc stream updates that do not require a
8872 	 * full modeset below.
8873 	 */
8874 	if (!(enable && aconnector && new_crtc_state->active))
8875 		return 0;
8876 	/*
8877 	 * Given above conditions, the dc state cannot be NULL because:
8878 	 * 1. We're in the process of enabling CRTCs (just been added
8879 	 *    to the dc context, or already is on the context)
8880 	 * 2. Has a valid connector attached, and
8881 	 * 3. Is currently active and enabled.
8882 	 * => The dc stream state currently exists.
8883 	 */
8884 	BUG_ON(dm_new_crtc_state->stream == NULL);
8885 
8886 	/* Scaling or underscan settings */
8887 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8888 		update_stream_scaling_settings(
8889 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8890 
8891 	/* ABM settings */
8892 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8893 
8894 	/*
8895 	 * Color management settings. We also update color properties
8896 	 * when a modeset is needed, to ensure it gets reprogrammed.
8897 	 */
8898 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8899 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8900 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8901 		if (ret)
8902 			goto fail;
8903 	}
8904 
8905 	/* Update Freesync settings. */
8906 	get_freesync_config_for_crtc(dm_new_crtc_state,
8907 				     dm_new_conn_state);
8908 
8909 	return ret;
8910 
8911 fail:
8912 	if (new_stream)
8913 		dc_stream_release(new_stream);
8914 	return ret;
8915 }
8916 
8917 static bool should_reset_plane(struct drm_atomic_state *state,
8918 			       struct drm_plane *plane,
8919 			       struct drm_plane_state *old_plane_state,
8920 			       struct drm_plane_state *new_plane_state)
8921 {
8922 	struct drm_plane *other;
8923 	struct drm_plane_state *old_other_state, *new_other_state;
8924 	struct drm_crtc_state *new_crtc_state;
8925 	int i;
8926 
8927 	/*
8928 	 * TODO: Remove this hack once the checks below are sufficient
8929 	 * enough to determine when we need to reset all the planes on
8930 	 * the stream.
8931 	 */
8932 	if (state->allow_modeset)
8933 		return true;
8934 
8935 	/* Exit early if we know that we're adding or removing the plane. */
8936 	if (old_plane_state->crtc != new_plane_state->crtc)
8937 		return true;
8938 
8939 	/* old crtc == new_crtc == NULL, plane not in context. */
8940 	if (!new_plane_state->crtc)
8941 		return false;
8942 
8943 	new_crtc_state =
8944 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8945 
8946 	if (!new_crtc_state)
8947 		return true;
8948 
8949 	/* CRTC Degamma changes currently require us to recreate planes. */
8950 	if (new_crtc_state->color_mgmt_changed)
8951 		return true;
8952 
8953 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8954 		return true;
8955 
8956 	/*
8957 	 * If there are any new primary or overlay planes being added or
8958 	 * removed then the z-order can potentially change. To ensure
8959 	 * correct z-order and pipe acquisition the current DC architecture
8960 	 * requires us to remove and recreate all existing planes.
8961 	 *
8962 	 * TODO: Come up with a more elegant solution for this.
8963 	 */
8964 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8965 		struct amdgpu_framebuffer *old_afb, *new_afb;
8966 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8967 			continue;
8968 
8969 		if (old_other_state->crtc != new_plane_state->crtc &&
8970 		    new_other_state->crtc != new_plane_state->crtc)
8971 			continue;
8972 
8973 		if (old_other_state->crtc != new_other_state->crtc)
8974 			return true;
8975 
8976 		/* Src/dst size and scaling updates. */
8977 		if (old_other_state->src_w != new_other_state->src_w ||
8978 		    old_other_state->src_h != new_other_state->src_h ||
8979 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8980 		    old_other_state->crtc_h != new_other_state->crtc_h)
8981 			return true;
8982 
8983 		/* Rotation / mirroring updates. */
8984 		if (old_other_state->rotation != new_other_state->rotation)
8985 			return true;
8986 
8987 		/* Blending updates. */
8988 		if (old_other_state->pixel_blend_mode !=
8989 		    new_other_state->pixel_blend_mode)
8990 			return true;
8991 
8992 		/* Alpha updates. */
8993 		if (old_other_state->alpha != new_other_state->alpha)
8994 			return true;
8995 
8996 		/* Colorspace changes. */
8997 		if (old_other_state->color_range != new_other_state->color_range ||
8998 		    old_other_state->color_encoding != new_other_state->color_encoding)
8999 			return true;
9000 
9001 		/* Framebuffer checks fall at the end. */
9002 		if (!old_other_state->fb || !new_other_state->fb)
9003 			continue;
9004 
9005 		/* Pixel format changes can require bandwidth updates. */
9006 		if (old_other_state->fb->format != new_other_state->fb->format)
9007 			return true;
9008 
9009 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9010 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9011 
9012 		/* Tiling and DCC changes also require bandwidth updates. */
9013 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9014 		    old_afb->base.modifier != new_afb->base.modifier)
9015 			return true;
9016 	}
9017 
9018 	return false;
9019 }
9020 
9021 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9022 			      struct drm_plane_state *new_plane_state,
9023 			      struct drm_framebuffer *fb)
9024 {
9025 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9026 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9027 	unsigned int pitch;
9028 	bool linear;
9029 
9030 	if (fb->width > new_acrtc->max_cursor_width ||
9031 	    fb->height > new_acrtc->max_cursor_height) {
9032 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9033 				 new_plane_state->fb->width,
9034 				 new_plane_state->fb->height);
9035 		return -EINVAL;
9036 	}
9037 	if (new_plane_state->src_w != fb->width << 16 ||
9038 	    new_plane_state->src_h != fb->height << 16) {
9039 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9040 		return -EINVAL;
9041 	}
9042 
9043 	/* Pitch in pixels */
9044 	pitch = fb->pitches[0] / fb->format->cpp[0];
9045 
9046 	if (fb->width != pitch) {
9047 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9048 				 fb->width, pitch);
9049 		return -EINVAL;
9050 	}
9051 
9052 	switch (pitch) {
9053 	case 64:
9054 	case 128:
9055 	case 256:
9056 		/* FB pitch is supported by cursor plane */
9057 		break;
9058 	default:
9059 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9060 		return -EINVAL;
9061 	}
9062 
9063 	/* Core DRM takes care of checking FB modifiers, so we only need to
9064 	 * check tiling flags when the FB doesn't have a modifier. */
9065 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9066 		if (adev->family < AMDGPU_FAMILY_AI) {
9067 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9068 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9069 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9070 		} else {
9071 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9072 		}
9073 		if (!linear) {
9074 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9075 			return -EINVAL;
9076 		}
9077 	}
9078 
9079 	return 0;
9080 }
9081 
9082 static int dm_update_plane_state(struct dc *dc,
9083 				 struct drm_atomic_state *state,
9084 				 struct drm_plane *plane,
9085 				 struct drm_plane_state *old_plane_state,
9086 				 struct drm_plane_state *new_plane_state,
9087 				 bool enable,
9088 				 bool *lock_and_validation_needed)
9089 {
9090 
9091 	struct dm_atomic_state *dm_state = NULL;
9092 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9093 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9094 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9095 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9096 	struct amdgpu_crtc *new_acrtc;
9097 	bool needs_reset;
9098 	int ret = 0;
9099 
9100 
9101 	new_plane_crtc = new_plane_state->crtc;
9102 	old_plane_crtc = old_plane_state->crtc;
9103 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9104 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9105 
9106 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9107 		if (!enable || !new_plane_crtc ||
9108 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9109 			return 0;
9110 
9111 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9112 
9113 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9114 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9115 			return -EINVAL;
9116 		}
9117 
9118 		if (new_plane_state->fb) {
9119 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9120 						 new_plane_state->fb);
9121 			if (ret)
9122 				return ret;
9123 		}
9124 
9125 		return 0;
9126 	}
9127 
9128 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9129 					 new_plane_state);
9130 
9131 	/* Remove any changed/removed planes */
9132 	if (!enable) {
9133 		if (!needs_reset)
9134 			return 0;
9135 
9136 		if (!old_plane_crtc)
9137 			return 0;
9138 
9139 		old_crtc_state = drm_atomic_get_old_crtc_state(
9140 				state, old_plane_crtc);
9141 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9142 
9143 		if (!dm_old_crtc_state->stream)
9144 			return 0;
9145 
9146 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9147 				plane->base.id, old_plane_crtc->base.id);
9148 
9149 		ret = dm_atomic_get_state(state, &dm_state);
9150 		if (ret)
9151 			return ret;
9152 
9153 		if (!dc_remove_plane_from_context(
9154 				dc,
9155 				dm_old_crtc_state->stream,
9156 				dm_old_plane_state->dc_state,
9157 				dm_state->context)) {
9158 
9159 			return -EINVAL;
9160 		}
9161 
9162 
9163 		dc_plane_state_release(dm_old_plane_state->dc_state);
9164 		dm_new_plane_state->dc_state = NULL;
9165 
9166 		*lock_and_validation_needed = true;
9167 
9168 	} else { /* Add new planes */
9169 		struct dc_plane_state *dc_new_plane_state;
9170 
9171 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9172 			return 0;
9173 
9174 		if (!new_plane_crtc)
9175 			return 0;
9176 
9177 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9178 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9179 
9180 		if (!dm_new_crtc_state->stream)
9181 			return 0;
9182 
9183 		if (!needs_reset)
9184 			return 0;
9185 
9186 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9187 		if (ret)
9188 			return ret;
9189 
9190 		WARN_ON(dm_new_plane_state->dc_state);
9191 
9192 		dc_new_plane_state = dc_create_plane_state(dc);
9193 		if (!dc_new_plane_state)
9194 			return -ENOMEM;
9195 
9196 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9197 				plane->base.id, new_plane_crtc->base.id);
9198 
9199 		ret = fill_dc_plane_attributes(
9200 			drm_to_adev(new_plane_crtc->dev),
9201 			dc_new_plane_state,
9202 			new_plane_state,
9203 			new_crtc_state);
9204 		if (ret) {
9205 			dc_plane_state_release(dc_new_plane_state);
9206 			return ret;
9207 		}
9208 
9209 		ret = dm_atomic_get_state(state, &dm_state);
9210 		if (ret) {
9211 			dc_plane_state_release(dc_new_plane_state);
9212 			return ret;
9213 		}
9214 
9215 		/*
9216 		 * Any atomic check errors that occur after this will
9217 		 * not need a release. The plane state will be attached
9218 		 * to the stream, and therefore part of the atomic
9219 		 * state. It'll be released when the atomic state is
9220 		 * cleaned.
9221 		 */
9222 		if (!dc_add_plane_to_context(
9223 				dc,
9224 				dm_new_crtc_state->stream,
9225 				dc_new_plane_state,
9226 				dm_state->context)) {
9227 
9228 			dc_plane_state_release(dc_new_plane_state);
9229 			return -EINVAL;
9230 		}
9231 
9232 		dm_new_plane_state->dc_state = dc_new_plane_state;
9233 
9234 		/* Tell DC to do a full surface update every time there
9235 		 * is a plane change. Inefficient, but works for now.
9236 		 */
9237 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9238 
9239 		*lock_and_validation_needed = true;
9240 	}
9241 
9242 
9243 	return ret;
9244 }
9245 
9246 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9247 				struct drm_crtc *crtc,
9248 				struct drm_crtc_state *new_crtc_state)
9249 {
9250 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9251 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9252 
9253 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9254 	 * cursor per pipe but it's going to inherit the scaling and
9255 	 * positioning from the underlying pipe. Check the cursor plane's
9256 	 * blending properties match the primary plane's. */
9257 
9258 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9259 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9260 	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9261 		return 0;
9262 	}
9263 
9264 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9265 			 (new_cursor_state->src_w >> 16);
9266 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9267 			 (new_cursor_state->src_h >> 16);
9268 
9269 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9270 			 (new_primary_state->src_w >> 16);
9271 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9272 			 (new_primary_state->src_h >> 16);
9273 
9274 	if (cursor_scale_w != primary_scale_w ||
9275 	    cursor_scale_h != primary_scale_h) {
9276 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9277 		return -EINVAL;
9278 	}
9279 
9280 	return 0;
9281 }
9282 
9283 #if defined(CONFIG_DRM_AMD_DC_DCN)
9284 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9285 {
9286 	struct drm_connector *connector;
9287 	struct drm_connector_state *conn_state;
9288 	struct amdgpu_dm_connector *aconnector = NULL;
9289 	int i;
9290 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9291 		if (conn_state->crtc != crtc)
9292 			continue;
9293 
9294 		aconnector = to_amdgpu_dm_connector(connector);
9295 		if (!aconnector->port || !aconnector->mst_port)
9296 			aconnector = NULL;
9297 		else
9298 			break;
9299 	}
9300 
9301 	if (!aconnector)
9302 		return 0;
9303 
9304 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9305 }
9306 #endif
9307 
9308 /**
9309  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9310  * @dev: The DRM device
9311  * @state: The atomic state to commit
9312  *
9313  * Validate that the given atomic state is programmable by DC into hardware.
9314  * This involves constructing a &struct dc_state reflecting the new hardware
9315  * state we wish to commit, then querying DC to see if it is programmable. It's
9316  * important not to modify the existing DC state. Otherwise, atomic_check
9317  * may unexpectedly commit hardware changes.
9318  *
9319  * When validating the DC state, it's important that the right locks are
9320  * acquired. For full updates case which removes/adds/updates streams on one
9321  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9322  * that any such full update commit will wait for completion of any outstanding
9323  * flip using DRMs synchronization events.
9324  *
9325  * Note that DM adds the affected connectors for all CRTCs in state, when that
9326  * might not seem necessary. This is because DC stream creation requires the
9327  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9328  * be possible but non-trivial - a possible TODO item.
9329  *
9330  * Return: -Error code if validation failed.
9331  */
9332 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9333 				  struct drm_atomic_state *state)
9334 {
9335 	struct amdgpu_device *adev = drm_to_adev(dev);
9336 	struct dm_atomic_state *dm_state = NULL;
9337 	struct dc *dc = adev->dm.dc;
9338 	struct drm_connector *connector;
9339 	struct drm_connector_state *old_con_state, *new_con_state;
9340 	struct drm_crtc *crtc;
9341 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9342 	struct drm_plane *plane;
9343 	struct drm_plane_state *old_plane_state, *new_plane_state;
9344 	enum dc_status status;
9345 	int ret, i;
9346 	bool lock_and_validation_needed = false;
9347 	struct dm_crtc_state *dm_old_crtc_state;
9348 
9349 	trace_amdgpu_dm_atomic_check_begin(state);
9350 
9351 	ret = drm_atomic_helper_check_modeset(dev, state);
9352 	if (ret)
9353 		goto fail;
9354 
9355 	/* Check connector changes */
9356 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9357 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9358 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9359 
9360 		/* Skip connectors that are disabled or part of modeset already. */
9361 		if (!old_con_state->crtc && !new_con_state->crtc)
9362 			continue;
9363 
9364 		if (!new_con_state->crtc)
9365 			continue;
9366 
9367 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9368 		if (IS_ERR(new_crtc_state)) {
9369 			ret = PTR_ERR(new_crtc_state);
9370 			goto fail;
9371 		}
9372 
9373 		if (dm_old_con_state->abm_level !=
9374 		    dm_new_con_state->abm_level)
9375 			new_crtc_state->connectors_changed = true;
9376 	}
9377 
9378 #if defined(CONFIG_DRM_AMD_DC_DCN)
9379 	if (adev->asic_type >= CHIP_NAVI10) {
9380 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9381 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9382 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9383 				if (ret)
9384 					goto fail;
9385 			}
9386 		}
9387 	}
9388 #endif
9389 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9390 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9391 
9392 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9393 		    !new_crtc_state->color_mgmt_changed &&
9394 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9395 			dm_old_crtc_state->dsc_force_changed == false)
9396 			continue;
9397 
9398 		if (!new_crtc_state->enable)
9399 			continue;
9400 
9401 		ret = drm_atomic_add_affected_connectors(state, crtc);
9402 		if (ret)
9403 			return ret;
9404 
9405 		ret = drm_atomic_add_affected_planes(state, crtc);
9406 		if (ret)
9407 			goto fail;
9408 
9409 		if (dm_old_crtc_state->dsc_force_changed)
9410 			new_crtc_state->mode_changed = true;
9411 	}
9412 
9413 	/*
9414 	 * Add all primary and overlay planes on the CRTC to the state
9415 	 * whenever a plane is enabled to maintain correct z-ordering
9416 	 * and to enable fast surface updates.
9417 	 */
9418 	drm_for_each_crtc(crtc, dev) {
9419 		bool modified = false;
9420 
9421 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9422 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9423 				continue;
9424 
9425 			if (new_plane_state->crtc == crtc ||
9426 			    old_plane_state->crtc == crtc) {
9427 				modified = true;
9428 				break;
9429 			}
9430 		}
9431 
9432 		if (!modified)
9433 			continue;
9434 
9435 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9436 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9437 				continue;
9438 
9439 			new_plane_state =
9440 				drm_atomic_get_plane_state(state, plane);
9441 
9442 			if (IS_ERR(new_plane_state)) {
9443 				ret = PTR_ERR(new_plane_state);
9444 				goto fail;
9445 			}
9446 		}
9447 	}
9448 
9449 	/* Remove exiting planes if they are modified */
9450 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9451 		ret = dm_update_plane_state(dc, state, plane,
9452 					    old_plane_state,
9453 					    new_plane_state,
9454 					    false,
9455 					    &lock_and_validation_needed);
9456 		if (ret)
9457 			goto fail;
9458 	}
9459 
9460 	/* Disable all crtcs which require disable */
9461 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9462 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9463 					   old_crtc_state,
9464 					   new_crtc_state,
9465 					   false,
9466 					   &lock_and_validation_needed);
9467 		if (ret)
9468 			goto fail;
9469 	}
9470 
9471 	/* Enable all crtcs which require enable */
9472 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9473 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9474 					   old_crtc_state,
9475 					   new_crtc_state,
9476 					   true,
9477 					   &lock_and_validation_needed);
9478 		if (ret)
9479 			goto fail;
9480 	}
9481 
9482 	/* Add new/modified planes */
9483 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9484 		ret = dm_update_plane_state(dc, state, plane,
9485 					    old_plane_state,
9486 					    new_plane_state,
9487 					    true,
9488 					    &lock_and_validation_needed);
9489 		if (ret)
9490 			goto fail;
9491 	}
9492 
9493 	/* Run this here since we want to validate the streams we created */
9494 	ret = drm_atomic_helper_check_planes(dev, state);
9495 	if (ret)
9496 		goto fail;
9497 
9498 	/* Check cursor planes scaling */
9499 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9500 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9501 		if (ret)
9502 			goto fail;
9503 	}
9504 
9505 	if (state->legacy_cursor_update) {
9506 		/*
9507 		 * This is a fast cursor update coming from the plane update
9508 		 * helper, check if it can be done asynchronously for better
9509 		 * performance.
9510 		 */
9511 		state->async_update =
9512 			!drm_atomic_helper_async_check(dev, state);
9513 
9514 		/*
9515 		 * Skip the remaining global validation if this is an async
9516 		 * update. Cursor updates can be done without affecting
9517 		 * state or bandwidth calcs and this avoids the performance
9518 		 * penalty of locking the private state object and
9519 		 * allocating a new dc_state.
9520 		 */
9521 		if (state->async_update)
9522 			return 0;
9523 	}
9524 
9525 	/* Check scaling and underscan changes*/
9526 	/* TODO Removed scaling changes validation due to inability to commit
9527 	 * new stream into context w\o causing full reset. Need to
9528 	 * decide how to handle.
9529 	 */
9530 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9531 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9532 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9533 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9534 
9535 		/* Skip any modesets/resets */
9536 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9537 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9538 			continue;
9539 
9540 		/* Skip any thing not scale or underscan changes */
9541 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9542 			continue;
9543 
9544 		lock_and_validation_needed = true;
9545 	}
9546 
9547 	/**
9548 	 * Streams and planes are reset when there are changes that affect
9549 	 * bandwidth. Anything that affects bandwidth needs to go through
9550 	 * DC global validation to ensure that the configuration can be applied
9551 	 * to hardware.
9552 	 *
9553 	 * We have to currently stall out here in atomic_check for outstanding
9554 	 * commits to finish in this case because our IRQ handlers reference
9555 	 * DRM state directly - we can end up disabling interrupts too early
9556 	 * if we don't.
9557 	 *
9558 	 * TODO: Remove this stall and drop DM state private objects.
9559 	 */
9560 	if (lock_and_validation_needed) {
9561 		ret = dm_atomic_get_state(state, &dm_state);
9562 		if (ret)
9563 			goto fail;
9564 
9565 		ret = do_aquire_global_lock(dev, state);
9566 		if (ret)
9567 			goto fail;
9568 
9569 #if defined(CONFIG_DRM_AMD_DC_DCN)
9570 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9571 			goto fail;
9572 
9573 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9574 		if (ret)
9575 			goto fail;
9576 #endif
9577 
9578 		/*
9579 		 * Perform validation of MST topology in the state:
9580 		 * We need to perform MST atomic check before calling
9581 		 * dc_validate_global_state(), or there is a chance
9582 		 * to get stuck in an infinite loop and hang eventually.
9583 		 */
9584 		ret = drm_dp_mst_atomic_check(state);
9585 		if (ret)
9586 			goto fail;
9587 		status = dc_validate_global_state(dc, dm_state->context, false);
9588 		if (status != DC_OK) {
9589 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9590 				       dc_status_to_str(status), status);
9591 			ret = -EINVAL;
9592 			goto fail;
9593 		}
9594 	} else {
9595 		/*
9596 		 * The commit is a fast update. Fast updates shouldn't change
9597 		 * the DC context, affect global validation, and can have their
9598 		 * commit work done in parallel with other commits not touching
9599 		 * the same resource. If we have a new DC context as part of
9600 		 * the DM atomic state from validation we need to free it and
9601 		 * retain the existing one instead.
9602 		 *
9603 		 * Furthermore, since the DM atomic state only contains the DC
9604 		 * context and can safely be annulled, we can free the state
9605 		 * and clear the associated private object now to free
9606 		 * some memory and avoid a possible use-after-free later.
9607 		 */
9608 
9609 		for (i = 0; i < state->num_private_objs; i++) {
9610 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9611 
9612 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
9613 				int j = state->num_private_objs-1;
9614 
9615 				dm_atomic_destroy_state(obj,
9616 						state->private_objs[i].state);
9617 
9618 				/* If i is not at the end of the array then the
9619 				 * last element needs to be moved to where i was
9620 				 * before the array can safely be truncated.
9621 				 */
9622 				if (i != j)
9623 					state->private_objs[i] =
9624 						state->private_objs[j];
9625 
9626 				state->private_objs[j].ptr = NULL;
9627 				state->private_objs[j].state = NULL;
9628 				state->private_objs[j].old_state = NULL;
9629 				state->private_objs[j].new_state = NULL;
9630 
9631 				state->num_private_objs = j;
9632 				break;
9633 			}
9634 		}
9635 	}
9636 
9637 	/* Store the overall update type for use later in atomic check. */
9638 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9639 		struct dm_crtc_state *dm_new_crtc_state =
9640 			to_dm_crtc_state(new_crtc_state);
9641 
9642 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9643 							 UPDATE_TYPE_FULL :
9644 							 UPDATE_TYPE_FAST;
9645 	}
9646 
9647 	/* Must be success */
9648 	WARN_ON(ret);
9649 
9650 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9651 
9652 	return ret;
9653 
9654 fail:
9655 	if (ret == -EDEADLK)
9656 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9657 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9658 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9659 	else
9660 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9661 
9662 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9663 
9664 	return ret;
9665 }
9666 
9667 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9668 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9669 {
9670 	uint8_t dpcd_data;
9671 	bool capable = false;
9672 
9673 	if (amdgpu_dm_connector->dc_link &&
9674 		dm_helpers_dp_read_dpcd(
9675 				NULL,
9676 				amdgpu_dm_connector->dc_link,
9677 				DP_DOWN_STREAM_PORT_COUNT,
9678 				&dpcd_data,
9679 				sizeof(dpcd_data))) {
9680 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9681 	}
9682 
9683 	return capable;
9684 }
9685 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9686 					struct edid *edid)
9687 {
9688 	int i;
9689 	bool edid_check_required;
9690 	struct detailed_timing *timing;
9691 	struct detailed_non_pixel *data;
9692 	struct detailed_data_monitor_range *range;
9693 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9694 			to_amdgpu_dm_connector(connector);
9695 	struct dm_connector_state *dm_con_state = NULL;
9696 
9697 	struct drm_device *dev = connector->dev;
9698 	struct amdgpu_device *adev = drm_to_adev(dev);
9699 	bool freesync_capable = false;
9700 
9701 	if (!connector->state) {
9702 		DRM_ERROR("%s - Connector has no state", __func__);
9703 		goto update;
9704 	}
9705 
9706 	if (!edid) {
9707 		dm_con_state = to_dm_connector_state(connector->state);
9708 
9709 		amdgpu_dm_connector->min_vfreq = 0;
9710 		amdgpu_dm_connector->max_vfreq = 0;
9711 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9712 
9713 		goto update;
9714 	}
9715 
9716 	dm_con_state = to_dm_connector_state(connector->state);
9717 
9718 	edid_check_required = false;
9719 	if (!amdgpu_dm_connector->dc_sink) {
9720 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9721 		goto update;
9722 	}
9723 	if (!adev->dm.freesync_module)
9724 		goto update;
9725 	/*
9726 	 * if edid non zero restrict freesync only for dp and edp
9727 	 */
9728 	if (edid) {
9729 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9730 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9731 			edid_check_required = is_dp_capable_without_timing_msa(
9732 						adev->dm.dc,
9733 						amdgpu_dm_connector);
9734 		}
9735 	}
9736 	if (edid_check_required == true && (edid->version > 1 ||
9737 	   (edid->version == 1 && edid->revision > 1))) {
9738 		for (i = 0; i < 4; i++) {
9739 
9740 			timing	= &edid->detailed_timings[i];
9741 			data	= &timing->data.other_data;
9742 			range	= &data->data.range;
9743 			/*
9744 			 * Check if monitor has continuous frequency mode
9745 			 */
9746 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9747 				continue;
9748 			/*
9749 			 * Check for flag range limits only. If flag == 1 then
9750 			 * no additional timing information provided.
9751 			 * Default GTF, GTF Secondary curve and CVT are not
9752 			 * supported
9753 			 */
9754 			if (range->flags != 1)
9755 				continue;
9756 
9757 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9758 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9759 			amdgpu_dm_connector->pixel_clock_mhz =
9760 				range->pixel_clock_mhz * 10;
9761 
9762 			connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9763 			connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9764 
9765 			break;
9766 		}
9767 
9768 		if (amdgpu_dm_connector->max_vfreq -
9769 		    amdgpu_dm_connector->min_vfreq > 10) {
9770 
9771 			freesync_capable = true;
9772 		}
9773 	}
9774 
9775 update:
9776 	if (dm_con_state)
9777 		dm_con_state->freesync_capable = freesync_capable;
9778 
9779 	if (connector->vrr_capable_property)
9780 		drm_connector_set_vrr_capable_property(connector,
9781 						       freesync_capable);
9782 }
9783 
9784 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9785 {
9786 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9787 
9788 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9789 		return;
9790 	if (link->type == dc_connection_none)
9791 		return;
9792 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9793 					dpcd_data, sizeof(dpcd_data))) {
9794 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9795 
9796 		if (dpcd_data[0] == 0) {
9797 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9798 			link->psr_settings.psr_feature_enabled = false;
9799 		} else {
9800 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9801 			link->psr_settings.psr_feature_enabled = true;
9802 		}
9803 
9804 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9805 	}
9806 }
9807 
9808 /*
9809  * amdgpu_dm_link_setup_psr() - configure psr link
9810  * @stream: stream state
9811  *
9812  * Return: true if success
9813  */
9814 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9815 {
9816 	struct dc_link *link = NULL;
9817 	struct psr_config psr_config = {0};
9818 	struct psr_context psr_context = {0};
9819 	bool ret = false;
9820 
9821 	if (stream == NULL)
9822 		return false;
9823 
9824 	link = stream->link;
9825 
9826 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9827 
9828 	if (psr_config.psr_version > 0) {
9829 		psr_config.psr_exit_link_training_required = 0x1;
9830 		psr_config.psr_frame_capture_indication_req = 0;
9831 		psr_config.psr_rfb_setup_time = 0x37;
9832 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9833 		psr_config.allow_smu_optimizations = 0x0;
9834 
9835 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9836 
9837 	}
9838 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9839 
9840 	return ret;
9841 }
9842 
9843 /*
9844  * amdgpu_dm_psr_enable() - enable psr f/w
9845  * @stream: stream state
9846  *
9847  * Return: true if success
9848  */
9849 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9850 {
9851 	struct dc_link *link = stream->link;
9852 	unsigned int vsync_rate_hz = 0;
9853 	struct dc_static_screen_params params = {0};
9854 	/* Calculate number of static frames before generating interrupt to
9855 	 * enter PSR.
9856 	 */
9857 	// Init fail safe of 2 frames static
9858 	unsigned int num_frames_static = 2;
9859 
9860 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9861 
9862 	vsync_rate_hz = div64_u64(div64_u64((
9863 			stream->timing.pix_clk_100hz * 100),
9864 			stream->timing.v_total),
9865 			stream->timing.h_total);
9866 
9867 	/* Round up
9868 	 * Calculate number of frames such that at least 30 ms of time has
9869 	 * passed.
9870 	 */
9871 	if (vsync_rate_hz != 0) {
9872 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9873 		num_frames_static = (30000 / frame_time_microsec) + 1;
9874 	}
9875 
9876 	params.triggers.cursor_update = true;
9877 	params.triggers.overlay_update = true;
9878 	params.triggers.surface_update = true;
9879 	params.num_frames = num_frames_static;
9880 
9881 	dc_stream_set_static_screen_params(link->ctx->dc,
9882 					   &stream, 1,
9883 					   &params);
9884 
9885 	return dc_link_set_psr_allow_active(link, true, false, false);
9886 }
9887 
9888 /*
9889  * amdgpu_dm_psr_disable() - disable psr f/w
9890  * @stream:  stream state
9891  *
9892  * Return: true if success
9893  */
9894 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9895 {
9896 
9897 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9898 
9899 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9900 }
9901 
9902 /*
9903  * amdgpu_dm_psr_disable() - disable psr f/w
9904  * if psr is enabled on any stream
9905  *
9906  * Return: true if success
9907  */
9908 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9909 {
9910 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9911 	return dc_set_psr_allow_active(dm->dc, false);
9912 }
9913 
9914 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9915 {
9916 	struct amdgpu_device *adev = drm_to_adev(dev);
9917 	struct dc *dc = adev->dm.dc;
9918 	int i;
9919 
9920 	mutex_lock(&adev->dm.dc_lock);
9921 	if (dc->current_state) {
9922 		for (i = 0; i < dc->current_state->stream_count; ++i)
9923 			dc->current_state->streams[i]
9924 				->triggered_crtc_reset.enabled =
9925 				adev->dm.force_timing_sync;
9926 
9927 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9928 		dc_trigger_sync(dc, dc->current_state);
9929 	}
9930 	mutex_unlock(&adev->dm.dc_lock);
9931 }
9932 
9933 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9934 		       uint32_t value, const char *func_name)
9935 {
9936 #ifdef DM_CHECK_ADDR_0
9937 	if (address == 0) {
9938 		DC_ERR("invalid register write. address = 0");
9939 		return;
9940 	}
9941 #endif
9942 	cgs_write_register(ctx->cgs_device, address, value);
9943 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9944 }
9945 
9946 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9947 			  const char *func_name)
9948 {
9949 	uint32_t value;
9950 #ifdef DM_CHECK_ADDR_0
9951 	if (address == 0) {
9952 		DC_ERR("invalid register read; address = 0\n");
9953 		return 0;
9954 	}
9955 #endif
9956 
9957 	if (ctx->dmub_srv &&
9958 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9959 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9960 		ASSERT(false);
9961 		return 0;
9962 	}
9963 
9964 	value = cgs_read_register(ctx->cgs_device, address);
9965 
9966 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9967 
9968 	return value;
9969 }
9970