/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
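
/*
 * A rough sketch of that liaison role for a single atomic commit, using
 * names from this file (simplified, not the exact call graph):
 *
 *   drm atomic ioctl
 *     -> amdgpu_dm_atomic_check()        // validate, e.g. dc_validate_global_state()
 *     -> amdgpu_dm_atomic_commit_tail()  // program hardware, e.g. dc_commit_state()
 *
 * The interrupt handlers below (dm_pflip_high_irq() and friends) close the
 * loop by turning DC/hardware events back into DRM vblank and flip events.
 */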

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
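
/*
 * Illustrative only: a caller that wants the raw fields back out of the
 * packed reg-format values produced above would split the 16-bit halves
 * like so:
 *
 *   vpos          = position & 0xffff;
 *   hpos          = (position >> 16) & 0xffff;
 *   v_blank_start = vbl & 0xffff;
 *   v_blank_end   = (vbl >> 16) & 0xffff;
 */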

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	/* Size the buffer for the largest supported mode, at 4 bytes per pixel. */
	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
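
	/*
	 * Resulting layout of the firmware blob, as implied by the pointer
	 * arithmetic above (sizes taken from the dmcub header):
	 *
	 *   ucode_array_offset_bytes
	 *   +------------------------------+
	 *   | PSP header (0x100 bytes)     |
	 *   +------------------------------+ <- fw_inst_const
	 *   | instruction/constant data    |    (fw_inst_const_size)
	 *   +------------------------------+
	 *   | PSP footer (0x100 bytes)     |
	 *   +------------------------------+ <- fw_bss_data
	 *   | BSS/data                     |    (fw_bss_data_size)
	 *   +------------------------------+
	 */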

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

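	/*
	 * Granularities used below: the system aperture is programmed in
	 * 256KB units (hence the >> 18 shifts), the AGP aperture in 16MB
	 * units (>> 24), and the GART page table addresses in 4KB pages
	 * (>> 12, undone by the << 12 when filling pa_config).
	 */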
	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address
		 * (add 1) to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}
#endif

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	/* Guard against the init error path, where DC may not exist yet. */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
	case CHIP_VANGOGH:
		dmub_asic = DMUB_ASIC_DCN301;
		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		dmub_asic = DMUB_ASIC_DCN302;
		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
		break;
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Read the firmware version before it is first logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* When ABM is implemented in DMCUB (ABM 2.4 and up), the dmcu
	 * object will be NULL.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct, then calls the pplib functions below to
	 * pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermark are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}
1717 
1718 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1719 {
1720 	struct dc_state *context = NULL;
1721 	enum dc_status res = DC_ERROR_UNEXPECTED;
1722 	int i;
1723 	struct dc_stream_state *del_streams[MAX_PIPES];
1724 	int del_streams_count = 0;
1725 
1726 	memset(del_streams, 0, sizeof(del_streams));
1727 
1728 	context = dc_create_state(dc);
1729 	if (context == NULL)
1730 		goto context_alloc_fail;
1731 
1732 	dc_resource_state_copy_construct_current(dc, context);
1733 
1734 	/* First remove from context all streams */
1735 	for (i = 0; i < context->stream_count; i++) {
1736 		struct dc_stream_state *stream = context->streams[i];
1737 
1738 		del_streams[del_streams_count++] = stream;
1739 	}
1740 
1741 	/* Remove all planes for removed streams and then remove the streams */
1742 	for (i = 0; i < del_streams_count; i++) {
1743 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1744 			res = DC_FAIL_DETACH_SURFACES;
1745 			goto fail;
1746 		}
1747 
1748 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1749 		if (res != DC_OK)
1750 			goto fail;
1751 	}
1752 
1754 	res = dc_validate_global_state(dc, context, false);
1755 
1756 	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1758 		goto fail;
1759 	}
1760 
1761 	res = dc_commit_state(dc, context);
1762 
1763 fail:
1764 	dc_release_state(context);
1765 
1766 context_alloc_fail:
1767 	return res;
1768 }
1769 
1770 static int dm_suspend(void *handle)
1771 {
1772 	struct amdgpu_device *adev = handle;
1773 	struct amdgpu_display_manager *dm = &adev->dm;
1774 	int ret = 0;
1775 
1776 	if (amdgpu_in_reset(adev)) {
1777 		mutex_lock(&dm->dc_lock);
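		/*
		 * Note: dc_lock is intentionally held across the reset; the
		 * matching unlock happens in the reset path of dm_resume().
		 */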
1778 
1779 #if defined(CONFIG_DRM_AMD_DC_DCN)
1780 		dc_allow_idle_optimizations(adev->dm.dc, false);
1781 #endif
1782 
1783 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1784 
1785 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1786 
1787 		amdgpu_dm_commit_zero_streams(dm->dc);
1788 
1789 		amdgpu_dm_irq_suspend(adev);
1790 
1791 		return ret;
1792 	}
1793 
1794 	WARN_ON(adev->dm.cached_state);
1795 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1796 
1797 	s3_handle_mst(adev_to_drm(adev), true);
1798 
1799 	amdgpu_dm_irq_suspend(adev);
1800 
1802 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1803 
1804 	return 0;
1805 }
1806 
1807 static struct amdgpu_dm_connector *
1808 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1809 					     struct drm_crtc *crtc)
1810 {
1811 	uint32_t i;
1812 	struct drm_connector_state *new_con_state;
1813 	struct drm_connector *connector;
1814 	struct drm_crtc *crtc_from_state;
1815 
1816 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1817 		crtc_from_state = new_con_state->crtc;
1818 
1819 		if (crtc_from_state == crtc)
1820 			return to_amdgpu_dm_connector(connector);
1821 	}
1822 
1823 	return NULL;
1824 }
1825 
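/*
 * Emulate link detection for connectors that are forced but report no
 * physical connection: mark the link disconnected, create a fake sink that
 * matches the connector's signal type and try to read a local EDID for it.
 */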
1826 static void emulated_link_detect(struct dc_link *link)
1827 {
1828 	struct dc_sink_init_data sink_init_data = { 0 };
1829 	struct display_sink_capability sink_caps = { 0 };
1830 	enum dc_edid_status edid_status;
1831 	struct dc_context *dc_ctx = link->ctx;
1832 	struct dc_sink *sink = NULL;
1833 	struct dc_sink *prev_sink = NULL;
1834 
1835 	link->type = dc_connection_none;
1836 	prev_sink = link->local_sink;
1837 
1838 	if (prev_sink != NULL)
1839 		dc_sink_retain(prev_sink);
1840 
1841 	switch (link->connector_signal) {
1842 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1843 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1844 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1845 		break;
1846 	}
1847 
1848 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1849 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1850 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1851 		break;
1852 	}
1853 
1854 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1855 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1856 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1857 		break;
1858 	}
1859 
1860 	case SIGNAL_TYPE_LVDS: {
1861 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1862 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1863 		break;
1864 	}
1865 
1866 	case SIGNAL_TYPE_EDP: {
1867 		sink_caps.transaction_type =
1868 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1869 		sink_caps.signal = SIGNAL_TYPE_EDP;
1870 		break;
1871 	}
1872 
1873 	case SIGNAL_TYPE_DISPLAY_PORT: {
1874 		sink_caps.transaction_type =
1875 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1876 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1877 		break;
1878 	}
1879 
1880 	default:
1881 		DC_ERROR("Invalid connector type! signal:%d\n",
1882 			link->connector_signal);
1883 		return;
1884 	}
1885 
1886 	sink_init_data.link = link;
1887 	sink_init_data.sink_signal = sink_caps.signal;
1888 
1889 	sink = dc_sink_create(&sink_init_data);
1890 	if (!sink) {
1891 		DC_ERROR("Failed to create sink!\n");
1892 		return;
1893 	}
1894 
1895 	/* dc_sink_create returns a new reference */
1896 	link->local_sink = sink;
1897 
1898 	edid_status = dm_helpers_read_local_edid(
1899 			link->ctx,
1900 			link,
1901 			sink);
1902 
1903 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
1905 
1906 }
1907 
1908 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1909 				     struct amdgpu_display_manager *dm)
1910 {
1911 	struct {
1912 		struct dc_surface_update surface_updates[MAX_SURFACES];
1913 		struct dc_plane_info plane_infos[MAX_SURFACES];
1914 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1915 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1916 		struct dc_stream_update stream_update;
	} *bundle;
1918 	int k, m;
1919 
1920 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1921 
1922 	if (!bundle) {
1923 		dm_error("Failed to allocate update bundle\n");
1924 		goto cleanup;
1925 	}
1926 
1927 	for (k = 0; k < dc_state->stream_count; k++) {
1928 		bundle->stream_update.stream = dc_state->streams[k];
1929 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
1940 	}
1941 
1942 cleanup:
1943 	kfree(bundle);
1944 
1945 	return;
1946 }
1947 
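/* Push a dpms_off update to the stream currently driven by the given link. */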
1948 static void dm_set_dpms_off(struct dc_link *link)
1949 {
1950 	struct dc_stream_state *stream_state;
1951 	struct amdgpu_dm_connector *aconnector = link->priv;
1952 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1953 	struct dc_stream_update stream_update;
1954 	bool dpms_off = true;
1955 
1956 	memset(&stream_update, 0, sizeof(stream_update));
1957 	stream_update.dpms_off = &dpms_off;
1958 
1959 	mutex_lock(&adev->dm.dc_lock);
1960 	stream_state = dc_stream_find_from_link(link);
1961 
1962 	if (stream_state == NULL) {
1963 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1964 		mutex_unlock(&adev->dm.dc_lock);
1965 		return;
1966 	}
1967 
1968 	stream_update.stream = stream_state;
1969 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1970 				     stream_state, &stream_update,
1971 				     stream_state->ctx->dc->current_state);
1972 	mutex_unlock(&adev->dm.dc_lock);
1973 }
1974 
1975 static int dm_resume(void *handle)
1976 {
1977 	struct amdgpu_device *adev = handle;
1978 	struct drm_device *ddev = adev_to_drm(adev);
1979 	struct amdgpu_display_manager *dm = &adev->dm;
1980 	struct amdgpu_dm_connector *aconnector;
1981 	struct drm_connector *connector;
1982 	struct drm_connector_list_iter iter;
1983 	struct drm_crtc *crtc;
1984 	struct drm_crtc_state *new_crtc_state;
1985 	struct dm_crtc_state *dm_new_crtc_state;
1986 	struct drm_plane *plane;
1987 	struct drm_plane_state *new_plane_state;
1988 	struct dm_plane_state *dm_new_plane_state;
1989 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1990 	enum dc_connection_type new_connection_type = dc_connection_none;
1991 	struct dc_state *dc_state;
1992 	int i, r, j;
1993 
1994 	if (amdgpu_in_reset(adev)) {
1995 		dc_state = dm->cached_dc_state;
1996 
1997 		r = dm_dmub_hw_init(adev);
1998 		if (r)
1999 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2000 
2001 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2002 		dc_resume(dm->dc);
2003 
2004 		amdgpu_dm_irq_resume_early(adev);
2005 
2006 		for (i = 0; i < dc_state->stream_count; i++) {
2007 			dc_state->streams[i]->mode_changed = true;
			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
				dc_state->stream_status[i].plane_states[j]->update_flags.raw
					= 0xffffffff;
			}
2012 		}
2013 
2014 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2015 
2016 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2017 
2018 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2019 
2020 		dc_release_state(dm->cached_dc_state);
2021 		dm->cached_dc_state = NULL;
2022 
2023 		amdgpu_dm_irq_resume_late(adev);
2024 
2025 		mutex_unlock(&dm->dc_lock);
2026 
2027 		return 0;
2028 	}
2029 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2030 	dc_release_state(dm_state->context);
2031 	dm_state->context = dc_create_state(dm->dc);
2032 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2033 	dc_resource_state_construct(dm->dc, dm_state->context);
2034 
2035 	/* Before powering on DC we need to re-initialize DMUB. */
2036 	r = dm_dmub_hw_init(adev);
2037 	if (r)
2038 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2039 
2040 	/* power on hardware */
2041 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2042 
2043 	/* program HPD filter */
2044 	dc_resume(dm->dc);
2045 
2046 	/*
2047 	 * early enable HPD Rx IRQ, should be done before set mode as short
2048 	 * pulse interrupts are used for MST
2049 	 */
2050 	amdgpu_dm_irq_resume_early(adev);
2051 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2053 	s3_handle_mst(ddev, false);
2054 
	/* Do detection */
2056 	drm_connector_list_iter_begin(ddev, &iter);
2057 	drm_for_each_connector_iter(connector, &iter) {
2058 		aconnector = to_amdgpu_dm_connector(connector);
2059 
		/*
		 * MST connectors that were already created are handled by
		 * the MST framework, so skip them here.
		 */
2064 		if (aconnector->mst_port)
2065 			continue;
2066 
2067 		mutex_lock(&aconnector->hpd_lock);
2068 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2069 			DRM_ERROR("KMS: Failed to detect connector\n");
2070 
2071 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2072 			emulated_link_detect(aconnector->dc_link);
2073 		else
2074 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2075 
2076 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2077 			aconnector->fake_enable = false;
2078 
2079 		if (aconnector->dc_sink)
2080 			dc_sink_release(aconnector->dc_sink);
2081 		aconnector->dc_sink = NULL;
2082 		amdgpu_dm_update_connector_after_detect(aconnector);
2083 		mutex_unlock(&aconnector->hpd_lock);
2084 	}
2085 	drm_connector_list_iter_end(&iter);
2086 
2087 	/* Force mode set in atomic commit */
2088 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2089 		new_crtc_state->active_changed = true;
2090 
2091 	/*
2092 	 * atomic_check is expected to create the dc states. We need to release
2093 	 * them here, since they were duplicated as part of the suspend
2094 	 * procedure.
2095 	 */
2096 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2097 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2098 		if (dm_new_crtc_state->stream) {
2099 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2100 			dc_stream_release(dm_new_crtc_state->stream);
2101 			dm_new_crtc_state->stream = NULL;
2102 		}
2103 	}
2104 
2105 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2106 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2107 		if (dm_new_plane_state->dc_state) {
2108 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2109 			dc_plane_state_release(dm_new_plane_state->dc_state);
2110 			dm_new_plane_state->dc_state = NULL;
2111 		}
2112 	}
2113 
2114 	drm_atomic_helper_resume(ddev, dm->cached_state);
2115 
2116 	dm->cached_state = NULL;
2117 
2118 	amdgpu_dm_irq_resume_late(adev);
2119 
2120 	amdgpu_dm_smu_write_watermarks_table(adev);
2121 
2122 	return 0;
2123 }
2124 
2125 /**
2126  * DOC: DM Lifecycle
2127  *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2129  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2130  * the base driver's device list to be initialized and torn down accordingly.
2131  *
2132  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2133  */
2134 
2135 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2136 	.name = "dm",
2137 	.early_init = dm_early_init,
2138 	.late_init = dm_late_init,
2139 	.sw_init = dm_sw_init,
2140 	.sw_fini = dm_sw_fini,
2141 	.hw_init = dm_hw_init,
2142 	.hw_fini = dm_hw_fini,
2143 	.suspend = dm_suspend,
2144 	.resume = dm_resume,
2145 	.is_idle = dm_is_idle,
2146 	.wait_for_idle = dm_wait_for_idle,
2147 	.check_soft_reset = dm_check_soft_reset,
2148 	.soft_reset = dm_soft_reset,
2149 	.set_clockgating_state = dm_set_clockgating_state,
2150 	.set_powergating_state = dm_set_powergating_state,
2151 };
2152 
2153 const struct amdgpu_ip_block_version dm_ip_block =
2154 {
2155 	.type = AMD_IP_BLOCK_TYPE_DCE,
2156 	.major = 1,
2157 	.minor = 0,
2158 	.rev = 0,
2159 	.funcs = &amdgpu_dm_funcs,
2160 };
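
/*
 * The base driver consumes this descriptor via amdgpu_device_ip_block_add(),
 * e.g. (from the ASIC setup code, shown here only as an illustrative sketch):
 *
 *	r = amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *	if (r)
 *		return r;
 *
 * which is what wires the amd_ip_funcs hooks above into device init/teardown.
 */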
2161 
2163 /**
2164  * DOC: atomic
2165  *
2166  * *WIP*
2167  */
2168 
2169 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2170 	.fb_create = amdgpu_display_user_framebuffer_create,
2171 	.get_format_info = amd_get_format_info,
2172 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2173 	.atomic_check = amdgpu_dm_atomic_check,
2174 	.atomic_commit = drm_atomic_helper_commit,
2175 };
2176 
2177 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2178 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2179 };
2180 
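/*
 * Derive eDP backlight capabilities for the connector: whether AUX backlight
 * control is supported, and the min/max input signal range computed from the
 * sink's CTA-861-G HDR metadata (max_cll/min_cll).
 */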
2181 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2182 {
2183 	u32 max_cll, min_cll, max, min, q, r;
2184 	struct amdgpu_dm_backlight_caps *caps;
2185 	struct amdgpu_display_manager *dm;
2186 	struct drm_connector *conn_base;
2187 	struct amdgpu_device *adev;
2188 	struct dc_link *link = NULL;
2189 	static const u8 pre_computed_values[] = {
2190 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2191 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2192 
2193 	if (!aconnector || !aconnector->dc_link)
2194 		return;
2195 
2196 	link = aconnector->dc_link;
2197 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2198 		return;
2199 
2200 	conn_base = &aconnector->base;
2201 	adev = drm_to_adev(conn_base->dev);
2202 	dm = &adev->dm;
2203 	caps = &dm->backlight_caps;
2204 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2205 	caps->aux_support = false;
2206 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2207 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2208 
2209 	if (caps->ext_caps->bits.oled == 1 ||
2210 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2211 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2212 		caps->aux_support = true;
2213 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte code value.
	 * Evaluating this expression directly would require floating-point
	 * precision; to avoid that complexity, we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as CV = 32*q + r. Replacing CV in
	 * the luminance expression gives 50*(2**q)*(2**(r/32)), so we only
	 * need to pre-compute the values of 50*2**(r/32). They were generated
	 * with the following Ruby one-liner:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results can be verified against pre_computed_values above.
	 */
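	/*
	 * Worked example with a hypothetical max_cll code value of 70:
	 * q = 70 >> 5 = 2 and r = 70 % 32 = 6, so
	 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
	 * matching round(50 * 2**(70/32.0)) = 228.
	 */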
2229 	q = max_cll >> 5;
2230 	r = max_cll % 32;
2231 	max = (1 << q) * pre_computed_values[r];
2232 
	// min luminance: maxLum * (CV/255)^2 / 100
	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2236 
2237 	caps->aux_max_input_signal = max;
2238 	caps->aux_min_input_signal = min;
2239 }
2240 
2241 void amdgpu_dm_update_connector_after_detect(
2242 		struct amdgpu_dm_connector *aconnector)
2243 {
2244 	struct drm_connector *connector = &aconnector->base;
2245 	struct drm_device *dev = connector->dev;
2246 	struct dc_sink *sink;
2247 
2248 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2250 		return;
2251 
2252 	sink = aconnector->dc_link->local_sink;
2253 	if (sink)
2254 		dc_sink_retain(sink);
2255 
	/*
	 * An EDID-managed connector gets its first update in the mode_valid
	 * hook; after that the connector sink is set to either the fake or
	 * the physical sink, depending on link status.
	 * Skip if this was already done during boot.
	 */
2261 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2262 			&& aconnector->dc_em_sink) {
2263 
		/*
		 * For headless S3 resume, use the emulated sink to fake the
		 * stream, because connector->sink is set to NULL on resume.
		 */
2268 		mutex_lock(&dev->mode_config.mutex);
2269 
2270 		if (sink) {
2271 			if (aconnector->dc_sink) {
2272 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain/release below bump the sink's
				 * refcount because the link no longer points
				 * to it after disconnect; otherwise the next
				 * crtc-to-connector reshuffle by UMD would
				 * trigger an unwanted dc_sink release.
				 */
2279 				dc_sink_release(aconnector->dc_sink);
2280 			}
2281 			aconnector->dc_sink = sink;
2282 			dc_sink_retain(aconnector->dc_sink);
2283 			amdgpu_dm_update_freesync_caps(connector,
2284 					aconnector->edid);
2285 		} else {
2286 			amdgpu_dm_update_freesync_caps(connector, NULL);
2287 			if (!aconnector->dc_sink) {
2288 				aconnector->dc_sink = aconnector->dc_em_sink;
2289 				dc_sink_retain(aconnector->dc_sink);
2290 			}
2291 		}
2292 
2293 		mutex_unlock(&dev->mode_config.mutex);
2294 
2295 		if (sink)
2296 			dc_sink_release(sink);
2297 		return;
2298 	}
2299 
	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
	 */
2304 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2305 		dc_sink_release(sink);
2306 		return;
2307 	}
2308 
2309 	if (aconnector->dc_sink == sink) {
2310 		/*
2311 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2312 		 * Do nothing!!
2313 		 */
2314 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2315 				aconnector->connector_id);
2316 		if (sink)
2317 			dc_sink_release(sink);
2318 		return;
2319 	}
2320 
2321 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2322 		aconnector->connector_id, aconnector->dc_sink, sink);
2323 
2324 	mutex_lock(&dev->mode_config.mutex);
2325 
2326 	/*
2327 	 * 1. Update status of the drm connector
2328 	 * 2. Send an event and let userspace tell us what to do
2329 	 */
2330 	if (sink) {
2331 		/*
2332 		 * TODO: check if we still need the S3 mode update workaround.
2333 		 * If yes, put it here.
2334 		 */
2335 		if (aconnector->dc_sink)
2336 			amdgpu_dm_update_freesync_caps(connector, NULL);
2337 
2338 		aconnector->dc_sink = sink;
2339 		dc_sink_retain(aconnector->dc_sink);
2340 		if (sink->dc_edid.length == 0) {
2341 			aconnector->edid = NULL;
2342 			if (aconnector->dc_link->aux_mode) {
2343 				drm_dp_cec_unset_edid(
2344 					&aconnector->dm_dp_aux.aux);
2345 			}
2346 		} else {
2347 			aconnector->edid =
2348 				(struct edid *)sink->dc_edid.raw_edid;
2349 
2350 			drm_connector_update_edid_property(connector,
2351 							   aconnector->edid);
2352 			drm_add_edid_modes(connector, aconnector->edid);
2353 
2354 			if (aconnector->dc_link->aux_mode)
2355 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2356 						    aconnector->edid);
2357 		}
2358 
2359 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2360 		update_connector_ext_caps(aconnector);
2361 	} else {
2362 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2363 		amdgpu_dm_update_freesync_caps(connector, NULL);
2364 		drm_connector_update_edid_property(connector, NULL);
2365 		aconnector->num_modes = 0;
2366 		dc_sink_release(aconnector->dc_sink);
2367 		aconnector->dc_sink = NULL;
2368 		aconnector->edid = NULL;
2369 #ifdef CONFIG_DRM_AMD_DC_HDCP
2370 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2371 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2372 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2373 #endif
2374 	}
2375 
2376 	mutex_unlock(&dev->mode_config.mutex);
2377 
2378 	update_subconnector_property(aconnector);
2379 
2380 	if (sink)
2381 		dc_sink_release(sink);
2382 }
2383 
2384 static void handle_hpd_irq(void *param)
2385 {
2386 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2387 	struct drm_connector *connector = &aconnector->base;
2388 	struct drm_device *dev = connector->dev;
2389 	enum dc_connection_type new_connection_type = dc_connection_none;
2390 #ifdef CONFIG_DRM_AMD_DC_HDCP
2391 	struct amdgpu_device *adev = drm_to_adev(dev);
2392 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2393 #endif
2394 
	/*
	 * In case of failure, or for the MST case, there is no need to
	 * update the connector status or notify the OS, since MST does
	 * this in its own context.
	 */
2399 	mutex_lock(&aconnector->hpd_lock);
2400 
2401 #ifdef CONFIG_DRM_AMD_DC_HDCP
2402 	if (adev->dm.hdcp_workqueue) {
2403 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2404 		dm_con_state->update_hdcp = true;
2405 	}
2406 #endif
2407 	if (aconnector->fake_enable)
2408 		aconnector->fake_enable = false;
2409 
2410 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2411 		DRM_ERROR("KMS: Failed to detect connector\n");
2412 
2413 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2414 		emulated_link_detect(aconnector->dc_link);
2415 
2417 		drm_modeset_lock_all(dev);
2418 		dm_restore_drm_connector_state(dev, connector);
2419 		drm_modeset_unlock_all(dev);
2420 
2421 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2422 			drm_kms_helper_hotplug_event(dev);
2423 
2424 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2425 		if (new_connection_type == dc_connection_none &&
2426 		    aconnector->dc_link->type == dc_connection_none)
2427 			dm_set_dpms_off(aconnector->dc_link);
2428 
2429 		amdgpu_dm_update_connector_after_detect(aconnector);
2430 
2431 		drm_modeset_lock_all(dev);
2432 		dm_restore_drm_connector_state(dev, connector);
2433 		drm_modeset_unlock_all(dev);
2434 
2435 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2436 			drm_kms_helper_hotplug_event(dev);
2437 	}
2438 	mutex_unlock(&aconnector->hpd_lock);
2439 
2440 }
2441 
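/*
 * Drain MST short-pulse interrupts: read the sink's ESI (or legacy sink
 * count/status) registers, hand each pending event to the DP MST helper,
 * ACK it back at the sink's DPCD, and repeat until no new IRQ is pending
 * or the iteration cap is hit.
 */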
2442 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2443 {
2444 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2445 	uint8_t dret;
2446 	bool new_irq_handled = false;
2447 	int dpcd_addr;
2448 	int dpcd_bytes_to_read;
2449 
2450 	const int max_process_count = 30;
2451 	int process_count = 0;
2452 
2453 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2454 
2455 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2456 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2457 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2458 		dpcd_addr = DP_SINK_COUNT;
2459 	} else {
2460 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2461 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2462 		dpcd_addr = DP_SINK_COUNT_ESI;
2463 	}
2464 
2465 	dret = drm_dp_dpcd_read(
2466 		&aconnector->dm_dp_aux.aux,
2467 		dpcd_addr,
2468 		esi,
2469 		dpcd_bytes_to_read);
2470 
2471 	while (dret == dpcd_bytes_to_read &&
2472 		process_count < max_process_count) {
2473 		uint8_t retry;
2474 		dret = 0;
2475 
2476 		process_count++;
2477 
2478 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2479 		/* handle HPD short pulse irq */
2480 		if (aconnector->mst_mgr.mst_state)
2481 			drm_dp_mst_hpd_irq(
2482 				&aconnector->mst_mgr,
2483 				esi,
2484 				&new_irq_handled);
2485 
2486 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2488 			const int ack_dpcd_bytes_to_write =
2489 				dpcd_bytes_to_read - 1;
2490 
2491 			for (retry = 0; retry < 3; retry++) {
2492 				uint8_t wret;
2493 
2494 				wret = drm_dp_dpcd_write(
2495 					&aconnector->dm_dp_aux.aux,
2496 					dpcd_addr + 1,
2497 					&esi[1],
2498 					ack_dpcd_bytes_to_write);
2499 				if (wret == ack_dpcd_bytes_to_write)
2500 					break;
2501 			}
2502 
2503 			/* check if there is new irq to be handled */
2504 			dret = drm_dp_dpcd_read(
2505 				&aconnector->dm_dp_aux.aux,
2506 				dpcd_addr,
2507 				esi,
2508 				dpcd_bytes_to_read);
2509 
2510 			new_irq_handled = false;
2511 		} else {
2512 			break;
2513 		}
2514 	}
2515 
2516 	if (process_count == max_process_count)
2517 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2518 }
2519 
2520 static void handle_hpd_rx_irq(void *param)
2521 {
2522 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2523 	struct drm_connector *connector = &aconnector->base;
2524 	struct drm_device *dev = connector->dev;
2525 	struct dc_link *dc_link = aconnector->dc_link;
2526 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2527 	bool result = false;
2528 	enum dc_connection_type new_connection_type = dc_connection_none;
2529 	struct amdgpu_device *adev = drm_to_adev(dev);
2530 	union hpd_irq_data hpd_irq_data;
2531 
2532 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2533 
	/*
	 * TODO: Temporarily use a mutex to protect the HPD interrupt from
	 * GPIO conflicts; once an i2c helper is implemented, this mutex
	 * should be retired.
	 */
2539 	if (dc_link->type != dc_connection_mst_branch)
2540 		mutex_lock(&aconnector->hpd_lock);
2541 
2542 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2543 
2544 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2545 		(dc_link->type == dc_connection_mst_branch)) {
2546 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2547 			result = true;
2548 			dm_handle_hpd_rx_irq(aconnector);
2549 			goto out;
2550 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2551 			result = false;
2552 			dm_handle_hpd_rx_irq(aconnector);
2553 			goto out;
2554 		}
2555 	}
2556 
2557 	mutex_lock(&adev->dm.dc_lock);
2558 #ifdef CONFIG_DRM_AMD_DC_HDCP
2559 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2560 #else
2561 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2562 #endif
2563 	mutex_unlock(&adev->dm.dc_lock);
2564 
2565 out:
2566 	if (result && !is_mst_root_connector) {
2567 		/* Downstream Port status changed. */
2568 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2569 			DRM_ERROR("KMS: Failed to detect connector\n");
2570 
2571 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2572 			emulated_link_detect(dc_link);
2573 
2574 			if (aconnector->fake_enable)
2575 				aconnector->fake_enable = false;
2576 
2577 			amdgpu_dm_update_connector_after_detect(aconnector);
2578 
2580 			drm_modeset_lock_all(dev);
2581 			dm_restore_drm_connector_state(dev, connector);
2582 			drm_modeset_unlock_all(dev);
2583 
2584 			drm_kms_helper_hotplug_event(dev);
2585 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2586 
2587 			if (aconnector->fake_enable)
2588 				aconnector->fake_enable = false;
2589 
2590 			amdgpu_dm_update_connector_after_detect(aconnector);
2591 
2593 			drm_modeset_lock_all(dev);
2594 			dm_restore_drm_connector_state(dev, connector);
2595 			drm_modeset_unlock_all(dev);
2596 
2597 			drm_kms_helper_hotplug_event(dev);
2598 		}
2599 	}
2600 #ifdef CONFIG_DRM_AMD_DC_HDCP
2601 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2602 		if (adev->dm.hdcp_workqueue)
			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2604 	}
2605 #endif
2606 
2607 	if (dc_link->type != dc_connection_mst_branch) {
2608 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2609 		mutex_unlock(&aconnector->hpd_lock);
2610 	}
2611 }
2612 
2613 static void register_hpd_handlers(struct amdgpu_device *adev)
2614 {
2615 	struct drm_device *dev = adev_to_drm(adev);
2616 	struct drm_connector *connector;
2617 	struct amdgpu_dm_connector *aconnector;
2618 	const struct dc_link *dc_link;
2619 	struct dc_interrupt_params int_params = {0};
2620 
2621 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2622 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2623 
2624 	list_for_each_entry(connector,
2625 			&dev->mode_config.connector_list, head)	{
2626 
2627 		aconnector = to_amdgpu_dm_connector(connector);
2628 		dc_link = aconnector->dc_link;
2629 
2630 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2631 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2632 			int_params.irq_source = dc_link->irq_source_hpd;
2633 
2634 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2635 					handle_hpd_irq,
2636 					(void *) aconnector);
2637 		}
2638 
2639 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2640 
2641 			/* Also register for DP short pulse (hpd_rx). */
2642 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
2644 
2645 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2646 					handle_hpd_rx_irq,
2647 					(void *) aconnector);
2648 		}
2649 	}
2650 }
2651 
2652 #if defined(CONFIG_DRM_AMD_DC_SI)
2653 /* Register IRQ sources and initialize IRQ callbacks */
2654 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2655 {
2656 	struct dc *dc = adev->dm.dc;
2657 	struct common_irq_params *c_irq_params;
2658 	struct dc_interrupt_params int_params = {0};
2659 	int r;
2660 	int i;
2661 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2662 
2663 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2664 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2665 
	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2676 
2677 	/* Use VBLANK interrupt */
2678 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2680 		if (r) {
2681 			DRM_ERROR("Failed to add crtc irq id!\n");
2682 			return r;
2683 		}
2684 
2685 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2686 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
2688 
2689 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2690 
2691 		c_irq_params->adev = adev;
2692 		c_irq_params->irq_src = int_params.irq_source;
2693 
2694 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2695 				dm_crtc_high_irq, c_irq_params);
2696 	}
2697 
2698 	/* Use GRPH_PFLIP interrupt */
2699 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2700 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2701 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2702 		if (r) {
2703 			DRM_ERROR("Failed to add page flip irq id!\n");
2704 			return r;
2705 		}
2706 
2707 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2708 		int_params.irq_source =
2709 			dc_interrupt_to_irq_source(dc, i, 0);
2710 
2711 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2712 
2713 		c_irq_params->adev = adev;
2714 		c_irq_params->irq_src = int_params.irq_source;
2715 
2716 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2717 				dm_pflip_high_irq, c_irq_params);
2718 
2719 	}
2720 
2721 	/* HPD */
2722 	r = amdgpu_irq_add_id(adev, client_id,
2723 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2724 	if (r) {
2725 		DRM_ERROR("Failed to add hpd irq id!\n");
2726 		return r;
2727 	}
2728 
2729 	register_hpd_handlers(adev);
2730 
2731 	return 0;
2732 }
2733 #endif
2734 
2735 /* Register IRQ sources and initialize IRQ callbacks */
2736 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2737 {
2738 	struct dc *dc = adev->dm.dc;
2739 	struct common_irq_params *c_irq_params;
2740 	struct dc_interrupt_params int_params = {0};
2741 	int r;
2742 	int i;
2743 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2744 
2745 	if (adev->asic_type >= CHIP_VEGA10)
2746 		client_id = SOC15_IH_CLIENTID_DCE;
2747 
2748 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2749 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2750 
	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
2761 
2762 	/* Use VBLANK interrupt */
2763 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2764 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2765 		if (r) {
2766 			DRM_ERROR("Failed to add crtc irq id!\n");
2767 			return r;
2768 		}
2769 
2770 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2771 		int_params.irq_source =
2772 			dc_interrupt_to_irq_source(dc, i, 0);
2773 
2774 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2775 
2776 		c_irq_params->adev = adev;
2777 		c_irq_params->irq_src = int_params.irq_source;
2778 
2779 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2780 				dm_crtc_high_irq, c_irq_params);
2781 	}
2782 
2783 	/* Use VUPDATE interrupt */
2784 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2785 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2786 		if (r) {
2787 			DRM_ERROR("Failed to add vupdate irq id!\n");
2788 			return r;
2789 		}
2790 
2791 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2792 		int_params.irq_source =
2793 			dc_interrupt_to_irq_source(dc, i, 0);
2794 
2795 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2796 
2797 		c_irq_params->adev = adev;
2798 		c_irq_params->irq_src = int_params.irq_source;
2799 
2800 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2801 				dm_vupdate_high_irq, c_irq_params);
2802 	}
2803 
2804 	/* Use GRPH_PFLIP interrupt */
2805 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2806 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2807 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2808 		if (r) {
2809 			DRM_ERROR("Failed to add page flip irq id!\n");
2810 			return r;
2811 		}
2812 
2813 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2814 		int_params.irq_source =
2815 			dc_interrupt_to_irq_source(dc, i, 0);
2816 
2817 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2818 
2819 		c_irq_params->adev = adev;
2820 		c_irq_params->irq_src = int_params.irq_source;
2821 
2822 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2823 				dm_pflip_high_irq, c_irq_params);
2824 
2825 	}
2826 
2827 	/* HPD */
2828 	r = amdgpu_irq_add_id(adev, client_id,
2829 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2830 	if (r) {
2831 		DRM_ERROR("Failed to add hpd irq id!\n");
2832 		return r;
2833 	}
2834 
2835 	register_hpd_handlers(adev);
2836 
2837 	return 0;
2838 }
2839 
2840 #if defined(CONFIG_DRM_AMD_DC_DCN)
2841 /* Register IRQ sources and initialize IRQ callbacks */
2842 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2843 {
2844 	struct dc *dc = adev->dm.dc;
2845 	struct common_irq_params *c_irq_params;
2846 	struct dc_interrupt_params int_params = {0};
2847 	int r;
2848 	int i;
2849 
2850 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2851 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2852 
2853 	/*
2854 	 * Actions of amdgpu_irq_add_id():
2855 	 * 1. Register a set() function with base driver.
2856 	 *    Base driver will call set() function to enable/disable an
2857 	 *    interrupt in DC hardware.
2858 	 * 2. Register amdgpu_dm_irq_handler().
2859 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2860 	 *    coming from DC hardware.
2861 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2862 	 *    for acknowledging and handling.
2863 	 */
2864 
2865 	/* Use VSTARTUP interrupt */
2866 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2867 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2868 			i++) {
2869 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2870 
2871 		if (r) {
2872 			DRM_ERROR("Failed to add crtc irq id!\n");
2873 			return r;
2874 		}
2875 
2876 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2877 		int_params.irq_source =
2878 			dc_interrupt_to_irq_source(dc, i, 0);
2879 
2880 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2881 
2882 		c_irq_params->adev = adev;
2883 		c_irq_params->irq_src = int_params.irq_source;
2884 
2885 		amdgpu_dm_irq_register_interrupt(
2886 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2887 	}
2888 
2889 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2890 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2891 	 * to trigger at end of each vblank, regardless of state of the lock,
2892 	 * matching DCE behaviour.
2893 	 */
2894 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2895 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2896 	     i++) {
2897 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2898 
2899 		if (r) {
2900 			DRM_ERROR("Failed to add vupdate irq id!\n");
2901 			return r;
2902 		}
2903 
2904 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2905 		int_params.irq_source =
2906 			dc_interrupt_to_irq_source(dc, i, 0);
2907 
2908 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2909 
2910 		c_irq_params->adev = adev;
2911 		c_irq_params->irq_src = int_params.irq_source;
2912 
2913 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2914 				dm_vupdate_high_irq, c_irq_params);
2915 	}
2916 
2917 	/* Use GRPH_PFLIP interrupt */
2918 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2919 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2920 			i++) {
2921 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2922 		if (r) {
2923 			DRM_ERROR("Failed to add page flip irq id!\n");
2924 			return r;
2925 		}
2926 
2927 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2928 		int_params.irq_source =
2929 			dc_interrupt_to_irq_source(dc, i, 0);
2930 
2931 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2932 
2933 		c_irq_params->adev = adev;
2934 		c_irq_params->irq_src = int_params.irq_source;
2935 
2936 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2937 				dm_pflip_high_irq, c_irq_params);
2938 
2939 	}
2940 
2941 	/* HPD */
2942 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2943 			&adev->hpd_irq);
2944 	if (r) {
2945 		DRM_ERROR("Failed to add hpd irq id!\n");
2946 		return r;
2947 	}
2948 
2949 	register_hpd_handlers(adev);
2950 
2951 	return 0;
2952 }
2953 #endif
2954 
2955 /*
2956  * Acquires the lock for the atomic state object and returns
2957  * the new atomic state.
2958  *
2959  * This should only be called during atomic check.
2960  */
2961 static int dm_atomic_get_state(struct drm_atomic_state *state,
2962 			       struct dm_atomic_state **dm_state)
2963 {
2964 	struct drm_device *dev = state->dev;
2965 	struct amdgpu_device *adev = drm_to_adev(dev);
2966 	struct amdgpu_display_manager *dm = &adev->dm;
2967 	struct drm_private_state *priv_state;
2968 
2969 	if (*dm_state)
2970 		return 0;
2971 
2972 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2973 	if (IS_ERR(priv_state))
2974 		return PTR_ERR(priv_state);
2975 
2976 	*dm_state = to_dm_atomic_state(priv_state);
2977 
2978 	return 0;
2979 }
2980 
2981 static struct dm_atomic_state *
2982 dm_atomic_get_new_state(struct drm_atomic_state *state)
2983 {
2984 	struct drm_device *dev = state->dev;
2985 	struct amdgpu_device *adev = drm_to_adev(dev);
2986 	struct amdgpu_display_manager *dm = &adev->dm;
2987 	struct drm_private_obj *obj;
2988 	struct drm_private_state *new_obj_state;
2989 	int i;
2990 
2991 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2992 		if (obj->funcs == dm->atomic_obj.funcs)
2993 			return to_dm_atomic_state(new_obj_state);
2994 	}
2995 
2996 	return NULL;
2997 }
2998 
2999 static struct drm_private_state *
3000 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3001 {
3002 	struct dm_atomic_state *old_state, *new_state;
3003 
3004 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3005 	if (!new_state)
3006 		return NULL;
3007 
3008 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3009 
3010 	old_state = to_dm_atomic_state(obj->state);
3011 
3012 	if (old_state && old_state->context)
3013 		new_state->context = dc_copy_state(old_state->context);
3014 
3015 	if (!new_state->context) {
3016 		kfree(new_state);
3017 		return NULL;
3018 	}
3019 
3020 	return &new_state->base;
3021 }
3022 
3023 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3024 				    struct drm_private_state *state)
3025 {
3026 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3027 
3028 	if (dm_state && dm_state->context)
3029 		dc_release_state(dm_state->context);
3030 
3031 	kfree(dm_state);
3032 }
3033 
3034 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3035 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3036 	.atomic_destroy_state = dm_atomic_destroy_state,
3037 };
3038 
3039 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3040 {
3041 	struct dm_atomic_state *state;
3042 	int r;
3043 
3044 	adev->mode_info.mode_config_initialized = true;
3045 
3046 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3047 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3048 
3049 	adev_to_drm(adev)->mode_config.max_width = 16384;
3050 	adev_to_drm(adev)->mode_config.max_height = 16384;
3051 
3052 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3053 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3054 	/* indicates support for immediate flip */
3055 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3056 
3057 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3058 
3059 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3060 	if (!state)
3061 		return -ENOMEM;
3062 
3063 	state->context = dc_create_state(adev->dm.dc);
3064 	if (!state->context) {
3065 		kfree(state);
3066 		return -ENOMEM;
3067 	}
3068 
3069 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3070 
3071 	drm_atomic_private_obj_init(adev_to_drm(adev),
3072 				    &adev->dm.atomic_obj,
3073 				    &state->base,
3074 				    &dm_atomic_state_funcs);
3075 
3076 	r = amdgpu_display_modeset_create_props(adev);
3077 	if (r) {
3078 		dc_release_state(state->context);
3079 		kfree(state);
3080 		return r;
3081 	}
3082 
3083 	r = amdgpu_dm_audio_init(adev);
3084 	if (r) {
3085 		dc_release_state(state->context);
3086 		kfree(state);
3087 		return r;
3088 	}
3089 
3090 	return 0;
3091 }
3092 
3093 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3094 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3095 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3096 
3097 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3098 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3099 
3100 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3101 {
3102 #if defined(CONFIG_ACPI)
3103 	struct amdgpu_dm_backlight_caps caps;
3104 
3105 	memset(&caps, 0, sizeof(caps));
3106 
3107 	if (dm->backlight_caps.caps_valid)
3108 		return;
3109 
3110 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3111 	if (caps.caps_valid) {
3112 		dm->backlight_caps.caps_valid = true;
3113 		if (caps.aux_support)
3114 			return;
3115 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3116 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3117 	} else {
3118 		dm->backlight_caps.min_input_signal =
3119 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3120 		dm->backlight_caps.max_input_signal =
3121 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3122 	}
3123 #else
3124 	if (dm->backlight_caps.aux_support)
3125 		return;
3126 
3127 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3128 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3129 #endif
3130 }
3131 
3132 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3133 {
3134 	bool rc;
3135 
3136 	if (!link)
3137 		return 1;
3138 
3139 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3140 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3141 
3142 	return rc ? 0 : 1;
3143 }
3144 
3145 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3146 				unsigned *min, unsigned *max)
3147 {
3148 	if (!caps)
3149 		return 0;
3150 
3151 	if (caps->aux_support) {
3152 		// Firmware limits are in nits, DC API wants millinits.
3153 		*max = 1000 * caps->aux_max_input_signal;
3154 		*min = 1000 * caps->aux_min_input_signal;
3155 	} else {
3156 		// Firmware limits are 8-bit, PWM control is 16-bit.
3157 		*max = 0x101 * caps->max_input_signal;
3158 		*min = 0x101 * caps->min_input_signal;
3159 	}
3160 	return 1;
3161 }
3162 
3163 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3164 					uint32_t brightness)
3165 {
3166 	unsigned min, max;
3167 
3168 	if (!get_brightness_range(caps, &min, &max))
3169 		return brightness;
3170 
3171 	// Rescale 0..255 to min..max
3172 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3173 				       AMDGPU_MAX_BL_LEVEL);
3174 }
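
/*
 * Worked example for the PWM path, assuming hypothetical firmware caps of
 * min_input_signal = 12 and max_input_signal = 255: get_brightness_range()
 * yields min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 maps to 3084 + (65535 - 3084) * 128 / 255, i.e. about
 * 34432 in the 16-bit PWM range.
 */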
3175 
3176 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3177 				      uint32_t brightness)
3178 {
3179 	unsigned min, max;
3180 
3181 	if (!get_brightness_range(caps, &min, &max))
3182 		return brightness;
3183 
3184 	if (brightness < min)
3185 		return 0;
3186 	// Rescale min..max to 0..255
3187 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3188 				 max - min);
3189 }
3190 
3191 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3192 {
3193 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3194 	struct amdgpu_dm_backlight_caps caps;
3195 	struct dc_link *link = NULL;
3196 	u32 brightness;
3197 	bool rc;
3198 
3199 	amdgpu_dm_update_backlight_caps(dm);
3200 	caps = dm->backlight_caps;
3201 
3202 	link = (struct dc_link *)dm->backlight_link;
3203 
3204 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3205 	// Change brightness based on AUX property
3206 	if (caps.aux_support)
3207 		return set_backlight_via_aux(link, brightness);
3208 
3209 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3210 
3211 	return rc ? 0 : 1;
3212 }
3213 
3214 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3215 {
3216 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3217 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3218 
3219 	if (ret == DC_ERROR_UNEXPECTED)
3220 		return bd->props.brightness;
3221 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3222 }
3223 
3224 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3225 	.options = BL_CORE_SUSPENDRESUME,
3226 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3227 	.update_status	= amdgpu_dm_backlight_update_status,
3228 };
3229 
3230 static void
3231 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3232 {
3233 	char bl_name[16];
3234 	struct backlight_properties props = { 0 };
3235 
3236 	amdgpu_dm_update_backlight_caps(dm);
3237 
3238 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3239 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3240 	props.type = BACKLIGHT_RAW;
3241 
3242 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3243 		 adev_to_drm(dm->adev)->primary->index);
3244 
3245 	dm->backlight_dev = backlight_device_register(bl_name,
3246 						      adev_to_drm(dm->adev)->dev,
3247 						      dm,
3248 						      &amdgpu_dm_backlight_ops,
3249 						      &props);
3250 
3251 	if (IS_ERR(dm->backlight_dev))
3252 		DRM_ERROR("DM: Backlight registration failed!\n");
3253 	else
3254 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3255 }
3256 
3257 #endif
3258 
3259 static int initialize_plane(struct amdgpu_display_manager *dm,
3260 			    struct amdgpu_mode_info *mode_info, int plane_id,
3261 			    enum drm_plane_type plane_type,
3262 			    const struct dc_plane_cap *plane_cap)
3263 {
3264 	struct drm_plane *plane;
3265 	unsigned long possible_crtcs;
3266 	int ret = 0;
3267 
	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3269 	if (!plane) {
3270 		DRM_ERROR("KMS: Failed to allocate plane\n");
3271 		return -ENOMEM;
3272 	}
3273 	plane->type = plane_type;
3274 
3275 	/*
3276 	 * HACK: IGT tests expect that the primary plane for a CRTC
3277 	 * can only have one possible CRTC. Only expose support for
3278 	 * any CRTC if they're not going to be used as a primary plane
3279 	 * for a CRTC - like overlay or underlay planes.
3280 	 */
3281 	possible_crtcs = 1 << plane_id;
3282 	if (plane_id >= dm->dc->caps.max_streams)
3283 		possible_crtcs = 0xff;
3284 
3285 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3286 
3287 	if (ret) {
3288 		DRM_ERROR("KMS: Failed to initialize plane\n");
3289 		kfree(plane);
3290 		return ret;
3291 	}
3292 
3293 	if (mode_info)
3294 		mode_info->planes[plane_id] = plane;
3295 
3296 	return ret;
3297 }
3298 
3299 
3300 static void register_backlight_device(struct amdgpu_display_manager *dm,
3301 				      struct dc_link *link)
3302 {
3303 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3304 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3305 
3306 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3307 	    link->type != dc_connection_none) {
3308 		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
3312 		 */
3313 		amdgpu_dm_register_backlight_device(dm);
3314 
3315 		if (dm->backlight_dev)
3316 			dm->backlight_link = link;
3317 	}
3318 #endif
3319 }
3320 
3321 
3322 /*
3323  * In this architecture, the association
3324  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL components.
3327  *
3328  * Returns 0 on success
3329  */
3330 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3331 {
3332 	struct amdgpu_display_manager *dm = &adev->dm;
3333 	int32_t i;
3334 	struct amdgpu_dm_connector *aconnector = NULL;
3335 	struct amdgpu_encoder *aencoder = NULL;
3336 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3337 	uint32_t link_cnt;
3338 	int32_t primary_planes;
3339 	enum dc_connection_type new_connection_type = dc_connection_none;
3340 	const struct dc_plane_cap *plane;
3341 
3342 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs used */
3344 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3345 
3346 	link_cnt = dm->dc->caps.max_links;
3347 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3348 		DRM_ERROR("DM: Failed to initialize mode config\n");
3349 		return -EINVAL;
3350 	}
3351 
3352 	/* There is one primary plane per CRTC */
3353 	primary_planes = dm->dc->caps.max_streams;
3354 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3355 
3356 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3358 	 * Order is reversed to match iteration order in atomic check.
3359 	 */
3360 	for (i = (primary_planes - 1); i >= 0; i--) {
3361 		plane = &dm->dc->caps.planes[i];
3362 
3363 		if (initialize_plane(dm, mode_info, i,
3364 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3365 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3366 			goto fail;
3367 		}
3368 	}
3369 
3370 	/*
3371 	 * Initialize overlay planes, index starting after primary planes.
3372 	 * These planes have a higher DRM index than the primary planes since
3373 	 * they should be considered as having a higher z-order.
3374 	 * Order is reversed to match iteration order in atomic check.
3375 	 *
3376 	 * Only support DCN for now, and only expose one so we don't encourage
3377 	 * userspace to use up all the pipes.
3378 	 */
3379 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3380 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3381 
3382 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3383 			continue;
3384 
3385 		if (!plane->blends_with_above || !plane->blends_with_below)
3386 			continue;
3387 
3388 		if (!plane->pixel_format_support.argb8888)
3389 			continue;
3390 
3391 		if (initialize_plane(dm, NULL, primary_planes + i,
3392 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3393 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3394 			goto fail;
3395 		}
3396 
3397 		/* Only create one overlay plane. */
3398 		break;
3399 	}
3400 
3401 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3402 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3403 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3404 			goto fail;
3405 		}
3406 
3407 	/* loops over all connectors on the board */
3408 	for (i = 0; i < link_cnt; i++) {
3409 		struct dc_link *link = NULL;
3410 
3411 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3412 			DRM_ERROR(
3413 				"KMS: Cannot support more than %d display indexes\n",
3414 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3415 			continue;
3416 		}
3417 
3418 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3419 		if (!aconnector)
3420 			goto fail;
3421 
3422 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3423 		if (!aencoder)
3424 			goto fail;
3425 
3426 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3427 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3428 			goto fail;
3429 		}
3430 
3431 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3432 			DRM_ERROR("KMS: Failed to initialize connector\n");
3433 			goto fail;
3434 		}
3435 
3436 		link = dc_get_link_at_index(dm->dc, i);
3437 
3438 		if (!dc_link_detect_sink(link, &new_connection_type))
3439 			DRM_ERROR("KMS: Failed to detect connector\n");
3440 
3441 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3442 			emulated_link_detect(link);
3443 			amdgpu_dm_update_connector_after_detect(aconnector);
3444 
3445 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3446 			amdgpu_dm_update_connector_after_detect(aconnector);
3447 			register_backlight_device(dm, link);
3448 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3449 				amdgpu_dm_set_psr_caps(link);
3450 		}
3451 
3453 	}
3454 
3455 	/* Software is initialized. Now we can register interrupt handlers. */
3456 	switch (adev->asic_type) {
3457 #if defined(CONFIG_DRM_AMD_DC_SI)
3458 	case CHIP_TAHITI:
3459 	case CHIP_PITCAIRN:
3460 	case CHIP_VERDE:
3461 	case CHIP_OLAND:
3462 		if (dce60_register_irq_handlers(dm->adev)) {
3463 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3464 			goto fail;
3465 		}
3466 		break;
3467 #endif
3468 	case CHIP_BONAIRE:
3469 	case CHIP_HAWAII:
3470 	case CHIP_KAVERI:
3471 	case CHIP_KABINI:
3472 	case CHIP_MULLINS:
3473 	case CHIP_TONGA:
3474 	case CHIP_FIJI:
3475 	case CHIP_CARRIZO:
3476 	case CHIP_STONEY:
3477 	case CHIP_POLARIS11:
3478 	case CHIP_POLARIS10:
3479 	case CHIP_POLARIS12:
3480 	case CHIP_VEGAM:
3481 	case CHIP_VEGA10:
3482 	case CHIP_VEGA12:
3483 	case CHIP_VEGA20:
3484 		if (dce110_register_irq_handlers(dm->adev)) {
3485 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3486 			goto fail;
3487 		}
3488 		break;
3489 #if defined(CONFIG_DRM_AMD_DC_DCN)
3490 	case CHIP_RAVEN:
3491 	case CHIP_NAVI12:
3492 	case CHIP_NAVI10:
3493 	case CHIP_NAVI14:
3494 	case CHIP_RENOIR:
3495 	case CHIP_SIENNA_CICHLID:
3496 	case CHIP_NAVY_FLOUNDER:
3497 	case CHIP_DIMGREY_CAVEFISH:
3498 	case CHIP_VANGOGH:
3499 		if (dcn10_register_irq_handlers(dm->adev)) {
3500 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3501 			goto fail;
3502 		}
3503 		break;
3504 #endif
3505 	default:
3506 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3507 		goto fail;
3508 	}
3509 
3510 	return 0;
3511 fail:
3512 	kfree(aencoder);
3513 	kfree(aconnector);
3514 
3515 	return -EINVAL;
3516 }
3517 
3518 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3519 {
3520 	drm_mode_config_cleanup(dm->ddev);
3521 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3523 }
3524 
3525 /******************************************************************************
3526  * amdgpu_display_funcs functions
3527  *****************************************************************************/
3528 
3529 /*
3530  * dm_bandwidth_update - program display watermarks
3531  *
3532  * @adev: amdgpu_device pointer
3533  *
3534  * Calculate and program the display watermarks and line buffer allocation.
3535  */
3536 static void dm_bandwidth_update(struct amdgpu_device *adev)
3537 {
3538 	/* TODO: implement later */
3539 }
3540 
3541 static const struct amdgpu_display_funcs dm_display_funcs = {
3542 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3543 	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3544 	.backlight_set_level = NULL, /* never called for DC */
3545 	.backlight_get_level = NULL, /* never called for DC */
3546 	.hpd_sense = NULL, /* called unconditionally */
3547 	.hpd_set_polarity = NULL, /* called unconditionally */
3548 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3549 	.page_flip_get_scanoutpos =
3550 		dm_crtc_get_scanoutpos, /* called unconditionally */
3551 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3552 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3553 };
3554 
3555 #if defined(CONFIG_DEBUG_KERNEL_DC)
3556 
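/*
 * Debug-only sysfs hook: writing a nonzero value fakes a resume (dm_resume()
 * plus a hotplug event), writing zero fakes a suspend, bypassing the normal
 * system PM paths.
 */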
3557 static ssize_t s3_debug_store(struct device *device,
3558 			      struct device_attribute *attr,
3559 			      const char *buf,
3560 			      size_t count)
3561 {
3562 	int ret;
3563 	int s3_state;
3564 	struct drm_device *drm_dev = dev_get_drvdata(device);
3565 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3566 
3567 	ret = kstrtoint(buf, 0, &s3_state);
3568 
3569 	if (ret == 0) {
3570 		if (s3_state) {
3571 			dm_resume(adev);
3572 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3573 		} else {
3574 			dm_suspend(adev);
		}
3575 	}
3576 
3577 	return ret == 0 ? count : 0;
3578 }
3579 
3580 DEVICE_ATTR_WO(s3_debug);
3581 
3582 #endif
3583 
3584 static int dm_early_init(void *handle)
3585 {
3586 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3587 
3588 	switch (adev->asic_type) {
3589 #if defined(CONFIG_DRM_AMD_DC_SI)
3590 	case CHIP_TAHITI:
3591 	case CHIP_PITCAIRN:
3592 	case CHIP_VERDE:
3593 		adev->mode_info.num_crtc = 6;
3594 		adev->mode_info.num_hpd = 6;
3595 		adev->mode_info.num_dig = 6;
3596 		break;
3597 	case CHIP_OLAND:
3598 		adev->mode_info.num_crtc = 2;
3599 		adev->mode_info.num_hpd = 2;
3600 		adev->mode_info.num_dig = 2;
3601 		break;
3602 #endif
3603 	case CHIP_BONAIRE:
3604 	case CHIP_HAWAII:
3605 		adev->mode_info.num_crtc = 6;
3606 		adev->mode_info.num_hpd = 6;
3607 		adev->mode_info.num_dig = 6;
3608 		break;
3609 	case CHIP_KAVERI:
3610 		adev->mode_info.num_crtc = 4;
3611 		adev->mode_info.num_hpd = 6;
3612 		adev->mode_info.num_dig = 7;
3613 		break;
3614 	case CHIP_KABINI:
3615 	case CHIP_MULLINS:
3616 		adev->mode_info.num_crtc = 2;
3617 		adev->mode_info.num_hpd = 6;
3618 		adev->mode_info.num_dig = 6;
3619 		break;
3620 	case CHIP_FIJI:
3621 	case CHIP_TONGA:
3622 		adev->mode_info.num_crtc = 6;
3623 		adev->mode_info.num_hpd = 6;
3624 		adev->mode_info.num_dig = 7;
3625 		break;
3626 	case CHIP_CARRIZO:
3627 		adev->mode_info.num_crtc = 3;
3628 		adev->mode_info.num_hpd = 6;
3629 		adev->mode_info.num_dig = 9;
3630 		break;
3631 	case CHIP_STONEY:
3632 		adev->mode_info.num_crtc = 2;
3633 		adev->mode_info.num_hpd = 6;
3634 		adev->mode_info.num_dig = 9;
3635 		break;
3636 	case CHIP_POLARIS11:
3637 	case CHIP_POLARIS12:
3638 		adev->mode_info.num_crtc = 5;
3639 		adev->mode_info.num_hpd = 5;
3640 		adev->mode_info.num_dig = 5;
3641 		break;
3642 	case CHIP_POLARIS10:
3643 	case CHIP_VEGAM:
3644 		adev->mode_info.num_crtc = 6;
3645 		adev->mode_info.num_hpd = 6;
3646 		adev->mode_info.num_dig = 6;
3647 		break;
3648 	case CHIP_VEGA10:
3649 	case CHIP_VEGA12:
3650 	case CHIP_VEGA20:
3651 		adev->mode_info.num_crtc = 6;
3652 		adev->mode_info.num_hpd = 6;
3653 		adev->mode_info.num_dig = 6;
3654 		break;
3655 #if defined(CONFIG_DRM_AMD_DC_DCN)
3656 	case CHIP_RAVEN:
3657 	case CHIP_RENOIR:
3658 	case CHIP_VANGOGH:
3659 		adev->mode_info.num_crtc = 4;
3660 		adev->mode_info.num_hpd = 4;
3661 		adev->mode_info.num_dig = 4;
3662 		break;
3663 	case CHIP_NAVI10:
3664 	case CHIP_NAVI12:
3665 	case CHIP_SIENNA_CICHLID:
3666 	case CHIP_NAVY_FLOUNDER:
3667 		adev->mode_info.num_crtc = 6;
3668 		adev->mode_info.num_hpd = 6;
3669 		adev->mode_info.num_dig = 6;
3670 		break;
3671 	case CHIP_NAVI14:
3672 	case CHIP_DIMGREY_CAVEFISH:
3673 		adev->mode_info.num_crtc = 5;
3674 		adev->mode_info.num_hpd = 5;
3675 		adev->mode_info.num_dig = 5;
3676 		break;
3677 #endif
3678 	default:
3679 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3680 		return -EINVAL;
3681 	}
3682 
3683 	amdgpu_dm_set_irq_funcs(adev);
3684 
3685 	if (adev->mode_info.funcs == NULL)
3686 		adev->mode_info.funcs = &dm_display_funcs;
3687 
3688 	/*
3689 	 * Note: Do NOT change adev->audio_endpt_rreg and
3690 	 * adev->audio_endpt_wreg because they are initialised in
3691 	 * amdgpu_device_init()
3692 	 */
3693 #if defined(CONFIG_DEBUG_KERNEL_DC)
3694 	device_create_file(
3695 		adev_to_drm(adev)->dev,
3696 		&dev_attr_s3_debug);
3697 #endif
3698 
3699 	return 0;
3700 }
3701 
3702 static bool modeset_required(struct drm_crtc_state *crtc_state,
3703 			     struct dc_stream_state *new_stream,
3704 			     struct dc_stream_state *old_stream)
3705 {
3706 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3707 }
3708 
3709 static bool modereset_required(struct drm_crtc_state *crtc_state)
3710 {
3711 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3712 }
3713 
3714 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3715 {
3716 	drm_encoder_cleanup(encoder);
3717 	kfree(encoder);
3718 }
3719 
3720 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3721 	.destroy = amdgpu_dm_encoder_destroy,
3722 };
3723 
3724 
3725 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3726 					 struct drm_framebuffer *fb,
3727 					 int *min_downscale, int *max_upscale)
3728 {
3729 	struct amdgpu_device *adev = drm_to_adev(dev);
3730 	struct dc *dc = adev->dm.dc;
3731 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3732 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3733 
3734 	switch (fb->format->format) {
3735 	case DRM_FORMAT_P010:
3736 	case DRM_FORMAT_NV12:
3737 	case DRM_FORMAT_NV21:
3738 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3739 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3740 		break;
3741 
3742 	case DRM_FORMAT_XRGB16161616F:
3743 	case DRM_FORMAT_ARGB16161616F:
3744 	case DRM_FORMAT_XBGR16161616F:
3745 	case DRM_FORMAT_ABGR16161616F:
3746 		*max_upscale = plane_cap->max_upscale_factor.fp16;
3747 		*min_downscale = plane_cap->max_downscale_factor.fp16;
3748 		break;
3749 
3750 	default:
3751 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
3752 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
3753 		break;
3754 	}
3755 
3756 	/*
3757 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e.,
3758 	 * use a scaling factor of 1.0 == 1000 units.
3759 	 */
3760 	if (*max_upscale == 1)
3761 		*max_upscale = 1000;
3762 
3763 	if (*min_downscale == 1)
3764 		*min_downscale = 1000;
3765 }
3766 
3767 
3768 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3769 				struct dc_scaling_info *scaling_info)
3770 {
3771 	int scale_w, scale_h, min_downscale, max_upscale;
3772 
3773 	memset(scaling_info, 0, sizeof(*scaling_info));
3774 
3775 	/* Source is 16.16 fixed point; the fractional part is ignored for now. */
3776 	scaling_info->src_rect.x = state->src_x >> 16;
3777 	scaling_info->src_rect.y = state->src_y >> 16;
3778 
3779 	scaling_info->src_rect.width = state->src_w >> 16;
3780 	if (scaling_info->src_rect.width == 0)
3781 		return -EINVAL;
3782 
3783 	scaling_info->src_rect.height = state->src_h >> 16;
3784 	if (scaling_info->src_rect.height == 0)
3785 		return -EINVAL;
3786 
3787 	scaling_info->dst_rect.x = state->crtc_x;
3788 	scaling_info->dst_rect.y = state->crtc_y;
3789 
3790 	if (state->crtc_w == 0)
3791 		return -EINVAL;
3792 
3793 	scaling_info->dst_rect.width = state->crtc_w;
3794 
3795 	if (state->crtc_h == 0)
3796 		return -EINVAL;
3797 
3798 	scaling_info->dst_rect.height = state->crtc_h;
3799 
3800 	/* DRM doesn't specify clipping on destination output. */
3801 	scaling_info->clip_rect = scaling_info->dst_rect;
3802 
3803 	/* Validate scaling per-format with DC plane caps */
3804 	if (state->plane && state->plane->dev && state->fb) {
3805 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3806 					     &min_downscale, &max_upscale);
3807 	} else {
3808 		min_downscale = 250;
3809 		max_upscale = 16000;
3810 	}
3811 
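	/*
	 * Scale factors are in units of 1/1000th of the dst/src ratio, e.g.
	 * a 1920-wide source in a 960-wide CRTC rect gives scale_w =
	 * 960 * 1000 / 1920 = 500 (0.5x), which must lie within
	 * [min_downscale, max_upscale].
	 */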
3812 	scale_w = scaling_info->dst_rect.width * 1000 /
3813 		  scaling_info->src_rect.width;
3814 
3815 	if (scale_w < min_downscale || scale_w > max_upscale)
3816 		return -EINVAL;
3817 
3818 	scale_h = scaling_info->dst_rect.height * 1000 /
3819 		  scaling_info->src_rect.height;
3820 
3821 	if (scale_h < min_downscale || scale_h > max_upscale)
3822 		return -EINVAL;
3823 
3824 	/*
3825 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3826 	 * assume reasonable defaults based on the format.
3827 	 */
3828 
3829 	return 0;
3830 }
3831 
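/*
 * Pre-GFX9 ASICs describe tiling with AMDGPU_TILING_* flags on the buffer
 * object rather than with format modifiers; unpack those flags into DC's
 * gfx8 tiling info.
 */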
3832 static void
3833 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3834 				 uint64_t tiling_flags)
3835 {
3836 	/* Fill GFX8 params */
3837 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3838 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3839 
3840 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3841 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3842 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3843 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3844 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3845 
3846 		/* XXX fix me for VI */
3847 		tiling_info->gfx8.num_banks = num_banks;
3848 		tiling_info->gfx8.array_mode =
3849 				DC_ARRAY_2D_TILED_THIN1;
3850 		tiling_info->gfx8.tile_split = tile_split;
3851 		tiling_info->gfx8.bank_width = bankw;
3852 		tiling_info->gfx8.bank_height = bankh;
3853 		tiling_info->gfx8.tile_aspect = mtaspect;
3854 		tiling_info->gfx8.tile_mode =
3855 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3856 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3857 			== DC_ARRAY_1D_TILED_THIN1) {
3858 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3859 	}
3860 
3861 	tiling_info->gfx8.pipe_config =
3862 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3863 }
3864 
3865 static void
3866 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3867 				  union dc_tiling_info *tiling_info)
3868 {
3869 	tiling_info->gfx9.num_pipes =
3870 		adev->gfx.config.gb_addr_config_fields.num_pipes;
3871 	tiling_info->gfx9.num_banks =
3872 		adev->gfx.config.gb_addr_config_fields.num_banks;
3873 	tiling_info->gfx9.pipe_interleave =
3874 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3875 	tiling_info->gfx9.num_shader_engines =
3876 		adev->gfx.config.gb_addr_config_fields.num_se;
3877 	tiling_info->gfx9.max_compressed_frags =
3878 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3879 	tiling_info->gfx9.num_rb_per_se =
3880 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3881 	tiling_info->gfx9.shaderEnable = 1;
3882 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3883 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
3884 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3885 	    adev->asic_type == CHIP_VANGOGH)
3886 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3887 }
3888 
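/*
 * Ask DC whether the surface can really be DCC-compressed as described.
 * Video formats are rejected, as are surfaces the hardware reports as
 * incapable, and DCC params missing independent-64B blocks where the
 * reported capability requires them.
 */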
3889 static int
3890 validate_dcc(struct amdgpu_device *adev,
3891 	     const enum surface_pixel_format format,
3892 	     const enum dc_rotation_angle rotation,
3893 	     const union dc_tiling_info *tiling_info,
3894 	     const struct dc_plane_dcc_param *dcc,
3895 	     const struct dc_plane_address *address,
3896 	     const struct plane_size *plane_size)
3897 {
3898 	struct dc *dc = adev->dm.dc;
3899 	struct dc_dcc_surface_param input;
3900 	struct dc_surface_dcc_cap output;
3901 
3902 	memset(&input, 0, sizeof(input));
3903 	memset(&output, 0, sizeof(output));
3904 
3905 	if (!dcc->enable)
3906 		return 0;
3907 
3908 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3909 	    !dc->cap_funcs.get_dcc_compression_cap)
3910 		return -EINVAL;
3911 
3912 	input.format = format;
3913 	input.surface_size.width = plane_size->surface_size.width;
3914 	input.surface_size.height = plane_size->surface_size.height;
3915 	input.swizzle_mode = tiling_info->gfx9.swizzle;
3916 
3917 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3918 		input.scan = SCAN_DIRECTION_HORIZONTAL;
3919 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3920 		input.scan = SCAN_DIRECTION_VERTICAL;
3921 
3922 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3923 		return -EINVAL;
3924 
3925 	if (!output.capable)
3926 		return -EINVAL;
3927 
3928 	if (dcc->independent_64b_blks == 0 &&
3929 	    output.grph.rgb.independent_64b_blks != 0)
3930 		return -EINVAL;
3931 
3932 	return 0;
3933 }
3934 
3935 static bool
3936 modifier_has_dcc(uint64_t modifier)
3937 {
3938 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3939 }
3940 
3941 static unsigned
3942 modifier_gfx9_swizzle_mode(uint64_t modifier)
3943 {
3944 	if (modifier == DRM_FORMAT_MOD_LINEAR)
3945 		return 0;
3946 
3947 	return AMD_FMT_MOD_GET(TILE, modifier);
3948 }
3949 
3950 static const struct drm_format_info *
3951 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3952 {
3953 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3954 }
3955 
3956 static void
3957 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3958 				    union dc_tiling_info *tiling_info,
3959 				    uint64_t modifier)
3960 {
3961 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3962 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3963 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3964 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3965 
3966 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
3967 
3968 	if (!IS_AMD_FMT_MOD(modifier))
3969 		return;
3970 
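	/*
	 * An AMD modifier encodes the pipe/bank layout itself, so override
	 * the device defaults filled in above: pipes_log2 is capped at 4
	 * (16 pipes) and any remaining pipe XOR bits are attributed to
	 * shader engines.
	 */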
3971 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3972 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3973 
3974 	if (adev->family >= AMDGPU_FAMILY_NV) {
3975 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3976 	} else {
3977 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3978 
3979 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3980 	}
3981 }
3982 
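/* The low two bits of a GFX9+ swizzle mode select the micro-tile ordering. */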
3983 enum dm_micro_swizzle {
3984 	MICRO_SWIZZLE_Z = 0,
3985 	MICRO_SWIZZLE_S = 1,
3986 	MICRO_SWIZZLE_D = 2,
3987 	MICRO_SWIZZLE_R = 3
3988 };
3989 
3990 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3991 					  uint32_t format,
3992 					  uint64_t modifier)
3993 {
3994 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
3995 	const struct drm_format_info *info = drm_format_info(format);
3996 
3997 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3998 
3999 	if (!info)
4000 		return false;
4001 
4002 	/*
4003 	 * We always have to allow this modifier, because core DRM still
4004 	 * checks LINEAR support if userspace does not provide modifiers.
4005 	 */
4006 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4007 		return true;
4008 
4009 	/*
4010 	 * The arbitrary tiling support for multiplane formats has not been hooked
4011 	 * up.
4012 	 */
4013 	if (info->num_planes > 1)
4014 		return false;
4015 
4016 	/*
4017 	 * For D swizzle the canonical modifier depends on the bpp, so check
4018 	 * it here.
4019 	 */
4020 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4021 	    adev->family >= AMDGPU_FAMILY_NV) {
4022 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4023 			return false;
4024 	}
4025 
4026 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4027 	    info->cpp[0] < 8)
4028 		return false;
4029 
4030 	if (modifier_has_dcc(modifier)) {
4031 		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4032 		if (info->cpp[0] != 4)
4033 			return false;
4034 	}
4035 
4036 	return true;
4037 }
4038 
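/*
 * Append one modifier to a dynamically grown array, doubling the capacity
 * when full. On allocation failure the array is freed and *mods set to NULL,
 * which later calls treat as a no-op and get_plane_modifiers() maps to
 * -ENOMEM.
 */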
4039 static void
4040 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4041 {
4042 	if (!*mods)
4043 		return;
4044 
4045 	if (*size >= *cap) {
4046 		uint64_t new_cap = *cap * 2;
4047 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4048 
4049 		if (!new_mods) {
4050 			kfree(*mods);
4051 			*mods = NULL;
4052 			return;
4053 		}
4054 
4055 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4056 		kfree(*mods);
4057 		*mods = new_mods;
4058 		*cap = new_cap;
4059 	}
4060 
4061 	(*mods)[*size] = mod;
4062 	*size += 1;
4063 }
4064 
4065 static void
4066 add_gfx9_modifiers(const struct amdgpu_device *adev,
4067 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4068 {
4069 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4070 	int pipe_xor_bits = min(8, pipes +
4071 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4072 	int bank_xor_bits = min(8 - pipe_xor_bits,
4073 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4074 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4075 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4076 
4078 	if (adev->family == AMDGPU_FAMILY_RV) {
4079 		/* Raven2 and later */
4080 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4081 
4082 		/*
4083 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4084 		 * doesn't support _D on DCN
4085 		 */
4086 
4087 		if (has_constant_encode) {
4088 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4089 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4090 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4091 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4092 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4093 				    AMD_FMT_MOD_SET(DCC, 1) |
4094 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4095 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4096 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4097 		}
4098 
4099 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4100 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4101 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4102 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4103 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4104 			    AMD_FMT_MOD_SET(DCC, 1) |
4105 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4106 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4107 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4108 
4109 		if (has_constant_encode) {
4110 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4111 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4112 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4113 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4114 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4115 				    AMD_FMT_MOD_SET(DCC, 1) |
4116 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4117 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4118 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4120 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4121 				    AMD_FMT_MOD_SET(RB, rb) |
4122 				    AMD_FMT_MOD_SET(PIPE, pipes));
4123 		}
4124 
4125 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4126 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4127 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4128 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4129 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4130 			    AMD_FMT_MOD_SET(DCC, 1) |
4131 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4132 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4133 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4134 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4135 			    AMD_FMT_MOD_SET(RB, rb) |
4136 			    AMD_FMT_MOD_SET(PIPE, pipes));
4137 	}
4138 
4139 	/*
4140 	 * Only supported for 64bpp on Raven, will be filtered on format in
4141 	 * dm_plane_format_mod_supported.
4142 	 */
4143 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4144 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4145 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4146 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4147 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4148 
4149 	if (adev->family == AMDGPU_FAMILY_RV) {
4150 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4151 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4152 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4153 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4154 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4155 	}
4156 
4157 	/*
4158 	 * Only supported for 64bpp on Raven, will be filtered on format in
4159 	 * dm_plane_format_mod_supported.
4160 	 */
4161 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4162 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4163 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4164 
4165 	if (adev->family == AMDGPU_FAMILY_RV) {
4166 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4167 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4168 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4169 	}
4170 }
4171 
4172 static void
4173 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4174 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4175 {
4176 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4177 
4178 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4179 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4180 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4181 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4182 		    AMD_FMT_MOD_SET(DCC, 1) |
4183 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4184 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4185 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4186 
4187 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4188 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4189 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4190 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4191 		    AMD_FMT_MOD_SET(DCC, 1) |
4192 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4193 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4194 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4195 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4196 
4197 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4198 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4199 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4200 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4201 
4202 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4203 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4204 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4205 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4206 
4208 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4209 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4210 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4211 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4212 
4213 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4214 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4215 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4216 }
4217 
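/*
 * GFX10.3 (RB+, Sienna Cichlid and later) additionally encodes the packer
 * count in its modifiers, and its DCC variants require independent 64B and
 * 128B blocks with a 128B max compressed block size.
 */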
4218 static void
4219 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4220 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4221 {
4222 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4223 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4224 
4225 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4226 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4227 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4228 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4229 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4230 		    AMD_FMT_MOD_SET(DCC, 1) |
4231 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4232 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4233 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4234 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4235 
4236 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4237 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4238 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4239 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4240 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4241 		    AMD_FMT_MOD_SET(DCC, 1) |
4242 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4243 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4244 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4245 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4246 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4247 
4248 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4249 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4250 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4251 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4252 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4253 
4254 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4255 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4256 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4257 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4258 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4259 
4260 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4261 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4262 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4263 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4264 
4265 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4266 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4267 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4268 }
4269 
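/*
 * Build the format-modifier list advertised for a plane. The list is
 * terminated by DRM_FORMAT_MOD_INVALID; drm_universal_plane_init() copies
 * it, so callers are expected to free the allocation once the plane is
 * initialized.
 */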
4270 static int
4271 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4272 {
4273 	uint64_t size = 0, capacity = 128;

4274 	*mods = NULL;
4275 
4276 	/* We have not hooked up any pre-GFX9 modifiers. */
4277 	if (adev->family < AMDGPU_FAMILY_AI)
4278 		return 0;
4279 
4280 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4281 
4282 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4283 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4284 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4285 		return *mods ? 0 : -ENOMEM;
4286 	}
4287 
4288 	switch (adev->family) {
4289 	case AMDGPU_FAMILY_AI:
4290 	case AMDGPU_FAMILY_RV:
4291 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4292 		break;
4293 	case AMDGPU_FAMILY_NV:
4294 	case AMDGPU_FAMILY_VGH:
4295 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4296 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4297 		else
4298 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4299 		break;
4300 	}
4301 
4302 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4303 
4304 	/* INVALID marks the end of the list. */
4305 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4306 
4307 	if (!*mods)
4308 		return -ENOMEM;
4309 
4310 	return 0;
4311 }
4312 
4313 static int
4314 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4315 					  const struct amdgpu_framebuffer *afb,
4316 					  const enum surface_pixel_format format,
4317 					  const enum dc_rotation_angle rotation,
4318 					  const struct plane_size *plane_size,
4319 					  union dc_tiling_info *tiling_info,
4320 					  struct dc_plane_dcc_param *dcc,
4321 					  struct dc_plane_address *address,
4322 					  const bool force_disable_dcc)
4323 {
4324 	const uint64_t modifier = afb->base.modifier;
4325 	int ret;
4326 
4327 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4328 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4329 
4330 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4331 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4332 
4333 		dcc->enable = 1;
4334 		dcc->meta_pitch = afb->base.pitches[1];
4335 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4336 
4337 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4338 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4339 	}
4340 
4341 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4342 	if (ret)
4343 		return ret;
4344 
4345 	return 0;
4346 }
4347 
4348 static int
4349 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4350 			     const struct amdgpu_framebuffer *afb,
4351 			     const enum surface_pixel_format format,
4352 			     const enum dc_rotation_angle rotation,
4353 			     const uint64_t tiling_flags,
4354 			     union dc_tiling_info *tiling_info,
4355 			     struct plane_size *plane_size,
4356 			     struct dc_plane_dcc_param *dcc,
4357 			     struct dc_plane_address *address,
4358 			     bool tmz_surface,
4359 			     bool force_disable_dcc)
4360 {
4361 	const struct drm_framebuffer *fb = &afb->base;
4362 	int ret;
4363 
4364 	memset(tiling_info, 0, sizeof(*tiling_info));
4365 	memset(plane_size, 0, sizeof(*plane_size));
4366 	memset(dcc, 0, sizeof(*dcc));
4367 	memset(address, 0, sizeof(*address));
4368 
4369 	address->tmz_surface = tmz_surface;
4370 
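	/*
	 * RGB surfaces use a single GRAPHICS address; video formats are
	 * split below into a luma address (plane 0) and a chroma address
	 * (plane 1).
	 */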
4371 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4372 		uint64_t addr = afb->address + fb->offsets[0];
4373 
4374 		plane_size->surface_size.x = 0;
4375 		plane_size->surface_size.y = 0;
4376 		plane_size->surface_size.width = fb->width;
4377 		plane_size->surface_size.height = fb->height;
4378 		plane_size->surface_pitch =
4379 			fb->pitches[0] / fb->format->cpp[0];
4380 
4381 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4382 		address->grph.addr.low_part = lower_32_bits(addr);
4383 		address->grph.addr.high_part = upper_32_bits(addr);
4384 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4385 		uint64_t luma_addr = afb->address + fb->offsets[0];
4386 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4387 
4388 		plane_size->surface_size.x = 0;
4389 		plane_size->surface_size.y = 0;
4390 		plane_size->surface_size.width = fb->width;
4391 		plane_size->surface_size.height = fb->height;
4392 		plane_size->surface_pitch =
4393 			fb->pitches[0] / fb->format->cpp[0];
4394 
4395 		plane_size->chroma_size.x = 0;
4396 		plane_size->chroma_size.y = 0;
4397 		/* TODO: set these based on surface format */
4398 		plane_size->chroma_size.width = fb->width / 2;
4399 		plane_size->chroma_size.height = fb->height / 2;
4400 
4401 		plane_size->chroma_pitch =
4402 			fb->pitches[1] / fb->format->cpp[1];
4403 
4404 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4405 		address->video_progressive.luma_addr.low_part =
4406 			lower_32_bits(luma_addr);
4407 		address->video_progressive.luma_addr.high_part =
4408 			upper_32_bits(luma_addr);
4409 		address->video_progressive.chroma_addr.low_part =
4410 			lower_32_bits(chroma_addr);
4411 		address->video_progressive.chroma_addr.high_part =
4412 			upper_32_bits(chroma_addr);
4413 	}
4414 
4415 	if (adev->family >= AMDGPU_FAMILY_AI) {
4416 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4417 								rotation, plane_size,
4418 								tiling_info, dcc,
4419 								address,
4420 								force_disable_dcc);
4421 		if (ret)
4422 			return ret;
4423 	} else {
4424 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4425 	}
4426 
4427 	return 0;
4428 }
4429 
4430 static void
4431 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4432 			       bool *per_pixel_alpha, bool *global_alpha,
4433 			       int *global_alpha_value)
4434 {
4435 	*per_pixel_alpha = false;
4436 	*global_alpha = false;
4437 	*global_alpha_value = 0xff;
4438 
4439 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4440 		return;
4441 
4442 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4443 		static const uint32_t alpha_formats[] = {
4444 			DRM_FORMAT_ARGB8888,
4445 			DRM_FORMAT_RGBA8888,
4446 			DRM_FORMAT_ABGR8888,
4447 		};
4448 		uint32_t format = plane_state->fb->format->format;
4449 		unsigned int i;
4450 
4451 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4452 			if (format == alpha_formats[i]) {
4453 				*per_pixel_alpha = true;
4454 				break;
4455 			}
4456 		}
4457 	}
4458 
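	/*
	 * drm_plane_state.alpha is 16 bit (0xffff == opaque) while DC takes
	 * an 8-bit global alpha, hence the >> 8 truncation below.
	 */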
4459 	if (plane_state->alpha < 0xffff) {
4460 		*global_alpha = true;
4461 		*global_alpha_value = plane_state->alpha >> 8;
4462 	}
4463 }
4464 
4465 static int
4466 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4467 			    const enum surface_pixel_format format,
4468 			    enum dc_color_space *color_space)
4469 {
4470 	bool full_range;
4471 
4472 	*color_space = COLOR_SPACE_SRGB;
4473 
4474 	/* DRM color properties only affect non-RGB formats. */
4475 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4476 		return 0;
4477 
4478 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4479 
4480 	switch (plane_state->color_encoding) {
4481 	case DRM_COLOR_YCBCR_BT601:
4482 		if (full_range)
4483 			*color_space = COLOR_SPACE_YCBCR601;
4484 		else
4485 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4486 		break;
4487 
4488 	case DRM_COLOR_YCBCR_BT709:
4489 		if (full_range)
4490 			*color_space = COLOR_SPACE_YCBCR709;
4491 		else
4492 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4493 		break;
4494 
4495 	case DRM_COLOR_YCBCR_BT2020:
4496 		if (full_range)
4497 			*color_space = COLOR_SPACE_2020_YCBCR;
4498 		else
4499 			return -EINVAL;
4500 		break;
4501 
4502 	default:
4503 		return -EINVAL;
4504 	}
4505 
4506 	return 0;
4507 }
4508 
4509 static int
4510 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4511 			    const struct drm_plane_state *plane_state,
4512 			    const uint64_t tiling_flags,
4513 			    struct dc_plane_info *plane_info,
4514 			    struct dc_plane_address *address,
4515 			    bool tmz_surface,
4516 			    bool force_disable_dcc)
4517 {
4518 	const struct drm_framebuffer *fb = plane_state->fb;
4519 	const struct amdgpu_framebuffer *afb =
4520 		to_amdgpu_framebuffer(plane_state->fb);
4521 	struct drm_format_name_buf format_name;
4522 	int ret;
4523 
4524 	memset(plane_info, 0, sizeof(*plane_info));
4525 
4526 	switch (fb->format->format) {
4527 	case DRM_FORMAT_C8:
4528 		plane_info->format =
4529 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4530 		break;
4531 	case DRM_FORMAT_RGB565:
4532 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4533 		break;
4534 	case DRM_FORMAT_XRGB8888:
4535 	case DRM_FORMAT_ARGB8888:
4536 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4537 		break;
4538 	case DRM_FORMAT_XRGB2101010:
4539 	case DRM_FORMAT_ARGB2101010:
4540 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4541 		break;
4542 	case DRM_FORMAT_XBGR2101010:
4543 	case DRM_FORMAT_ABGR2101010:
4544 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4545 		break;
4546 	case DRM_FORMAT_XBGR8888:
4547 	case DRM_FORMAT_ABGR8888:
4548 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4549 		break;
4550 	case DRM_FORMAT_NV21:
4551 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4552 		break;
4553 	case DRM_FORMAT_NV12:
4554 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4555 		break;
4556 	case DRM_FORMAT_P010:
4557 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4558 		break;
4559 	case DRM_FORMAT_XRGB16161616F:
4560 	case DRM_FORMAT_ARGB16161616F:
4561 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4562 		break;
4563 	case DRM_FORMAT_XBGR16161616F:
4564 	case DRM_FORMAT_ABGR16161616F:
4565 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4566 		break;
4567 	default:
4568 		DRM_ERROR(
4569 			"Unsupported screen format %s\n",
4570 			drm_get_format_name(fb->format->format, &format_name));
4571 		return -EINVAL;
4572 	}
4573 
4574 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4575 	case DRM_MODE_ROTATE_0:
4576 		plane_info->rotation = ROTATION_ANGLE_0;
4577 		break;
4578 	case DRM_MODE_ROTATE_90:
4579 		plane_info->rotation = ROTATION_ANGLE_90;
4580 		break;
4581 	case DRM_MODE_ROTATE_180:
4582 		plane_info->rotation = ROTATION_ANGLE_180;
4583 		break;
4584 	case DRM_MODE_ROTATE_270:
4585 		plane_info->rotation = ROTATION_ANGLE_270;
4586 		break;
4587 	default:
4588 		plane_info->rotation = ROTATION_ANGLE_0;
4589 		break;
4590 	}
4591 
4592 	plane_info->visible = true;
4593 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4594 
4595 	plane_info->layer_index = 0;
4596 
4597 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4598 					  &plane_info->color_space);
4599 	if (ret)
4600 		return ret;
4601 
4602 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4603 					   plane_info->rotation, tiling_flags,
4604 					   &plane_info->tiling_info,
4605 					   &plane_info->plane_size,
4606 					   &plane_info->dcc, address, tmz_surface,
4607 					   force_disable_dcc);
4608 	if (ret)
4609 		return ret;
4610 
4611 	fill_blending_from_plane_state(
4612 		plane_state, &plane_info->per_pixel_alpha,
4613 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4614 
4615 	return 0;
4616 }
4617 
4618 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4619 				    struct dc_plane_state *dc_plane_state,
4620 				    struct drm_plane_state *plane_state,
4621 				    struct drm_crtc_state *crtc_state)
4622 {
4623 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4624 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4625 	struct dc_scaling_info scaling_info;
4626 	struct dc_plane_info plane_info;
4627 	int ret;
4628 	bool force_disable_dcc = false;
4629 
4630 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4631 	if (ret)
4632 		return ret;
4633 
4634 	dc_plane_state->src_rect = scaling_info.src_rect;
4635 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4636 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4637 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4638 
4639 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4640 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4641 					  afb->tiling_flags,
4642 					  &plane_info,
4643 					  &dc_plane_state->address,
4644 					  afb->tmz_surface,
4645 					  force_disable_dcc);
4646 	if (ret)
4647 		return ret;
4648 
4649 	dc_plane_state->format = plane_info.format;
4650 	dc_plane_state->color_space = plane_info.color_space;
4652 	dc_plane_state->plane_size = plane_info.plane_size;
4653 	dc_plane_state->rotation = plane_info.rotation;
4654 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4655 	dc_plane_state->stereo_format = plane_info.stereo_format;
4656 	dc_plane_state->tiling_info = plane_info.tiling_info;
4657 	dc_plane_state->visible = plane_info.visible;
4658 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4659 	dc_plane_state->global_alpha = plane_info.global_alpha;
4660 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4661 	dc_plane_state->dcc = plane_info.dcc;
4662 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 */
4663 
4664 	/*
4665 	 * Always set input transfer function, since plane state is refreshed
4666 	 * every time.
4667 	 */
4668 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4669 	if (ret)
4670 		return ret;
4671 
4672 	return 0;
4673 }
4674 
4675 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4676 					   const struct dm_connector_state *dm_state,
4677 					   struct dc_stream_state *stream)
4678 {
4679 	enum amdgpu_rmx_type rmx_type;
4680 
4681 	struct rect src = { 0 }; /* viewport in composition space */
4682 	struct rect dst = { 0 }; /* stream addressable area */
4683 
4684 	/* No mode, nothing to be done. */
4685 	if (!mode)
4686 		return;
4687 
4688 	/* Full screen scaling by default */
4689 	src.width = mode->hdisplay;
4690 	src.height = mode->vdisplay;
4691 	dst.width = stream->timing.h_addressable;
4692 	dst.height = stream->timing.v_addressable;
4693 
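	/*
	 * For RMX_ASPECT (and RMX_OFF) one dst dimension shrinks to keep the
	 * source aspect ratio: e.g. a 1280x1024 mode on a 1920x1080 stream
	 * yields a 1350x1080 dst, centered by the dst.x/dst.y math below.
	 */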
4694 	if (dm_state) {
4695 		rmx_type = dm_state->scaling;
4696 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4697 			if (src.width * dst.height <
4698 					src.height * dst.width) {
4699 				/* height needs less upscaling/more downscaling */
4700 				dst.width = src.width *
4701 						dst.height / src.height;
4702 			} else {
4703 				/* width needs less upscaling/more downscaling */
4704 				dst.height = src.height *
4705 						dst.width / src.width;
4706 			}
4707 		} else if (rmx_type == RMX_CENTER) {
4708 			dst = src;
4709 		}
4710 
4711 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4712 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4713 
4714 		if (dm_state->underscan_enable) {
4715 			dst.x += dm_state->underscan_hborder / 2;
4716 			dst.y += dm_state->underscan_vborder / 2;
4717 			dst.width -= dm_state->underscan_hborder;
4718 			dst.height -= dm_state->underscan_vborder;
4719 		}
4720 	}
4721 
4722 	stream->src = src;
4723 	stream->dst = dst;
4724 
4725 	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4726 			dst.x, dst.y, dst.width, dst.height);
4728 }
4729 
4730 static enum dc_color_depth
4731 convert_color_depth_from_display_info(const struct drm_connector *connector,
4732 				      bool is_y420, int requested_bpc)
4733 {
4734 	uint8_t bpc;
4735 
4736 	if (is_y420) {
4737 		bpc = 8;
4738 
4739 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
4740 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4741 			bpc = 16;
4742 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4743 			bpc = 12;
4744 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4745 			bpc = 10;
4746 	} else {
4747 		bpc = (uint8_t)connector->display_info.bpc;
4748 		/* Assume 8 bpc by default if no bpc is specified. */
4749 		bpc = bpc ? bpc : 8;
4750 	}
4751 
4752 	if (requested_bpc > 0) {
4753 		/*
4754 		 * Cap display bpc based on the user requested value.
4755 		 *
4756 		 * The value for state->max_bpc may not be correctly updated
4757 		 * depending on when the connector gets added to the state
4758 		 * or if this was called outside of atomic check, so it
4759 		 * can't be used directly.
4760 		 */
4761 		bpc = min_t(u8, bpc, requested_bpc);
4762 
4763 		/* Round down to the nearest even number, e.g. 11 becomes 10. */
4764 		bpc = bpc - (bpc & 1);
4765 	}
4766 
4767 	switch (bpc) {
4768 	case 0:
4769 		/*
4770 		 * Temporary workaround: DRM doesn't parse color depth for
4771 		 * EDID revisions before 1.4.
4772 		 * TODO: Fix edid parsing
4773 		 */
4774 		return COLOR_DEPTH_888;
4775 	case 6:
4776 		return COLOR_DEPTH_666;
4777 	case 8:
4778 		return COLOR_DEPTH_888;
4779 	case 10:
4780 		return COLOR_DEPTH_101010;
4781 	case 12:
4782 		return COLOR_DEPTH_121212;
4783 	case 14:
4784 		return COLOR_DEPTH_141414;
4785 	case 16:
4786 		return COLOR_DEPTH_161616;
4787 	default:
4788 		return COLOR_DEPTH_UNDEFINED;
4789 	}
4790 }
4791 
4792 static enum dc_aspect_ratio
4793 get_aspect_ratio(const struct drm_display_mode *mode_in)
4794 {
4795 	/* 1-1 mapping, since both enums follow the HDMI spec. */
4796 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4797 }
4798 
4799 static enum dc_color_space
4800 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4801 {
4802 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
4803 
4804 	switch (dc_crtc_timing->pixel_encoding) {
4805 	case PIXEL_ENCODING_YCBCR422:
4806 	case PIXEL_ENCODING_YCBCR444:
4807 	case PIXEL_ENCODING_YCBCR420:
4808 	{
4809 		/*
4810 		 * Per the HDMI spec, 27.03 MHz is the separation point
4811 		 * between HDTV and SDTV; use YCbCr709 above it and
4812 		 * YCbCr601 below it.
4813 		 */
4814 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
4815 			if (dc_crtc_timing->flags.Y_ONLY)
4816 				color_space =
4817 					COLOR_SPACE_YCBCR709_LIMITED;
4818 			else
4819 				color_space = COLOR_SPACE_YCBCR709;
4820 		} else {
4821 			if (dc_crtc_timing->flags.Y_ONLY)
4822 				color_space =
4823 					COLOR_SPACE_YCBCR601_LIMITED;
4824 			else
4825 				color_space = COLOR_SPACE_YCBCR601;
4826 		}
4827 
4828 	}
4829 	break;
4830 	case PIXEL_ENCODING_RGB:
4831 		color_space = COLOR_SPACE_SRGB;
4832 		break;
4833 
4834 	default:
4835 		WARN_ON(1);
4836 		break;
4837 	}
4838 
4839 	return color_space;
4840 }
4841 
4842 static bool adjust_colour_depth_from_display_info(
4843 	struct dc_crtc_timing *timing_out,
4844 	const struct drm_display_info *info)
4845 {
4846 	enum dc_color_depth depth = timing_out->display_color_depth;
4847 	int normalized_clk;
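	/*
	 * Step down from the current depth until the TMDS bandwidth fits.
	 * The clock is normalized to kHz at an 8 bpc (24 bpp) equivalent,
	 * e.g. a pix_clk_100hz of 300000 at 10 bpc gives
	 * 30000 * 30 / 24 = 37500 kHz, checked against max_tmds_clock (kHz).
	 */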
4848 	do {
4849 		normalized_clk = timing_out->pix_clk_100hz / 10;
4850 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4851 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4852 			normalized_clk /= 2;
4853 		/* Adjust the pixel clock per the HDMI spec based on colour depth */
4854 		switch (depth) {
4855 		case COLOR_DEPTH_888:
4856 			break;
4857 		case COLOR_DEPTH_101010:
4858 			normalized_clk = (normalized_clk * 30) / 24;
4859 			break;
4860 		case COLOR_DEPTH_121212:
4861 			normalized_clk = (normalized_clk * 36) / 24;
4862 			break;
4863 		case COLOR_DEPTH_161616:
4864 			normalized_clk = (normalized_clk * 48) / 24;
4865 			break;
4866 		default:
4867 			/* The above depths are the only ones valid for HDMI. */
4868 			return false;
4869 		}
4870 		if (normalized_clk <= info->max_tmds_clock) {
4871 			timing_out->display_color_depth = depth;
4872 			return true;
4873 		}
4874 	} while (--depth > COLOR_DEPTH_666);
4875 	return false;
4876 }
4877 
4878 static void fill_stream_properties_from_drm_display_mode(
4879 	struct dc_stream_state *stream,
4880 	const struct drm_display_mode *mode_in,
4881 	const struct drm_connector *connector,
4882 	const struct drm_connector_state *connector_state,
4883 	const struct dc_stream_state *old_stream,
4884 	int requested_bpc)
4885 {
4886 	struct dc_crtc_timing *timing_out = &stream->timing;
4887 	const struct drm_display_info *info = &connector->display_info;
4888 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4889 	struct hdmi_vendor_infoframe hv_frame;
4890 	struct hdmi_avi_infoframe avi_frame;
4891 
4892 	memset(&hv_frame, 0, sizeof(hv_frame));
4893 	memset(&avi_frame, 0, sizeof(avi_frame));
4894 
4895 	timing_out->h_border_left = 0;
4896 	timing_out->h_border_right = 0;
4897 	timing_out->v_border_top = 0;
4898 	timing_out->v_border_bottom = 0;
4899 	/* TODO: un-hardcode */
4900 	if (drm_mode_is_420_only(info, mode_in)
4901 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4902 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4903 	else if (drm_mode_is_420_also(info, mode_in)
4904 			&& aconnector->force_yuv420_output)
4905 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4906 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4907 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4908 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4909 	else
4910 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4911 
4912 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4913 	timing_out->display_color_depth = convert_color_depth_from_display_info(
4914 		connector,
4915 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4916 		requested_bpc);
4917 	timing_out->scan_type = SCANNING_TYPE_NODATA;
4918 	timing_out->hdmi_vic = 0;
4919 
4920 	if (old_stream) {
4921 		timing_out->vic = old_stream->timing.vic;
4922 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4923 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4924 	} else {
4925 		timing_out->vic = drm_match_cea_mode(mode_in);
4926 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4927 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4928 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4929 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4930 	}
4931 
4932 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4933 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4934 		timing_out->vic = avi_frame.video_code;
4935 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4936 		timing_out->hdmi_vic = hv_frame.vic;
4937 	}
4938 
4939 	timing_out->h_addressable = mode_in->crtc_hdisplay;
4940 	timing_out->h_total = mode_in->crtc_htotal;
4941 	timing_out->h_sync_width =
4942 		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4943 	timing_out->h_front_porch =
4944 		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4945 	timing_out->v_total = mode_in->crtc_vtotal;
4946 	timing_out->v_addressable = mode_in->crtc_vdisplay;
4947 	timing_out->v_front_porch =
4948 		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4949 	timing_out->v_sync_width =
4950 		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4951 	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4952 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4953 
4954 	stream->output_color_space = get_output_color_space(timing_out);
4955 
4956 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4957 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4958 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4959 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4960 		    drm_mode_is_420_also(info, mode_in) &&
4961 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4962 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4963 			adjust_colour_depth_from_display_info(timing_out, info);
4964 		}
4965 	}
4966 }
4967 
4968 static void fill_audio_info(struct audio_info *audio_info,
4969 			    const struct drm_connector *drm_connector,
4970 			    const struct dc_sink *dc_sink)
4971 {
4972 	int i = 0;
4973 	int cea_revision = 0;
4974 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4975 
4976 	audio_info->manufacture_id = edid_caps->manufacturer_id;
4977 	audio_info->product_id = edid_caps->product_id;
4978 
4979 	cea_revision = drm_connector->display_info.cea_rev;
4980 
4981 	strscpy(audio_info->display_name,
4982 		edid_caps->display_name,
4983 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4984 
4985 	if (cea_revision >= 3) {
4986 		audio_info->mode_count = edid_caps->audio_mode_count;
4987 
4988 		for (i = 0; i < audio_info->mode_count; ++i) {
4989 			audio_info->modes[i].format_code =
4990 					(enum audio_format_code)
4991 					(edid_caps->audio_modes[i].format_code);
4992 			audio_info->modes[i].channel_count =
4993 					edid_caps->audio_modes[i].channel_count;
4994 			audio_info->modes[i].sample_rates.all =
4995 					edid_caps->audio_modes[i].sample_rate;
4996 			audio_info->modes[i].sample_size =
4997 					edid_caps->audio_modes[i].sample_size;
4998 		}
4999 	}
5000 
5001 	audio_info->flags.all = edid_caps->speaker_flags;
5002 
5003 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5004 	if (drm_connector->latency_present[0]) {
5005 		audio_info->video_latency = drm_connector->video_latency[0];
5006 		audio_info->audio_latency = drm_connector->audio_latency[0];
5007 	}
5008 
5009 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5011 }
5012 
5013 static void
5014 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5015 				      struct drm_display_mode *dst_mode)
5016 {
5017 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5018 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5019 	dst_mode->crtc_clock = src_mode->crtc_clock;
5020 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5021 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5022 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5023 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5024 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5025 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5026 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5027 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5028 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5029 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5030 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5031 }
5032 
5033 static void
5034 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5035 					const struct drm_display_mode *native_mode,
5036 					bool scale_enabled)
5037 {
5038 	if (scale_enabled) {
5039 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5040 	} else if (native_mode->clock == drm_mode->clock &&
5041 			native_mode->htotal == drm_mode->htotal &&
5042 			native_mode->vtotal == drm_mode->vtotal) {
5043 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5044 	} else {
5045 		/* No scaling and no amdgpu-inserted mode; nothing to patch. */
5046 	}
5047 }
5048 
5049 static struct dc_sink *
5050 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5051 {
5052 	struct dc_sink_init_data sink_init_data = { 0 };
5053 	struct dc_sink *sink = NULL;

5054 	sink_init_data.link = aconnector->dc_link;
5055 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5056 
5057 	sink = dc_sink_create(&sink_init_data);
5058 	if (!sink) {
5059 		DRM_ERROR("Failed to create sink!\n");
5060 		return NULL;
5061 	}
5062 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5063 
5064 	return sink;
5065 }
5066 
5067 static void set_multisync_trigger_params(
5068 		struct dc_stream_state *stream)
5069 {
5070 	if (stream->triggered_crtc_reset.enabled) {
5071 		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5072 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5073 	}
5074 }
5075 
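/*
 * Pick the stream with the highest refresh rate as the master; every other
 * synchronized stream resets its CRTC against it. pix_clk_100hz is in units
 * of 100 Hz, so e.g. a 1920x1080@60 timing with a 148.5 MHz pixel clock
 * (pix_clk_100hz = 1485000, h_total = 2200, v_total = 1125) gives
 * (1485000 * 100) / (2200 * 1125) = 60 Hz.
 */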
5076 static void set_master_stream(struct dc_stream_state *stream_set[],
5077 			      int stream_count)
5078 {
5079 	int j, highest_rfr = 0, master_stream = 0;
5080 
	for (j = 0; j < stream_count; j++) {
5082 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5083 			int refresh_rate = 0;
5084 
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5087 			if (refresh_rate > highest_rfr) {
5088 				highest_rfr = refresh_rate;
5089 				master_stream = j;
5090 			}
5091 		}
5092 	}
	for (j = 0; j < stream_count; j++) {
5094 		if (stream_set[j])
5095 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5096 	}
5097 }
5098 
5099 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5100 {
5101 	int i = 0;
5102 
5103 	if (context->stream_count < 2)
5104 		return;
	for (i = 0; i < context->stream_count; i++) {
5106 		if (!context->streams[i])
5107 			continue;
5108 		/*
5109 		 * TODO: add a function to read AMD VSDB bits and set
5110 		 * crtc_sync_master.multi_sync_enabled flag
5111 		 * For now it's set to false
5112 		 */
5113 		set_multisync_trigger_params(context->streams[i]);
5114 	}
5115 	set_master_stream(context->streams, context->stream_count);
5116 }
5117 
5118 static struct dc_stream_state *
5119 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5120 		       const struct drm_display_mode *drm_mode,
5121 		       const struct dm_connector_state *dm_state,
5122 		       const struct dc_stream_state *old_stream,
5123 		       int requested_bpc)
5124 {
5125 	struct drm_display_mode *preferred_mode = NULL;
5126 	struct drm_connector *drm_connector;
5127 	const struct drm_connector_state *con_state =
5128 		dm_state ? &dm_state->base : NULL;
5129 	struct dc_stream_state *stream = NULL;
5130 	struct drm_display_mode mode = *drm_mode;
5131 	bool native_mode_found = false;
5132 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5133 	int mode_refresh;
5134 	int preferred_refresh = 0;
5135 #if defined(CONFIG_DRM_AMD_DC_DCN)
5136 	struct dsc_dec_dpcd_caps dsc_caps;
5137 	uint32_t link_bandwidth_kbps;
5138 #endif
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
5141 		DRM_ERROR("aconnector is NULL!\n");
5142 		return stream;
5143 	}
5144 
5145 	drm_connector = &aconnector->base;
5146 
5147 	if (!aconnector->dc_sink) {
5148 		sink = create_fake_sink(aconnector);
5149 		if (!sink)
5150 			return stream;
5151 	} else {
5152 		sink = aconnector->dc_sink;
5153 		dc_sink_retain(sink);
5154 	}
5155 
5156 	stream = dc_create_stream_for_sink(sink);
5157 
5158 	if (stream == NULL) {
5159 		DRM_ERROR("Failed to create stream for sink!\n");
5160 		goto finish;
5161 	}
5162 
5163 	stream->dm_stream_context = aconnector;
5164 
5165 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5166 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5167 
5168 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5169 		/* Search for preferred mode */
5170 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5171 			native_mode_found = true;
5172 			break;
5173 		}
5174 	}
5175 	if (!native_mode_found)
5176 		preferred_mode = list_first_entry_or_null(
5177 				&aconnector->base.modes,
5178 				struct drm_display_mode,
5179 				head);
5180 
5181 	mode_refresh = drm_mode_vrefresh(&mode);
5182 
5183 	if (preferred_mode == NULL) {
5184 		/*
5185 		 * This may not be an error, the use case is when we have no
5186 		 * usermode calls to reset and set mode upon hotplug. In this
5187 		 * case, we call set mode ourselves to restore the previous mode
5188 		 * and the modelist may not be filled in in time.
5189 		 */
5190 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5191 	} else {
5192 		decide_crtc_timing_for_drm_display_mode(
5193 				&mode, preferred_mode,
5194 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5195 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5196 	}
5197 
5198 	if (!dm_state)
5199 		drm_mode_set_crtcinfo(&mode, 0);
5200 
5201 	/*
5202 	* If scaling is enabled and refresh rate didn't change
5203 	* we copy the vic and polarities of the old timings
5204 	*/
5205 	if (!scale || mode_refresh != preferred_refresh)
5206 		fill_stream_properties_from_drm_display_mode(stream,
5207 			&mode, &aconnector->base, con_state, NULL, requested_bpc);
5208 	else
5209 		fill_stream_properties_from_drm_display_mode(stream,
5210 			&mode, &aconnector->base, con_state, old_stream, requested_bpc);
5211 
5212 	stream->timing.flags.DSC = 0;
5213 
5214 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5215 #if defined(CONFIG_DRM_AMD_DC_DCN)
5216 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5217 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5218 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5219 				      &dsc_caps);
5220 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5221 							     dc_link_get_link_cap(aconnector->dc_link));
5222 
5223 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5224 			/* Set DSC policy according to dsc_clock_en */
5225 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5226 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5227 
5228 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5229 						  &dsc_caps,
5230 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5231 						  0,
5232 						  link_bandwidth_kbps,
5233 						  &stream->timing,
5234 						  &stream->timing.dsc_cfg))
5235 				stream->timing.flags.DSC = 1;
5236 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5237 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5238 				stream->timing.flags.DSC = 1;
5239 
5240 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5241 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5242 
5243 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5244 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5245 
5246 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5247 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5248 		}
5249 #endif
5250 	}
5251 
5252 	update_stream_scaling_settings(&mode, dm_state, stream);
5253 
5254 	fill_audio_info(
5255 		&stream->audio_info,
5256 		drm_connector,
5257 		sink);
5258 
5259 	update_stream_signal(stream, sink);
5260 
5261 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5262 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5263 
5264 	if (stream->link->psr_settings.psr_feature_enabled) {
5265 		//
5266 		// should decide stream support vsc sdp colorimetry capability
5267 		// before building vsc info packet
5268 		//
5269 		stream->use_vsc_sdp_for_colorimetry = false;
5270 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5271 			stream->use_vsc_sdp_for_colorimetry =
5272 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5273 		} else {
5274 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5275 				stream->use_vsc_sdp_for_colorimetry = true;
5276 		}
5277 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5278 	}
5279 finish:
5280 	dc_sink_release(sink);
5281 
5282 	return stream;
5283 }
5284 
5285 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5286 {
5287 	drm_crtc_cleanup(crtc);
5288 	kfree(crtc);
5289 }
5290 
5291 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5292 				  struct drm_crtc_state *state)
5293 {
5294 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5295 
	/* TODO: Destroy dc_stream objects when the stream object is flattened */
5297 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
5305 }
5306 
5307 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5308 {
5309 	struct dm_crtc_state *state;
5310 
5311 	if (crtc->state)
5312 		dm_crtc_destroy_state(crtc, crtc->state);
5313 
5314 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5315 	if (WARN_ON(!state))
5316 		return;
5317 
5318 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5319 }
5320 
5321 static struct drm_crtc_state *
5322 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5323 {
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
5330 
5331 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5332 	if (!state)
5333 		return NULL;
5334 
5335 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5336 
5337 	if (cur->stream) {
5338 		state->stream = cur->stream;
5339 		dc_stream_retain(state->stream);
5340 	}
5341 
5342 	state->active_planes = cur->active_planes;
5343 	state->vrr_infopacket = cur->vrr_infopacket;
5344 	state->abm_level = cur->abm_level;
5345 	state->vrr_supported = cur->vrr_supported;
5346 	state->freesync_config = cur->freesync_config;
5347 	state->crc_src = cur->crc_src;
5348 	state->cm_has_degamma = cur->cm_has_degamma;
5349 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5350 
	/* TODO: Duplicate dc_stream after the stream object is flattened */
5352 
5353 	return &state->base;
5354 }
5355 
5356 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5357 {
5358 	enum dc_irq_source irq_source;
5359 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5360 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5361 	int rc;
5362 
5363 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5364 
5365 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5366 
5367 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5368 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
5369 	return rc;
5370 }
5371 
5372 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5373 {
5374 	enum dc_irq_source irq_source;
5375 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5376 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5377 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5378 	struct amdgpu_display_manager *dm = &adev->dm;
5379 	int rc = 0;
5380 
5381 	if (enable) {
5382 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5383 		if (amdgpu_dm_vrr_active(acrtc_state))
5384 			rc = dm_set_vupdate_irq(crtc, true);
5385 	} else {
5386 		/* vblank irq off -> vupdate irq off */
5387 		rc = dm_set_vupdate_irq(crtc, false);
5388 	}
5389 
5390 	if (rc)
5391 		return rc;
5392 
5393 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5394 
5395 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5396 		return -EBUSY;
5397 
5398 #if defined(CONFIG_DRM_AMD_DC_DCN)
5399 	if (amdgpu_in_reset(adev))
5400 		return 0;
5401 
5402 	mutex_lock(&dm->dc_lock);
5403 
5404 	if (enable)
5405 		dm->active_vblank_irq_count++;
5406 	else
5407 		dm->active_vblank_irq_count--;
5408 
	dc_allow_idle_optimizations(adev->dm.dc,
				    dm->active_vblank_irq_count == 0);

	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n",
			 dm->active_vblank_irq_count == 0);
5415 
5416 	mutex_unlock(&dm->dc_lock);
5417 
5418 #endif
5419 	return 0;
5420 }
5421 
5422 static int dm_enable_vblank(struct drm_crtc *crtc)
5423 {
5424 	return dm_set_vblank(crtc, true);
5425 }
5426 
5427 static void dm_disable_vblank(struct drm_crtc *crtc)
5428 {
5429 	dm_set_vblank(crtc, false);
5430 }
5431 
/* Implemented only the options currently available for the driver */
5433 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5434 	.reset = dm_crtc_reset_state,
5435 	.destroy = amdgpu_dm_crtc_destroy,
5436 	.set_config = drm_atomic_helper_set_config,
5437 	.page_flip = drm_atomic_helper_page_flip,
5438 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5439 	.atomic_destroy_state = dm_crtc_destroy_state,
5440 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5441 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5442 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5443 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5444 	.enable_vblank = dm_enable_vblank,
5445 	.disable_vblank = dm_disable_vblank,
5446 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5447 };
5448 
5449 static enum drm_connector_status
5450 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5451 {
5452 	bool connected;
5453 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5454 
5455 	/*
5456 	 * Notes:
5457 	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 * makes it a bad place for *any* MST-related activity.
5460 	 */
5461 
5462 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5463 	    !aconnector->fake_enable)
5464 		connected = (aconnector->dc_sink != NULL);
5465 	else
5466 		connected = (aconnector->base.force == DRM_FORCE_ON);
5467 
5468 	update_subconnector_property(aconnector);
5469 
5470 	return (connected ? connector_status_connected :
5471 			connector_status_disconnected);
5472 }
5473 
5474 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5475 					    struct drm_connector_state *connector_state,
5476 					    struct drm_property *property,
5477 					    uint64_t val)
5478 {
5479 	struct drm_device *dev = connector->dev;
5480 	struct amdgpu_device *adev = drm_to_adev(dev);
5481 	struct dm_connector_state *dm_old_state =
5482 		to_dm_connector_state(connector->state);
5483 	struct dm_connector_state *dm_new_state =
5484 		to_dm_connector_state(connector_state);
5485 
5486 	int ret = -EINVAL;
5487 
5488 	if (property == dev->mode_config.scaling_mode_property) {
5489 		enum amdgpu_rmx_type rmx_type;
5490 
5491 		switch (val) {
5492 		case DRM_MODE_SCALE_CENTER:
5493 			rmx_type = RMX_CENTER;
5494 			break;
5495 		case DRM_MODE_SCALE_ASPECT:
5496 			rmx_type = RMX_ASPECT;
5497 			break;
5498 		case DRM_MODE_SCALE_FULLSCREEN:
5499 			rmx_type = RMX_FULL;
5500 			break;
5501 		case DRM_MODE_SCALE_NONE:
5502 		default:
5503 			rmx_type = RMX_OFF;
5504 			break;
5505 		}
5506 
5507 		if (dm_old_state->scaling == rmx_type)
5508 			return 0;
5509 
5510 		dm_new_state->scaling = rmx_type;
5511 		ret = 0;
5512 	} else if (property == adev->mode_info.underscan_hborder_property) {
5513 		dm_new_state->underscan_hborder = val;
5514 		ret = 0;
5515 	} else if (property == adev->mode_info.underscan_vborder_property) {
5516 		dm_new_state->underscan_vborder = val;
5517 		ret = 0;
5518 	} else if (property == adev->mode_info.underscan_property) {
5519 		dm_new_state->underscan_enable = val;
5520 		ret = 0;
5521 	} else if (property == adev->mode_info.abm_level_property) {
5522 		dm_new_state->abm_level = val;
5523 		ret = 0;
5524 	}
5525 
5526 	return ret;
5527 }
5528 
5529 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5530 					    const struct drm_connector_state *state,
5531 					    struct drm_property *property,
5532 					    uint64_t *val)
5533 {
5534 	struct drm_device *dev = connector->dev;
5535 	struct amdgpu_device *adev = drm_to_adev(dev);
5536 	struct dm_connector_state *dm_state =
5537 		to_dm_connector_state(state);
5538 	int ret = -EINVAL;
5539 
5540 	if (property == dev->mode_config.scaling_mode_property) {
5541 		switch (dm_state->scaling) {
5542 		case RMX_CENTER:
5543 			*val = DRM_MODE_SCALE_CENTER;
5544 			break;
5545 		case RMX_ASPECT:
5546 			*val = DRM_MODE_SCALE_ASPECT;
5547 			break;
5548 		case RMX_FULL:
5549 			*val = DRM_MODE_SCALE_FULLSCREEN;
5550 			break;
5551 		case RMX_OFF:
5552 		default:
5553 			*val = DRM_MODE_SCALE_NONE;
5554 			break;
5555 		}
5556 		ret = 0;
5557 	} else if (property == adev->mode_info.underscan_hborder_property) {
5558 		*val = dm_state->underscan_hborder;
5559 		ret = 0;
5560 	} else if (property == adev->mode_info.underscan_vborder_property) {
5561 		*val = dm_state->underscan_vborder;
5562 		ret = 0;
5563 	} else if (property == adev->mode_info.underscan_property) {
5564 		*val = dm_state->underscan_enable;
5565 		ret = 0;
5566 	} else if (property == adev->mode_info.abm_level_property) {
5567 		*val = dm_state->abm_level;
5568 		ret = 0;
5569 	}
5570 
5571 	return ret;
5572 }
5573 
5574 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5575 {
5576 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5577 
5578 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5579 }
5580 
5581 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5582 {
5583 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5584 	const struct dc_link *link = aconnector->dc_link;
5585 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5586 	struct amdgpu_display_manager *dm = &adev->dm;
5587 
5588 	/*
	 * Call only if mst_mgr was initialized before, since it's not done
5590 	 * for all connector types.
5591 	 */
5592 	if (aconnector->mst_mgr.dev)
5593 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5594 
5595 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5596 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5597 
5598 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5599 	    link->type != dc_connection_none &&
5600 	    dm->backlight_dev) {
5601 		backlight_device_unregister(dm->backlight_dev);
5602 		dm->backlight_dev = NULL;
5603 	}
5604 #endif
5605 
5606 	if (aconnector->dc_em_sink)
5607 		dc_sink_release(aconnector->dc_em_sink);
5608 	aconnector->dc_em_sink = NULL;
5609 	if (aconnector->dc_sink)
5610 		dc_sink_release(aconnector->dc_sink);
5611 	aconnector->dc_sink = NULL;
5612 
5613 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5614 	drm_connector_unregister(connector);
5615 	drm_connector_cleanup(connector);
5616 	if (aconnector->i2c) {
5617 		i2c_del_adapter(&aconnector->i2c->base);
5618 		kfree(aconnector->i2c);
5619 	}
5620 	kfree(aconnector->dm_dp_aux.aux.name);
5621 
5622 	kfree(connector);
5623 }
5624 
5625 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5626 {
5627 	struct dm_connector_state *state =
5628 		to_dm_connector_state(connector->state);
5629 
5630 	if (connector->state)
5631 		__drm_atomic_helper_connector_destroy_state(connector->state);
5632 
5633 	kfree(state);
5634 
5635 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5636 
5637 	if (state) {
5638 		state->scaling = RMX_OFF;
5639 		state->underscan_enable = false;
5640 		state->underscan_hborder = 0;
5641 		state->underscan_vborder = 0;
5642 		state->base.max_requested_bpc = 8;
5643 		state->vcpi_slots = 0;
5644 		state->pbn = 0;
5645 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5646 			state->abm_level = amdgpu_dm_abm_level;
5647 
5648 		__drm_atomic_helper_connector_reset(connector, &state->base);
5649 	}
5650 }
5651 
5652 struct drm_connector_state *
5653 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5654 {
5655 	struct dm_connector_state *state =
5656 		to_dm_connector_state(connector->state);
5657 
5658 	struct dm_connector_state *new_state =
5659 			kmemdup(state, sizeof(*state), GFP_KERNEL);
5660 
5661 	if (!new_state)
5662 		return NULL;
5663 
5664 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5665 
5666 	new_state->freesync_capable = state->freesync_capable;
5667 	new_state->abm_level = state->abm_level;
5668 	new_state->scaling = state->scaling;
5669 	new_state->underscan_enable = state->underscan_enable;
5670 	new_state->underscan_hborder = state->underscan_hborder;
5671 	new_state->underscan_vborder = state->underscan_vborder;
5672 	new_state->vcpi_slots = state->vcpi_slots;
5673 	new_state->pbn = state->pbn;
5674 	return &new_state->base;
5675 }
5676 
5677 static int
5678 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5679 {
5680 	struct amdgpu_dm_connector *amdgpu_dm_connector =
5681 		to_amdgpu_dm_connector(connector);
5682 	int r;
5683 
5684 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5685 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5686 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5687 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5688 		if (r)
5689 			return r;
5690 	}
5691 
5692 #if defined(CONFIG_DEBUG_FS)
5693 	connector_debugfs_init(amdgpu_dm_connector);
5694 #endif
5695 
5696 	return 0;
5697 }
5698 
5699 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5700 	.reset = amdgpu_dm_connector_funcs_reset,
5701 	.detect = amdgpu_dm_connector_detect,
5702 	.fill_modes = drm_helper_probe_single_connector_modes,
5703 	.destroy = amdgpu_dm_connector_destroy,
5704 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5705 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5706 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5707 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5708 	.late_register = amdgpu_dm_connector_late_register,
5709 	.early_unregister = amdgpu_dm_connector_unregister
5710 };
5711 
5712 static int get_modes(struct drm_connector *connector)
5713 {
5714 	return amdgpu_dm_connector_get_modes(connector);
5715 }
5716 
5717 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5718 {
5719 	struct dc_sink_init_data init_params = {
5720 			.link = aconnector->dc_link,
5721 			.sink_signal = SIGNAL_TYPE_VIRTUAL
5722 	};
5723 	struct edid *edid;
5724 
5725 	if (!aconnector->base.edid_blob_ptr) {
5726 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5727 				aconnector->base.name);
5728 
5729 		aconnector->base.force = DRM_FORCE_OFF;
5730 		aconnector->base.override_edid = false;
5731 		return;
5732 	}
5733 
5734 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5735 
5736 	aconnector->edid = edid;
5737 
5738 	aconnector->dc_em_sink = dc_link_add_remote_sink(
5739 		aconnector->dc_link,
5740 		(uint8_t *)edid,
5741 		(edid->extensions + 1) * EDID_LENGTH,
5742 		&init_params);
5743 
5744 	if (aconnector->base.force == DRM_FORCE_ON) {
5745 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
5746 		aconnector->dc_link->local_sink :
5747 		aconnector->dc_em_sink;
5748 		dc_sink_retain(aconnector->dc_sink);
5749 	}
5750 }
5751 
5752 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5753 {
5754 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5755 
5756 	/*
5757 	 * In case of headless boot with force on for DP managed connector
5758 	 * Those settings have to be != 0 to get initial modeset
5759 	 */
5760 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5761 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5762 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5763 	}
5764 
5765 
5766 	aconnector->base.override_edid = true;
5767 	create_eml_sink(aconnector);
5768 }
5769 
5770 static struct dc_stream_state *
5771 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5772 				const struct drm_display_mode *drm_mode,
5773 				const struct dm_connector_state *dm_state,
5774 				const struct dc_stream_state *old_stream)
5775 {
5776 	struct drm_connector *connector = &aconnector->base;
5777 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5778 	struct dc_stream_state *stream;
5779 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5780 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5781 	enum dc_status dc_result = DC_OK;
5782 
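	/*
	 * Start from the connector's max_requested_bpc (8 when there is no
	 * connector state) and, whenever DC rejects the stream, retry with
	 * the bpc lowered in steps of 2, down to a minimum of 6.
	 */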
5783 	do {
5784 		stream = create_stream_for_sink(aconnector, drm_mode,
5785 						dm_state, old_stream,
5786 						requested_bpc);
5787 		if (stream == NULL) {
5788 			DRM_ERROR("Failed to create stream for sink!\n");
5789 			break;
5790 		}
5791 
5792 		dc_result = dc_validate_stream(adev->dm.dc, stream);
5793 
5794 		if (dc_result != DC_OK) {
5795 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5796 				      drm_mode->hdisplay,
5797 				      drm_mode->vdisplay,
5798 				      drm_mode->clock,
5799 				      dc_result,
5800 				      dc_status_to_str(dc_result));
5801 
5802 			dc_stream_release(stream);
5803 			stream = NULL;
5804 			requested_bpc -= 2; /* lower bpc to retry validation */
5805 		}
5806 
5807 	} while (stream == NULL && requested_bpc >= 6);
5808 
5809 	return stream;
5810 }
5811 
5812 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5813 				   struct drm_display_mode *mode)
5814 {
5815 	int result = MODE_ERROR;
5816 	struct dc_sink *dc_sink;
5817 	/* TODO: Unhardcode stream count */
5818 	struct dc_stream_state *stream;
5819 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5820 
5821 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5822 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
5823 		return result;
5824 
5825 	/*
	 * Only run this the first time mode_valid is called to initialize
5827 	 * EDID mgmt
5828 	 */
5829 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5830 		!aconnector->dc_em_sink)
5831 		handle_edid_mgmt(aconnector);
5832 
5833 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5834 
5835 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5836 				aconnector->base.force != DRM_FORCE_ON) {
5837 		DRM_ERROR("dc_sink is NULL!\n");
5838 		goto fail;
5839 	}
5840 
5841 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5842 	if (stream) {
5843 		dc_stream_release(stream);
5844 		result = MODE_OK;
5845 	}
5846 
5847 fail:
	/* TODO: error handling */
5849 	return result;
5850 }
5851 
5852 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5853 				struct dc_info_packet *out)
5854 {
5855 	struct hdmi_drm_infoframe frame;
5856 	unsigned char buf[30]; /* 26 + 4 */
5857 	ssize_t len;
5858 	int ret, i;
5859 
5860 	memset(out, 0, sizeof(*out));
5861 
5862 	if (!state->hdr_output_metadata)
5863 		return 0;
5864 
5865 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5866 	if (ret)
5867 		return ret;
5868 
5869 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5870 	if (len < 0)
5871 		return (int)len;
5872 
5873 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
5874 	if (len != 30)
5875 		return -EINVAL;
5876 
5877 	/* Prepare the infopacket for DC. */
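	/*
	 * hdmi_drm_infoframe_pack_only() emits a 4-byte header followed by
	 * the payload: buf[0] = type (0x87), buf[1] = version,
	 * buf[2] = length (26), buf[3] = checksum, buf[4..29] = payload.
	 */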
5878 	switch (state->connector->connector_type) {
5879 	case DRM_MODE_CONNECTOR_HDMIA:
5880 		out->hb0 = 0x87; /* type */
5881 		out->hb1 = 0x01; /* version */
5882 		out->hb2 = 0x1A; /* length */
5883 		out->sb[0] = buf[3]; /* checksum */
5884 		i = 1;
5885 		break;
5886 
5887 	case DRM_MODE_CONNECTOR_DisplayPort:
5888 	case DRM_MODE_CONNECTOR_eDP:
5889 		out->hb0 = 0x00; /* sdp id, zero */
5890 		out->hb1 = 0x87; /* type */
5891 		out->hb2 = 0x1D; /* payload len - 1 */
5892 		out->hb3 = (0x13 << 2); /* sdp version */
5893 		out->sb[0] = 0x01; /* version */
5894 		out->sb[1] = 0x1A; /* length */
5895 		i = 2;
5896 		break;
5897 
5898 	default:
5899 		return -EINVAL;
5900 	}
5901 
5902 	memcpy(&out->sb[i], &buf[4], 26);
5903 	out->valid = true;
5904 
5905 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5906 		       sizeof(out->sb), false);
5907 
5908 	return 0;
5909 }
5910 
5911 static bool
5912 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5913 			  const struct drm_connector_state *new_state)
5914 {
5915 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5916 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5917 
5918 	if (old_blob != new_blob) {
5919 		if (old_blob && new_blob &&
5920 		    old_blob->length == new_blob->length)
5921 			return memcmp(old_blob->data, new_blob->data,
5922 				      old_blob->length);
5923 
5924 		return true;
5925 	}
5926 
5927 	return false;
5928 }
5929 
5930 static int
5931 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5932 				 struct drm_atomic_state *state)
5933 {
5934 	struct drm_connector_state *new_con_state =
5935 		drm_atomic_get_new_connector_state(state, conn);
5936 	struct drm_connector_state *old_con_state =
5937 		drm_atomic_get_old_connector_state(state, conn);
5938 	struct drm_crtc *crtc = new_con_state->crtc;
5939 	struct drm_crtc_state *new_crtc_state;
5940 	int ret;
5941 
5942 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
5943 
5944 	if (!crtc)
5945 		return 0;
5946 
5947 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5948 		struct dc_info_packet hdr_infopacket;
5949 
5950 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5951 		if (ret)
5952 			return ret;
5953 
5954 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5955 		if (IS_ERR(new_crtc_state))
5956 			return PTR_ERR(new_crtc_state);
5957 
5958 		/*
5959 		 * DC considers the stream backends changed if the
5960 		 * static metadata changes. Forcing the modeset also
5961 		 * gives a simple way for userspace to switch from
5962 		 * 8bpc to 10bpc when setting the metadata to enter
5963 		 * or exit HDR.
5964 		 *
5965 		 * Changing the static metadata after it's been
5966 		 * set is permissible, however. So only force a
5967 		 * modeset if we're entering or exiting HDR.
5968 		 */
5969 		new_crtc_state->mode_changed =
5970 			!old_con_state->hdr_output_metadata ||
5971 			!new_con_state->hdr_output_metadata;
5972 	}
5973 
5974 	return 0;
5975 }
5976 
5977 static const struct drm_connector_helper_funcs
5978 amdgpu_dm_connector_helper_funcs = {
5979 	/*
5980 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5981 	 * modes will be filtered by drm_mode_validate_size(), and those modes
5982 	 * are missing after user start lightdm. So we need to renew modes list.
5983 	 * in get_modes call back, not just return the modes count
5984 	 */
5985 	.get_modes = get_modes,
5986 	.mode_valid = amdgpu_dm_connector_mode_valid,
5987 	.atomic_check = amdgpu_dm_connector_atomic_check,
5988 };
5989 
5990 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5991 {
5992 }
5993 
5994 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5995 {
5996 	struct drm_atomic_state *state = new_crtc_state->state;
5997 	struct drm_plane *plane;
5998 	int num_active = 0;
5999 
6000 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6001 		struct drm_plane_state *new_plane_state;
6002 
6003 		/* Cursor planes are "fake". */
6004 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6005 			continue;
6006 
6007 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6008 
6009 		if (!new_plane_state) {
6010 			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state, which means it previously passed validation
			 * and is therefore enabled.
6014 			 */
6015 			num_active += 1;
6016 			continue;
6017 		}
6018 
6019 		/* We need a framebuffer to be considered enabled. */
6020 		num_active += (new_plane_state->fb != NULL);
6021 	}
6022 
6023 	return num_active;
6024 }
6025 
6026 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6027 					 struct drm_crtc_state *new_crtc_state)
6028 {
6029 	struct dm_crtc_state *dm_new_crtc_state =
6030 		to_dm_crtc_state(new_crtc_state);
6031 
6032 	dm_new_crtc_state->active_planes = 0;
6033 
6034 	if (!dm_new_crtc_state->stream)
6035 		return;
6036 
6037 	dm_new_crtc_state->active_planes =
6038 		count_crtc_active_planes(new_crtc_state);
6039 }
6040 
6041 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6042 				       struct drm_atomic_state *state)
6043 {
6044 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6045 									  crtc);
6046 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6047 	struct dc *dc = adev->dm.dc;
6048 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6049 	int ret = -EINVAL;
6050 
6051 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6052 
6053 	dm_update_crtc_active_planes(crtc, crtc_state);
6054 
6055 	if (unlikely(!dm_crtc_state->stream &&
6056 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6057 		WARN_ON(1);
6058 		return ret;
6059 	}
6060 
6061 	/*
6062 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6063 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6064 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6065 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6066 	 */
6067 	if (crtc_state->enable &&
6068 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6069 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6070 		return -EINVAL;
6071 	}
6072 
6073 	/* In some use cases, like reset, no stream is attached */
6074 	if (!dm_crtc_state->stream)
6075 		return 0;
6076 
6077 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6078 		return 0;
6079 
6080 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6081 	return ret;
6082 }
6083 
6084 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6085 				      const struct drm_display_mode *mode,
6086 				      struct drm_display_mode *adjusted_mode)
6087 {
6088 	return true;
6089 }
6090 
6091 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6092 	.disable = dm_crtc_helper_disable,
6093 	.atomic_check = dm_crtc_helper_atomic_check,
6094 	.mode_fixup = dm_crtc_helper_mode_fixup,
6095 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6096 };
6097 
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
6102 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}

	return 0;
}
6123 
6124 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6125 					  struct drm_crtc_state *crtc_state,
6126 					  struct drm_connector_state *conn_state)
6127 {
6128 	struct drm_atomic_state *state = crtc_state->state;
6129 	struct drm_connector *connector = conn_state->connector;
6130 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6131 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6132 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6133 	struct drm_dp_mst_topology_mgr *mst_mgr;
6134 	struct drm_dp_mst_port *mst_port;
6135 	enum dc_color_depth color_depth;
6136 	int clock, bpp = 0;
6137 	bool is_y420 = false;
6138 
6139 	if (!aconnector->port || !aconnector->dc_sink)
6140 		return 0;
6141 
6142 	mst_port = aconnector->port;
6143 	mst_mgr = &aconnector->mst_port->mst_mgr;
6144 
6145 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6146 		return 0;
6147 
6148 	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
6152 		color_depth = convert_color_depth_from_display_info(connector,
6153 								    is_y420,
6154 								    max_bpc);
6155 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
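		/* e.g. 8 bpc RGB gives bpp = 8 * 3 = 24 */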
6156 		clock = adjusted_mode->clock;
6157 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6158 	}
6159 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6160 									   mst_mgr,
6161 									   mst_port,
6162 									   dm_new_connector_state->pbn,
6163 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6164 	if (dm_new_connector_state->vcpi_slots < 0) {
6165 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6166 		return dm_new_connector_state->vcpi_slots;
6167 	}
6168 	return 0;
6169 }
6170 
6171 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6172 	.disable = dm_encoder_helper_disable,
6173 	.atomic_check = dm_encoder_helper_atomic_check
6174 };
6175 
6176 #if defined(CONFIG_DRM_AMD_DC_DCN)
6177 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6178 					    struct dc_state *dc_state)
6179 {
6180 	struct dc_stream_state *stream = NULL;
6181 	struct drm_connector *connector;
6182 	struct drm_connector_state *new_con_state, *old_con_state;
6183 	struct amdgpu_dm_connector *aconnector;
6184 	struct dm_connector_state *dm_conn_state;
6185 	int i, j, clock, bpp;
6186 	int vcpi, pbn_div, pbn = 0;
6187 
6188 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6189 
6190 		aconnector = to_amdgpu_dm_connector(connector);
6191 
6192 		if (!aconnector->port)
6193 			continue;
6194 
6195 		if (!new_con_state || !new_con_state->crtc)
6196 			continue;
6197 
6198 		dm_conn_state = to_dm_connector_state(new_con_state);
6199 
6200 		for (j = 0; j < dc_state->stream_count; j++) {
6201 			stream = dc_state->streams[j];
6202 			if (!stream)
6203 				continue;
6204 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6206 				break;
6207 
6208 			stream = NULL;
6209 		}
6210 
6211 		if (!stream)
6212 			continue;
6213 
6214 		if (stream->timing.flags.DSC != 1) {
6215 			drm_dp_mst_atomic_enable_dsc(state,
6216 						     aconnector->port,
6217 						     dm_conn_state->pbn,
6218 						     0,
6219 						     false);
6220 			continue;
6221 		}
6222 
6223 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6224 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6225 		clock = stream->timing.pix_clk_100hz / 10;
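		/*
		 * With DSC, dsc_cfg.bits_per_pixel is in units of 1/16 of a
		 * bit per pixel; passing true for the DSC argument lets
		 * drm_dp_calc_pbn_mode() account for that.
		 */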
6226 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6227 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6228 						    aconnector->port,
6229 						    pbn, pbn_div,
6230 						    true);
6231 		if (vcpi < 0)
6232 			return vcpi;
6233 
6234 		dm_conn_state->pbn = pbn;
6235 		dm_conn_state->vcpi_slots = vcpi;
6236 	}
6237 	return 0;
6238 }
6239 #endif
6240 
6241 static void dm_drm_plane_reset(struct drm_plane *plane)
6242 {
6243 	struct dm_plane_state *amdgpu_state = NULL;
6244 
6245 	if (plane->state)
6246 		plane->funcs->atomic_destroy_state(plane, plane->state);
6247 
6248 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6249 	WARN_ON(amdgpu_state == NULL);
6250 
6251 	if (amdgpu_state)
6252 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6253 }
6254 
6255 static struct drm_plane_state *
6256 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6257 {
6258 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6259 
6260 	old_dm_plane_state = to_dm_plane_state(plane->state);
6261 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6262 	if (!dm_plane_state)
6263 		return NULL;
6264 
6265 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6266 
6267 	if (old_dm_plane_state->dc_state) {
6268 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6269 		dc_plane_state_retain(dm_plane_state->dc_state);
6270 	}
6271 
6272 	return &dm_plane_state->base;
6273 }
6274 
6275 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6276 				struct drm_plane_state *state)
6277 {
6278 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6279 
6280 	if (dm_plane_state->dc_state)
6281 		dc_plane_state_release(dm_plane_state->dc_state);
6282 
6283 	drm_atomic_helper_plane_destroy_state(plane, state);
6284 }
6285 
6286 static const struct drm_plane_funcs dm_plane_funcs = {
6287 	.update_plane	= drm_atomic_helper_update_plane,
6288 	.disable_plane	= drm_atomic_helper_disable_plane,
6289 	.destroy	= drm_primary_helper_destroy,
6290 	.reset = dm_drm_plane_reset,
6291 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6292 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6293 	.format_mod_supported = dm_plane_format_mod_supported,
6294 };
6295 
6296 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6297 				      struct drm_plane_state *new_state)
6298 {
6299 	struct amdgpu_framebuffer *afb;
6300 	struct drm_gem_object *obj;
6301 	struct amdgpu_device *adev;
6302 	struct amdgpu_bo *rbo;
6303 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6304 	struct list_head list;
6305 	struct ttm_validate_buffer tv;
6306 	struct ww_acquire_ctx ticket;
6307 	uint32_t domain;
6308 	int r;
6309 
6310 	if (!new_state->fb) {
6311 		DRM_DEBUG_DRIVER("No FB bound\n");
6312 		return 0;
6313 	}
6314 
6315 	afb = to_amdgpu_framebuffer(new_state->fb);
6316 	obj = new_state->fb->obj[0];
6317 	rbo = gem_to_amdgpu_bo(obj);
6318 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6319 	INIT_LIST_HEAD(&list);
6320 
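	/* Reserve the BO, pin it to a scanout-capable domain and map it into GART. */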
6321 	tv.bo = &rbo->tbo;
6322 	tv.num_shared = 1;
6323 	list_add(&tv.head, &list);
6324 
6325 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6326 	if (r) {
6327 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6328 		return r;
6329 	}
6330 
6331 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6332 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6333 	else
6334 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6335 
6336 	r = amdgpu_bo_pin(rbo, domain);
6337 	if (unlikely(r != 0)) {
6338 		if (r != -ERESTARTSYS)
6339 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6340 		ttm_eu_backoff_reservation(&ticket, &list);
6341 		return r;
6342 	}
6343 
6344 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6345 	if (unlikely(r != 0)) {
6346 		amdgpu_bo_unpin(rbo);
6347 		ttm_eu_backoff_reservation(&ticket, &list);
6348 		DRM_ERROR("%p bind failed\n", rbo);
6349 		return r;
6350 	}
6351 
6352 	ttm_eu_backoff_reservation(&ticket, &list);
6353 
6354 	afb->address = amdgpu_bo_gpu_offset(rbo);
6355 
6356 	amdgpu_bo_ref(rbo);
6357 
6358 	/**
6359 	 * We don't do surface updates on planes that have been newly created,
6360 	 * but we also don't have the afb->address during atomic check.
6361 	 *
6362 	 * Fill in buffer attributes depending on the address here, but only on
6363 	 * newly created planes since they're not being used by DC yet and this
6364 	 * won't modify global state.
6365 	 */
6366 	dm_plane_state_old = to_dm_plane_state(plane->state);
6367 	dm_plane_state_new = to_dm_plane_state(new_state);
6368 
6369 	if (dm_plane_state_new->dc_state &&
6370 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6371 		struct dc_plane_state *plane_state =
6372 			dm_plane_state_new->dc_state;
6373 		bool force_disable_dcc = !plane_state->dcc.enable;
6374 
6375 		fill_plane_buffer_attributes(
6376 			adev, afb, plane_state->format, plane_state->rotation,
6377 			afb->tiling_flags,
6378 			&plane_state->tiling_info, &plane_state->plane_size,
6379 			&plane_state->dcc, &plane_state->address,
6380 			afb->tmz_surface, force_disable_dcc);
6381 	}
6382 
6383 	return 0;
6384 }
6385 
6386 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6387 				       struct drm_plane_state *old_state)
6388 {
6389 	struct amdgpu_bo *rbo;
6390 	int r;
6391 
6392 	if (!old_state->fb)
6393 		return;
6394 
6395 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6396 	r = amdgpu_bo_reserve(rbo, false);
6397 	if (unlikely(r)) {
6398 		DRM_ERROR("failed to reserve rbo before unpin\n");
6399 		return;
6400 	}
6401 
6402 	amdgpu_bo_unpin(rbo);
6403 	amdgpu_bo_unreserve(rbo);
6404 	amdgpu_bo_unref(&rbo);
6405 }
6406 
6407 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6408 				       struct drm_crtc_state *new_crtc_state)
6409 {
6410 	struct drm_framebuffer *fb = state->fb;
6411 	int min_downscale, max_upscale;
6412 	int min_scale = 0;
6413 	int max_scale = INT_MAX;
6414 
6415 	/* Plane enabled? Get min/max allowed scaling factors from plane caps. */
6416 	if (fb && state->crtc) {
6417 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6418 					     &min_downscale, &max_upscale);
6419 		/*
6420 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6421 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6422 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6423 		 */
6424 		min_scale = (1000 << 16) / max_upscale;
6425 		max_scale = (1000 << 16) / min_downscale;
6426 	}
6427 
6428 	return drm_atomic_helper_check_plane_state(
6429 		state, new_crtc_state, min_scale, max_scale, true, true);
6430 }
6431 
6432 static int dm_plane_atomic_check(struct drm_plane *plane,
6433 				 struct drm_plane_state *state)
6434 {
6435 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6436 	struct dc *dc = adev->dm.dc;
6437 	struct dm_plane_state *dm_plane_state;
6438 	struct dc_scaling_info scaling_info;
6439 	struct drm_crtc_state *new_crtc_state;
6440 	int ret;
6441 
6442 	trace_amdgpu_dm_plane_atomic_check(state);
6443 
6444 	dm_plane_state = to_dm_plane_state(state);
6445 
6446 	if (!dm_plane_state->dc_state)
6447 		return 0;
6448 
6449 	new_crtc_state =
6450 		drm_atomic_get_new_crtc_state(state->state, state->crtc);
6451 	if (!new_crtc_state)
6452 		return -EINVAL;
6453 
6454 	ret = dm_plane_helper_check_state(state, new_crtc_state);
6455 	if (ret)
6456 		return ret;
6457 
6458 	ret = fill_dc_scaling_info(state, &scaling_info);
6459 	if (ret)
6460 		return ret;
6461 
6462 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6463 		return 0;
6464 
6465 	return -EINVAL;
6466 }
6467 
6468 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6469 				       struct drm_plane_state *new_plane_state)
6470 {
6471 	/* Only support async updates on cursor planes. */
6472 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6473 		return -EINVAL;
6474 
6475 	return 0;
6476 }
6477 
6478 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6479 					 struct drm_plane_state *new_state)
6480 {
6481 	struct drm_plane_state *old_state =
6482 		drm_atomic_get_old_plane_state(new_state->state, plane);
6483 
6484 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6485 
6486 	swap(plane->state->fb, new_state->fb);
6487 
6488 	plane->state->src_x = new_state->src_x;
6489 	plane->state->src_y = new_state->src_y;
6490 	plane->state->src_w = new_state->src_w;
6491 	plane->state->src_h = new_state->src_h;
6492 	plane->state->crtc_x = new_state->crtc_x;
6493 	plane->state->crtc_y = new_state->crtc_y;
6494 	plane->state->crtc_w = new_state->crtc_w;
6495 	plane->state->crtc_h = new_state->crtc_h;
6496 
6497 	handle_cursor_update(plane, old_state);
6498 }
6499 
6500 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6501 	.prepare_fb = dm_plane_helper_prepare_fb,
6502 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6503 	.atomic_check = dm_plane_atomic_check,
6504 	.atomic_async_check = dm_plane_atomic_async_check,
6505 	.atomic_async_update = dm_plane_atomic_async_update
6506 };
6507 
6508 /*
6509  * TODO: these are currently initialized to rgb formats only.
6510  * For future use cases we should either initialize them dynamically based on
6511  * plane capabilities, or initialize this array to all formats, so internal drm
6512  * check will succeed, and let DC implement proper check
6513  */
6514 static const uint32_t rgb_formats[] = {
6515 	DRM_FORMAT_XRGB8888,
6516 	DRM_FORMAT_ARGB8888,
6517 	DRM_FORMAT_RGBA8888,
6518 	DRM_FORMAT_XRGB2101010,
6519 	DRM_FORMAT_XBGR2101010,
6520 	DRM_FORMAT_ARGB2101010,
6521 	DRM_FORMAT_ABGR2101010,
6522 	DRM_FORMAT_XBGR8888,
6523 	DRM_FORMAT_ABGR8888,
6524 	DRM_FORMAT_RGB565,
6525 };
6526 
6527 static const uint32_t overlay_formats[] = {
6528 	DRM_FORMAT_XRGB8888,
6529 	DRM_FORMAT_ARGB8888,
6530 	DRM_FORMAT_RGBA8888,
6531 	DRM_FORMAT_XBGR8888,
6532 	DRM_FORMAT_ABGR8888,
6533 	DRM_FORMAT_RGB565
6534 };
6535 
6536 static const u32 cursor_formats[] = {
6537 	DRM_FORMAT_ARGB8888
6538 };
6539 
6540 static int get_plane_formats(const struct drm_plane *plane,
6541 			     const struct dc_plane_cap *plane_cap,
6542 			     uint32_t *formats, int max_formats)
6543 {
6544 	int i, num_formats = 0;
6545 
6546 	/*
6547 	 * TODO: Query support for each group of formats directly from
6548 	 * DC plane caps. This will require adding more formats to the
6549 	 * caps list.
6550 	 */
6551 
6552 	switch (plane->type) {
6553 	case DRM_PLANE_TYPE_PRIMARY:
6554 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6555 			if (num_formats >= max_formats)
6556 				break;
6557 
6558 			formats[num_formats++] = rgb_formats[i];
6559 		}
6560 
6561 		if (plane_cap && plane_cap->pixel_format_support.nv12)
6562 			formats[num_formats++] = DRM_FORMAT_NV12;
6563 		if (plane_cap && plane_cap->pixel_format_support.p010)
6564 			formats[num_formats++] = DRM_FORMAT_P010;
6565 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
6566 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6567 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6568 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6569 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6570 		}
6571 		break;
6572 
6573 	case DRM_PLANE_TYPE_OVERLAY:
6574 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6575 			if (num_formats >= max_formats)
6576 				break;
6577 
6578 			formats[num_formats++] = overlay_formats[i];
6579 		}
6580 		break;
6581 
6582 	case DRM_PLANE_TYPE_CURSOR:
6583 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6584 			if (num_formats >= max_formats)
6585 				break;
6586 
6587 			formats[num_formats++] = cursor_formats[i];
6588 		}
6589 		break;
6590 	}
6591 
6592 	return num_formats;
6593 }
6594 
6595 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6596 				struct drm_plane *plane,
6597 				unsigned long possible_crtcs,
6598 				const struct dc_plane_cap *plane_cap)
6599 {
6600 	uint32_t formats[32];
6601 	int num_formats;
	int res;
6603 	unsigned int supported_rotations;
6604 	uint64_t *modifiers = NULL;
6605 
6606 	num_formats = get_plane_formats(plane, plane_cap, formats,
6607 					ARRAY_SIZE(formats));
6608 
6609 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6610 	if (res)
6611 		return res;
6612 
6613 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6614 				       &dm_plane_funcs, formats, num_formats,
6615 				       modifiers, plane->type, NULL);
6616 	kfree(modifiers);
6617 	if (res)
6618 		return res;
6619 
6620 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6621 	    plane_cap && plane_cap->per_pixel_alpha) {
6622 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6623 					  BIT(DRM_MODE_BLEND_PREMULTI);
6624 
6625 		drm_plane_create_alpha_property(plane);
6626 		drm_plane_create_blend_mode_property(plane, blend_caps);
6627 	}
6628 
6629 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6630 	    plane_cap &&
6631 	    (plane_cap->pixel_format_support.nv12 ||
6632 	     plane_cap->pixel_format_support.p010)) {
6633 		/* This only affects YUV formats. */
6634 		drm_plane_create_color_properties(
6635 			plane,
6636 			BIT(DRM_COLOR_YCBCR_BT601) |
6637 			BIT(DRM_COLOR_YCBCR_BT709) |
6638 			BIT(DRM_COLOR_YCBCR_BT2020),
6639 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6640 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6641 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6642 	}
6643 
6644 	supported_rotations =
6645 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6646 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6647 
6648 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
6649 	    plane->type != DRM_PLANE_TYPE_CURSOR)
6650 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6651 						   supported_rotations);
6652 
6653 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6654 
6655 	/* Create (reset) the plane state */
6656 	if (plane->funcs->reset)
6657 		plane->funcs->reset(plane);
6658 
6659 	return 0;
6660 }
6661 
6662 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6663 			       struct drm_plane *plane,
6664 			       uint32_t crtc_index)
6665 {
6666 	struct amdgpu_crtc *acrtc = NULL;
6667 	struct drm_plane *cursor_plane;
6668 
6669 	int res = -ENOMEM;
6670 
6671 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6672 	if (!cursor_plane)
6673 		goto fail;
6674 
6675 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
6677 
6678 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6679 	if (!acrtc)
6680 		goto fail;
6681 
6682 	res = drm_crtc_init_with_planes(
6683 			dm->ddev,
6684 			&acrtc->base,
6685 			plane,
6686 			cursor_plane,
6687 			&amdgpu_dm_crtc_funcs, NULL);
6688 
6689 	if (res)
6690 		goto fail;
6691 
6692 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6693 
6694 	/* Create (reset) the plane state */
6695 	if (acrtc->base.funcs->reset)
6696 		acrtc->base.funcs->reset(&acrtc->base);
6697 
6698 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6699 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6700 
6701 	acrtc->crtc_id = crtc_index;
6702 	acrtc->base.enabled = false;
6703 	acrtc->otg_inst = -1;
6704 
6705 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6706 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6707 				   true, MAX_COLOR_LUT_ENTRIES);
6708 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6709 
6710 	return 0;
6711 
6712 fail:
6713 	kfree(acrtc);
6714 	kfree(cursor_plane);
6715 	return res;
6716 }
6717 
6719 static int to_drm_connector_type(enum signal_type st)
6720 {
6721 	switch (st) {
6722 	case SIGNAL_TYPE_HDMI_TYPE_A:
6723 		return DRM_MODE_CONNECTOR_HDMIA;
6724 	case SIGNAL_TYPE_EDP:
6725 		return DRM_MODE_CONNECTOR_eDP;
6726 	case SIGNAL_TYPE_LVDS:
6727 		return DRM_MODE_CONNECTOR_LVDS;
6728 	case SIGNAL_TYPE_RGB:
6729 		return DRM_MODE_CONNECTOR_VGA;
6730 	case SIGNAL_TYPE_DISPLAY_PORT:
6731 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6732 		return DRM_MODE_CONNECTOR_DisplayPort;
6733 	case SIGNAL_TYPE_DVI_DUAL_LINK:
6734 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6735 		return DRM_MODE_CONNECTOR_DVID;
6736 	case SIGNAL_TYPE_VIRTUAL:
6737 		return DRM_MODE_CONNECTOR_VIRTUAL;
6738 
6739 	default:
6740 		return DRM_MODE_CONNECTOR_Unknown;
6741 	}
6742 }
6743 
6744 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6745 {
6746 	struct drm_encoder *encoder;
6747 
6748 	/* There is only one encoder per connector */
6749 	drm_connector_for_each_possible_encoder(connector, encoder)
6750 		return encoder;
6751 
6752 	return NULL;
6753 }
6754 
6755 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6756 {
6757 	struct drm_encoder *encoder;
6758 	struct amdgpu_encoder *amdgpu_encoder;
6759 
6760 	encoder = amdgpu_dm_connector_to_encoder(connector);
6761 
6762 	if (encoder == NULL)
6763 		return;
6764 
6765 	amdgpu_encoder = to_amdgpu_encoder(encoder);
6766 
6767 	amdgpu_encoder->native_mode.clock = 0;
6768 
6769 	if (!list_empty(&connector->probed_modes)) {
6770 		struct drm_display_mode *preferred_mode = NULL;
6771 
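		/*
		 * probed_modes was sorted by amdgpu_dm_connector_ddc_get_modes(),
		 * so the preferred mode, if any, is the first entry.
		 */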
6772 		list_for_each_entry(preferred_mode,
6773 				    &connector->probed_modes,
6774 				    head) {
6775 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6776 				amdgpu_encoder->native_mode = *preferred_mode;
6777 
6778 			break;
6779 		}
6780 
6781 	}
6782 }
6783 
6784 static struct drm_display_mode *
6785 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6786 			     char *name,
6787 			     int hdisplay, int vdisplay)
6788 {
6789 	struct drm_device *dev = encoder->dev;
6790 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6791 	struct drm_display_mode *mode = NULL;
6792 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6793 
6794 	mode = drm_mode_duplicate(dev, native_mode);
6795 
6796 	if (mode == NULL)
6797 		return NULL;
6798 
6799 	mode->hdisplay = hdisplay;
6800 	mode->vdisplay = vdisplay;
6801 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6802 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6803 
	return mode;
}
6807 
6808 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6809 						 struct drm_connector *connector)
6810 {
6811 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6812 	struct drm_display_mode *mode = NULL;
6813 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6814 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6815 				to_amdgpu_dm_connector(connector);
6816 	int i;
6817 	int n;
6818 	struct mode_size {
6819 		char name[DRM_DISPLAY_MODE_LEN];
6820 		int w;
6821 		int h;
6822 	} common_modes[] = {
6823 		{  "640x480",  640,  480},
6824 		{  "800x600",  800,  600},
6825 		{ "1024x768", 1024,  768},
6826 		{ "1280x720", 1280,  720},
6827 		{ "1280x800", 1280,  800},
6828 		{"1280x1024", 1280, 1024},
6829 		{ "1440x900", 1440,  900},
6830 		{"1680x1050", 1680, 1050},
6831 		{"1600x1200", 1600, 1200},
6832 		{"1920x1080", 1920, 1080},
6833 		{"1920x1200", 1920, 1200}
6834 	};
6835 
6836 	n = ARRAY_SIZE(common_modes);
6837 
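	/*
	 * Skip candidates larger than the native mode, as well as the native
	 * resolution itself, which is already in the probed list.
	 */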
6838 	for (i = 0; i < n; i++) {
6839 		struct drm_display_mode *curmode = NULL;
6840 		bool mode_existed = false;
6841 
6842 		if (common_modes[i].w > native_mode->hdisplay ||
6843 		    common_modes[i].h > native_mode->vdisplay ||
6844 		   (common_modes[i].w == native_mode->hdisplay &&
6845 		    common_modes[i].h == native_mode->vdisplay))
6846 			continue;
6847 
6848 		list_for_each_entry(curmode, &connector->probed_modes, head) {
6849 			if (common_modes[i].w == curmode->hdisplay &&
6850 			    common_modes[i].h == curmode->vdisplay) {
6851 				mode_existed = true;
6852 				break;
6853 			}
6854 		}
6855 
6856 		if (mode_existed)
6857 			continue;
6858 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
6864 	}
6865 }
6866 
6867 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6868 					      struct edid *edid)
6869 {
6870 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6871 			to_amdgpu_dm_connector(connector);
6872 
6873 	if (edid) {
		/* Empty the probed_modes list. */
6875 		INIT_LIST_HEAD(&connector->probed_modes);
6876 		amdgpu_dm_connector->num_modes =
6877 				drm_add_edid_modes(connector, edid);
6878 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can contain more
		 * than one preferred mode. Modes later in the probed list may
		 * have a higher preferred resolution: for example, 3840x2160
		 * in the base EDID preferred timing and 4096x2160 as the
		 * preferred resolution in a later DID extension block.
		 */
6887 		drm_mode_sort(&connector->probed_modes);
6888 		amdgpu_dm_get_native_mode(connector);
6889 	} else {
6890 		amdgpu_dm_connector->num_modes = 0;
6891 	}
6892 }
6893 
6894 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6895 {
6896 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6897 			to_amdgpu_dm_connector(connector);
6898 	struct drm_encoder *encoder;
6899 	struct edid *edid = amdgpu_dm_connector->edid;
6900 
6901 	encoder = amdgpu_dm_connector_to_encoder(connector);
6902 
6903 	if (!drm_edid_is_valid(edid)) {
6904 		amdgpu_dm_connector->num_modes =
6905 				drm_add_modes_noedid(connector, 640, 480);
6906 	} else {
6907 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
6908 		amdgpu_dm_connector_add_common_modes(encoder, connector);
6909 	}
6910 	amdgpu_dm_fbc_init(connector);
6911 
6912 	return amdgpu_dm_connector->num_modes;
6913 }
6914 
6915 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6916 				     struct amdgpu_dm_connector *aconnector,
6917 				     int connector_type,
6918 				     struct dc_link *link,
6919 				     int link_index)
6920 {
6921 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6922 
6923 	/*
6924 	 * Some of the properties below require access to state, like bpc.
6925 	 * Allocate some default initial connector state with our reset helper.
6926 	 */
6927 	if (aconnector->base.funcs->reset)
6928 		aconnector->base.funcs->reset(&aconnector->base);
6929 
6930 	aconnector->connector_id = link_index;
6931 	aconnector->dc_link = link;
6932 	aconnector->base.interlace_allowed = false;
6933 	aconnector->base.doublescan_allowed = false;
6934 	aconnector->base.stereo_allowed = false;
6935 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6936 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6937 	aconnector->audio_inst = -1;
6938 	mutex_init(&aconnector->hpd_lock);
6939 
6940 	/*
6941 	 * configure support HPD hot plug connector_>polled default value is 0
6942 	 * which means HPD hot plug not supported
6943 	 */
6944 	switch (connector_type) {
6945 	case DRM_MODE_CONNECTOR_HDMIA:
6946 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
6949 		break;
6950 	case DRM_MODE_CONNECTOR_DisplayPort:
6951 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported;
6954 		break;
6955 	case DRM_MODE_CONNECTOR_DVID:
6956 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6957 		break;
6958 	default:
6959 		break;
6960 	}
6961 
6962 	drm_object_attach_property(&aconnector->base.base,
6963 				dm->ddev->mode_config.scaling_mode_property,
6964 				DRM_MODE_SCALE_NONE);
6965 
6966 	drm_object_attach_property(&aconnector->base.base,
6967 				adev->mode_info.underscan_property,
6968 				UNDERSCAN_OFF);
6969 	drm_object_attach_property(&aconnector->base.base,
6970 				adev->mode_info.underscan_hborder_property,
6971 				0);
6972 	drm_object_attach_property(&aconnector->base.base,
6973 				adev->mode_info.underscan_vborder_property,
6974 				0);
6975 
6976 	if (!aconnector->mst_port)
6977 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6978 
6979 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
6980 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6981 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6982 
6983 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6984 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6985 		drm_object_attach_property(&aconnector->base.base,
6986 				adev->mode_info.abm_level_property, 0);
6987 	}
6988 
6989 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6990 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6991 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
6992 		drm_object_attach_property(
6993 			&aconnector->base.base,
6994 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
6995 
6996 		if (!aconnector->mst_port)
6997 			drm_connector_attach_vrr_capable_property(&aconnector->base);
6998 
6999 #ifdef CONFIG_DRM_AMD_DC_HDCP
7000 		if (adev->dm.hdcp_workqueue)
7001 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7002 #endif
7003 	}
7004 }
7005 
7006 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7007 			      struct i2c_msg *msgs, int num)
7008 {
7009 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7010 	struct ddc_service *ddc_service = i2c->ddc_service;
7011 	struct i2c_command cmd;
7012 	int i;
7013 	int result = -EIO;
7014 
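	/* Translate the i2c_msg array into DC's i2c_command payload format. */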
7015 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7016 
7017 	if (!cmd.payloads)
7018 		return result;
7019 
7020 	cmd.number_of_payloads = num;
7021 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7022 	cmd.speed = 100;
7023 
7024 	for (i = 0; i < num; i++) {
7025 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7026 		cmd.payloads[i].address = msgs[i].addr;
7027 		cmd.payloads[i].length = msgs[i].len;
7028 		cmd.payloads[i].data = msgs[i].buf;
7029 	}
7030 
7031 	if (dc_submit_i2c(
7032 			ddc_service->ctx->dc,
7033 			ddc_service->ddc_pin->hw_info.ddc_channel,
7034 			&cmd))
7035 		result = num;
7036 
7037 	kfree(cmd.payloads);
7038 	return result;
7039 }
7040 
7041 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7042 {
7043 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7044 }
7045 
7046 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7047 	.master_xfer = amdgpu_dm_i2c_xfer,
7048 	.functionality = amdgpu_dm_i2c_func,
7049 };
7050 
7051 static struct amdgpu_i2c_adapter *
7052 create_i2c(struct ddc_service *ddc_service,
7053 	   int link_index,
7054 	   int *res)
7055 {
7056 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7057 	struct amdgpu_i2c_adapter *i2c;
7058 
7059 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7060 	if (!i2c)
7061 		return NULL;
7062 	i2c->base.owner = THIS_MODULE;
7063 	i2c->base.class = I2C_CLASS_DDC;
7064 	i2c->base.dev.parent = &adev->pdev->dev;
7065 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7066 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7067 	i2c_set_adapdata(&i2c->base, i2c);
7068 	i2c->ddc_service = ddc_service;
7069 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7070 
7071 	return i2c;
7072 }
7073 
7074 
7075 /*
7076  * Note: this function assumes that dc_link_detect() was called for the
7077  * dc_link which will be represented by this aconnector.
7078  */
7079 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7080 				    struct amdgpu_dm_connector *aconnector,
7081 				    uint32_t link_index,
7082 				    struct amdgpu_encoder *aencoder)
7083 {
7084 	int res = 0;
7085 	int connector_type;
7086 	struct dc *dc = dm->dc;
7087 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7088 	struct amdgpu_i2c_adapter *i2c;
7089 
7090 	link->priv = aconnector;
7091 
7092 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7093 
7094 	i2c = create_i2c(link->ddc, link->link_index, &res);
7095 	if (!i2c) {
7096 		DRM_ERROR("Failed to create i2c adapter data\n");
7097 		return -ENOMEM;
7098 	}
7099 
7100 	aconnector->i2c = i2c;
7101 	res = i2c_add_adapter(&i2c->base);
7102 
7103 	if (res) {
7104 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7105 		goto out_free;
7106 	}
7107 
7108 	connector_type = to_drm_connector_type(link->connector_signal);
7109 
7110 	res = drm_connector_init_with_ddc(
7111 			dm->ddev,
7112 			&aconnector->base,
7113 			&amdgpu_dm_connector_funcs,
7114 			connector_type,
7115 			&i2c->base);
7116 
7117 	if (res) {
7118 		DRM_ERROR("connector_init failed\n");
7119 		aconnector->connector_id = -1;
7120 		goto out_free;
7121 	}
7122 
7123 	drm_connector_helper_add(
7124 			&aconnector->base,
7125 			&amdgpu_dm_connector_helper_funcs);
7126 
7127 	amdgpu_dm_connector_init_helper(
7128 		dm,
7129 		aconnector,
7130 		connector_type,
7131 		link,
7132 		link_index);
7133 
7134 	drm_connector_attach_encoder(
7135 		&aconnector->base, &aencoder->base);
7136 
7137 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7138 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7139 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7140 
7141 out_free:
7142 	if (res) {
7143 		kfree(i2c);
7144 		aconnector->i2c = NULL;
7145 	}
7146 	return res;
7147 }
7148 
7149 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7150 {
7151 	switch (adev->mode_info.num_crtc) {
7152 	case 1:
7153 		return 0x1;
7154 	case 2:
7155 		return 0x3;
7156 	case 3:
7157 		return 0x7;
7158 	case 4:
7159 		return 0xf;
7160 	case 5:
7161 		return 0x1f;
7162 	case 6:
7163 	default:
7164 		return 0x3f;
7165 	}
7166 }
7167 
7168 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7169 				  struct amdgpu_encoder *aencoder,
7170 				  uint32_t link_index)
7171 {
7172 	struct amdgpu_device *adev = drm_to_adev(dev);
7173 
7174 	int res = drm_encoder_init(dev,
7175 				   &aencoder->base,
7176 				   &amdgpu_dm_encoder_funcs,
7177 				   DRM_MODE_ENCODER_TMDS,
7178 				   NULL);
7179 
7180 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7181 
7182 	if (!res)
7183 		aencoder->encoder_id = link_index;
7184 	else
7185 		aencoder->encoder_id = -1;
7186 
7187 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7188 
7189 	return res;
7190 }
7191 
7192 static void manage_dm_interrupts(struct amdgpu_device *adev,
7193 				 struct amdgpu_crtc *acrtc,
7194 				 bool enable)
7195 {
7196 	/*
7197 	 * We have no guarantee that the frontend index maps to the same
7198 	 * backend index - some even map to more than one.
7199 	 *
7200 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7201 	 */
7202 	int irq_type =
7203 		amdgpu_display_crtc_idx_to_irq_type(
7204 			adev,
7205 			acrtc->crtc_id);
7206 
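	/*
	 * Note the ordering: vblank is enabled before the pageflip IRQ
	 * reference is taken, and disabled only after that reference is
	 * dropped.
	 */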
7207 	if (enable) {
7208 		drm_crtc_vblank_on(&acrtc->base);
7209 		amdgpu_irq_get(
7210 			adev,
7211 			&adev->pageflip_irq,
7212 			irq_type);
	} else {
7215 		amdgpu_irq_put(
7216 			adev,
7217 			&adev->pageflip_irq,
7218 			irq_type);
7219 		drm_crtc_vblank_off(&acrtc->base);
7220 	}
7221 }
7222 
7223 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7224 				      struct amdgpu_crtc *acrtc)
7225 {
7226 	int irq_type =
7227 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7228 
7229 	/**
7230 	 * This reads the current state for the IRQ and force reapplies
7231 	 * the setting to hardware.
7232 	 */
7233 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7234 }
7235 
7236 static bool
7237 is_scaling_state_different(const struct dm_connector_state *dm_state,
7238 			   const struct dm_connector_state *old_dm_state)
7239 {
7240 	if (dm_state->scaling != old_dm_state->scaling)
7241 		return true;
7242 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7243 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7244 			return true;
7245 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7246 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7247 			return true;
7248 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7249 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7250 		return true;
7251 	return false;
7252 }
7253 
7254 #ifdef CONFIG_DRM_AMD_DC_HDCP
7255 static bool is_content_protection_different(struct drm_connector_state *state,
7256 					    const struct drm_connector_state *old_state,
7257 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7258 {
7259 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7260 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7261 
7262 	/* Handle: Type0/1 change */
7263 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7264 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7265 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7266 		return true;
7267 	}
7268 
	/* CP is being re-enabled, ignore this.
7270 	 *
7271 	 * Handles:	ENABLED -> DESIRED
7272 	 */
7273 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7274 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7275 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7276 		return false;
7277 	}
7278 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7280 	 *
7281 	 * Handles:	UNDESIRED -> ENABLED
7282 	 */
7283 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7284 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7285 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7286 
	/* Check if something is connected or enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
7289 	 *
7290 	 * Handles:	DESIRED -> DESIRED (Special case)
7291 	 */
7292 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7293 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7294 		dm_con_state->update_hdcp = false;
7295 		return true;
7296 	}
7297 
7298 	/*
7299 	 * Handles:	UNDESIRED -> UNDESIRED
7300 	 *		DESIRED -> DESIRED
7301 	 *		ENABLED -> ENABLED
7302 	 */
7303 	if (old_state->content_protection == state->content_protection)
7304 		return false;
7305 
7306 	/*
7307 	 * Handles:	UNDESIRED -> DESIRED
7308 	 *		DESIRED -> UNDESIRED
7309 	 *		ENABLED -> UNDESIRED
7310 	 */
7311 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7312 		return true;
7313 
7314 	/*
7315 	 * Handles:	DESIRED -> ENABLED
7316 	 */
7317 	return false;
7318 }
7319 
7320 #endif
7321 static void remove_stream(struct amdgpu_device *adev,
7322 			  struct amdgpu_crtc *acrtc,
7323 			  struct dc_stream_state *stream)
7324 {
	/* This is the mode-update case. */
7326 
7327 	acrtc->otg_inst = -1;
7328 	acrtc->enabled = false;
7329 }
7330 
7331 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7332 			       struct dc_cursor_position *position)
7333 {
7334 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7335 	int x, y;
7336 	int xorigin = 0, yorigin = 0;
7337 
7338 	position->enable = false;
7339 	position->x = 0;
7340 	position->y = 0;
7341 
7342 	if (!crtc || !plane->state->fb)
7343 		return 0;
7344 
7345 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7346 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7347 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7348 			  __func__,
7349 			  plane->state->crtc_w,
7350 			  plane->state->crtc_h);
7351 		return -EINVAL;
7352 	}
7353 
7354 	x = plane->state->crtc_x;
7355 	y = plane->state->crtc_y;
7356 
7357 	if (x <= -amdgpu_crtc->max_cursor_width ||
7358 	    y <= -amdgpu_crtc->max_cursor_height)
7359 		return 0;
7360 
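	/*
	 * A cursor partially off the top/left edge is handled by clamping its
	 * position to 0 and moving the hotspot by the clipped amount.
	 */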
7361 	if (x < 0) {
7362 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7363 		x = 0;
7364 	}
7365 	if (y < 0) {
7366 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7367 		y = 0;
7368 	}
7369 	position->enable = true;
7370 	position->translate_by_source = true;
7371 	position->x = x;
7372 	position->y = y;
7373 	position->x_hotspot = xorigin;
7374 	position->y_hotspot = yorigin;
7375 
7376 	return 0;
7377 }
7378 
7379 static void handle_cursor_update(struct drm_plane *plane,
7380 				 struct drm_plane_state *old_plane_state)
7381 {
7382 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7383 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7384 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7385 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7386 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7387 	uint64_t address = afb ? afb->address : 0;
7388 	struct dc_cursor_position position;
7389 	struct dc_cursor_attributes attributes;
7390 	int ret;
7391 
7392 	if (!plane->state->fb && !old_plane_state->fb)
7393 		return;
7394 
7395 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7396 			 __func__,
7397 			 amdgpu_crtc->crtc_id,
7398 			 plane->state->crtc_w,
7399 			 plane->state->crtc_h);
7400 
7401 	ret = get_cursor_position(plane, crtc, &position);
7402 	if (ret)
7403 		return;
7404 
7405 	if (!position.enable) {
7406 		/* turn off cursor */
7407 		if (crtc_state && crtc_state->stream) {
7408 			mutex_lock(&adev->dm.dc_lock);
7409 			dc_stream_set_cursor_position(crtc_state->stream,
7410 						      &position);
7411 			mutex_unlock(&adev->dm.dc_lock);
7412 		}
7413 		return;
7414 	}
7415 
7416 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7417 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7418 
7419 	memset(&attributes, 0, sizeof(attributes));
7420 	attributes.address.high_part = upper_32_bits(address);
7421 	attributes.address.low_part  = lower_32_bits(address);
7422 	attributes.width             = plane->state->crtc_w;
7423 	attributes.height            = plane->state->crtc_h;
7424 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7425 	attributes.rotation_angle    = 0;
7426 	attributes.attribute_flags.value = 0;
7427 
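	/* DC expects the pitch in pixels, while DRM stores it in bytes. */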
7428 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7429 
7430 	if (crtc_state->stream) {
7431 		mutex_lock(&adev->dm.dc_lock);
7432 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7433 							 &attributes))
7434 			DRM_ERROR("DC failed to set cursor attributes\n");
7435 
7436 		if (!dc_stream_set_cursor_position(crtc_state->stream,
7437 						   &position))
7438 			DRM_ERROR("DC failed to set cursor position\n");
7439 		mutex_unlock(&adev->dm.dc_lock);
7440 	}
7441 }
7442 
7443 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7444 {
7445 
7446 	assert_spin_locked(&acrtc->base.dev->event_lock);
7447 	WARN_ON(acrtc->event);
7448 
7449 	acrtc->event = acrtc->base.state->event;
7450 
7451 	/* Set the flip status */
7452 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7453 
7454 	/* Mark this event as consumed */
7455 	acrtc->base.state->event = NULL;
7456 
7457 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7458 						 acrtc->crtc_id);
7459 }
7460 
7461 static void update_freesync_state_on_stream(
7462 	struct amdgpu_display_manager *dm,
7463 	struct dm_crtc_state *new_crtc_state,
7464 	struct dc_stream_state *new_stream,
7465 	struct dc_plane_state *surface,
7466 	u32 flip_timestamp_in_us)
7467 {
7468 	struct mod_vrr_params vrr_params;
7469 	struct dc_info_packet vrr_infopacket = {0};
7470 	struct amdgpu_device *adev = dm->adev;
7471 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7472 	unsigned long flags;
7473 
7474 	if (!new_stream)
7475 		return;
7476 
7477 	/*
7478 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7479 	 * For now it's sufficient to just guard against these conditions.
7480 	 */
7481 
7482 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7483 		return;
7484 
7485 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
7487 
7488 	if (surface) {
7489 		mod_freesync_handle_preflip(
7490 			dm->freesync_module,
7491 			surface,
7492 			new_stream,
7493 			flip_timestamp_in_us,
7494 			&vrr_params);
7495 
7496 		if (adev->family < AMDGPU_FAMILY_AI &&
7497 		    amdgpu_dm_vrr_active(new_crtc_state)) {
7498 			mod_freesync_handle_v_update(dm->freesync_module,
7499 						     new_stream, &vrr_params);
7500 
7501 			/* Need to call this before the frame ends. */
7502 			dc_stream_adjust_vmin_vmax(dm->dc,
7503 						   new_crtc_state->stream,
7504 						   &vrr_params.adjust);
7505 		}
7506 	}
7507 
7508 	mod_freesync_build_vrr_infopacket(
7509 		dm->freesync_module,
7510 		new_stream,
7511 		&vrr_params,
7512 		PACKET_TYPE_VRR,
7513 		TRANSFER_FUNC_UNKNOWN,
7514 		&vrr_infopacket);
7515 
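	/*
	 * Record whether the timing adjustment or the VRR infopacket actually
	 * changed, so that later commit logic can tell whether a stream
	 * update is needed.
	 */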
7516 	new_crtc_state->freesync_timing_changed |=
7517 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7518 			&vrr_params.adjust,
7519 			sizeof(vrr_params.adjust)) != 0);
7520 
7521 	new_crtc_state->freesync_vrr_info_changed |=
7522 		(memcmp(&new_crtc_state->vrr_infopacket,
7523 			&vrr_infopacket,
7524 			sizeof(vrr_infopacket)) != 0);
7525 
7526 	acrtc->dm_irq_params.vrr_params = vrr_params;
7527 	new_crtc_state->vrr_infopacket = vrr_infopacket;
7528 
7529 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7530 	new_stream->vrr_infopacket = vrr_infopacket;
7531 
7532 	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
7534 			      new_crtc_state->base.crtc->base.id,
7535 			      (int)new_crtc_state->base.vrr_enabled,
7536 			      (int)vrr_params.state);
7537 
7538 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7539 }
7540 
7541 static void update_stream_irq_parameters(
7542 	struct amdgpu_display_manager *dm,
7543 	struct dm_crtc_state *new_crtc_state)
7544 {
7545 	struct dc_stream_state *new_stream = new_crtc_state->stream;
7546 	struct mod_vrr_params vrr_params;
7547 	struct mod_freesync_config config = new_crtc_state->freesync_config;
7548 	struct amdgpu_device *adev = dm->adev;
7549 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7550 	unsigned long flags;
7551 
7552 	if (!new_stream)
7553 		return;
7554 
7555 	/*
7556 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7557 	 * For now it's sufficient to just guard against these conditions.
7558 	 */
7559 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7560 		return;
7561 
7562 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7563 	vrr_params = acrtc->dm_irq_params.vrr_params;
7564 
7565 	if (new_crtc_state->vrr_supported &&
7566 	    config.min_refresh_in_uhz &&
7567 	    config.max_refresh_in_uhz) {
7568 		config.state = new_crtc_state->base.vrr_enabled ?
7569 			VRR_STATE_ACTIVE_VARIABLE :
7570 			VRR_STATE_INACTIVE;
7571 	} else {
7572 		config.state = VRR_STATE_UNSUPPORTED;
7573 	}
7574 
7575 	mod_freesync_build_vrr_params(dm->freesync_module,
7576 				      new_stream,
7577 				      &config, &vrr_params);
7578 
7579 	new_crtc_state->freesync_timing_changed |=
7580 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7581 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7582 
7583 	new_crtc_state->freesync_config = config;
7584 	/* Copy state for access from DM IRQ handler */
7585 	acrtc->dm_irq_params.freesync_config = config;
7586 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7587 	acrtc->dm_irq_params.vrr_params = vrr_params;
7588 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7589 }
7590 
7591 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7592 					    struct dm_crtc_state *new_state)
7593 {
7594 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7595 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7596 
7597 	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a re-enable after a disable would compute bogus vblank/pflip
		 * timestamps if the re-enable happened inside the display
		 * front porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at end of vblank.
		 */
7606 		dm_set_vupdate_irq(new_state->base.crtc, true);
7607 		drm_crtc_vblank_get(new_state->base.crtc);
7608 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7609 				 __func__, new_state->base.crtc->base.id);
7610 	} else if (old_vrr_active && !new_vrr_active) {
7611 		/* Transition VRR active -> inactive:
7612 		 * Allow vblank irq disable again for fixed refresh rate.
7613 		 */
7614 		dm_set_vupdate_irq(new_state->base.crtc, false);
7615 		drm_crtc_vblank_put(new_state->base.crtc);
7616 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7617 				 __func__, new_state->base.crtc->base.id);
7618 	}
7619 }
7620 
7621 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7622 {
7623 	struct drm_plane *plane;
7624 	struct drm_plane_state *old_plane_state, *new_plane_state;
7625 	int i;
7626 
7627 	/*
7628 	 * TODO: Make this per-stream so we don't issue redundant updates for
7629 	 * commits with multiple streams.
7630 	 */
7631 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7632 				       new_plane_state, i)
7633 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7634 			handle_cursor_update(plane, old_plane_state);
7635 }
7636 
7637 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7638 				    struct dc_state *dc_state,
7639 				    struct drm_device *dev,
7640 				    struct amdgpu_display_manager *dm,
7641 				    struct drm_crtc *pcrtc,
7642 				    bool wait_for_vblank)
7643 {
7644 	uint32_t i;
7645 	uint64_t timestamp_ns;
7646 	struct drm_plane *plane;
7647 	struct drm_plane_state *old_plane_state, *new_plane_state;
7648 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7649 	struct drm_crtc_state *new_pcrtc_state =
7650 			drm_atomic_get_new_crtc_state(state, pcrtc);
7651 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7652 	struct dm_crtc_state *dm_old_crtc_state =
7653 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7654 	int planes_count = 0, vpos, hpos;
7655 	long r;
7656 	unsigned long flags;
7657 	struct amdgpu_bo *abo;
7658 	uint32_t target_vblank, last_flip_vblank;
7659 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7660 	bool pflip_present = false;
7661 	struct {
7662 		struct dc_surface_update surface_updates[MAX_SURFACES];
7663 		struct dc_plane_info plane_infos[MAX_SURFACES];
7664 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7665 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7666 		struct dc_stream_update stream_update;
7667 	} *bundle;
7668 
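	/*
	 * The bundle is heap-allocated; with MAX_SURFACES entries in each
	 * array it is presumably too large to place on the kernel stack.
	 */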
7669 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7670 
7671 	if (!bundle) {
7672 		dm_error("Failed to allocate update bundle\n");
7673 		goto cleanup;
7674 	}
7675 
7676 	/*
7677 	 * Disable the cursor first if we're disabling all the planes.
7678 	 * It'll remain on the screen after the planes are re-enabled
7679 	 * if we don't.
7680 	 */
7681 	if (acrtc_state->active_planes == 0)
7682 		amdgpu_dm_commit_cursors(state);
7683 
7684 	/* update planes when needed */
7685 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7686 		struct drm_crtc *crtc = new_plane_state->crtc;
7687 		struct drm_crtc_state *new_crtc_state;
7688 		struct drm_framebuffer *fb = new_plane_state->fb;
7689 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7690 		bool plane_needs_flip;
7691 		struct dc_plane_state *dc_plane;
7692 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7693 
7694 		/* Cursor plane is handled after stream updates */
7695 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7696 			continue;
7697 
7698 		if (!fb || !crtc || pcrtc != crtc)
7699 			continue;
7700 
7701 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7702 		if (!new_crtc_state->active)
7703 			continue;
7704 
7705 		dc_plane = dm_new_plane_state->dc_state;
7706 
7707 		bundle->surface_updates[planes_count].surface = dc_plane;
7708 		if (new_pcrtc_state->color_mgmt_changed) {
7709 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7710 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7711 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7712 		}
7713 
7714 		fill_dc_scaling_info(new_plane_state,
7715 				     &bundle->scaling_infos[planes_count]);
7716 
7717 		bundle->surface_updates[planes_count].scaling_info =
7718 			&bundle->scaling_infos[planes_count];
7719 
7720 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7721 
7722 		pflip_present = pflip_present || plane_needs_flip;
7723 
7724 		if (!plane_needs_flip) {
7725 			planes_count += 1;
7726 			continue;
7727 		}
7728 
7729 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7730 
7731 		/*
7732 		 * Wait for all fences on this FB. Do limited wait to avoid
7733 		 * deadlock during GPU reset when this fence will not signal
7734 		 * but we hold reservation lock for the BO.
7735 		 */
7736 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7737 							false,
7738 							msecs_to_jiffies(5000));
7739 		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!\n");
7741 
7742 		fill_dc_plane_info_and_addr(
7743 			dm->adev, new_plane_state,
7744 			afb->tiling_flags,
7745 			&bundle->plane_infos[planes_count],
7746 			&bundle->flip_addrs[planes_count].address,
7747 			afb->tmz_surface, false);
7748 
7749 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7750 				 new_plane_state->plane->index,
7751 				 bundle->plane_infos[planes_count].dcc.enable);
7752 
7753 		bundle->surface_updates[planes_count].plane_info =
7754 			&bundle->plane_infos[planes_count];
7755 
7756 		/*
7757 		 * Only allow immediate flips for fast updates that don't
		 * change the FB pitch, DCC state, rotation or mirroring.
7759 		 */
7760 		bundle->flip_addrs[planes_count].flip_immediate =
7761 			crtc->state->async_flip &&
7762 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7763 
7764 		timestamp_ns = ktime_get_ns();
7765 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7766 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7767 		bundle->surface_updates[planes_count].surface = dc_plane;
7768 
7769 		if (!bundle->surface_updates[planes_count].surface) {
7770 			DRM_ERROR("No surface for CRTC: id=%d\n",
7771 					acrtc_attach->crtc_id);
7772 			continue;
7773 		}
7774 
7775 		if (plane == pcrtc->primary)
7776 			update_freesync_state_on_stream(
7777 				dm,
7778 				acrtc_state,
7779 				acrtc_state->stream,
7780 				dc_plane,
7781 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7782 
7783 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7784 				 __func__,
7785 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7786 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7787 
		planes_count += 1;
	}
7791 
7792 	if (pflip_present) {
7793 		if (!vrr_active) {
7794 			/* Use old throttling in non-vrr fixed refresh rate mode
7795 			 * to keep flip scheduling based on target vblank counts
7796 			 * working in a backwards compatible way, e.g., for
7797 			 * clients using the GLX_OML_sync_control extension or
7798 			 * DRI3/Present extension with defined target_msc.
7799 			 */
7800 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7801 		}
7802 		else {
7803 			/* For variable refresh rate mode only:
7804 			 * Get vblank of last completed flip to avoid > 1 vrr
7805 			 * flips per video frame by use of throttling, but allow
7806 			 * flip programming anywhere in the possibly large
7807 			 * variable vrr vblank interval for fine-grained flip
7808 			 * timing control and more opportunity to avoid stutter
7809 			 * on late submission of flips.
7810 			 */
7811 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7812 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7813 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7814 		}
7815 
7816 		target_vblank = last_flip_vblank + wait_for_vblank;
7817 
7818 		/*
7819 		 * Wait until we're out of the vertical blank period before the one
7820 		 * targeted by the flip
7821 		 */
7822 		while ((acrtc_attach->enabled &&
7823 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7824 							    0, &vpos, &hpos, NULL,
7825 							    NULL, &pcrtc->hwmode)
7826 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7827 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7828 			(int)(target_vblank -
7829 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7830 			usleep_range(1000, 1100);
7831 		}
7832 
7833 		/**
7834 		 * Prepare the flip event for the pageflip interrupt to handle.
7835 		 *
7836 		 * This only works in the case where we've already turned on the
7837 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
7838 		 * from 0 -> n planes we have to skip a hardware generated event
7839 		 * and rely on sending it from software.
7840 		 */
7841 		if (acrtc_attach->base.state->event &&
7842 		    acrtc_state->active_planes > 0) {
7843 			drm_crtc_vblank_get(pcrtc);
7844 
7845 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7846 
7847 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7848 			prepare_flip_isr(acrtc_attach);
7849 
7850 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7851 		}
7852 
7853 		if (acrtc_state->stream) {
7854 			if (acrtc_state->freesync_vrr_info_changed)
7855 				bundle->stream_update.vrr_infopacket =
7856 					&acrtc_state->stream->vrr_infopacket;
7857 		}
7858 	}
7859 
7860 	/* Update the planes if changed or disable if we don't have any. */
7861 	if ((planes_count || acrtc_state->active_planes == 0) &&
7862 		acrtc_state->stream) {
7863 		bundle->stream_update.stream = acrtc_state->stream;
7864 		if (new_pcrtc_state->mode_changed) {
7865 			bundle->stream_update.src = acrtc_state->stream->src;
7866 			bundle->stream_update.dst = acrtc_state->stream->dst;
7867 		}
7868 
7869 		if (new_pcrtc_state->color_mgmt_changed) {
7870 			/*
7871 			 * TODO: This isn't fully correct since we've actually
7872 			 * already modified the stream in place.
7873 			 */
7874 			bundle->stream_update.gamut_remap =
7875 				&acrtc_state->stream->gamut_remap_matrix;
7876 			bundle->stream_update.output_csc_transform =
7877 				&acrtc_state->stream->csc_color_matrix;
7878 			bundle->stream_update.out_transfer_func =
7879 				acrtc_state->stream->out_transfer_func;
7880 		}
7881 
7882 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7883 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7884 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7885 
7886 		/*
7887 		 * If FreeSync state on the stream has changed then we need to
7888 		 * re-adjust the min/max bounds now that DC doesn't handle this
7889 		 * as part of commit.
7890 		 */
7891 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7892 		    amdgpu_dm_vrr_active(acrtc_state)) {
7893 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7894 			dc_stream_adjust_vmin_vmax(
7895 				dm->dc, acrtc_state->stream,
7896 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7897 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7898 		}
7899 		mutex_lock(&dm->dc_lock);
7900 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7901 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7902 			amdgpu_dm_psr_disable(acrtc_state->stream);
7903 
7904 		dc_commit_updates_for_stream(dm->dc,
7905 						     bundle->surface_updates,
7906 						     planes_count,
7907 						     acrtc_state->stream,
7908 						     &bundle->stream_update,
7909 						     dc_state);
7910 
7911 		/**
7912 		 * Enable or disable the interrupts on the backend.
7913 		 *
7914 		 * Most pipes are put into power gating when unused.
7915 		 *
7916 		 * When power gating is enabled on a pipe we lose the
7917 		 * interrupt enablement state when power gating is disabled.
7918 		 *
7919 		 * So we need to update the IRQ control state in hardware
7920 		 * whenever the pipe turns on (since it could be previously
7921 		 * power gated) or off (since some pipes can't be power gated
7922 		 * on some ASICs).
7923 		 */
7924 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7925 			dm_update_pflip_irq_state(drm_to_adev(dev),
7926 						  acrtc_attach);
7927 
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				!acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		} else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}
7937 
7938 		mutex_unlock(&dm->dc_lock);
7939 	}
7940 
7941 	/*
7942 	 * Update cursor state *after* programming all the planes.
7943 	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane, since those pipes are being disabled.
7945 	 */
7946 	if (acrtc_state->active_planes)
7947 		amdgpu_dm_commit_cursors(state);
7948 
7949 cleanup:
7950 	kfree(bundle);
7951 }
7952 
7953 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7954 				   struct drm_atomic_state *state)
7955 {
7956 	struct amdgpu_device *adev = drm_to_adev(dev);
7957 	struct amdgpu_dm_connector *aconnector;
7958 	struct drm_connector *connector;
7959 	struct drm_connector_state *old_con_state, *new_con_state;
7960 	struct drm_crtc_state *new_crtc_state;
7961 	struct dm_crtc_state *new_dm_crtc_state;
7962 	const struct dc_stream_status *status;
7963 	int i, inst;
7964 
7965 	/* Notify device removals. */
7966 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7967 		if (old_con_state->crtc != new_con_state->crtc) {
7968 			/* CRTC changes require notification. */
7969 			goto notify;
7970 		}
7971 
7972 		if (!new_con_state->crtc)
7973 			continue;
7974 
7975 		new_crtc_state = drm_atomic_get_new_crtc_state(
7976 			state, new_con_state->crtc);
7977 
7978 		if (!new_crtc_state)
7979 			continue;
7980 
7981 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7982 			continue;
7983 
7984 	notify:
7985 		aconnector = to_amdgpu_dm_connector(connector);
7986 
7987 		mutex_lock(&adev->dm.audio_lock);
7988 		inst = aconnector->audio_inst;
7989 		aconnector->audio_inst = -1;
7990 		mutex_unlock(&adev->dm.audio_lock);
7991 
7992 		amdgpu_dm_audio_eld_notify(adev, inst);
7993 	}
7994 
7995 	/* Notify audio device additions. */
7996 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7997 		if (!new_con_state->crtc)
7998 			continue;
7999 
8000 		new_crtc_state = drm_atomic_get_new_crtc_state(
8001 			state, new_con_state->crtc);
8002 
8003 		if (!new_crtc_state)
8004 			continue;
8005 
8006 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8007 			continue;
8008 
8009 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8010 		if (!new_dm_crtc_state->stream)
8011 			continue;
8012 
8013 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8014 		if (!status)
8015 			continue;
8016 
8017 		aconnector = to_amdgpu_dm_connector(connector);
8018 
8019 		mutex_lock(&adev->dm.audio_lock);
8020 		inst = status->audio_inst;
8021 		aconnector->audio_inst = inst;
8022 		mutex_unlock(&adev->dm.audio_lock);
8023 
8024 		amdgpu_dm_audio_eld_notify(adev, inst);
8025 	}
8026 }
8027 
8028 /*
8029  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8030  * @crtc_state: the DRM CRTC state
8031  * @stream_state: the DC stream state.
8032  *
8033  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8034  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8035  */
8036 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8037 						struct dc_stream_state *stream_state)
8038 {
8039 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8040 }
8041 
8042 /**
 * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
8044  * @state: The atomic state to commit
8045  *
8046  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failures here imply a hardware failure, since
8048  * atomic check should have filtered anything non-kosher.
8049  */
8050 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8051 {
8052 	struct drm_device *dev = state->dev;
8053 	struct amdgpu_device *adev = drm_to_adev(dev);
8054 	struct amdgpu_display_manager *dm = &adev->dm;
8055 	struct dm_atomic_state *dm_state;
8056 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8057 	uint32_t i, j;
8058 	struct drm_crtc *crtc;
8059 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8060 	unsigned long flags;
8061 	bool wait_for_vblank = true;
8062 	struct drm_connector *connector;
8063 	struct drm_connector_state *old_con_state, *new_con_state;
8064 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8065 	int crtc_disable_count = 0;
8066 	bool mode_set_reset_required = false;
8067 
8068 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8069 
8070 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8071 
8072 	dm_state = dm_atomic_get_new_state(state);
8073 	if (dm_state && dm_state->context) {
8074 		dc_state = dm_state->context;
8075 	} else {
8076 		/* No state changes, retain current state. */
8077 		dc_state_temp = dc_create_state(dm->dc);
8078 		ASSERT(dc_state_temp);
8079 		dc_state = dc_state_temp;
8080 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8081 	}
8082 
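	/*
	 * Disable interrupts and release the old streams for CRTCs that are
	 * being turned off or going through a full modeset, before handing
	 * the new state to DC.
	 */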
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8084 				       new_crtc_state, i) {
8085 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8086 
8087 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8088 
8089 		if (old_crtc_state->active &&
8090 		    (!new_crtc_state->active ||
8091 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8092 			manage_dm_interrupts(adev, acrtc, false);
8093 			dc_stream_release(dm_old_crtc_state->stream);
8094 		}
8095 	}
8096 
8097 	drm_atomic_helper_calc_timestamping_constants(state);
8098 
8099 	/* update changed items */
8100 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8101 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8102 
8103 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8104 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8105 
		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
8110 			acrtc->crtc_id,
8111 			new_crtc_state->enable,
8112 			new_crtc_state->active,
8113 			new_crtc_state->planes_changed,
8114 			new_crtc_state->mode_changed,
8115 			new_crtc_state->active_changed,
8116 			new_crtc_state->connectors_changed);
8117 
8118 		/* Disable cursor if disabling crtc */
8119 		if (old_crtc_state->active && !new_crtc_state->active) {
8120 			struct dc_cursor_position position;
8121 
8122 			memset(&position, 0, sizeof(position));
8123 			mutex_lock(&dm->dc_lock);
8124 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8125 			mutex_unlock(&dm->dc_lock);
8126 		}
8127 
8128 		/* Copy all transient state flags into dc state */
8129 		if (dm_new_crtc_state->stream) {
8130 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8131 							    dm_new_crtc_state->stream);
8132 		}
8133 
		/* Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8137 
8138 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8139 
8140 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8141 
8142 			if (!dm_new_crtc_state->stream) {
8143 				/*
8144 				 * this could happen because of issues with
8145 				 * userspace notifications delivery.
8146 				 * In this case userspace tries to set mode on
8147 				 * display which is disconnected in fact.
8148 				 * dc_sink is NULL in this case on aconnector.
8149 				 * We expect reset mode will come soon.
8150 				 *
8151 				 * This can also happen when unplug is done
8152 				 * during resume sequence ended
8153 				 *
8154 				 * In this case, we want to pretend we still
8155 				 * have a sink to keep the pipe running so that
8156 				 * hw state is consistent with the sw state
8157 				 */
8158 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8159 						__func__, acrtc->base.base.id);
8160 				continue;
8161 			}
8162 
8163 			if (dm_old_crtc_state->stream)
8164 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8165 
8166 			pm_runtime_get_noresume(dev->dev);
8167 
8168 			acrtc->enabled = true;
8169 			acrtc->hw_mode = new_crtc_state->mode;
8170 			crtc->hwmode = new_crtc_state->mode;
8171 			mode_set_reset_required = true;
8172 		} else if (modereset_required(new_crtc_state)) {
8173 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8174 			/* i.e. reset mode */
8175 			if (dm_old_crtc_state->stream)
8176 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8177 			mode_set_reset_required = true;
8178 		}
8179 	} /* for_each_crtc_in_state() */
8180 
8181 	if (dc_state) {
		/* If there is a mode set or reset, disable eDP PSR. */
8183 		if (mode_set_reset_required)
8184 			amdgpu_dm_psr_disable_all(dm);
8185 
8186 		dm_enable_per_frame_crtc_master_sync(dc_state);
8187 		mutex_lock(&dm->dc_lock);
8188 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8189 		mutex_unlock(&dm->dc_lock);
8190 	}
8191 
8192 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8193 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8194 
8195 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8196 
8197 		if (dm_new_crtc_state->stream != NULL) {
8198 			const struct dc_stream_status *status =
8199 					dc_stream_get_status(dm_new_crtc_state->stream);
8200 
8201 			if (!status)
8202 				status = dc_stream_get_status_from_state(dc_state,
8203 									 dm_new_crtc_state->stream);
8204 			if (!status)
8205 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8206 			else
8207 				acrtc->otg_inst = status->primary_otg_inst;
8208 		}
8209 	}
8210 #ifdef CONFIG_DRM_AMD_DC_HDCP
8211 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8212 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8213 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8214 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8215 
8216 		new_crtc_state = NULL;
8217 
8218 		if (acrtc)
8219 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8220 
8221 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8222 
8223 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8224 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8225 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8226 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8227 			dm_new_con_state->update_hdcp = true;
8228 			continue;
8229 		}
8230 
8231 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8232 			hdcp_update_display(
8233 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8234 				new_con_state->hdcp_content_type,
8235 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8236 													 : false);
8237 	}
8238 #endif
8239 
8240 	/* Handle connector state changes */
8241 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8242 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8243 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8244 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8245 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8246 		struct dc_stream_update stream_update;
8247 		struct dc_info_packet hdr_packet;
8248 		struct dc_stream_status *status = NULL;
8249 		bool abm_changed, hdr_changed, scaling_changed;
8250 
8251 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8252 		memset(&stream_update, 0, sizeof(stream_update));
8253 
8254 		if (acrtc) {
8255 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8256 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8257 		}
8258 
8259 		/* Skip any modesets/resets */
8260 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8261 			continue;
8262 
8263 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8264 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8265 
8266 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8267 							     dm_old_con_state);
8268 
8269 		abm_changed = dm_new_crtc_state->abm_level !=
8270 			      dm_old_crtc_state->abm_level;
8271 
8272 		hdr_changed =
8273 			is_hdr_metadata_different(old_con_state, new_con_state);
8274 
8275 		if (!scaling_changed && !abm_changed && !hdr_changed)
8276 			continue;
8277 
8278 		stream_update.stream = dm_new_crtc_state->stream;
8279 		if (scaling_changed) {
8280 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8281 					dm_new_con_state, dm_new_crtc_state->stream);
8282 
8283 			stream_update.src = dm_new_crtc_state->stream->src;
8284 			stream_update.dst = dm_new_crtc_state->stream->dst;
8285 		}
8286 
8287 		if (abm_changed) {
8288 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8289 
8290 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8291 		}
8292 
8293 		if (hdr_changed) {
8294 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8295 			stream_update.hdr_static_metadata = &hdr_packet;
8296 		}
8297 
		status = dc_stream_get_status(dm_new_crtc_state->stream);
		if (WARN_ON(!status))
			continue;
		WARN_ON(!status->plane_count);
8301 
8302 		/*
8303 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8304 		 * Here we create an empty update on each plane.
8305 		 * To fix this, DC should permit updating only stream properties.
8306 		 */
8307 		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

8311 		mutex_lock(&dm->dc_lock);
8312 		dc_commit_updates_for_stream(dm->dc,
8313 						     dummy_updates,
8314 						     status->plane_count,
8315 						     dm_new_crtc_state->stream,
8316 						     &stream_update,
8317 						     dc_state);
8318 		mutex_unlock(&dm->dc_lock);
8319 	}
8320 
8321 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8322 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8323 				      new_crtc_state, i) {
8324 		if (old_crtc_state->active && !new_crtc_state->active)
8325 			crtc_disable_count++;
8326 
8327 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8328 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8329 
8330 		/* For freesync config update on crtc state and params for irq */
8331 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8332 
8333 		/* Handle vrr on->off / off->on transitions */
8334 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8335 						dm_new_crtc_state);
8336 	}
8337 
8338 	/**
8339 	 * Enable interrupts for CRTCs that are newly enabled or went through
8340 	 * a modeset. It was intentionally deferred until after the front end
8341 	 * state was modified to wait until the OTG was on and so the IRQ
8342 	 * handlers didn't access stale or invalid state.
8343 	 */
8344 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8345 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8346 
8347 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8348 
8349 		if (new_crtc_state->active &&
8350 		    (!old_crtc_state->active ||
8351 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8352 			dc_stream_retain(dm_new_crtc_state->stream);
8353 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8354 			manage_dm_interrupts(adev, acrtc, true);
8355 
8356 #ifdef CONFIG_DEBUG_FS
8357 			/**
8358 			 * Frontend may have changed so reapply the CRC capture
8359 			 * settings for the stream.
8360 			 */
8361 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8362 
8363 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8364 				amdgpu_dm_crtc_configure_crc_source(
8365 					crtc, dm_new_crtc_state,
8366 					dm_new_crtc_state->crc_src);
8367 			}
8368 #endif
8369 		}
8370 	}
8371 
8372 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8373 		if (new_crtc_state->async_flip)
8374 			wait_for_vblank = false;
8375 
8376 	/* update planes when needed per crtc*/
8377 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8378 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8379 
8380 		if (dm_new_crtc_state->stream)
8381 			amdgpu_dm_commit_planes(state, dc_state, dev,
8382 						dm, crtc, wait_for_vblank);
8383 	}
8384 
8385 	/* Update audio instances for each connector. */
8386 	amdgpu_dm_commit_audio(dev, state);
8387 
8388 	/*
8389 	 * send vblank event on all events not handled in flip and
8390 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8391 	 */
8392 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8393 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8394 
8395 		if (new_crtc_state->event)
8396 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8397 
8398 		new_crtc_state->event = NULL;
8399 	}
8400 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8401 
8402 	/* Signal HW programming completion */
8403 	drm_atomic_helper_commit_hw_done(state);
8404 
8405 	if (wait_for_vblank)
8406 		drm_atomic_helper_wait_for_flip_done(dev, state);
8407 
8408 	drm_atomic_helper_cleanup_planes(dev, state);
8409 
	/* Return the stolen VGA memory back to VRAM */
8411 	if (!adev->mman.keep_stolen_vga_memory)
8412 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8413 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8414 
8415 	/*
8416 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8417 	 * so we can put the GPU into runtime suspend if we're not driving any
	 * displays anymore.
8419 	 */
8420 	for (i = 0; i < crtc_disable_count; i++)
8421 		pm_runtime_put_autosuspend(dev->dev);
8422 	pm_runtime_mark_last_busy(dev->dev);
8423 
8424 	if (dc_state_temp)
8425 		dc_release_state(dc_state_temp);
8426 }
8427 
8428 
8429 static int dm_force_atomic_commit(struct drm_connector *connector)
8430 {
8431 	int ret = 0;
8432 	struct drm_device *ddev = connector->dev;
8433 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8434 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8435 	struct drm_plane *plane = disconnected_acrtc->base.primary;
8436 	struct drm_connector_state *conn_state;
8437 	struct drm_crtc_state *crtc_state;
8438 	struct drm_plane_state *plane_state;
8439 
8440 	if (!state)
8441 		return -ENOMEM;
8442 
8443 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
8444 
	/* Construct an atomic state to restore the previous display setting */

	/* Attach the connector to the drm_atomic_state */
8450 	conn_state = drm_atomic_get_connector_state(state, connector);
8451 
8452 	ret = PTR_ERR_OR_ZERO(conn_state);
8453 	if (ret)
8454 		goto err;
8455 
	/* Attach the CRTC to the drm_atomic_state */
8457 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8458 
8459 	ret = PTR_ERR_OR_ZERO(crtc_state);
8460 	if (ret)
8461 		goto err;
8462 
8463 	/* force a restore */
8464 	crtc_state->mode_changed = true;
8465 
8466 	/* Attach plane to drm_atomic_state */
8467 	plane_state = drm_atomic_get_plane_state(state, plane);
8468 
8469 	ret = PTR_ERR_OR_ZERO(plane_state);
8470 	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
8475 	ret = drm_atomic_commit(state);
8476 	if (!ret)
8477 		return 0;
8478 
8479 err:
8480 	DRM_ERROR("Restoring old state failed with %i\n", ret);
8481 	drm_atomic_state_put(state);
8482 
8483 	return ret;
8484 }
8485 
8486 /*
8487  * This function handles all cases when set mode does not come upon hotplug.
8488  * This includes when a display is unplugged then plugged back into the
8489  * same port and when running without usermode desktop manager supprot
8490  */
8491 void dm_restore_drm_connector_state(struct drm_device *dev,
8492 				    struct drm_connector *connector)
8493 {
8494 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8495 	struct amdgpu_crtc *disconnected_acrtc;
8496 	struct dm_crtc_state *acrtc_state;
8497 
8498 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8499 		return;
8500 
8501 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8502 	if (!disconnected_acrtc)
8503 		return;
8504 
8505 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8506 	if (!acrtc_state->stream)
8507 		return;
8508 
8509 	/*
8510 	 * If the previous sink is not released and different from the current,
8511 	 * we deduce we are in a state where we can not rely on usermode call
8512 	 * to turn on the display, so we do it here
8513 	 */
8514 	if (acrtc_state->stream->sink != aconnector->dc_sink)
8515 		dm_force_atomic_commit(&aconnector->base);
8516 }
8517 
8518 /*
8519  * Grabs all modesetting locks to serialize against any blocking commits,
8520  * Waits for completion of all non blocking commits.
8521  */
8522 static int do_aquire_global_lock(struct drm_device *dev,
8523 				 struct drm_atomic_state *state)
8524 {
8525 	struct drm_crtc *crtc;
8526 	struct drm_crtc_commit *commit;
8527 	long ret;
8528 
8529 	/*
8530 	 * Adding all modeset locks to aquire_ctx will
8531 	 * ensure that when the framework release it the
8532 	 * extra locks we are locking here will get released to
8533 	 */
8534 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8535 	if (ret)
8536 		return ret;
8537 
8538 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8539 		spin_lock(&crtc->commit_lock);
8540 		commit = list_first_entry_or_null(&crtc->commit_list,
8541 				struct drm_crtc_commit, commit_entry);
8542 		if (commit)
8543 			drm_crtc_commit_get(commit);
8544 		spin_unlock(&crtc->commit_lock);
8545 
8546 		if (!commit)
8547 			continue;
8548 
8549 		/*
8550 		 * Make sure all pending HW programming completed and
8551 		 * page flips done
8552 		 */
8553 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8554 
8555 		if (ret > 0)
8556 			ret = wait_for_completion_interruptible_timeout(
8557 					&commit->flip_done, 10*HZ);
8558 
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
8562 
8563 		drm_crtc_commit_put(commit);
8564 	}
8565 
8566 	return ret < 0 ? ret : 0;
8567 }
8568 
8569 static void get_freesync_config_for_crtc(
8570 	struct dm_crtc_state *new_crtc_state,
8571 	struct dm_connector_state *new_con_state)
8572 {
8573 	struct mod_freesync_config config = {0};
8574 	struct amdgpu_dm_connector *aconnector =
8575 			to_amdgpu_dm_connector(new_con_state->base.connector);
8576 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
8577 	int vrefresh = drm_mode_vrefresh(mode);
8578 
8579 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8580 					vrefresh >= aconnector->min_vfreq &&
8581 					vrefresh <= aconnector->max_vfreq;
8582 
8583 	if (new_crtc_state->vrr_supported) {
8584 		new_crtc_state->stream->ignore_msa_timing_param = true;
8585 		config.state = new_crtc_state->base.vrr_enabled ?
8586 				VRR_STATE_ACTIVE_VARIABLE :
8587 				VRR_STATE_INACTIVE;
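		/*
		 * The connector reports its range in Hz while mod_freesync
		 * expects micro-Hz, hence the 1000000 scaling below; e.g. a
		 * 48-144 Hz panel becomes 48000000-144000000 uHz.
		 */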
8588 		config.min_refresh_in_uhz =
8589 				aconnector->min_vfreq * 1000000;
8590 		config.max_refresh_in_uhz =
8591 				aconnector->max_vfreq * 1000000;
8592 		config.vsif_supported = true;
8593 		config.btr = true;
8594 	}
8595 
8596 	new_crtc_state->freesync_config = config;
8597 }
8598 
8599 static void reset_freesync_config_for_crtc(
8600 	struct dm_crtc_state *new_crtc_state)
8601 {
8602 	new_crtc_state->vrr_supported = false;
8603 
8604 	memset(&new_crtc_state->vrr_infopacket, 0,
8605 	       sizeof(new_crtc_state->vrr_infopacket));
8606 }
8607 
8608 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8609 				struct drm_atomic_state *state,
8610 				struct drm_crtc *crtc,
8611 				struct drm_crtc_state *old_crtc_state,
8612 				struct drm_crtc_state *new_crtc_state,
8613 				bool enable,
8614 				bool *lock_and_validation_needed)
8615 {
8616 	struct dm_atomic_state *dm_state = NULL;
8617 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8618 	struct dc_stream_state *new_stream;
8619 	int ret = 0;
8620 
8621 	/*
8622 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8623 	 * update changed items
8624 	 */
8625 	struct amdgpu_crtc *acrtc = NULL;
8626 	struct amdgpu_dm_connector *aconnector = NULL;
8627 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8628 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8629 
8630 	new_stream = NULL;
8631 
8632 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8633 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8634 	acrtc = to_amdgpu_crtc(crtc);
8635 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8636 
8637 	/* TODO This hack should go away */
8638 	if (aconnector && enable) {
8639 		/* Make sure fake sink is created in plug-in scenario */
8640 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8641 							    &aconnector->base);
8642 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8643 							    &aconnector->base);
8644 
8645 		if (IS_ERR(drm_new_conn_state)) {
8646 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8647 			goto fail;
8648 		}
8649 
8650 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8651 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8652 
8653 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8654 			goto skip_modeset;
8655 
8656 		new_stream = create_validate_stream_for_sink(aconnector,
8657 							     &new_crtc_state->mode,
8658 							     dm_new_conn_state,
8659 							     dm_old_crtc_state->stream);
8660 
8661 		/*
8662 		 * we can have no stream on ACTION_SET if a display
8663 		 * was disconnected during S3, in this case it is not an
8664 		 * error, the OS will be updated after detection, and
8665 		 * will do the right thing on next atomic commit
8666 		 */
8667 
8668 		if (!new_stream) {
8669 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8670 					__func__, acrtc->base.base.id);
8671 			ret = -ENOMEM;
8672 			goto fail;
8673 		}
8674 
8675 		/*
8676 		 * TODO: Check VSDB bits to decide whether this should
8677 		 * be enabled or not.
8678 		 */
8679 		new_stream->triggered_crtc_reset.enabled =
8680 			dm->force_timing_sync;
8681 
8682 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8683 
8684 		ret = fill_hdr_info_packet(drm_new_conn_state,
8685 					   &new_stream->hdr_static_metadata);
8686 		if (ret)
8687 			goto fail;
8688 
8689 		/*
8690 		 * If we already removed the old stream from the context
8691 		 * (and set the new stream to NULL) then we can't reuse
8692 		 * the old stream even if the stream and scaling are unchanged.
8693 		 * We'll hit the BUG_ON and black screen.
8694 		 *
8695 		 * TODO: Refactor this function to allow this check to work
8696 		 * in all conditions.
8697 		 */
8698 		if (dm_new_crtc_state->stream &&
8699 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8700 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8701 			new_crtc_state->mode_changed = false;
8702 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8703 					 new_crtc_state->mode_changed);
8704 		}
8705 	}
8706 
8707 	/* mode_changed flag may get updated above, need to check again */
8708 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8709 		goto skip_modeset;
8710 
8711 	DRM_DEBUG_DRIVER(
8712 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8713 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8714 		"connectors_changed:%d\n",
8715 		acrtc->crtc_id,
8716 		new_crtc_state->enable,
8717 		new_crtc_state->active,
8718 		new_crtc_state->planes_changed,
8719 		new_crtc_state->mode_changed,
8720 		new_crtc_state->active_changed,
8721 		new_crtc_state->connectors_changed);
8722 
8723 	/* Remove stream for any changed/disabled CRTC */
8724 	if (!enable) {
8725 
8726 		if (!dm_old_crtc_state->stream)
8727 			goto skip_modeset;
8728 
8729 		ret = dm_atomic_get_state(state, &dm_state);
8730 		if (ret)
8731 			goto fail;
8732 
8733 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8734 				crtc->base.id);
8735 
8736 		/* i.e. reset mode */
8737 		if (dc_remove_stream_from_ctx(
8738 				dm->dc,
8739 				dm_state->context,
8740 				dm_old_crtc_state->stream) != DC_OK) {
8741 			ret = -EINVAL;
8742 			goto fail;
8743 		}
8744 
8745 		dc_stream_release(dm_old_crtc_state->stream);
8746 		dm_new_crtc_state->stream = NULL;
8747 
8748 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8749 
8750 		*lock_and_validation_needed = true;
8751 
8752 	} else {/* Add stream for any updated/enabled CRTC */
8753 		/*
8754 		 * Quick fix to prevent NULL pointer on new_stream when
8755 		 * added MST connectors not found in existing crtc_state in the chained mode
8756 		 * TODO: need to dig out the root cause of that
8757 		 */
8758 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8759 			goto skip_modeset;
8760 
8761 		if (modereset_required(new_crtc_state))
8762 			goto skip_modeset;
8763 
8764 		if (modeset_required(new_crtc_state, new_stream,
8765 				     dm_old_crtc_state->stream)) {
8766 
8767 			WARN_ON(dm_new_crtc_state->stream);
8768 
8769 			ret = dm_atomic_get_state(state, &dm_state);
8770 			if (ret)
8771 				goto fail;
8772 
8773 			dm_new_crtc_state->stream = new_stream;
8774 
8775 			dc_stream_retain(new_stream);
8776 
8777 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8778 						crtc->base.id);
8779 
8780 			if (dc_add_stream_to_ctx(
8781 					dm->dc,
8782 					dm_state->context,
8783 					dm_new_crtc_state->stream) != DC_OK) {
8784 				ret = -EINVAL;
8785 				goto fail;
8786 			}
8787 
8788 			*lock_and_validation_needed = true;
8789 		}
8790 	}
8791 
8792 skip_modeset:
8793 	/* Release extra reference */
8794 	if (new_stream)
		dc_stream_release(new_stream);
8796 
8797 	/*
8798 	 * We want to do dc stream updates that do not require a
8799 	 * full modeset below.
8800 	 */
8801 	if (!(enable && aconnector && new_crtc_state->active))
8802 		return 0;
8803 	/*
8804 	 * Given above conditions, the dc state cannot be NULL because:
8805 	 * 1. We're in the process of enabling CRTCs (just been added
8806 	 *    to the dc context, or already is on the context)
8807 	 * 2. Has a valid connector attached, and
8808 	 * 3. Is currently active and enabled.
8809 	 * => The dc stream state currently exists.
8810 	 */
8811 	BUG_ON(dm_new_crtc_state->stream == NULL);
8812 
8813 	/* Scaling or underscan settings */
8814 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8815 		update_stream_scaling_settings(
8816 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8817 
8818 	/* ABM settings */
8819 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8820 
8821 	/*
8822 	 * Color management settings. We also update color properties
8823 	 * when a modeset is needed, to ensure it gets reprogrammed.
8824 	 */
8825 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8826 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8827 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8828 		if (ret)
8829 			goto fail;
8830 	}
8831 
8832 	/* Update Freesync settings. */
8833 	get_freesync_config_for_crtc(dm_new_crtc_state,
8834 				     dm_new_conn_state);
8835 
8836 	return ret;
8837 
8838 fail:
8839 	if (new_stream)
8840 		dc_stream_release(new_stream);
8841 	return ret;
8842 }
8843 
8844 static bool should_reset_plane(struct drm_atomic_state *state,
8845 			       struct drm_plane *plane,
8846 			       struct drm_plane_state *old_plane_state,
8847 			       struct drm_plane_state *new_plane_state)
8848 {
8849 	struct drm_plane *other;
8850 	struct drm_plane_state *old_other_state, *new_other_state;
8851 	struct drm_crtc_state *new_crtc_state;
8852 	int i;
8853 
8854 	/*
8855 	 * TODO: Remove this hack once the checks below are sufficient
8856 	 * enough to determine when we need to reset all the planes on
8857 	 * the stream.
8858 	 */
8859 	if (state->allow_modeset)
8860 		return true;
8861 
8862 	/* Exit early if we know that we're adding or removing the plane. */
8863 	if (old_plane_state->crtc != new_plane_state->crtc)
8864 		return true;
8865 
8866 	/* old crtc == new_crtc == NULL, plane not in context. */
8867 	if (!new_plane_state->crtc)
8868 		return false;
8869 
8870 	new_crtc_state =
8871 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8872 
8873 	if (!new_crtc_state)
8874 		return true;
8875 
8876 	/* CRTC Degamma changes currently require us to recreate planes. */
8877 	if (new_crtc_state->color_mgmt_changed)
8878 		return true;
8879 
8880 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8881 		return true;
8882 
8883 	/*
8884 	 * If there are any new primary or overlay planes being added or
8885 	 * removed then the z-order can potentially change. To ensure
8886 	 * correct z-order and pipe acquisition the current DC architecture
8887 	 * requires us to remove and recreate all existing planes.
8888 	 *
8889 	 * TODO: Come up with a more elegant solution for this.
8890 	 */
8891 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
8894 			continue;
8895 
8896 		if (old_other_state->crtc != new_plane_state->crtc &&
8897 		    new_other_state->crtc != new_plane_state->crtc)
8898 			continue;
8899 
8900 		if (old_other_state->crtc != new_other_state->crtc)
8901 			return true;
8902 
8903 		/* Src/dst size and scaling updates. */
8904 		if (old_other_state->src_w != new_other_state->src_w ||
8905 		    old_other_state->src_h != new_other_state->src_h ||
8906 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8907 		    old_other_state->crtc_h != new_other_state->crtc_h)
8908 			return true;
8909 
8910 		/* Rotation / mirroring updates. */
8911 		if (old_other_state->rotation != new_other_state->rotation)
8912 			return true;
8913 
8914 		/* Blending updates. */
8915 		if (old_other_state->pixel_blend_mode !=
8916 		    new_other_state->pixel_blend_mode)
8917 			return true;
8918 
8919 		/* Alpha updates. */
8920 		if (old_other_state->alpha != new_other_state->alpha)
8921 			return true;
8922 
8923 		/* Colorspace changes. */
8924 		if (old_other_state->color_range != new_other_state->color_range ||
8925 		    old_other_state->color_encoding != new_other_state->color_encoding)
8926 			return true;
8927 
8928 		/* Framebuffer checks fall at the end. */
8929 		if (!old_other_state->fb || !new_other_state->fb)
8930 			continue;
8931 
8932 		/* Pixel format changes can require bandwidth updates. */
8933 		if (old_other_state->fb->format != new_other_state->fb->format)
8934 			return true;
8935 
8936 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8937 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8938 
8939 		/* Tiling and DCC changes also require bandwidth updates. */
8940 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
8941 		    old_afb->base.modifier != new_afb->base.modifier)
8942 			return true;
8943 	}
8944 
8945 	return false;
8946 }
8947 
8948 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8949 			      struct drm_plane_state *new_plane_state,
8950 			      struct drm_framebuffer *fb)
8951 {
8952 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8953 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8954 	unsigned int pitch;
8955 	bool linear;
8956 
8957 	if (fb->width > new_acrtc->max_cursor_width ||
8958 	    fb->height > new_acrtc->max_cursor_height) {
8959 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8960 				 new_plane_state->fb->width,
8961 				 new_plane_state->fb->height);
8962 		return -EINVAL;
8963 	}
8964 	if (new_plane_state->src_w != fb->width << 16 ||
8965 	    new_plane_state->src_h != fb->height << 16) {
8966 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8967 		return -EINVAL;
8968 	}
8969 
8970 	/* Pitch in pixels */
8971 	pitch = fb->pitches[0] / fb->format->cpp[0];
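	/* e.g. a 64x64 ARGB8888 cursor FB: 256 bytes per row / 4 cpp = 64 px */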
8972 
8973 	if (fb->width != pitch) {
8974 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
8975 				 fb->width, pitch);
8976 		return -EINVAL;
8977 	}
8978 
8979 	switch (pitch) {
8980 	case 64:
8981 	case 128:
8982 	case 256:
8983 		/* FB pitch is supported by cursor plane */
8984 		break;
8985 	default:
8986 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
8987 		return -EINVAL;
8988 	}
8989 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
8992 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
8993 		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
8997 		} else {
8998 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
8999 		}
9000 		if (!linear) {
9001 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9002 			return -EINVAL;
9003 		}
9004 	}
9005 
9006 	return 0;
9007 }
9008 
9009 static int dm_update_plane_state(struct dc *dc,
9010 				 struct drm_atomic_state *state,
9011 				 struct drm_plane *plane,
9012 				 struct drm_plane_state *old_plane_state,
9013 				 struct drm_plane_state *new_plane_state,
9014 				 bool enable,
9015 				 bool *lock_and_validation_needed)
9016 {
9017 
9018 	struct dm_atomic_state *dm_state = NULL;
9019 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9020 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9021 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9022 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9023 	struct amdgpu_crtc *new_acrtc;
9024 	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
9029 	old_plane_crtc = old_plane_state->crtc;
9030 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9031 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9032 
9033 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9034 		if (!enable || !new_plane_crtc ||
9035 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9036 			return 0;
9037 
9038 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9039 
9040 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9041 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9042 			return -EINVAL;
9043 		}
9044 
9045 		if (new_plane_state->fb) {
9046 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9047 						 new_plane_state->fb);
9048 			if (ret)
9049 				return ret;
9050 		}
9051 
9052 		return 0;
9053 	}
9054 
9055 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9056 					 new_plane_state);
9057 
9058 	/* Remove any changed/removed planes */
9059 	if (!enable) {
9060 		if (!needs_reset)
9061 			return 0;
9062 
9063 		if (!old_plane_crtc)
9064 			return 0;
9065 
9066 		old_crtc_state = drm_atomic_get_old_crtc_state(
9067 				state, old_plane_crtc);
9068 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9069 
9070 		if (!dm_old_crtc_state->stream)
9071 			return 0;
9072 
9073 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9074 				plane->base.id, old_plane_crtc->base.id);
9075 
9076 		ret = dm_atomic_get_state(state, &dm_state);
9077 		if (ret)
9078 			return ret;
9079 
9080 		if (!dc_remove_plane_from_context(
9081 				dc,
9082 				dm_old_crtc_state->stream,
9083 				dm_old_plane_state->dc_state,
				dm_state->context)) {
			return -EINVAL;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
9091 		dm_new_plane_state->dc_state = NULL;
9092 
9093 		*lock_and_validation_needed = true;
9094 
9095 	} else { /* Add new planes */
9096 		struct dc_plane_state *dc_new_plane_state;
9097 
9098 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9099 			return 0;
9100 
9101 		if (!new_plane_crtc)
9102 			return 0;
9103 
9104 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9105 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9106 
9107 		if (!dm_new_crtc_state->stream)
9108 			return 0;
9109 
9110 		if (!needs_reset)
9111 			return 0;
9112 
9113 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9114 		if (ret)
9115 			return ret;
9116 
9117 		WARN_ON(dm_new_plane_state->dc_state);
9118 
9119 		dc_new_plane_state = dc_create_plane_state(dc);
9120 		if (!dc_new_plane_state)
9121 			return -ENOMEM;
9122 
9123 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9124 				plane->base.id, new_plane_crtc->base.id);
9125 
9126 		ret = fill_dc_plane_attributes(
9127 			drm_to_adev(new_plane_crtc->dev),
9128 			dc_new_plane_state,
9129 			new_plane_state,
9130 			new_crtc_state);
9131 		if (ret) {
9132 			dc_plane_state_release(dc_new_plane_state);
9133 			return ret;
9134 		}
9135 
9136 		ret = dm_atomic_get_state(state, &dm_state);
9137 		if (ret) {
9138 			dc_plane_state_release(dc_new_plane_state);
9139 			return ret;
9140 		}
9141 
9142 		/*
9143 		 * Any atomic check errors that occur after this will
9144 		 * not need a release. The plane state will be attached
9145 		 * to the stream, and therefore part of the atomic
9146 		 * state. It'll be released when the atomic state is
9147 		 * cleaned.
9148 		 */
9149 		if (!dc_add_plane_to_context(
9150 				dc,
9151 				dm_new_crtc_state->stream,
9152 				dc_new_plane_state,
				dm_state->context)) {
			dc_plane_state_release(dc_new_plane_state);
9156 			return -EINVAL;
9157 		}
9158 
9159 		dm_new_plane_state->dc_state = dc_new_plane_state;
9160 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
9164 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9165 
9166 		*lock_and_validation_needed = true;
9167 	}
9168 
9169 
9170 	return ret;
9171 }
9172 
9173 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9174 				struct drm_crtc *crtc,
9175 				struct drm_crtc_state *new_crtc_state)
9176 {
9177 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9178 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9179 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get
	 * a cursor per pipe, but it inherits the scaling and positioning from
	 * the underlying pipe. Check that the cursor plane's scaling matches
	 * the primary plane's.
	 */
9184 
9185 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9186 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
	if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
		return 0;
9190 
9191 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9192 			 (new_cursor_state->src_w >> 16);
9193 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9194 			 (new_cursor_state->src_h >> 16);
9195 
9196 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9197 			 (new_primary_state->src_w >> 16);
9198 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9199 			 (new_primary_state->src_h >> 16);
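	/*
	 * src_w/src_h are 16.16 fixed point, so >> 16 yields the source size
	 * in whole pixels, and the scales above are compared in units of
	 * 1/1000th; e.g. a 64 px source shown at 128 px gives
	 * 128 * 1000 / 64 = 2000.
	 */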
9200 
9201 	if (cursor_scale_w != primary_scale_w ||
9202 	    cursor_scale_h != primary_scale_h) {
9203 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9204 		return -EINVAL;
9205 	}
9206 
9207 	return 0;
9208 }
9209 
9210 #if defined(CONFIG_DRM_AMD_DC_DCN)
9211 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9212 {
9213 	struct drm_connector *connector;
9214 	struct drm_connector_state *conn_state;
9215 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
9218 		if (conn_state->crtc != crtc)
9219 			continue;
9220 
9221 		aconnector = to_amdgpu_dm_connector(connector);
9222 		if (!aconnector->port || !aconnector->mst_port)
9223 			aconnector = NULL;
9224 		else
9225 			break;
9226 	}
9227 
9228 	if (!aconnector)
9229 		return 0;
9230 
9231 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9232 }
9233 #endif
9234 
9235 /**
9236  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9237  * @dev: The DRM device
9238  * @state: The atomic state to commit
9239  *
9240  * Validate that the given atomic state is programmable by DC into hardware.
9241  * This involves constructing a &struct dc_state reflecting the new hardware
9242  * state we wish to commit, then querying DC to see if it is programmable. It's
9243  * important not to modify the existing DC state. Otherwise, atomic_check
9244  * may unexpectedly commit hardware changes.
9245  *
9246  * When validating the DC state, it's important that the right locks are
9247  * acquired. For full updates case which removes/adds/updates streams on one
9248  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9249  * that any such full update commit will wait for completion of any outstanding
9250  * flip using DRMs synchronization events.
9251  *
9252  * Note that DM adds the affected connectors for all CRTCs in state, when that
9253  * might not seem necessary. This is because DC stream creation requires the
9254  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9255  * be possible but non-trivial - a possible TODO item.
9256  *
9257  * Return: -Error code if validation failed.
9258  */
9259 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9260 				  struct drm_atomic_state *state)
9261 {
9262 	struct amdgpu_device *adev = drm_to_adev(dev);
9263 	struct dm_atomic_state *dm_state = NULL;
9264 	struct dc *dc = adev->dm.dc;
9265 	struct drm_connector *connector;
9266 	struct drm_connector_state *old_con_state, *new_con_state;
9267 	struct drm_crtc *crtc;
9268 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9269 	struct drm_plane *plane;
9270 	struct drm_plane_state *old_plane_state, *new_plane_state;
9271 	enum dc_status status;
9272 	int ret, i;
9273 	bool lock_and_validation_needed = false;
9274 	struct dm_crtc_state *dm_old_crtc_state;
9275 
9276 	trace_amdgpu_dm_atomic_check_begin(state);
9277 
9278 	ret = drm_atomic_helper_check_modeset(dev, state);
9279 	if (ret)
9280 		goto fail;
9281 
9282 	/* Check connector changes */
9283 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9284 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9285 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9286 
9287 		/* Skip connectors that are disabled or part of modeset already. */
9288 		if (!old_con_state->crtc && !new_con_state->crtc)
9289 			continue;
9290 
9291 		if (!new_con_state->crtc)
9292 			continue;
9293 
9294 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9295 		if (IS_ERR(new_crtc_state)) {
9296 			ret = PTR_ERR(new_crtc_state);
9297 			goto fail;
9298 		}
9299 
9300 		if (dm_old_con_state->abm_level !=
9301 		    dm_new_con_state->abm_level)
9302 			new_crtc_state->connectors_changed = true;
9303 	}
9304 
9305 #if defined(CONFIG_DRM_AMD_DC_DCN)
9306 	if (adev->asic_type >= CHIP_NAVI10) {
9307 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9308 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9309 				ret = add_affected_mst_dsc_crtcs(state, crtc);
9310 				if (ret)
9311 					goto fail;
9312 			}
9313 		}
9314 	}
9315 #endif
9316 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9317 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9318 
9319 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9320 		    !new_crtc_state->color_mgmt_changed &&
9321 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
9323 			continue;
9324 
9325 		if (!new_crtc_state->enable)
9326 			continue;
9327 
9328 		ret = drm_atomic_add_affected_connectors(state, crtc);
9329 		if (ret)
			goto fail;
9331 
9332 		ret = drm_atomic_add_affected_planes(state, crtc);
9333 		if (ret)
9334 			goto fail;
9335 
9336 		if (dm_old_crtc_state->dsc_force_changed)
9337 			new_crtc_state->mode_changed = true;
9338 	}
9339 
9340 	/*
9341 	 * Add all primary and overlay planes on the CRTC to the state
9342 	 * whenever a plane is enabled to maintain correct z-ordering
9343 	 * and to enable fast surface updates.
9344 	 */
9345 	drm_for_each_crtc(crtc, dev) {
9346 		bool modified = false;
9347 
9348 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9349 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9350 				continue;
9351 
9352 			if (new_plane_state->crtc == crtc ||
9353 			    old_plane_state->crtc == crtc) {
9354 				modified = true;
9355 				break;
9356 			}
9357 		}
9358 
9359 		if (!modified)
9360 			continue;
9361 
9362 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9363 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
9364 				continue;
9365 
9366 			new_plane_state =
9367 				drm_atomic_get_plane_state(state, plane);
9368 
9369 			if (IS_ERR(new_plane_state)) {
9370 				ret = PTR_ERR(new_plane_state);
9371 				goto fail;
9372 			}
9373 		}
9374 	}
9375 
	/* Remove existing planes if they are modified */
9377 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9378 		ret = dm_update_plane_state(dc, state, plane,
9379 					    old_plane_state,
9380 					    new_plane_state,
9381 					    false,
9382 					    &lock_and_validation_needed);
9383 		if (ret)
9384 			goto fail;
9385 	}
9386 
9387 	/* Disable all crtcs which require disable */
9388 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9389 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9390 					   old_crtc_state,
9391 					   new_crtc_state,
9392 					   false,
9393 					   &lock_and_validation_needed);
9394 		if (ret)
9395 			goto fail;
9396 	}
9397 
9398 	/* Enable all crtcs which require enable */
9399 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9400 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
9401 					   old_crtc_state,
9402 					   new_crtc_state,
9403 					   true,
9404 					   &lock_and_validation_needed);
9405 		if (ret)
9406 			goto fail;
9407 	}
9408 
9409 	/* Add new/modified planes */
9410 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9411 		ret = dm_update_plane_state(dc, state, plane,
9412 					    old_plane_state,
9413 					    new_plane_state,
9414 					    true,
9415 					    &lock_and_validation_needed);
9416 		if (ret)
9417 			goto fail;
9418 	}
9419 
9420 	/* Run this here since we want to validate the streams we created */
9421 	ret = drm_atomic_helper_check_planes(dev, state);
9422 	if (ret)
9423 		goto fail;
9424 
9425 	/* Check cursor planes scaling */
9426 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9427 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9428 		if (ret)
9429 			goto fail;
9430 	}
9431 
9432 	if (state->legacy_cursor_update) {
9433 		/*
9434 		 * This is a fast cursor update coming from the plane update
9435 		 * helper, check if it can be done asynchronously for better
9436 		 * performance.
9437 		 */
9438 		state->async_update =
9439 			!drm_atomic_helper_async_check(dev, state);
9440 
9441 		/*
9442 		 * Skip the remaining global validation if this is an async
9443 		 * update. Cursor updates can be done without affecting
9444 		 * state or bandwidth calcs and this avoids the performance
9445 		 * penalty of locking the private state object and
9446 		 * allocating a new dc_state.
9447 		 */
9448 		if (state->async_update)
9449 			return 0;
9450 	}
9451 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context without causing a full reset. Need to
	 * decide how to handle this.
	 */
9457 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9458 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9459 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9460 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9461 
9462 		/* Skip any modesets/resets */
9463 		if (!acrtc || drm_atomic_crtc_needs_modeset(
9464 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9465 			continue;
9466 
		/* Skip anything that is not a scaling or underscan change */
9468 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9469 			continue;
9470 
9471 		lock_and_validation_needed = true;
9472 	}
9473 
9474 	/**
9475 	 * Streams and planes are reset when there are changes that affect
9476 	 * bandwidth. Anything that affects bandwidth needs to go through
9477 	 * DC global validation to ensure that the configuration can be applied
9478 	 * to hardware.
9479 	 *
9480 	 * We have to currently stall out here in atomic_check for outstanding
9481 	 * commits to finish in this case because our IRQ handlers reference
9482 	 * DRM state directly - we can end up disabling interrupts too early
9483 	 * if we don't.
9484 	 *
9485 	 * TODO: Remove this stall and drop DM state private objects.
9486 	 */
9487 	if (lock_and_validation_needed) {
9488 		ret = dm_atomic_get_state(state, &dm_state);
9489 		if (ret)
9490 			goto fail;
9491 
9492 		ret = do_aquire_global_lock(dev, state);
9493 		if (ret)
9494 			goto fail;
9495 
9496 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
9499 
9500 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9501 		if (ret)
9502 			goto fail;
9503 #endif
9504 
9505 		/*
9506 		 * Perform validation of MST topology in the state:
9507 		 * We need to perform MST atomic check before calling
9508 		 * dc_validate_global_state(), or there is a chance
9509 		 * to get stuck in an infinite loop and hang eventually.
9510 		 */
9511 		ret = drm_dp_mst_atomic_check(state);
9512 		if (ret)
9513 			goto fail;
9514 		status = dc_validate_global_state(dc, dm_state->context, false);
9515 		if (status != DC_OK) {
9516 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
9517 				       dc_status_to_str(status), status);
9518 			ret = -EINVAL;
9519 			goto fail;
9520 		}
9521 	} else {
9522 		/*
9523 		 * The commit is a fast update. Fast updates shouldn't change
9524 		 * the DC context, affect global validation, and can have their
9525 		 * commit work done in parallel with other commits not touching
9526 		 * the same resource. If we have a new DC context as part of
9527 		 * the DM atomic state from validation we need to free it and
9528 		 * retain the existing one instead.
9529 		 *
9530 		 * Furthermore, since the DM atomic state only contains the DC
9531 		 * context and can safely be annulled, we can free the state
9532 		 * and clear the associated private object now to free
9533 		 * some memory and avoid a possible use-after-free later.
9534 		 */
9535 
9536 		for (i = 0; i < state->num_private_objs; i++) {
9537 			struct drm_private_obj *obj = state->private_objs[i].ptr;
9538 
9539 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
9541 
9542 				dm_atomic_destroy_state(obj,
9543 						state->private_objs[i].state);
9544 
				/*
				 * If i is not at the end of the array, the
				 * last element needs to be moved to where i
				 * was before the array can safely be
				 * truncated.
				 */
9549 				if (i != j)
9550 					state->private_objs[i] =
9551 						state->private_objs[j];
9552 
9553 				state->private_objs[j].ptr = NULL;
9554 				state->private_objs[j].state = NULL;
9555 				state->private_objs[j].old_state = NULL;
9556 				state->private_objs[j].new_state = NULL;
9557 
9558 				state->num_private_objs = j;
9559 				break;
9560 			}
9561 		}
9562 	}
9563 
9564 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9566 		struct dm_crtc_state *dm_new_crtc_state =
9567 			to_dm_crtc_state(new_crtc_state);
9568 
9569 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
9570 							 UPDATE_TYPE_FULL :
9571 							 UPDATE_TYPE_FAST;
9572 	}
9573 
	/* Must be a success at this point, i.e. ret == 0 */
9575 	WARN_ON(ret);
9576 
9577 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9578 
9579 	return ret;
9580 
9581 fail:
9582 	if (ret == -EDEADLK)
9583 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9584 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9585 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9586 	else
9587 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9588 
9589 	trace_amdgpu_dm_atomic_check_finish(state, ret);
9590 
9591 	return ret;
9592 }
9593 
9594 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9595 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
9596 {
9597 	uint8_t dpcd_data;
9598 	bool capable = false;
9599 
9600 	if (amdgpu_dm_connector->dc_link &&
9601 		dm_helpers_dp_read_dpcd(
9602 				NULL,
9603 				amdgpu_dm_connector->dc_link,
9604 				DP_DOWN_STREAM_PORT_COUNT,
9605 				&dpcd_data,
9606 				sizeof(dpcd_data))) {
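		/*
		 * MSA_TIMING_PAR_IGNORED means the sink can accept timing
		 * changes without strictly following the MSA parameters,
		 * a prerequisite for variable refresh over DP.
		 */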
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
9608 	}
9609 
9610 	return capable;
9611 }

void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9613 					struct edid *edid)
9614 {
9615 	int i;
9616 	bool edid_check_required;
9617 	struct detailed_timing *timing;
9618 	struct detailed_non_pixel *data;
9619 	struct detailed_data_monitor_range *range;
9620 	struct amdgpu_dm_connector *amdgpu_dm_connector =
9621 			to_amdgpu_dm_connector(connector);
9622 	struct dm_connector_state *dm_con_state = NULL;
9623 
9624 	struct drm_device *dev = connector->dev;
9625 	struct amdgpu_device *adev = drm_to_adev(dev);
9626 	bool freesync_capable = false;
9627 
9628 	if (!connector->state) {
9629 		DRM_ERROR("%s - Connector has no state", __func__);
9630 		goto update;
9631 	}
9632 
9633 	if (!edid) {
9634 		dm_con_state = to_dm_connector_state(connector->state);
9635 
9636 		amdgpu_dm_connector->min_vfreq = 0;
9637 		amdgpu_dm_connector->max_vfreq = 0;
9638 		amdgpu_dm_connector->pixel_clock_mhz = 0;
9639 
9640 		goto update;
9641 	}
9642 
9643 	dm_con_state = to_dm_connector_state(connector->state);
9644 
9645 	edid_check_required = false;
9646 	if (!amdgpu_dm_connector->dc_sink) {
9647 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9648 		goto update;
9649 	}
9650 	if (!adev->dm.freesync_module)
9651 		goto update;
9652 	/*
9653 	 * if edid non zero restrict freesync only for dp and edp
9654 	 */
9655 	if (edid) {
9656 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9657 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9658 			edid_check_required = is_dp_capable_without_timing_msa(
9659 						adev->dm.dc,
9660 						amdgpu_dm_connector);
9661 		}
9662 	}
	if (edid_check_required && (edid->version > 1 ||
9664 	   (edid->version == 1 && edid->revision > 1))) {
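		/*
		 * Scan the four detailed timing / display descriptor slots
		 * for a monitor range limits descriptor; only EDIDs newer
		 * than 1.1 (checked above) are considered here.
		 */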
9665 		for (i = 0; i < 4; i++) {
9666 
9667 			timing	= &edid->detailed_timings[i];
9668 			data	= &timing->data.other_data;
9669 			range	= &data->data.range;
9670 			/*
9671 			 * Check if monitor has continuous frequency mode
9672 			 */
9673 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
9674 				continue;
9675 			/*
9676 			 * Check for flag range limits only. If flag == 1 then
9677 			 * no additional timing information provided.
9678 			 * Default GTF, GTF Secondary curve and CVT are not
9679 			 * supported
9680 			 */
9681 			if (range->flags != 1)
9682 				continue;
9683 
9684 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9685 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9686 			amdgpu_dm_connector->pixel_clock_mhz =
9687 				range->pixel_clock_mhz * 10;
9688 
9689 			connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
9690 			connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
9691 
9692 			break;
9693 		}
9694 
		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10)
			freesync_capable = true;
9700 	}
9701 
9702 update:
9703 	if (dm_con_state)
9704 		dm_con_state->freesync_capable = freesync_capable;
9705 
9706 	if (connector->vrr_capable_property)
9707 		drm_connector_set_vrr_capable_property(connector,
9708 						       freesync_capable);
9709 }
9710 
9711 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9712 {
9713 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9714 
9715 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9716 		return;
9717 	if (link->type == dc_connection_none)
9718 		return;
9719 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9720 					dpcd_data, sizeof(dpcd_data))) {
9721 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9722 
9723 		if (dpcd_data[0] == 0) {
9724 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9725 			link->psr_settings.psr_feature_enabled = false;
9726 		} else {
9727 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9728 			link->psr_settings.psr_feature_enabled = true;
9729 		}
9730 
9731 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9732 	}
9733 }
9734 
9735 /*
9736  * amdgpu_dm_link_setup_psr() - configure psr link
9737  * @stream: stream state
9738  *
9739  * Return: true if success
9740  */
9741 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9742 {
9743 	struct dc_link *link = NULL;
9744 	struct psr_config psr_config = {0};
9745 	struct psr_context psr_context = {0};
9746 	bool ret = false;
9747 
9748 	if (stream == NULL)
9749 		return false;
9750 
9751 	link = stream->link;
9752 
9753 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9754 
9755 	if (psr_config.psr_version > 0) {
9756 		psr_config.psr_exit_link_training_required = 0x1;
9757 		psr_config.psr_frame_capture_indication_req = 0;
9758 		psr_config.psr_rfb_setup_time = 0x37;
9759 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9760 		psr_config.allow_smu_optimizations = 0x0;
9761 
		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
	}
9765 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
9766 
9767 	return ret;
9768 }
9769 
9770 /*
9771  * amdgpu_dm_psr_enable() - enable psr f/w
9772  * @stream: stream state
9773  *
9774  * Return: true if success
9775  */
9776 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9777 {
9778 	struct dc_link *link = stream->link;
9779 	unsigned int vsync_rate_hz = 0;
9780 	struct dc_static_screen_params params = {0};
	/*
	 * Calculate the number of static frames before generating an
	 * interrupt to enter PSR. Initialize with a fail-safe of 2 static
	 * frames.
	 */
	unsigned int num_frames_static = 2;
9786 
9787 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9788 
9789 	vsync_rate_hz = div64_u64(div64_u64((
9790 			stream->timing.pix_clk_100hz * 100),
9791 			stream->timing.v_total),
9792 			stream->timing.h_total);
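	/*
	 * e.g. a 1920x1080@60 timing: pix_clk_100hz = 1485000, so
	 * 148500000 / 1125 / 2200 = 60 Hz.
	 */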
9793 
	/*
	 * Round up: calculate the number of frames such that at least 30 ms
	 * of time has passed.
	 */
9798 	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		num_frames_static = (30000 / frame_time_microsec) + 1;
9801 	}
9802 
9803 	params.triggers.cursor_update = true;
9804 	params.triggers.overlay_update = true;
9805 	params.triggers.surface_update = true;
9806 	params.num_frames = num_frames_static;
9807 
9808 	dc_stream_set_static_screen_params(link->ctx->dc,
9809 					   &stream, 1,
9810 					   &params);
9811 
9812 	return dc_link_set_psr_allow_active(link, true, false, false);
9813 }
9814 
9815 /*
9816  * amdgpu_dm_psr_disable() - disable psr f/w
9817  * @stream:  stream state
9818  *
9819  * Return: true if success
9820  */
9821 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9822 {
9823 
9824 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9825 
9826 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
9827 }
9828 
9829 /*
9830  * amdgpu_dm_psr_disable() - disable psr f/w
9831  * if psr is enabled on any stream
9832  *
9833  * Return: true if success
9834  */
9835 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9836 {
9837 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9838 	return dc_set_psr_allow_active(dm->dc, false);
9839 }
9840 
9841 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9842 {
9843 	struct amdgpu_device *adev = drm_to_adev(dev);
9844 	struct dc *dc = adev->dm.dc;
9845 	int i;
9846 
9847 	mutex_lock(&adev->dm.dc_lock);
9848 	if (dc->current_state) {
9849 		for (i = 0; i < dc->current_state->stream_count; ++i)
9850 			dc->current_state->streams[i]
9851 				->triggered_crtc_reset.enabled =
9852 				adev->dm.force_timing_sync;
9853 
9854 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9855 		dc_trigger_sync(dc, dc->current_state);
9856 	}
9857 	mutex_unlock(&adev->dm.dc_lock);
9858 }
9859 
9860 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9861 		       uint32_t value, const char *func_name)
9862 {
9863 #ifdef DM_CHECK_ADDR_0
9864 	if (address == 0) {
9865 		DC_ERR("invalid register write. address = 0");
9866 		return;
9867 	}
9868 #endif
9869 	cgs_write_register(ctx->cgs_device, address, value);
9870 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9871 }
9872 
9873 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9874 			  const char *func_name)
9875 {
9876 	uint32_t value;
9877 #ifdef DM_CHECK_ADDR_0
9878 	if (address == 0) {
9879 		DC_ERR("invalid register read; address = 0\n");
9880 		return 0;
9881 	}
9882 #endif
9883 
9884 	if (ctx->dmub_srv &&
9885 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9886 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
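		/*
		 * Reads cannot be serviced while the DMUB register helper is
		 * gathering an offloaded write burst; flag it and return a
		 * dummy value.
		 */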
9887 		ASSERT(false);
9888 		return 0;
9889 	}
9890 
9891 	value = cgs_read_register(ctx->cgs_device, address);
9892 
9893 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
9894 
9895 	return value;
9896 }
9897