1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "dc/dc_edid_parser.h"
38 #include "amdgpu_dm_trace.h"
39 
40 #include "vid.h"
41 #include "amdgpu.h"
42 #include "amdgpu_display.h"
43 #include "amdgpu_ucode.h"
44 #include "atom.h"
45 #include "amdgpu_dm.h"
46 #ifdef CONFIG_DRM_AMD_DC_HDCP
47 #include "amdgpu_dm_hdcp.h"
48 #include <drm/drm_hdcp.h>
49 #endif
50 #include "amdgpu_pm.h"
51 
52 #include "amd_shared.h"
53 #include "amdgpu_dm_irq.h"
54 #include "dm_helpers.h"
55 #include "amdgpu_dm_mst_types.h"
56 #if defined(CONFIG_DEBUG_FS)
57 #include "amdgpu_dm_debugfs.h"
58 #endif
59 
60 #include "ivsrcid/ivsrcid_vislands30.h"
61 
62 #include <linux/module.h>
63 #include <linux/moduleparam.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
69 
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82 
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87 
88 #include "soc15_common.h"
89 #endif
90 
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94 
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107 
108 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110 
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113 
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116 
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119 
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129 
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
134 
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 {
137 	switch (link->dpcd_caps.dongle_type) {
138 	case DISPLAY_DONGLE_NONE:
139 		return DRM_MODE_SUBCONNECTOR_Native;
140 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 		return DRM_MODE_SUBCONNECTOR_VGA;
142 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 		return DRM_MODE_SUBCONNECTOR_DVID;
145 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 		return DRM_MODE_SUBCONNECTOR_HDMIA;
148 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 	default:
150 		return DRM_MODE_SUBCONNECTOR_Unknown;
151 	}
152 }
153 
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 {
156 	struct dc_link *link = aconnector->dc_link;
157 	struct drm_connector *connector = &aconnector->base;
158 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 
160 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
161 		return;
162 
163 	if (aconnector->dc_sink)
164 		subconnector = get_subconnector_type(link);
165 
166 	drm_object_property_set_value(&connector->base,
167 			connector->dev->mode_config.dp_subconnector_property,
168 			subconnector);
169 }
170 
171 /*
172  * initializes drm_device display related structures, based on the information
173  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
174  * drm_encoder, drm_mode_config
175  *
176  * Returns 0 on success
177  */
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 				struct drm_plane *plane,
184 				unsigned long possible_crtcs,
185 				const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 			       struct drm_plane *plane,
188 			       uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
191 				    uint32_t link_index,
192 				    struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 				  struct amdgpu_encoder *aencoder,
195 				  uint32_t link_index);
196 
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 				  struct drm_atomic_state *state);
203 
204 static void handle_cursor_update(struct drm_plane *plane,
205 				 struct drm_plane_state *old_plane_state);
206 
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212 
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
215 
216 static bool
217 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
218 				 struct drm_crtc_state *new_crtc_state);
219 /*
220  * dm_vblank_get_counter
221  *
222  * @brief
223  * Get counter for number of vertical blanks
224  *
225  * @param
226  * struct amdgpu_device *adev - [in] desired amdgpu device
227  * int disp_idx - [in] which CRTC to get the counter from
228  *
229  * @return
230  * Counter for vertical blanks
231  */
232 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
233 {
234 	if (crtc >= adev->mode_info.num_crtc)
235 		return 0;
236 	else {
237 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
238 
239 		if (acrtc->dm_irq_params.stream == NULL) {
240 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 				  crtc);
242 			return 0;
243 		}
244 
245 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
246 	}
247 }
248 
249 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
250 				  u32 *vbl, u32 *position)
251 {
252 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
253 
254 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
255 		return -EINVAL;
256 	else {
257 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258 
259 		if (acrtc->dm_irq_params.stream ==  NULL) {
260 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261 				  crtc);
262 			return 0;
263 		}
264 
265 		/*
266 		 * TODO rework base driver to use values directly.
267 		 * for now parse it back into reg-format
268 		 */
269 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
270 					 &v_blank_start,
271 					 &v_blank_end,
272 					 &h_position,
273 					 &v_position);
274 
275 		*position = v_position | (h_position << 16);
276 		*vbl = v_blank_start | (v_blank_end << 16);
277 	}
278 
279 	return 0;
280 }
281 
282 static bool dm_is_idle(void *handle)
283 {
284 	/* XXX todo */
285 	return true;
286 }
287 
288 static int dm_wait_for_idle(void *handle)
289 {
290 	/* XXX todo */
291 	return 0;
292 }
293 
294 static bool dm_check_soft_reset(void *handle)
295 {
296 	return false;
297 }
298 
299 static int dm_soft_reset(void *handle)
300 {
301 	/* XXX todo */
302 	return 0;
303 }
304 
305 static struct amdgpu_crtc *
306 get_crtc_by_otg_inst(struct amdgpu_device *adev,
307 		     int otg_inst)
308 {
309 	struct drm_device *dev = adev_to_drm(adev);
310 	struct drm_crtc *crtc;
311 	struct amdgpu_crtc *amdgpu_crtc;
312 
313 	if (otg_inst == -1) {
314 		WARN_ON(1);
315 		return adev->mode_info.crtcs[0];
316 	}
317 
318 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
319 		amdgpu_crtc = to_amdgpu_crtc(crtc);
320 
321 		if (amdgpu_crtc->otg_inst == otg_inst)
322 			return amdgpu_crtc;
323 	}
324 
325 	return NULL;
326 }
327 
328 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
329 {
330 	return acrtc->dm_irq_params.freesync_config.state ==
331 		       VRR_STATE_ACTIVE_VARIABLE ||
332 	       acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_FIXED;
334 }
335 
336 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
337 {
338 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
339 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 }
341 
342 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343 					      struct dm_crtc_state *new_state)
344 {
345 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
346 		return true;
347 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
348 		return true;
349 	else
350 		return false;
351 }
352 
353 /**
354  * dm_pflip_high_irq() - Handle pageflip interrupt
355  * @interrupt_params: ignored
356  *
357  * Handles the pageflip interrupt by notifying all interested parties
358  * that the pageflip has been completed.
359  */
360 static void dm_pflip_high_irq(void *interrupt_params)
361 {
362 	struct amdgpu_crtc *amdgpu_crtc;
363 	struct common_irq_params *irq_params = interrupt_params;
364 	struct amdgpu_device *adev = irq_params->adev;
365 	unsigned long flags;
366 	struct drm_pending_vblank_event *e;
367 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
368 	bool vrr_active;
369 
370 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
371 
372 	/* IRQ could occur when in initial stage */
373 	/* TODO work and BO cleanup */
374 	if (amdgpu_crtc == NULL) {
375 		DC_LOG_PFLIP("CRTC is null, returning.\n");
376 		return;
377 	}
378 
379 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
380 
381 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
382 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
383 						 amdgpu_crtc->pflip_status,
384 						 AMDGPU_FLIP_SUBMITTED,
385 						 amdgpu_crtc->crtc_id,
386 						 amdgpu_crtc);
387 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
388 		return;
389 	}
390 
391 	/* page flip completed. */
392 	e = amdgpu_crtc->event;
393 	amdgpu_crtc->event = NULL;
394 
395 	if (!e)
396 		WARN_ON(1);
397 
398 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
399 
400 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
401 	if (!vrr_active ||
402 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
403 				      &v_blank_end, &hpos, &vpos) ||
404 	    (vpos < v_blank_start)) {
405 		/* Update to correct count and vblank timestamp if racing with
406 		 * vblank irq. This also updates to the correct vblank timestamp
407 		 * even in VRR mode, as scanout is past the front-porch atm.
408 		 */
409 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
410 
411 		/* Wake up userspace by sending the pageflip event with proper
412 		 * count and timestamp of vblank of flip completion.
413 		 */
414 		if (e) {
415 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
416 
417 			/* Event sent, so done with vblank for this flip */
418 			drm_crtc_vblank_put(&amdgpu_crtc->base);
419 		}
420 	} else if (e) {
421 		/* VRR active and inside front-porch: vblank count and
422 		 * timestamp for pageflip event will only be up to date after
423 		 * drm_crtc_handle_vblank() has been executed from late vblank
424 		 * irq handler after start of back-porch (vline 0). We queue the
425 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
426 		 * updated timestamp and count, once it runs after us.
427 		 *
428 		 * We need to open-code this instead of using the helper
429 		 * drm_crtc_arm_vblank_event(), as that helper would
430 		 * call drm_crtc_accurate_vblank_count(), which we must
431 		 * not call in VRR mode while we are in front-porch!
432 		 */
433 
434 		/* sequence will be replaced by real count during send-out. */
435 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
436 		e->pipe = amdgpu_crtc->crtc_id;
437 
438 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
439 		e = NULL;
440 	}
441 
442 	/* Keep track of vblank of this flip for flip throttling. We use the
443 	 * cooked hw counter, as that one incremented at start of this vblank
444 	 * of pageflip completion, so last_flip_vblank is the forbidden count
445 	 * for queueing new pageflips if vsync + VRR is enabled.
446 	 */
447 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
448 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
449 
450 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
451 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
452 
453 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
454 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
455 		     vrr_active, (int) !e);
456 }
457 
458 static void dm_vupdate_high_irq(void *interrupt_params)
459 {
460 	struct common_irq_params *irq_params = interrupt_params;
461 	struct amdgpu_device *adev = irq_params->adev;
462 	struct amdgpu_crtc *acrtc;
463 	struct drm_device *drm_dev;
464 	struct drm_vblank_crtc *vblank;
465 	ktime_t frame_duration_ns, previous_timestamp;
466 	unsigned long flags;
467 	int vrr_active;
468 
469 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
470 
471 	if (acrtc) {
472 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
473 		drm_dev = acrtc->base.dev;
474 		vblank = &drm_dev->vblank[acrtc->base.index];
475 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
476 		frame_duration_ns = vblank->time - previous_timestamp;
477 
478 		if (frame_duration_ns > 0) {
479 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
480 						frame_duration_ns,
481 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
482 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
483 		}
484 
485 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
486 			      acrtc->crtc_id,
487 			      vrr_active);
488 
489 		/* Core vblank handling is done here after end of front-porch in
490 		 * vrr mode, as vblank timestamping will give valid results
491 		 * while now done after front-porch. This will also deliver
492 		 * page-flip completion events that have been queued to us
493 		 * if a pageflip happened inside front-porch.
494 		 */
495 		if (vrr_active) {
496 			drm_crtc_handle_vblank(&acrtc->base);
497 
498 			/* BTR processing for pre-DCE12 ASICs */
499 			if (acrtc->dm_irq_params.stream &&
500 			    adev->family < AMDGPU_FAMILY_AI) {
501 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
502 				mod_freesync_handle_v_update(
503 				    adev->dm.freesync_module,
504 				    acrtc->dm_irq_params.stream,
505 				    &acrtc->dm_irq_params.vrr_params);
506 
507 				dc_stream_adjust_vmin_vmax(
508 				    adev->dm.dc,
509 				    acrtc->dm_irq_params.stream,
510 				    &acrtc->dm_irq_params.vrr_params.adjust);
511 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
512 			}
513 		}
514 	}
515 }
516 
517 /**
518  * dm_crtc_high_irq() - Handles CRTC interrupt
519  * @interrupt_params: used for determining the CRTC instance
520  *
521  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
522  * event handler.
523  */
524 static void dm_crtc_high_irq(void *interrupt_params)
525 {
526 	struct common_irq_params *irq_params = interrupt_params;
527 	struct amdgpu_device *adev = irq_params->adev;
528 	struct amdgpu_crtc *acrtc;
529 	unsigned long flags;
530 	int vrr_active;
531 
532 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
533 	if (!acrtc)
534 		return;
535 
536 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
537 
538 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
539 		      vrr_active, acrtc->dm_irq_params.active_planes);
540 
541 	/**
542 	 * Core vblank handling at start of front-porch is only possible
543 	 * in non-vrr mode, as only there vblank timestamping will give
544 	 * valid results while done in front-porch. Otherwise defer it
545 	 * to dm_vupdate_high_irq after end of front-porch.
546 	 */
547 	if (!vrr_active)
548 		drm_crtc_handle_vblank(&acrtc->base);
549 
550 	/**
551 	 * Following stuff must happen at start of vblank, for crc
552 	 * computation and below-the-range btr support in vrr mode.
553 	 */
554 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
555 
556 	/* BTR updates need to happen before VUPDATE on Vega and above. */
557 	if (adev->family < AMDGPU_FAMILY_AI)
558 		return;
559 
560 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
561 
562 	if (acrtc->dm_irq_params.stream &&
563 	    acrtc->dm_irq_params.vrr_params.supported &&
564 	    acrtc->dm_irq_params.freesync_config.state ==
565 		    VRR_STATE_ACTIVE_VARIABLE) {
566 		mod_freesync_handle_v_update(adev->dm.freesync_module,
567 					     acrtc->dm_irq_params.stream,
568 					     &acrtc->dm_irq_params.vrr_params);
569 
570 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
571 					   &acrtc->dm_irq_params.vrr_params.adjust);
572 	}
573 
574 	/*
575 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
576 	 * In that case, pageflip completion interrupts won't fire and pageflip
577 	 * completion events won't get delivered. Prevent this by sending
578 	 * pending pageflip events from here if a flip is still pending.
579 	 *
580 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
581 	 * avoid race conditions between flip programming and completion,
582 	 * which could cause too early flip completion events.
583 	 */
584 	if (adev->family >= AMDGPU_FAMILY_RV &&
585 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
586 	    acrtc->dm_irq_params.active_planes == 0) {
587 		if (acrtc->event) {
588 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
589 			acrtc->event = NULL;
590 			drm_crtc_vblank_put(&acrtc->base);
591 		}
592 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
593 	}
594 
595 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
596 }
597 
598 #if defined(CONFIG_DRM_AMD_DC_DCN)
599 /**
600  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
601  * DCN generation ASICs
602  * @interrupt params - interrupt parameters
603  *
604  * Used to set crc window/read out crc value at vertical line 0 position
605  */
606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
607 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
608 {
609 	struct common_irq_params *irq_params = interrupt_params;
610 	struct amdgpu_device *adev = irq_params->adev;
611 	struct amdgpu_crtc *acrtc;
612 
613 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
614 
615 	if (!acrtc)
616 		return;
617 
618 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
619 }
620 #endif
621 #endif
622 
623 static int dm_set_clockgating_state(void *handle,
624 		  enum amd_clockgating_state state)
625 {
626 	return 0;
627 }
628 
629 static int dm_set_powergating_state(void *handle,
630 		  enum amd_powergating_state state)
631 {
632 	return 0;
633 }
634 
635 /* Prototypes of private functions */
636 static int dm_early_init(void* handle);
637 
638 /* Allocate memory for FBC compressed data  */
639 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
640 {
641 	struct drm_device *dev = connector->dev;
642 	struct amdgpu_device *adev = drm_to_adev(dev);
643 	struct dm_compressor_info *compressor = &adev->dm.compressor;
644 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
645 	struct drm_display_mode *mode;
646 	unsigned long max_size = 0;
647 
648 	if (adev->dm.dc->fbc_compressor == NULL)
649 		return;
650 
651 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
652 		return;
653 
654 	if (compressor->bo_ptr)
655 		return;
656 
657 
658 	list_for_each_entry(mode, &connector->modes, head) {
659 		if (max_size < mode->htotal * mode->vtotal)
660 			max_size = mode->htotal * mode->vtotal;
661 	}
662 
663 	if (max_size) {
664 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
665 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
666 			    &compressor->gpu_addr, &compressor->cpu_addr);
667 
668 		if (r)
669 			DRM_ERROR("DM: Failed to initialize FBC\n");
670 		else {
671 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
672 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
673 		}
674 
675 	}
676 
677 }
678 
679 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
680 					  int pipe, bool *enabled,
681 					  unsigned char *buf, int max_bytes)
682 {
683 	struct drm_device *dev = dev_get_drvdata(kdev);
684 	struct amdgpu_device *adev = drm_to_adev(dev);
685 	struct drm_connector *connector;
686 	struct drm_connector_list_iter conn_iter;
687 	struct amdgpu_dm_connector *aconnector;
688 	int ret = 0;
689 
690 	*enabled = false;
691 
692 	mutex_lock(&adev->dm.audio_lock);
693 
694 	drm_connector_list_iter_begin(dev, &conn_iter);
695 	drm_for_each_connector_iter(connector, &conn_iter) {
696 		aconnector = to_amdgpu_dm_connector(connector);
697 		if (aconnector->audio_inst != port)
698 			continue;
699 
700 		*enabled = true;
701 		ret = drm_eld_size(connector->eld);
702 		memcpy(buf, connector->eld, min(max_bytes, ret));
703 
704 		break;
705 	}
706 	drm_connector_list_iter_end(&conn_iter);
707 
708 	mutex_unlock(&adev->dm.audio_lock);
709 
710 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
711 
712 	return ret;
713 }
714 
715 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
716 	.get_eld = amdgpu_dm_audio_component_get_eld,
717 };
718 
719 static int amdgpu_dm_audio_component_bind(struct device *kdev,
720 				       struct device *hda_kdev, void *data)
721 {
722 	struct drm_device *dev = dev_get_drvdata(kdev);
723 	struct amdgpu_device *adev = drm_to_adev(dev);
724 	struct drm_audio_component *acomp = data;
725 
726 	acomp->ops = &amdgpu_dm_audio_component_ops;
727 	acomp->dev = kdev;
728 	adev->dm.audio_component = acomp;
729 
730 	return 0;
731 }
732 
733 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
734 					  struct device *hda_kdev, void *data)
735 {
736 	struct drm_device *dev = dev_get_drvdata(kdev);
737 	struct amdgpu_device *adev = drm_to_adev(dev);
738 	struct drm_audio_component *acomp = data;
739 
740 	acomp->ops = NULL;
741 	acomp->dev = NULL;
742 	adev->dm.audio_component = NULL;
743 }
744 
745 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
746 	.bind	= amdgpu_dm_audio_component_bind,
747 	.unbind	= amdgpu_dm_audio_component_unbind,
748 };
749 
750 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
751 {
752 	int i, ret;
753 
754 	if (!amdgpu_audio)
755 		return 0;
756 
757 	adev->mode_info.audio.enabled = true;
758 
759 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
760 
761 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
762 		adev->mode_info.audio.pin[i].channels = -1;
763 		adev->mode_info.audio.pin[i].rate = -1;
764 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
765 		adev->mode_info.audio.pin[i].status_bits = 0;
766 		adev->mode_info.audio.pin[i].category_code = 0;
767 		adev->mode_info.audio.pin[i].connected = false;
768 		adev->mode_info.audio.pin[i].id =
769 			adev->dm.dc->res_pool->audios[i]->inst;
770 		adev->mode_info.audio.pin[i].offset = 0;
771 	}
772 
773 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
774 	if (ret < 0)
775 		return ret;
776 
777 	adev->dm.audio_registered = true;
778 
779 	return 0;
780 }
781 
782 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
783 {
784 	if (!amdgpu_audio)
785 		return;
786 
787 	if (!adev->mode_info.audio.enabled)
788 		return;
789 
790 	if (adev->dm.audio_registered) {
791 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
792 		adev->dm.audio_registered = false;
793 	}
794 
795 	/* TODO: Disable audio? */
796 
797 	adev->mode_info.audio.enabled = false;
798 }
799 
800 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
801 {
802 	struct drm_audio_component *acomp = adev->dm.audio_component;
803 
804 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
805 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
806 
807 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
808 						 pin, -1);
809 	}
810 }
811 
812 static int dm_dmub_hw_init(struct amdgpu_device *adev)
813 {
814 	const struct dmcub_firmware_header_v1_0 *hdr;
815 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
816 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
817 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
818 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
819 	struct abm *abm = adev->dm.dc->res_pool->abm;
820 	struct dmub_srv_hw_params hw_params;
821 	enum dmub_status status;
822 	const unsigned char *fw_inst_const, *fw_bss_data;
823 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
824 	bool has_hw_support;
825 
826 	if (!dmub_srv)
827 		/* DMUB isn't supported on the ASIC. */
828 		return 0;
829 
830 	if (!fb_info) {
831 		DRM_ERROR("No framebuffer info for DMUB service.\n");
832 		return -EINVAL;
833 	}
834 
835 	if (!dmub_fw) {
836 		/* Firmware required for DMUB support. */
837 		DRM_ERROR("No firmware provided for DMUB.\n");
838 		return -EINVAL;
839 	}
840 
841 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
842 	if (status != DMUB_STATUS_OK) {
843 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
844 		return -EINVAL;
845 	}
846 
847 	if (!has_hw_support) {
848 		DRM_INFO("DMUB unsupported on ASIC\n");
849 		return 0;
850 	}
851 
852 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
853 
854 	fw_inst_const = dmub_fw->data +
855 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
856 			PSP_HEADER_BYTES;
857 
858 	fw_bss_data = dmub_fw->data +
859 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
860 		      le32_to_cpu(hdr->inst_const_bytes);
861 
862 	/* Copy firmware and bios info into FB memory. */
863 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
864 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
865 
866 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
867 
868 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
869 	 * amdgpu_ucode_init_single_fw will load dmub firmware
870 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
871 	 * will be done by dm_dmub_hw_init
872 	 */
873 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
874 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
875 				fw_inst_const_size);
876 	}
877 
878 	if (fw_bss_data_size)
879 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
880 		       fw_bss_data, fw_bss_data_size);
881 
882 	/* Copy firmware bios info into FB memory. */
883 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
884 	       adev->bios_size);
885 
886 	/* Reset regions that need to be reset. */
887 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
888 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
889 
890 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
891 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
892 
893 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
894 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
895 
896 	/* Initialize hardware. */
897 	memset(&hw_params, 0, sizeof(hw_params));
898 	hw_params.fb_base = adev->gmc.fb_start;
899 	hw_params.fb_offset = adev->gmc.aper_base;
900 
901 	/* backdoor load firmware and trigger dmub running */
902 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
903 		hw_params.load_inst_const = true;
904 
905 	if (dmcu)
906 		hw_params.psp_version = dmcu->psp_version;
907 
908 	for (i = 0; i < fb_info->num_fb; ++i)
909 		hw_params.fb[i] = &fb_info->fb[i];
910 
911 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
912 	if (status != DMUB_STATUS_OK) {
913 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
914 		return -EINVAL;
915 	}
916 
917 	/* Wait for firmware load to finish. */
918 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
919 	if (status != DMUB_STATUS_OK)
920 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
921 
922 	/* Init DMCU and ABM if available. */
923 	if (dmcu && abm) {
924 		dmcu->funcs->dmcu_init(dmcu);
925 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
926 	}
927 
928 	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
929 	if (!adev->dm.dc->ctx->dmub_srv) {
930 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
931 		return -ENOMEM;
932 	}
933 
934 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
935 		 adev->dm.dmcub_fw_version);
936 
937 	return 0;
938 }
939 
940 #if defined(CONFIG_DRM_AMD_DC_DCN)
941 #define DMUB_TRACE_MAX_READ 64
942 static void dm_dmub_trace_high_irq(void *interrupt_params)
943 {
944 	struct common_irq_params *irq_params = interrupt_params;
945 	struct amdgpu_device *adev = irq_params->adev;
946 	struct amdgpu_display_manager *dm = &adev->dm;
947 	struct dmcub_trace_buf_entry entry = { 0 };
948 	uint32_t count = 0;
949 
950 	do {
951 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
952 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
953 							entry.param0, entry.param1);
954 
955 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
956 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
957 		} else
958 			break;
959 
960 		count++;
961 
962 	} while (count <= DMUB_TRACE_MAX_READ);
963 
964 	ASSERT(count <= DMUB_TRACE_MAX_READ);
965 }
966 
967 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
968 {
969 	uint64_t pt_base;
970 	uint32_t logical_addr_low;
971 	uint32_t logical_addr_high;
972 	uint32_t agp_base, agp_bot, agp_top;
973 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
974 
975 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
976 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
977 
978 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
979 		/*
980 		 * Raven2 has a HW issue that it is unable to use the vram which
981 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
982 		 * workaround that increase system aperture high address (add 1)
983 		 * to get rid of the VM fault and hardware hang.
984 		 */
985 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
986 	else
987 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
988 
989 	agp_base = 0;
990 	agp_bot = adev->gmc.agp_start >> 24;
991 	agp_top = adev->gmc.agp_end >> 24;
992 
993 
994 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
995 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
996 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
997 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
998 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
999 	page_table_base.low_part = lower_32_bits(pt_base);
1000 
1001 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1002 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1003 
1004 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1005 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1006 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1007 
1008 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1009 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1010 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1011 
1012 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1013 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1014 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1015 
1016 	pa_config->is_hvm_enabled = 0;
1017 
1018 }
1019 #endif
1020 #if defined(CONFIG_DRM_AMD_DC_DCN)
1021 static void event_mall_stutter(struct work_struct *work)
1022 {
1023 
1024 	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
1025 	struct amdgpu_display_manager *dm = vblank_work->dm;
1026 
1027 	mutex_lock(&dm->dc_lock);
1028 
1029 	if (vblank_work->enable)
1030 		dm->active_vblank_irq_count++;
1031 	else if(dm->active_vblank_irq_count)
1032 		dm->active_vblank_irq_count--;
1033 
1034 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1035 
1036 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1037 
1038 	mutex_unlock(&dm->dc_lock);
1039 }
1040 
1041 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
1042 {
1043 
1044 	int max_caps = dc->caps.max_links;
1045 	struct vblank_workqueue *vblank_work;
1046 	int i = 0;
1047 
1048 	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
1049 	if (ZERO_OR_NULL_PTR(vblank_work)) {
1050 		kfree(vblank_work);
1051 		return NULL;
1052 	}
1053 
1054 	for (i = 0; i < max_caps; i++)
1055 		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
1056 
1057 	return vblank_work;
1058 }
1059 #endif
1060 static int amdgpu_dm_init(struct amdgpu_device *adev)
1061 {
1062 	struct dc_init_data init_data;
1063 #ifdef CONFIG_DRM_AMD_DC_HDCP
1064 	struct dc_callback_init init_params;
1065 #endif
1066 	int r;
1067 
1068 	adev->dm.ddev = adev_to_drm(adev);
1069 	adev->dm.adev = adev;
1070 
1071 	/* Zero all the fields */
1072 	memset(&init_data, 0, sizeof(init_data));
1073 #ifdef CONFIG_DRM_AMD_DC_HDCP
1074 	memset(&init_params, 0, sizeof(init_params));
1075 #endif
1076 
1077 	mutex_init(&adev->dm.dc_lock);
1078 	mutex_init(&adev->dm.audio_lock);
1079 #if defined(CONFIG_DRM_AMD_DC_DCN)
1080 	spin_lock_init(&adev->dm.vblank_lock);
1081 #endif
1082 
1083 	if(amdgpu_dm_irq_init(adev)) {
1084 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1085 		goto error;
1086 	}
1087 
1088 	init_data.asic_id.chip_family = adev->family;
1089 
1090 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1091 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1092 
1093 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1094 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1095 	init_data.asic_id.atombios_base_address =
1096 		adev->mode_info.atom_context->bios;
1097 
1098 	init_data.driver = adev;
1099 
1100 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1101 
1102 	if (!adev->dm.cgs_device) {
1103 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1104 		goto error;
1105 	}
1106 
1107 	init_data.cgs_device = adev->dm.cgs_device;
1108 
1109 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1110 
1111 	switch (adev->asic_type) {
1112 	case CHIP_CARRIZO:
1113 	case CHIP_STONEY:
1114 	case CHIP_RAVEN:
1115 	case CHIP_RENOIR:
1116 		init_data.flags.gpu_vm_support = true;
1117 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1118 			init_data.flags.disable_dmcu = true;
1119 		break;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1121 	case CHIP_VANGOGH:
1122 		init_data.flags.gpu_vm_support = true;
1123 		break;
1124 #endif
1125 	default:
1126 		break;
1127 	}
1128 
1129 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1130 		init_data.flags.fbc_support = true;
1131 
1132 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1133 		init_data.flags.multi_mon_pp_mclk_switch = true;
1134 
1135 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1136 		init_data.flags.disable_fractional_pwm = true;
1137 
1138 	init_data.flags.power_down_display_on_boot = true;
1139 
1140 	INIT_LIST_HEAD(&adev->dm.da_list);
1141 	/* Display Core create. */
1142 	adev->dm.dc = dc_create(&init_data);
1143 
1144 	if (adev->dm.dc) {
1145 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1146 	} else {
1147 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1148 		goto error;
1149 	}
1150 
1151 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1152 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1153 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1154 	}
1155 
1156 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1157 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1158 
1159 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1160 		adev->dm.dc->debug.disable_stutter = true;
1161 
1162 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1163 		adev->dm.dc->debug.disable_dsc = true;
1164 
1165 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1166 		adev->dm.dc->debug.disable_clock_gate = true;
1167 
1168 	r = dm_dmub_hw_init(adev);
1169 	if (r) {
1170 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1171 		goto error;
1172 	}
1173 
1174 	dc_hardware_init(adev->dm.dc);
1175 
1176 #if defined(CONFIG_DRM_AMD_DC_DCN)
1177 	if (adev->apu_flags) {
1178 		struct dc_phy_addr_space_config pa_config;
1179 
1180 		mmhub_read_system_context(adev, &pa_config);
1181 
1182 		// Call the DC init_memory func
1183 		dc_setup_system_context(adev->dm.dc, &pa_config);
1184 	}
1185 #endif
1186 
1187 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1188 	if (!adev->dm.freesync_module) {
1189 		DRM_ERROR(
1190 		"amdgpu: failed to initialize freesync_module.\n");
1191 	} else
1192 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1193 				adev->dm.freesync_module);
1194 
1195 	amdgpu_dm_init_color_mod();
1196 
1197 #if defined(CONFIG_DRM_AMD_DC_DCN)
1198 	if (adev->dm.dc->caps.max_links > 0) {
1199 		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
1200 
1201 		if (!adev->dm.vblank_workqueue)
1202 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1203 		else
1204 			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
1205 	}
1206 #endif
1207 
1208 #ifdef CONFIG_DRM_AMD_DC_HDCP
1209 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1210 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1211 
1212 		if (!adev->dm.hdcp_workqueue)
1213 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1214 		else
1215 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1216 
1217 		dc_init_callbacks(adev->dm.dc, &init_params);
1218 	}
1219 #endif
1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1221 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1222 #endif
1223 	if (amdgpu_dm_initialize_drm_device(adev)) {
1224 		DRM_ERROR(
1225 		"amdgpu: failed to initialize sw for display support.\n");
1226 		goto error;
1227 	}
1228 
1229 	/* create fake encoders for MST */
1230 	dm_dp_create_fake_mst_encoders(adev);
1231 
1232 	/* TODO: Add_display_info? */
1233 
1234 	/* TODO use dynamic cursor width */
1235 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1236 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1237 
1238 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1239 		DRM_ERROR(
1240 		"amdgpu: failed to initialize sw for display support.\n");
1241 		goto error;
1242 	}
1243 
1244 
1245 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1246 
1247 	return 0;
1248 error:
1249 	amdgpu_dm_fini(adev);
1250 
1251 	return -EINVAL;
1252 }
1253 
1254 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1255 {
1256 	int i;
1257 
1258 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1259 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1260 	}
1261 
1262 	amdgpu_dm_audio_fini(adev);
1263 
1264 	amdgpu_dm_destroy_drm_device(&adev->dm);
1265 
1266 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1267 	if (adev->dm.crc_rd_wrk) {
1268 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1269 		kfree(adev->dm.crc_rd_wrk);
1270 		adev->dm.crc_rd_wrk = NULL;
1271 	}
1272 #endif
1273 #ifdef CONFIG_DRM_AMD_DC_HDCP
1274 	if (adev->dm.hdcp_workqueue) {
1275 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1276 		adev->dm.hdcp_workqueue = NULL;
1277 	}
1278 
1279 	if (adev->dm.dc)
1280 		dc_deinit_callbacks(adev->dm.dc);
1281 #endif
1282 
1283 #if defined(CONFIG_DRM_AMD_DC_DCN)
1284 	if (adev->dm.vblank_workqueue) {
1285 		adev->dm.vblank_workqueue->dm = NULL;
1286 		kfree(adev->dm.vblank_workqueue);
1287 		adev->dm.vblank_workqueue = NULL;
1288 	}
1289 #endif
1290 
1291 	if (adev->dm.dc->ctx->dmub_srv) {
1292 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1293 		adev->dm.dc->ctx->dmub_srv = NULL;
1294 	}
1295 
1296 	if (adev->dm.dmub_bo)
1297 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1298 				      &adev->dm.dmub_bo_gpu_addr,
1299 				      &adev->dm.dmub_bo_cpu_addr);
1300 
1301 	/* DC Destroy TODO: Replace destroy DAL */
1302 	if (adev->dm.dc)
1303 		dc_destroy(&adev->dm.dc);
1304 	/*
1305 	 * TODO: pageflip, vlank interrupt
1306 	 *
1307 	 * amdgpu_dm_irq_fini(adev);
1308 	 */
1309 
1310 	if (adev->dm.cgs_device) {
1311 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1312 		adev->dm.cgs_device = NULL;
1313 	}
1314 	if (adev->dm.freesync_module) {
1315 		mod_freesync_destroy(adev->dm.freesync_module);
1316 		adev->dm.freesync_module = NULL;
1317 	}
1318 
1319 	mutex_destroy(&adev->dm.audio_lock);
1320 	mutex_destroy(&adev->dm.dc_lock);
1321 
1322 	return;
1323 }
1324 
1325 static int load_dmcu_fw(struct amdgpu_device *adev)
1326 {
1327 	const char *fw_name_dmcu = NULL;
1328 	int r;
1329 	const struct dmcu_firmware_header_v1_0 *hdr;
1330 
1331 	switch(adev->asic_type) {
1332 #if defined(CONFIG_DRM_AMD_DC_SI)
1333 	case CHIP_TAHITI:
1334 	case CHIP_PITCAIRN:
1335 	case CHIP_VERDE:
1336 	case CHIP_OLAND:
1337 #endif
1338 	case CHIP_BONAIRE:
1339 	case CHIP_HAWAII:
1340 	case CHIP_KAVERI:
1341 	case CHIP_KABINI:
1342 	case CHIP_MULLINS:
1343 	case CHIP_TONGA:
1344 	case CHIP_FIJI:
1345 	case CHIP_CARRIZO:
1346 	case CHIP_STONEY:
1347 	case CHIP_POLARIS11:
1348 	case CHIP_POLARIS10:
1349 	case CHIP_POLARIS12:
1350 	case CHIP_VEGAM:
1351 	case CHIP_VEGA10:
1352 	case CHIP_VEGA12:
1353 	case CHIP_VEGA20:
1354 	case CHIP_NAVI10:
1355 	case CHIP_NAVI14:
1356 	case CHIP_RENOIR:
1357 	case CHIP_SIENNA_CICHLID:
1358 	case CHIP_NAVY_FLOUNDER:
1359 	case CHIP_DIMGREY_CAVEFISH:
1360 	case CHIP_VANGOGH:
1361 		return 0;
1362 	case CHIP_NAVI12:
1363 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1364 		break;
1365 	case CHIP_RAVEN:
1366 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1367 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1368 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1369 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1370 		else
1371 			return 0;
1372 		break;
1373 	default:
1374 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1375 		return -EINVAL;
1376 	}
1377 
1378 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1379 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1380 		return 0;
1381 	}
1382 
1383 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1384 	if (r == -ENOENT) {
1385 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1386 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1387 		adev->dm.fw_dmcu = NULL;
1388 		return 0;
1389 	}
1390 	if (r) {
1391 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1392 			fw_name_dmcu);
1393 		return r;
1394 	}
1395 
1396 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1397 	if (r) {
1398 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1399 			fw_name_dmcu);
1400 		release_firmware(adev->dm.fw_dmcu);
1401 		adev->dm.fw_dmcu = NULL;
1402 		return r;
1403 	}
1404 
1405 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1406 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1407 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1408 	adev->firmware.fw_size +=
1409 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1410 
1411 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1412 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1413 	adev->firmware.fw_size +=
1414 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1415 
1416 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1417 
1418 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1419 
1420 	return 0;
1421 }
1422 
1423 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1424 {
1425 	struct amdgpu_device *adev = ctx;
1426 
1427 	return dm_read_reg(adev->dm.dc->ctx, address);
1428 }
1429 
1430 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1431 				     uint32_t value)
1432 {
1433 	struct amdgpu_device *adev = ctx;
1434 
1435 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1436 }
1437 
1438 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1439 {
1440 	struct dmub_srv_create_params create_params;
1441 	struct dmub_srv_region_params region_params;
1442 	struct dmub_srv_region_info region_info;
1443 	struct dmub_srv_fb_params fb_params;
1444 	struct dmub_srv_fb_info *fb_info;
1445 	struct dmub_srv *dmub_srv;
1446 	const struct dmcub_firmware_header_v1_0 *hdr;
1447 	const char *fw_name_dmub;
1448 	enum dmub_asic dmub_asic;
1449 	enum dmub_status status;
1450 	int r;
1451 
1452 	switch (adev->asic_type) {
1453 	case CHIP_RENOIR:
1454 		dmub_asic = DMUB_ASIC_DCN21;
1455 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1456 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1457 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1458 		break;
1459 	case CHIP_SIENNA_CICHLID:
1460 		dmub_asic = DMUB_ASIC_DCN30;
1461 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1462 		break;
1463 	case CHIP_NAVY_FLOUNDER:
1464 		dmub_asic = DMUB_ASIC_DCN30;
1465 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1466 		break;
1467 	case CHIP_VANGOGH:
1468 		dmub_asic = DMUB_ASIC_DCN301;
1469 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1470 		break;
1471 	case CHIP_DIMGREY_CAVEFISH:
1472 		dmub_asic = DMUB_ASIC_DCN302;
1473 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1474 		break;
1475 
1476 	default:
1477 		/* ASIC doesn't support DMUB. */
1478 		return 0;
1479 	}
1480 
1481 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1482 	if (r) {
1483 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1484 		return 0;
1485 	}
1486 
1487 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1488 	if (r) {
1489 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1490 		return 0;
1491 	}
1492 
1493 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1494 
1495 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1496 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1497 			AMDGPU_UCODE_ID_DMCUB;
1498 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1499 			adev->dm.dmub_fw;
1500 		adev->firmware.fw_size +=
1501 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1502 
1503 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1504 			 adev->dm.dmcub_fw_version);
1505 	}
1506 
1507 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1508 
1509 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1510 	dmub_srv = adev->dm.dmub_srv;
1511 
1512 	if (!dmub_srv) {
1513 		DRM_ERROR("Failed to allocate DMUB service!\n");
1514 		return -ENOMEM;
1515 	}
1516 
1517 	memset(&create_params, 0, sizeof(create_params));
1518 	create_params.user_ctx = adev;
1519 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1520 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1521 	create_params.asic = dmub_asic;
1522 
1523 	/* Create the DMUB service. */
1524 	status = dmub_srv_create(dmub_srv, &create_params);
1525 	if (status != DMUB_STATUS_OK) {
1526 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1527 		return -EINVAL;
1528 	}
1529 
1530 	/* Calculate the size of all the regions for the DMUB service. */
1531 	memset(&region_params, 0, sizeof(region_params));
1532 
1533 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1534 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1535 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1536 	region_params.vbios_size = adev->bios_size;
1537 	region_params.fw_bss_data = region_params.bss_data_size ?
1538 		adev->dm.dmub_fw->data +
1539 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1540 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1541 	region_params.fw_inst_const =
1542 		adev->dm.dmub_fw->data +
1543 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1544 		PSP_HEADER_BYTES;
1545 
1546 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1547 					   &region_info);
1548 
1549 	if (status != DMUB_STATUS_OK) {
1550 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1551 		return -EINVAL;
1552 	}
1553 
1554 	/*
1555 	 * Allocate a framebuffer based on the total size of all the regions.
1556 	 * TODO: Move this into GART.
1557 	 */
1558 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1559 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1560 				    &adev->dm.dmub_bo_gpu_addr,
1561 				    &adev->dm.dmub_bo_cpu_addr);
1562 	if (r)
1563 		return r;
1564 
1565 	/* Rebase the regions on the framebuffer address. */
1566 	memset(&fb_params, 0, sizeof(fb_params));
1567 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1568 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1569 	fb_params.region_info = &region_info;
1570 
1571 	adev->dm.dmub_fb_info =
1572 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1573 	fb_info = adev->dm.dmub_fb_info;
1574 
1575 	if (!fb_info) {
1576 		DRM_ERROR(
1577 			"Failed to allocate framebuffer info for DMUB service!\n");
1578 		return -ENOMEM;
1579 	}
1580 
1581 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1582 	if (status != DMUB_STATUS_OK) {
1583 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1584 		return -EINVAL;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
1590 static int dm_sw_init(void *handle)
1591 {
1592 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1593 	int r;
1594 
1595 	r = dm_dmub_sw_init(adev);
1596 	if (r)
1597 		return r;
1598 
1599 	return load_dmcu_fw(adev);
1600 }
1601 
1602 static int dm_sw_fini(void *handle)
1603 {
1604 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1605 
1606 	kfree(adev->dm.dmub_fb_info);
1607 	adev->dm.dmub_fb_info = NULL;
1608 
1609 	if (adev->dm.dmub_srv) {
1610 		dmub_srv_destroy(adev->dm.dmub_srv);
1611 		adev->dm.dmub_srv = NULL;
1612 	}
1613 
1614 	release_firmware(adev->dm.dmub_fw);
1615 	adev->dm.dmub_fw = NULL;
1616 
1617 	release_firmware(adev->dm.fw_dmcu);
1618 	adev->dm.fw_dmcu = NULL;
1619 
1620 	return 0;
1621 }
1622 
1623 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1624 {
1625 	struct amdgpu_dm_connector *aconnector;
1626 	struct drm_connector *connector;
1627 	struct drm_connector_list_iter iter;
1628 	int ret = 0;
1629 
1630 	drm_connector_list_iter_begin(dev, &iter);
1631 	drm_for_each_connector_iter(connector, &iter) {
1632 		aconnector = to_amdgpu_dm_connector(connector);
1633 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1634 		    aconnector->mst_mgr.aux) {
1635 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1636 					 aconnector,
1637 					 aconnector->base.base.id);
1638 
1639 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1640 			if (ret < 0) {
1641 				DRM_ERROR("DM_MST: Failed to start MST\n");
1642 				aconnector->dc_link->type =
1643 					dc_connection_single;
1644 				break;
1645 			}
1646 		}
1647 	}
1648 	drm_connector_list_iter_end(&iter);
1649 
1650 	return ret;
1651 }
1652 
1653 static int dm_late_init(void *handle)
1654 {
1655 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656 
1657 	struct dmcu_iram_parameters params;
1658 	unsigned int linear_lut[16];
1659 	int i;
1660 	struct dmcu *dmcu = NULL;
1661 	bool ret = true;
1662 
1663 	dmcu = adev->dm.dc->res_pool->dmcu;
1664 
1665 	for (i = 0; i < 16; i++)
1666 		linear_lut[i] = 0xFFFF * i / 15;
1667 
1668 	params.set = 0;
1669 	params.backlight_ramping_start = 0xCCCC;
1670 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1671 	params.backlight_lut_array_size = 16;
1672 	params.backlight_lut_array = linear_lut;
1673 
1674 	/* Min backlight level after ABM reduction,  Don't allow below 1%
1675 	 * 0xFFFF x 0.01 = 0x28F
1676 	 */
1677 	params.min_abm_backlight = 0x28F;
1678 
1679 	/* In the case where abm is implemented on dmcub,
1680 	 * dmcu object will be null.
1681 	 * ABM 2.4 and up are implemented on dmcub.
1682 	 */
1683 	if (dmcu)
1684 		ret = dmcu_load_iram(dmcu, params);
1685 	else if (adev->dm.dc->ctx->dmub_srv)
1686 		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1687 
1688 	if (!ret)
1689 		return -EINVAL;
1690 
1691 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1692 }
1693 
1694 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1695 {
1696 	struct amdgpu_dm_connector *aconnector;
1697 	struct drm_connector *connector;
1698 	struct drm_connector_list_iter iter;
1699 	struct drm_dp_mst_topology_mgr *mgr;
1700 	int ret;
1701 	bool need_hotplug = false;
1702 
1703 	drm_connector_list_iter_begin(dev, &iter);
1704 	drm_for_each_connector_iter(connector, &iter) {
1705 		aconnector = to_amdgpu_dm_connector(connector);
1706 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
1707 		    aconnector->mst_port)
1708 			continue;
1709 
1710 		mgr = &aconnector->mst_mgr;
1711 
1712 		if (suspend) {
1713 			drm_dp_mst_topology_mgr_suspend(mgr);
1714 		} else {
1715 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1716 			if (ret < 0) {
1717 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
1718 				need_hotplug = true;
1719 			}
1720 		}
1721 	}
1722 	drm_connector_list_iter_end(&iter);
1723 
1724 	if (need_hotplug)
1725 		drm_kms_helper_hotplug_event(dev);
1726 }
1727 
1728 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1729 {
1730 	struct smu_context *smu = &adev->smu;
1731 	int ret = 0;
1732 
1733 	if (!is_support_sw_smu(adev))
1734 		return 0;
1735 
1736 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1737 	 * on window driver dc implementation.
1738 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1739 	 * should be passed to smu during boot up and resume from s3.
1740 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
1741 	 * dcn20_resource_construct
1742 	 * then call pplib functions below to pass the settings to smu:
1743 	 * smu_set_watermarks_for_clock_ranges
1744 	 * smu_set_watermarks_table
1745 	 * navi10_set_watermarks_table
1746 	 * smu_write_watermarks_table
1747 	 *
1748 	 * For Renoir, clock settings of dcn watermark are also fixed values.
1749 	 * dc has implemented different flow for window driver:
1750 	 * dc_hardware_init / dc_set_power_state
1751 	 * dcn10_init_hw
1752 	 * notify_wm_ranges
1753 	 * set_wm_ranges
1754 	 * -- Linux
1755 	 * smu_set_watermarks_for_clock_ranges
1756 	 * renoir_set_watermarks_table
1757 	 * smu_write_watermarks_table
1758 	 *
1759 	 * For Linux,
1760 	 * dc_hardware_init -> amdgpu_dm_init
1761 	 * dc_set_power_state --> dm_resume
1762 	 *
1763 	 * therefore, this function apply to navi10/12/14 but not Renoir
1764 	 * *
1765 	 */
1766 	switch(adev->asic_type) {
1767 	case CHIP_NAVI10:
1768 	case CHIP_NAVI14:
1769 	case CHIP_NAVI12:
1770 		break;
1771 	default:
1772 		return 0;
1773 	}
1774 
1775 	ret = smu_write_watermarks_table(smu);
1776 	if (ret) {
1777 		DRM_ERROR("Failed to update WMTABLE!\n");
1778 		return ret;
1779 	}
1780 
1781 	return 0;
1782 }
1783 
1784 /**
1785  * dm_hw_init() - Initialize DC device
1786  * @handle: The base driver device containing the amdgpu_dm device.
1787  *
1788  * Initialize the &struct amdgpu_display_manager device. This involves calling
1789  * the initializers of each DM component, then populating the struct with them.
1790  *
1791  * Although the function implies hardware initialization, both hardware and
1792  * software are initialized here. Splitting them out to their relevant init
1793  * hooks is a future TODO item.
1794  *
1795  * Some notable things that are initialized here:
1796  *
1797  * - Display Core, both software and hardware
1798  * - DC modules that we need (freesync and color management)
1799  * - DRM software states
1800  * - Interrupt sources and handlers
1801  * - Vblank support
1802  * - Debug FS entries, if enabled
1803  */
1804 static int dm_hw_init(void *handle)
1805 {
1806 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1807 	/* Create DAL display manager */
1808 	amdgpu_dm_init(adev);
1809 	amdgpu_dm_hpd_init(adev);
1810 
1811 	return 0;
1812 }
1813 
1814 /**
1815  * dm_hw_fini() - Teardown DC device
1816  * @handle: The base driver device containing the amdgpu_dm device.
1817  *
1818  * Teardown components within &struct amdgpu_display_manager that require
1819  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1820  * were loaded. Also flush IRQ workqueues and disable them.
1821  */
1822 static int dm_hw_fini(void *handle)
1823 {
1824 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1825 
1826 	amdgpu_dm_hpd_fini(adev);
1827 
1828 	amdgpu_dm_irq_fini(adev);
1829 	amdgpu_dm_fini(adev);
1830 	return 0;
1831 }
1832 
1833 
1834 static int dm_enable_vblank(struct drm_crtc *crtc);
1835 static void dm_disable_vblank(struct drm_crtc *crtc);
1836 
1837 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1838 				 struct dc_state *state, bool enable)
1839 {
1840 	enum dc_irq_source irq_source;
1841 	struct amdgpu_crtc *acrtc;
1842 	int rc = -EBUSY;
1843 	int i = 0;
1844 
1845 	for (i = 0; i < state->stream_count; i++) {
1846 		acrtc = get_crtc_by_otg_inst(
1847 				adev, state->stream_status[i].primary_otg_inst);
1848 
1849 		if (acrtc && state->stream_status[i].plane_count != 0) {
1850 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1851 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1852 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
1853 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
1854 			if (rc)
1855 				DRM_WARN("Failed to %s pflip interrupts\n",
1856 					 enable ? "enable" : "disable");
1857 
1858 			if (enable) {
1859 				rc = dm_enable_vblank(&acrtc->base);
1860 				if (rc)
1861 					DRM_WARN("Failed to enable vblank interrupts\n");
1862 			} else {
1863 				dm_disable_vblank(&acrtc->base);
1864 			}
1865 
1866 		}
1867 	}
1868 
1869 }
1870 
1871 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1872 {
1873 	struct dc_state *context = NULL;
1874 	enum dc_status res = DC_ERROR_UNEXPECTED;
1875 	int i;
1876 	struct dc_stream_state *del_streams[MAX_PIPES];
1877 	int del_streams_count = 0;
1878 
1879 	memset(del_streams, 0, sizeof(del_streams));
1880 
1881 	context = dc_create_state(dc);
1882 	if (context == NULL)
1883 		goto context_alloc_fail;
1884 
1885 	dc_resource_state_copy_construct_current(dc, context);
1886 
1887 	/* First remove from context all streams */
1888 	for (i = 0; i < context->stream_count; i++) {
1889 		struct dc_stream_state *stream = context->streams[i];
1890 
1891 		del_streams[del_streams_count++] = stream;
1892 	}
1893 
1894 	/* Remove all planes for removed streams and then remove the streams */
1895 	for (i = 0; i < del_streams_count; i++) {
1896 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1897 			res = DC_FAIL_DETACH_SURFACES;
1898 			goto fail;
1899 		}
1900 
1901 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1902 		if (res != DC_OK)
1903 			goto fail;
1904 	}
1905 
1906 
1907 	res = dc_validate_global_state(dc, context, false);
1908 
1909 	if (res != DC_OK) {
1910 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1911 		goto fail;
1912 	}
1913 
1914 	res = dc_commit_state(dc, context);
1915 
1916 fail:
1917 	dc_release_state(context);
1918 
1919 context_alloc_fail:
1920 	return res;
1921 }
1922 
1923 static int dm_suspend(void *handle)
1924 {
1925 	struct amdgpu_device *adev = handle;
1926 	struct amdgpu_display_manager *dm = &adev->dm;
1927 	int ret = 0;
1928 
1929 	if (amdgpu_in_reset(adev)) {
1930 		mutex_lock(&dm->dc_lock);
1931 
1932 #if defined(CONFIG_DRM_AMD_DC_DCN)
1933 		dc_allow_idle_optimizations(adev->dm.dc, false);
1934 #endif
1935 
1936 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1937 
1938 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1939 
1940 		amdgpu_dm_commit_zero_streams(dm->dc);
1941 
1942 		amdgpu_dm_irq_suspend(adev);
1943 
1944 		return ret;
1945 	}
1946 
1947 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1948 	amdgpu_dm_crtc_secure_display_suspend(adev);
1949 #endif
1950 	WARN_ON(adev->dm.cached_state);
1951 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1952 
1953 	s3_handle_mst(adev_to_drm(adev), true);
1954 
1955 	amdgpu_dm_irq_suspend(adev);
1956 
1957 
1958 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1959 
1960 	return 0;
1961 }
1962 
1963 static struct amdgpu_dm_connector *
1964 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1965 					     struct drm_crtc *crtc)
1966 {
1967 	uint32_t i;
1968 	struct drm_connector_state *new_con_state;
1969 	struct drm_connector *connector;
1970 	struct drm_crtc *crtc_from_state;
1971 
1972 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1973 		crtc_from_state = new_con_state->crtc;
1974 
1975 		if (crtc_from_state == crtc)
1976 			return to_amdgpu_dm_connector(connector);
1977 	}
1978 
1979 	return NULL;
1980 }
1981 
1982 static void emulated_link_detect(struct dc_link *link)
1983 {
1984 	struct dc_sink_init_data sink_init_data = { 0 };
1985 	struct display_sink_capability sink_caps = { 0 };
1986 	enum dc_edid_status edid_status;
1987 	struct dc_context *dc_ctx = link->ctx;
1988 	struct dc_sink *sink = NULL;
1989 	struct dc_sink *prev_sink = NULL;
1990 
1991 	link->type = dc_connection_none;
1992 	prev_sink = link->local_sink;
1993 
1994 	if (prev_sink)
1995 		dc_sink_release(prev_sink);
1996 
1997 	switch (link->connector_signal) {
1998 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1999 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2000 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2001 		break;
2002 	}
2003 
2004 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2005 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2006 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2007 		break;
2008 	}
2009 
2010 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2011 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2012 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2013 		break;
2014 	}
2015 
2016 	case SIGNAL_TYPE_LVDS: {
2017 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2018 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2019 		break;
2020 	}
2021 
2022 	case SIGNAL_TYPE_EDP: {
2023 		sink_caps.transaction_type =
2024 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2025 		sink_caps.signal = SIGNAL_TYPE_EDP;
2026 		break;
2027 	}
2028 
2029 	case SIGNAL_TYPE_DISPLAY_PORT: {
2030 		sink_caps.transaction_type =
2031 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2032 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2033 		break;
2034 	}
2035 
2036 	default:
2037 		DC_ERROR("Invalid connector type! signal:%d\n",
2038 			link->connector_signal);
2039 		return;
2040 	}
2041 
2042 	sink_init_data.link = link;
2043 	sink_init_data.sink_signal = sink_caps.signal;
2044 
2045 	sink = dc_sink_create(&sink_init_data);
2046 	if (!sink) {
2047 		DC_ERROR("Failed to create sink!\n");
2048 		return;
2049 	}
2050 
2051 	/* dc_sink_create returns a new reference */
2052 	link->local_sink = sink;
2053 
2054 	edid_status = dm_helpers_read_local_edid(
2055 			link->ctx,
2056 			link,
2057 			sink);
2058 
2059 	if (edid_status != EDID_OK)
2060 		DC_ERROR("Failed to read EDID");
2061 
2062 }
2063 
2064 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2065 				     struct amdgpu_display_manager *dm)
2066 {
2067 	struct {
2068 		struct dc_surface_update surface_updates[MAX_SURFACES];
2069 		struct dc_plane_info plane_infos[MAX_SURFACES];
2070 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2071 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2072 		struct dc_stream_update stream_update;
2073 	} * bundle;
2074 	int k, m;
2075 
2076 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2077 
2078 	if (!bundle) {
2079 		dm_error("Failed to allocate update bundle\n");
2080 		goto cleanup;
2081 	}
2082 
2083 	for (k = 0; k < dc_state->stream_count; k++) {
2084 		bundle->stream_update.stream = dc_state->streams[k];
2085 
2086 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2087 			bundle->surface_updates[m].surface =
2088 				dc_state->stream_status->plane_states[m];
2089 			bundle->surface_updates[m].surface->force_full_update =
2090 				true;
2091 		}
2092 		dc_commit_updates_for_stream(
2093 			dm->dc, bundle->surface_updates,
2094 			dc_state->stream_status->plane_count,
2095 			dc_state->streams[k], &bundle->stream_update, dc_state);
2096 	}
2097 
2098 cleanup:
2099 	kfree(bundle);
2100 
2101 	return;
2102 }
2103 
2104 static void dm_set_dpms_off(struct dc_link *link)
2105 {
2106 	struct dc_stream_state *stream_state;
2107 	struct amdgpu_dm_connector *aconnector = link->priv;
2108 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2109 	struct dc_stream_update stream_update;
2110 	bool dpms_off = true;
2111 
2112 	memset(&stream_update, 0, sizeof(stream_update));
2113 	stream_update.dpms_off = &dpms_off;
2114 
2115 	mutex_lock(&adev->dm.dc_lock);
2116 	stream_state = dc_stream_find_from_link(link);
2117 
2118 	if (stream_state == NULL) {
2119 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2120 		mutex_unlock(&adev->dm.dc_lock);
2121 		return;
2122 	}
2123 
2124 	stream_update.stream = stream_state;
2125 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2126 				     stream_state, &stream_update,
2127 				     stream_state->ctx->dc->current_state);
2128 	mutex_unlock(&adev->dm.dc_lock);
2129 }
2130 
2131 static int dm_resume(void *handle)
2132 {
2133 	struct amdgpu_device *adev = handle;
2134 	struct drm_device *ddev = adev_to_drm(adev);
2135 	struct amdgpu_display_manager *dm = &adev->dm;
2136 	struct amdgpu_dm_connector *aconnector;
2137 	struct drm_connector *connector;
2138 	struct drm_connector_list_iter iter;
2139 	struct drm_crtc *crtc;
2140 	struct drm_crtc_state *new_crtc_state;
2141 	struct dm_crtc_state *dm_new_crtc_state;
2142 	struct drm_plane *plane;
2143 	struct drm_plane_state *new_plane_state;
2144 	struct dm_plane_state *dm_new_plane_state;
2145 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2146 	enum dc_connection_type new_connection_type = dc_connection_none;
2147 	struct dc_state *dc_state;
2148 	int i, r, j;
2149 
2150 	if (amdgpu_in_reset(adev)) {
2151 		dc_state = dm->cached_dc_state;
2152 
2153 		r = dm_dmub_hw_init(adev);
2154 		if (r)
2155 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2156 
2157 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158 		dc_resume(dm->dc);
2159 
2160 		amdgpu_dm_irq_resume_early(adev);
2161 
2162 		for (i = 0; i < dc_state->stream_count; i++) {
2163 			dc_state->streams[i]->mode_changed = true;
2164 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2165 				dc_state->stream_status->plane_states[j]->update_flags.raw
2166 					= 0xffffffff;
2167 			}
2168 		}
2169 
2170 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2171 
2172 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2173 
2174 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2175 
2176 		dc_release_state(dm->cached_dc_state);
2177 		dm->cached_dc_state = NULL;
2178 
2179 		amdgpu_dm_irq_resume_late(adev);
2180 
2181 		mutex_unlock(&dm->dc_lock);
2182 
2183 		return 0;
2184 	}
2185 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2186 	dc_release_state(dm_state->context);
2187 	dm_state->context = dc_create_state(dm->dc);
2188 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2189 	dc_resource_state_construct(dm->dc, dm_state->context);
2190 
2191 	/* Before powering on DC we need to re-initialize DMUB. */
2192 	r = dm_dmub_hw_init(adev);
2193 	if (r)
2194 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2195 
2196 	/* power on hardware */
2197 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2198 
2199 	/* program HPD filter */
2200 	dc_resume(dm->dc);
2201 
2202 	/*
2203 	 * early enable HPD Rx IRQ, should be done before set mode as short
2204 	 * pulse interrupts are used for MST
2205 	 */
2206 	amdgpu_dm_irq_resume_early(adev);
2207 
2208 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2209 	s3_handle_mst(ddev, false);
2210 
2211 	/* Do detection*/
2212 	drm_connector_list_iter_begin(ddev, &iter);
2213 	drm_for_each_connector_iter(connector, &iter) {
2214 		aconnector = to_amdgpu_dm_connector(connector);
2215 
2216 		/*
2217 		 * this is the case when traversing through already created
2218 		 * MST connectors, should be skipped
2219 		 */
2220 		if (aconnector->mst_port)
2221 			continue;
2222 
2223 		mutex_lock(&aconnector->hpd_lock);
2224 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2225 			DRM_ERROR("KMS: Failed to detect connector\n");
2226 
2227 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2228 			emulated_link_detect(aconnector->dc_link);
2229 		else
2230 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2231 
2232 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2233 			aconnector->fake_enable = false;
2234 
2235 		if (aconnector->dc_sink)
2236 			dc_sink_release(aconnector->dc_sink);
2237 		aconnector->dc_sink = NULL;
2238 		amdgpu_dm_update_connector_after_detect(aconnector);
2239 		mutex_unlock(&aconnector->hpd_lock);
2240 	}
2241 	drm_connector_list_iter_end(&iter);
2242 
2243 	/* Force mode set in atomic commit */
2244 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2245 		new_crtc_state->active_changed = true;
2246 
2247 	/*
2248 	 * atomic_check is expected to create the dc states. We need to release
2249 	 * them here, since they were duplicated as part of the suspend
2250 	 * procedure.
2251 	 */
2252 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2253 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2254 		if (dm_new_crtc_state->stream) {
2255 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2256 			dc_stream_release(dm_new_crtc_state->stream);
2257 			dm_new_crtc_state->stream = NULL;
2258 		}
2259 	}
2260 
2261 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2262 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2263 		if (dm_new_plane_state->dc_state) {
2264 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2265 			dc_plane_state_release(dm_new_plane_state->dc_state);
2266 			dm_new_plane_state->dc_state = NULL;
2267 		}
2268 	}
2269 
2270 	drm_atomic_helper_resume(ddev, dm->cached_state);
2271 
2272 	dm->cached_state = NULL;
2273 
2274 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2275 	amdgpu_dm_crtc_secure_display_resume(adev);
2276 #endif
2277 
2278 	amdgpu_dm_irq_resume_late(adev);
2279 
2280 	amdgpu_dm_smu_write_watermarks_table(adev);
2281 
2282 	return 0;
2283 }
2284 
2285 /**
2286  * DOC: DM Lifecycle
2287  *
2288  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2289  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2290  * the base driver's device list to be initialized and torn down accordingly.
2291  *
2292  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2293  */
2294 
2295 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2296 	.name = "dm",
2297 	.early_init = dm_early_init,
2298 	.late_init = dm_late_init,
2299 	.sw_init = dm_sw_init,
2300 	.sw_fini = dm_sw_fini,
2301 	.hw_init = dm_hw_init,
2302 	.hw_fini = dm_hw_fini,
2303 	.suspend = dm_suspend,
2304 	.resume = dm_resume,
2305 	.is_idle = dm_is_idle,
2306 	.wait_for_idle = dm_wait_for_idle,
2307 	.check_soft_reset = dm_check_soft_reset,
2308 	.soft_reset = dm_soft_reset,
2309 	.set_clockgating_state = dm_set_clockgating_state,
2310 	.set_powergating_state = dm_set_powergating_state,
2311 };
2312 
2313 const struct amdgpu_ip_block_version dm_ip_block =
2314 {
2315 	.type = AMD_IP_BLOCK_TYPE_DCE,
2316 	.major = 1,
2317 	.minor = 0,
2318 	.rev = 0,
2319 	.funcs = &amdgpu_dm_funcs,
2320 };
2321 
2322 
2323 /**
2324  * DOC: atomic
2325  *
2326  * *WIP*
2327  */
2328 
2329 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2330 	.fb_create = amdgpu_display_user_framebuffer_create,
2331 	.get_format_info = amd_get_format_info,
2332 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2333 	.atomic_check = amdgpu_dm_atomic_check,
2334 	.atomic_commit = drm_atomic_helper_commit,
2335 };
2336 
2337 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2338 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2339 };
2340 
2341 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2342 {
2343 	u32 max_cll, min_cll, max, min, q, r;
2344 	struct amdgpu_dm_backlight_caps *caps;
2345 	struct amdgpu_display_manager *dm;
2346 	struct drm_connector *conn_base;
2347 	struct amdgpu_device *adev;
2348 	struct dc_link *link = NULL;
2349 	static const u8 pre_computed_values[] = {
2350 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2351 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2352 
2353 	if (!aconnector || !aconnector->dc_link)
2354 		return;
2355 
2356 	link = aconnector->dc_link;
2357 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2358 		return;
2359 
2360 	conn_base = &aconnector->base;
2361 	adev = drm_to_adev(conn_base->dev);
2362 	dm = &adev->dm;
2363 	caps = &dm->backlight_caps;
2364 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2365 	caps->aux_support = false;
2366 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2367 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2368 
2369 	if (caps->ext_caps->bits.oled == 1 ||
2370 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2371 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2372 		caps->aux_support = true;
2373 
2374 	if (amdgpu_backlight == 0)
2375 		caps->aux_support = false;
2376 	else if (amdgpu_backlight == 1)
2377 		caps->aux_support = true;
2378 
2379 	/* From the specification (CTA-861-G), for calculating the maximum
2380 	 * luminance we need to use:
2381 	 *	Luminance = 50*2**(CV/32)
2382 	 * Where CV is a one-byte value.
2383 	 * For calculating this expression we may need float point precision;
2384 	 * to avoid this complexity level, we take advantage that CV is divided
2385 	 * by a constant. From the Euclids division algorithm, we know that CV
2386 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2387 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2388 	 * need to pre-compute the value of r/32. For pre-computing the values
2389 	 * We just used the following Ruby line:
2390 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2391 	 * The results of the above expressions can be verified at
2392 	 * pre_computed_values.
2393 	 */
2394 	q = max_cll >> 5;
2395 	r = max_cll % 32;
2396 	max = (1 << q) * pre_computed_values[r];
2397 
2398 	// min luminance: maxLum * (CV/255)^2 / 100
2399 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2400 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2401 
2402 	caps->aux_max_input_signal = max;
2403 	caps->aux_min_input_signal = min;
2404 }
2405 
2406 void amdgpu_dm_update_connector_after_detect(
2407 		struct amdgpu_dm_connector *aconnector)
2408 {
2409 	struct drm_connector *connector = &aconnector->base;
2410 	struct drm_device *dev = connector->dev;
2411 	struct dc_sink *sink;
2412 
2413 	/* MST handled by drm_mst framework */
2414 	if (aconnector->mst_mgr.mst_state == true)
2415 		return;
2416 
2417 	sink = aconnector->dc_link->local_sink;
2418 	if (sink)
2419 		dc_sink_retain(sink);
2420 
2421 	/*
2422 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2423 	 * the connector sink is set to either fake or physical sink depends on link status.
2424 	 * Skip if already done during boot.
2425 	 */
2426 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2427 			&& aconnector->dc_em_sink) {
2428 
2429 		/*
2430 		 * For S3 resume with headless use eml_sink to fake stream
2431 		 * because on resume connector->sink is set to NULL
2432 		 */
2433 		mutex_lock(&dev->mode_config.mutex);
2434 
2435 		if (sink) {
2436 			if (aconnector->dc_sink) {
2437 				amdgpu_dm_update_freesync_caps(connector, NULL);
2438 				/*
2439 				 * retain and release below are used to
2440 				 * bump up refcount for sink because the link doesn't point
2441 				 * to it anymore after disconnect, so on next crtc to connector
2442 				 * reshuffle by UMD we will get into unwanted dc_sink release
2443 				 */
2444 				dc_sink_release(aconnector->dc_sink);
2445 			}
2446 			aconnector->dc_sink = sink;
2447 			dc_sink_retain(aconnector->dc_sink);
2448 			amdgpu_dm_update_freesync_caps(connector,
2449 					aconnector->edid);
2450 		} else {
2451 			amdgpu_dm_update_freesync_caps(connector, NULL);
2452 			if (!aconnector->dc_sink) {
2453 				aconnector->dc_sink = aconnector->dc_em_sink;
2454 				dc_sink_retain(aconnector->dc_sink);
2455 			}
2456 		}
2457 
2458 		mutex_unlock(&dev->mode_config.mutex);
2459 
2460 		if (sink)
2461 			dc_sink_release(sink);
2462 		return;
2463 	}
2464 
2465 	/*
2466 	 * TODO: temporary guard to look for proper fix
2467 	 * if this sink is MST sink, we should not do anything
2468 	 */
2469 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2470 		dc_sink_release(sink);
2471 		return;
2472 	}
2473 
2474 	if (aconnector->dc_sink == sink) {
2475 		/*
2476 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2477 		 * Do nothing!!
2478 		 */
2479 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2480 				aconnector->connector_id);
2481 		if (sink)
2482 			dc_sink_release(sink);
2483 		return;
2484 	}
2485 
2486 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2487 		aconnector->connector_id, aconnector->dc_sink, sink);
2488 
2489 	mutex_lock(&dev->mode_config.mutex);
2490 
2491 	/*
2492 	 * 1. Update status of the drm connector
2493 	 * 2. Send an event and let userspace tell us what to do
2494 	 */
2495 	if (sink) {
2496 		/*
2497 		 * TODO: check if we still need the S3 mode update workaround.
2498 		 * If yes, put it here.
2499 		 */
2500 		if (aconnector->dc_sink) {
2501 			amdgpu_dm_update_freesync_caps(connector, NULL);
2502 			dc_sink_release(aconnector->dc_sink);
2503 		}
2504 
2505 		aconnector->dc_sink = sink;
2506 		dc_sink_retain(aconnector->dc_sink);
2507 		if (sink->dc_edid.length == 0) {
2508 			aconnector->edid = NULL;
2509 			if (aconnector->dc_link->aux_mode) {
2510 				drm_dp_cec_unset_edid(
2511 					&aconnector->dm_dp_aux.aux);
2512 			}
2513 		} else {
2514 			aconnector->edid =
2515 				(struct edid *)sink->dc_edid.raw_edid;
2516 
2517 			drm_connector_update_edid_property(connector,
2518 							   aconnector->edid);
2519 			if (aconnector->dc_link->aux_mode)
2520 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2521 						    aconnector->edid);
2522 		}
2523 
2524 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2525 		update_connector_ext_caps(aconnector);
2526 	} else {
2527 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2528 		amdgpu_dm_update_freesync_caps(connector, NULL);
2529 		drm_connector_update_edid_property(connector, NULL);
2530 		aconnector->num_modes = 0;
2531 		dc_sink_release(aconnector->dc_sink);
2532 		aconnector->dc_sink = NULL;
2533 		aconnector->edid = NULL;
2534 #ifdef CONFIG_DRM_AMD_DC_HDCP
2535 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2536 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2537 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2538 #endif
2539 	}
2540 
2541 	mutex_unlock(&dev->mode_config.mutex);
2542 
2543 	update_subconnector_property(aconnector);
2544 
2545 	if (sink)
2546 		dc_sink_release(sink);
2547 }
2548 
2549 static void handle_hpd_irq(void *param)
2550 {
2551 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2552 	struct drm_connector *connector = &aconnector->base;
2553 	struct drm_device *dev = connector->dev;
2554 	enum dc_connection_type new_connection_type = dc_connection_none;
2555 	struct amdgpu_device *adev = drm_to_adev(dev);
2556 #ifdef CONFIG_DRM_AMD_DC_HDCP
2557 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2558 #endif
2559 
2560 	if (adev->dm.disable_hpd_irq)
2561 		return;
2562 
2563 	/*
2564 	 * In case of failure or MST no need to update connector status or notify the OS
2565 	 * since (for MST case) MST does this in its own context.
2566 	 */
2567 	mutex_lock(&aconnector->hpd_lock);
2568 
2569 #ifdef CONFIG_DRM_AMD_DC_HDCP
2570 	if (adev->dm.hdcp_workqueue) {
2571 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2572 		dm_con_state->update_hdcp = true;
2573 	}
2574 #endif
2575 	if (aconnector->fake_enable)
2576 		aconnector->fake_enable = false;
2577 
2578 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2579 		DRM_ERROR("KMS: Failed to detect connector\n");
2580 
2581 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2582 		emulated_link_detect(aconnector->dc_link);
2583 
2584 
2585 		drm_modeset_lock_all(dev);
2586 		dm_restore_drm_connector_state(dev, connector);
2587 		drm_modeset_unlock_all(dev);
2588 
2589 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2590 			drm_kms_helper_hotplug_event(dev);
2591 
2592 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2593 		if (new_connection_type == dc_connection_none &&
2594 		    aconnector->dc_link->type == dc_connection_none)
2595 			dm_set_dpms_off(aconnector->dc_link);
2596 
2597 		amdgpu_dm_update_connector_after_detect(aconnector);
2598 
2599 		drm_modeset_lock_all(dev);
2600 		dm_restore_drm_connector_state(dev, connector);
2601 		drm_modeset_unlock_all(dev);
2602 
2603 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2604 			drm_kms_helper_hotplug_event(dev);
2605 	}
2606 	mutex_unlock(&aconnector->hpd_lock);
2607 
2608 }
2609 
2610 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2611 {
2612 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2613 	uint8_t dret;
2614 	bool new_irq_handled = false;
2615 	int dpcd_addr;
2616 	int dpcd_bytes_to_read;
2617 
2618 	const int max_process_count = 30;
2619 	int process_count = 0;
2620 
2621 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2622 
2623 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2624 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2625 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2626 		dpcd_addr = DP_SINK_COUNT;
2627 	} else {
2628 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2629 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2630 		dpcd_addr = DP_SINK_COUNT_ESI;
2631 	}
2632 
2633 	dret = drm_dp_dpcd_read(
2634 		&aconnector->dm_dp_aux.aux,
2635 		dpcd_addr,
2636 		esi,
2637 		dpcd_bytes_to_read);
2638 
2639 	while (dret == dpcd_bytes_to_read &&
2640 		process_count < max_process_count) {
2641 		uint8_t retry;
2642 		dret = 0;
2643 
2644 		process_count++;
2645 
2646 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2647 		/* handle HPD short pulse irq */
2648 		if (aconnector->mst_mgr.mst_state)
2649 			drm_dp_mst_hpd_irq(
2650 				&aconnector->mst_mgr,
2651 				esi,
2652 				&new_irq_handled);
2653 
2654 		if (new_irq_handled) {
2655 			/* ACK at DPCD to notify down stream */
2656 			const int ack_dpcd_bytes_to_write =
2657 				dpcd_bytes_to_read - 1;
2658 
2659 			for (retry = 0; retry < 3; retry++) {
2660 				uint8_t wret;
2661 
2662 				wret = drm_dp_dpcd_write(
2663 					&aconnector->dm_dp_aux.aux,
2664 					dpcd_addr + 1,
2665 					&esi[1],
2666 					ack_dpcd_bytes_to_write);
2667 				if (wret == ack_dpcd_bytes_to_write)
2668 					break;
2669 			}
2670 
2671 			/* check if there is new irq to be handled */
2672 			dret = drm_dp_dpcd_read(
2673 				&aconnector->dm_dp_aux.aux,
2674 				dpcd_addr,
2675 				esi,
2676 				dpcd_bytes_to_read);
2677 
2678 			new_irq_handled = false;
2679 		} else {
2680 			break;
2681 		}
2682 	}
2683 
2684 	if (process_count == max_process_count)
2685 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2686 }
2687 
2688 static void handle_hpd_rx_irq(void *param)
2689 {
2690 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2691 	struct drm_connector *connector = &aconnector->base;
2692 	struct drm_device *dev = connector->dev;
2693 	struct dc_link *dc_link = aconnector->dc_link;
2694 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2695 	bool result = false;
2696 	enum dc_connection_type new_connection_type = dc_connection_none;
2697 	struct amdgpu_device *adev = drm_to_adev(dev);
2698 	union hpd_irq_data hpd_irq_data;
2699 
2700 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2701 
2702 	if (adev->dm.disable_hpd_irq)
2703 		return;
2704 
2705 
2706 	/*
2707 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2708 	 * conflict, after implement i2c helper, this mutex should be
2709 	 * retired.
2710 	 */
2711 	mutex_lock(&aconnector->hpd_lock);
2712 
2713 	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2714 
2715 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2716 		(dc_link->type == dc_connection_mst_branch)) {
2717 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2718 			result = true;
2719 			dm_handle_hpd_rx_irq(aconnector);
2720 			goto out;
2721 		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2722 			result = false;
2723 			dm_handle_hpd_rx_irq(aconnector);
2724 			goto out;
2725 		}
2726 	}
2727 
2728 	if (!amdgpu_in_reset(adev))
2729 		mutex_lock(&adev->dm.dc_lock);
2730 #ifdef CONFIG_DRM_AMD_DC_HDCP
2731 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2732 #else
2733 	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2734 #endif
2735 	if (!amdgpu_in_reset(adev))
2736 		mutex_unlock(&adev->dm.dc_lock);
2737 
2738 out:
2739 	if (result && !is_mst_root_connector) {
2740 		/* Downstream Port status changed. */
2741 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2742 			DRM_ERROR("KMS: Failed to detect connector\n");
2743 
2744 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2745 			emulated_link_detect(dc_link);
2746 
2747 			if (aconnector->fake_enable)
2748 				aconnector->fake_enable = false;
2749 
2750 			amdgpu_dm_update_connector_after_detect(aconnector);
2751 
2752 
2753 			drm_modeset_lock_all(dev);
2754 			dm_restore_drm_connector_state(dev, connector);
2755 			drm_modeset_unlock_all(dev);
2756 
2757 			drm_kms_helper_hotplug_event(dev);
2758 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2759 
2760 			if (aconnector->fake_enable)
2761 				aconnector->fake_enable = false;
2762 
2763 			amdgpu_dm_update_connector_after_detect(aconnector);
2764 
2765 
2766 			drm_modeset_lock_all(dev);
2767 			dm_restore_drm_connector_state(dev, connector);
2768 			drm_modeset_unlock_all(dev);
2769 
2770 			drm_kms_helper_hotplug_event(dev);
2771 		}
2772 	}
2773 #ifdef CONFIG_DRM_AMD_DC_HDCP
2774 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2775 		if (adev->dm.hdcp_workqueue)
2776 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2777 	}
2778 #endif
2779 
2780 	if (dc_link->type != dc_connection_mst_branch)
2781 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2782 
2783 	mutex_unlock(&aconnector->hpd_lock);
2784 }
2785 
2786 static void register_hpd_handlers(struct amdgpu_device *adev)
2787 {
2788 	struct drm_device *dev = adev_to_drm(adev);
2789 	struct drm_connector *connector;
2790 	struct amdgpu_dm_connector *aconnector;
2791 	const struct dc_link *dc_link;
2792 	struct dc_interrupt_params int_params = {0};
2793 
2794 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2795 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2796 
2797 	list_for_each_entry(connector,
2798 			&dev->mode_config.connector_list, head)	{
2799 
2800 		aconnector = to_amdgpu_dm_connector(connector);
2801 		dc_link = aconnector->dc_link;
2802 
2803 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2804 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2805 			int_params.irq_source = dc_link->irq_source_hpd;
2806 
2807 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2808 					handle_hpd_irq,
2809 					(void *) aconnector);
2810 		}
2811 
2812 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2813 
2814 			/* Also register for DP short pulse (hpd_rx). */
2815 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2816 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
2817 
2818 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2819 					handle_hpd_rx_irq,
2820 					(void *) aconnector);
2821 		}
2822 	}
2823 }
2824 
2825 #if defined(CONFIG_DRM_AMD_DC_SI)
2826 /* Register IRQ sources and initialize IRQ callbacks */
2827 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2828 {
2829 	struct dc *dc = adev->dm.dc;
2830 	struct common_irq_params *c_irq_params;
2831 	struct dc_interrupt_params int_params = {0};
2832 	int r;
2833 	int i;
2834 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2835 
2836 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2837 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2838 
2839 	/*
2840 	 * Actions of amdgpu_irq_add_id():
2841 	 * 1. Register a set() function with base driver.
2842 	 *    Base driver will call set() function to enable/disable an
2843 	 *    interrupt in DC hardware.
2844 	 * 2. Register amdgpu_dm_irq_handler().
2845 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2846 	 *    coming from DC hardware.
2847 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2848 	 *    for acknowledging and handling. */
2849 
2850 	/* Use VBLANK interrupt */
2851 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2852 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2853 		if (r) {
2854 			DRM_ERROR("Failed to add crtc irq id!\n");
2855 			return r;
2856 		}
2857 
2858 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2859 		int_params.irq_source =
2860 			dc_interrupt_to_irq_source(dc, i+1 , 0);
2861 
2862 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2863 
2864 		c_irq_params->adev = adev;
2865 		c_irq_params->irq_src = int_params.irq_source;
2866 
2867 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2868 				dm_crtc_high_irq, c_irq_params);
2869 	}
2870 
2871 	/* Use GRPH_PFLIP interrupt */
2872 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2873 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2874 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2875 		if (r) {
2876 			DRM_ERROR("Failed to add page flip irq id!\n");
2877 			return r;
2878 		}
2879 
2880 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2881 		int_params.irq_source =
2882 			dc_interrupt_to_irq_source(dc, i, 0);
2883 
2884 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2885 
2886 		c_irq_params->adev = adev;
2887 		c_irq_params->irq_src = int_params.irq_source;
2888 
2889 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2890 				dm_pflip_high_irq, c_irq_params);
2891 
2892 	}
2893 
2894 	/* HPD */
2895 	r = amdgpu_irq_add_id(adev, client_id,
2896 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2897 	if (r) {
2898 		DRM_ERROR("Failed to add hpd irq id!\n");
2899 		return r;
2900 	}
2901 
2902 	register_hpd_handlers(adev);
2903 
2904 	return 0;
2905 }
2906 #endif
2907 
2908 /* Register IRQ sources and initialize IRQ callbacks */
2909 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2910 {
2911 	struct dc *dc = adev->dm.dc;
2912 	struct common_irq_params *c_irq_params;
2913 	struct dc_interrupt_params int_params = {0};
2914 	int r;
2915 	int i;
2916 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2917 
2918 	if (adev->asic_type >= CHIP_VEGA10)
2919 		client_id = SOC15_IH_CLIENTID_DCE;
2920 
2921 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2922 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2923 
2924 	/*
2925 	 * Actions of amdgpu_irq_add_id():
2926 	 * 1. Register a set() function with base driver.
2927 	 *    Base driver will call set() function to enable/disable an
2928 	 *    interrupt in DC hardware.
2929 	 * 2. Register amdgpu_dm_irq_handler().
2930 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2931 	 *    coming from DC hardware.
2932 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2933 	 *    for acknowledging and handling. */
2934 
2935 	/* Use VBLANK interrupt */
2936 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2937 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2938 		if (r) {
2939 			DRM_ERROR("Failed to add crtc irq id!\n");
2940 			return r;
2941 		}
2942 
2943 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2944 		int_params.irq_source =
2945 			dc_interrupt_to_irq_source(dc, i, 0);
2946 
2947 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2948 
2949 		c_irq_params->adev = adev;
2950 		c_irq_params->irq_src = int_params.irq_source;
2951 
2952 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2953 				dm_crtc_high_irq, c_irq_params);
2954 	}
2955 
2956 	/* Use VUPDATE interrupt */
2957 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2958 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2959 		if (r) {
2960 			DRM_ERROR("Failed to add vupdate irq id!\n");
2961 			return r;
2962 		}
2963 
2964 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2965 		int_params.irq_source =
2966 			dc_interrupt_to_irq_source(dc, i, 0);
2967 
2968 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2969 
2970 		c_irq_params->adev = adev;
2971 		c_irq_params->irq_src = int_params.irq_source;
2972 
2973 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2974 				dm_vupdate_high_irq, c_irq_params);
2975 	}
2976 
2977 	/* Use GRPH_PFLIP interrupt */
2978 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2979 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2980 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2981 		if (r) {
2982 			DRM_ERROR("Failed to add page flip irq id!\n");
2983 			return r;
2984 		}
2985 
2986 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2987 		int_params.irq_source =
2988 			dc_interrupt_to_irq_source(dc, i, 0);
2989 
2990 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2991 
2992 		c_irq_params->adev = adev;
2993 		c_irq_params->irq_src = int_params.irq_source;
2994 
2995 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2996 				dm_pflip_high_irq, c_irq_params);
2997 
2998 	}
2999 
3000 	/* HPD */
3001 	r = amdgpu_irq_add_id(adev, client_id,
3002 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3003 	if (r) {
3004 		DRM_ERROR("Failed to add hpd irq id!\n");
3005 		return r;
3006 	}
3007 
3008 	register_hpd_handlers(adev);
3009 
3010 	return 0;
3011 }
3012 
3013 #if defined(CONFIG_DRM_AMD_DC_DCN)
3014 /* Register IRQ sources and initialize IRQ callbacks */
3015 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3016 {
3017 	struct dc *dc = adev->dm.dc;
3018 	struct common_irq_params *c_irq_params;
3019 	struct dc_interrupt_params int_params = {0};
3020 	int r;
3021 	int i;
3022 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3023 	static const unsigned int vrtl_int_srcid[] = {
3024 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3025 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3026 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3027 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3028 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3029 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3030 	};
3031 #endif
3032 
3033 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3034 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3035 
3036 	/*
3037 	 * Actions of amdgpu_irq_add_id():
3038 	 * 1. Register a set() function with base driver.
3039 	 *    Base driver will call set() function to enable/disable an
3040 	 *    interrupt in DC hardware.
3041 	 * 2. Register amdgpu_dm_irq_handler().
3042 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3043 	 *    coming from DC hardware.
3044 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3045 	 *    for acknowledging and handling.
3046 	 */
3047 
3048 	/* Use VSTARTUP interrupt */
3049 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3050 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3051 			i++) {
3052 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3053 
3054 		if (r) {
3055 			DRM_ERROR("Failed to add crtc irq id!\n");
3056 			return r;
3057 		}
3058 
3059 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3060 		int_params.irq_source =
3061 			dc_interrupt_to_irq_source(dc, i, 0);
3062 
3063 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3064 
3065 		c_irq_params->adev = adev;
3066 		c_irq_params->irq_src = int_params.irq_source;
3067 
3068 		amdgpu_dm_irq_register_interrupt(
3069 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3070 	}
3071 
3072 	/* Use otg vertical line interrupt */
3073 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3074 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3075 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3076 				vrtl_int_srcid[i], &adev->vline0_irq);
3077 
3078 		if (r) {
3079 			DRM_ERROR("Failed to add vline0 irq id!\n");
3080 			return r;
3081 		}
3082 
3083 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3084 		int_params.irq_source =
3085 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3086 
3087 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3088 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3089 			break;
3090 		}
3091 
3092 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3093 					- DC_IRQ_SOURCE_DC1_VLINE0];
3094 
3095 		c_irq_params->adev = adev;
3096 		c_irq_params->irq_src = int_params.irq_source;
3097 
3098 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3099 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3100 	}
3101 #endif
3102 
3103 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3104 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3105 	 * to trigger at end of each vblank, regardless of state of the lock,
3106 	 * matching DCE behaviour.
3107 	 */
3108 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3109 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3110 	     i++) {
3111 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3112 
3113 		if (r) {
3114 			DRM_ERROR("Failed to add vupdate irq id!\n");
3115 			return r;
3116 		}
3117 
3118 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3119 		int_params.irq_source =
3120 			dc_interrupt_to_irq_source(dc, i, 0);
3121 
3122 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3123 
3124 		c_irq_params->adev = adev;
3125 		c_irq_params->irq_src = int_params.irq_source;
3126 
3127 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3128 				dm_vupdate_high_irq, c_irq_params);
3129 	}
3130 
3131 	/* Use GRPH_PFLIP interrupt */
3132 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3133 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3134 			i++) {
3135 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3136 		if (r) {
3137 			DRM_ERROR("Failed to add page flip irq id!\n");
3138 			return r;
3139 		}
3140 
3141 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3142 		int_params.irq_source =
3143 			dc_interrupt_to_irq_source(dc, i, 0);
3144 
3145 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3146 
3147 		c_irq_params->adev = adev;
3148 		c_irq_params->irq_src = int_params.irq_source;
3149 
3150 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3151 				dm_pflip_high_irq, c_irq_params);
3152 
3153 	}
3154 
3155 	if (dc->ctx->dmub_srv) {
3156 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
3157 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
3158 
3159 		if (r) {
3160 			DRM_ERROR("Failed to add dmub trace irq id!\n");
3161 			return r;
3162 		}
3163 
3164 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3165 		int_params.irq_source =
3166 			dc_interrupt_to_irq_source(dc, i, 0);
3167 
3168 		c_irq_params = &adev->dm.dmub_trace_params[0];
3169 
3170 		c_irq_params->adev = adev;
3171 		c_irq_params->irq_src = int_params.irq_source;
3172 
3173 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3174 				dm_dmub_trace_high_irq, c_irq_params);
3175 	}
3176 
3177 	/* HPD */
3178 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3179 			&adev->hpd_irq);
3180 	if (r) {
3181 		DRM_ERROR("Failed to add hpd irq id!\n");
3182 		return r;
3183 	}
3184 
3185 	register_hpd_handlers(adev);
3186 
3187 	return 0;
3188 }
3189 #endif
3190 
3191 /*
3192  * Acquires the lock for the atomic state object and returns
3193  * the new atomic state.
3194  *
3195  * This should only be called during atomic check.
3196  */
3197 static int dm_atomic_get_state(struct drm_atomic_state *state,
3198 			       struct dm_atomic_state **dm_state)
3199 {
3200 	struct drm_device *dev = state->dev;
3201 	struct amdgpu_device *adev = drm_to_adev(dev);
3202 	struct amdgpu_display_manager *dm = &adev->dm;
3203 	struct drm_private_state *priv_state;
3204 
3205 	if (*dm_state)
3206 		return 0;
3207 
3208 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3209 	if (IS_ERR(priv_state))
3210 		return PTR_ERR(priv_state);
3211 
3212 	*dm_state = to_dm_atomic_state(priv_state);
3213 
3214 	return 0;
3215 }
3216 
3217 static struct dm_atomic_state *
3218 dm_atomic_get_new_state(struct drm_atomic_state *state)
3219 {
3220 	struct drm_device *dev = state->dev;
3221 	struct amdgpu_device *adev = drm_to_adev(dev);
3222 	struct amdgpu_display_manager *dm = &adev->dm;
3223 	struct drm_private_obj *obj;
3224 	struct drm_private_state *new_obj_state;
3225 	int i;
3226 
3227 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3228 		if (obj->funcs == dm->atomic_obj.funcs)
3229 			return to_dm_atomic_state(new_obj_state);
3230 	}
3231 
3232 	return NULL;
3233 }
3234 
3235 static struct drm_private_state *
3236 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3237 {
3238 	struct dm_atomic_state *old_state, *new_state;
3239 
3240 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3241 	if (!new_state)
3242 		return NULL;
3243 
3244 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3245 
3246 	old_state = to_dm_atomic_state(obj->state);
3247 
3248 	if (old_state && old_state->context)
3249 		new_state->context = dc_copy_state(old_state->context);
3250 
3251 	if (!new_state->context) {
3252 		kfree(new_state);
3253 		return NULL;
3254 	}
3255 
3256 	return &new_state->base;
3257 }
3258 
3259 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3260 				    struct drm_private_state *state)
3261 {
3262 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3263 
3264 	if (dm_state && dm_state->context)
3265 		dc_release_state(dm_state->context);
3266 
3267 	kfree(dm_state);
3268 }
3269 
3270 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3271 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3272 	.atomic_destroy_state = dm_atomic_destroy_state,
3273 };
3274 
3275 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3276 {
3277 	struct dm_atomic_state *state;
3278 	int r;
3279 
3280 	adev->mode_info.mode_config_initialized = true;
3281 
3282 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3283 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3284 
3285 	adev_to_drm(adev)->mode_config.max_width = 16384;
3286 	adev_to_drm(adev)->mode_config.max_height = 16384;
3287 
3288 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3289 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3290 	/* indicates support for immediate flip */
3291 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3292 
3293 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3294 
3295 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3296 	if (!state)
3297 		return -ENOMEM;
3298 
3299 	state->context = dc_create_state(adev->dm.dc);
3300 	if (!state->context) {
3301 		kfree(state);
3302 		return -ENOMEM;
3303 	}
3304 
3305 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3306 
3307 	drm_atomic_private_obj_init(adev_to_drm(adev),
3308 				    &adev->dm.atomic_obj,
3309 				    &state->base,
3310 				    &dm_atomic_state_funcs);
3311 
3312 	r = amdgpu_display_modeset_create_props(adev);
3313 	if (r) {
3314 		dc_release_state(state->context);
3315 		kfree(state);
3316 		return r;
3317 	}
3318 
3319 	r = amdgpu_dm_audio_init(adev);
3320 	if (r) {
3321 		dc_release_state(state->context);
3322 		kfree(state);
3323 		return r;
3324 	}
3325 
3326 	return 0;
3327 }
3328 
3329 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3330 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3331 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3332 
3333 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3334 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3335 
3336 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3337 {
3338 #if defined(CONFIG_ACPI)
3339 	struct amdgpu_dm_backlight_caps caps;
3340 
3341 	memset(&caps, 0, sizeof(caps));
3342 
3343 	if (dm->backlight_caps.caps_valid)
3344 		return;
3345 
3346 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3347 	if (caps.caps_valid) {
3348 		dm->backlight_caps.caps_valid = true;
3349 		if (caps.aux_support)
3350 			return;
3351 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3352 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3353 	} else {
3354 		dm->backlight_caps.min_input_signal =
3355 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3356 		dm->backlight_caps.max_input_signal =
3357 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3358 	}
3359 #else
3360 	if (dm->backlight_caps.aux_support)
3361 		return;
3362 
3363 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3364 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3365 #endif
3366 }
3367 
3368 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3369 				unsigned *min, unsigned *max)
3370 {
3371 	if (!caps)
3372 		return 0;
3373 
3374 	if (caps->aux_support) {
3375 		// Firmware limits are in nits, DC API wants millinits.
3376 		*max = 1000 * caps->aux_max_input_signal;
3377 		*min = 1000 * caps->aux_min_input_signal;
3378 	} else {
3379 		// Firmware limits are 8-bit, PWM control is 16-bit.
3380 		*max = 0x101 * caps->max_input_signal;
3381 		*min = 0x101 * caps->min_input_signal;
3382 	}
3383 	return 1;
3384 }
3385 
3386 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3387 					uint32_t brightness)
3388 {
3389 	unsigned min, max;
3390 
3391 	if (!get_brightness_range(caps, &min, &max))
3392 		return brightness;
3393 
3394 	// Rescale 0..255 to min..max
3395 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3396 				       AMDGPU_MAX_BL_LEVEL);
3397 }
3398 
3399 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3400 				      uint32_t brightness)
3401 {
3402 	unsigned min, max;
3403 
3404 	if (!get_brightness_range(caps, &min, &max))
3405 		return brightness;
3406 
3407 	if (brightness < min)
3408 		return 0;
3409 	// Rescale min..max to 0..255
3410 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3411 				 max - min);
3412 }
3413 
3414 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3415 {
3416 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3417 	struct amdgpu_dm_backlight_caps caps;
3418 	struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
3419 	u32 brightness;
3420 	bool rc;
3421 	int i;
3422 
3423 	amdgpu_dm_update_backlight_caps(dm);
3424 	caps = dm->backlight_caps;
3425 
3426 	for (i = 0; i < dm->num_of_edps; i++)
3427 		link[i] = (struct dc_link *)dm->backlight_link[i];
3428 
3429 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3430 	// Change brightness based on AUX property
3431 	if (caps.aux_support) {
3432 		for (i = 0; i < dm->num_of_edps; i++) {
3433 			rc = dc_link_set_backlight_level_nits(link[i], true, brightness,
3434 				AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3435 			if (!rc) {
3436 				DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
3437 				break;
3438 			}
3439 		}
3440 	} else {
3441 		for (i = 0; i < dm->num_of_edps; i++) {
3442 			rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness, 0);
3443 			if (!rc) {
3444 				DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
3445 				break;
3446 			}
3447 		}
3448 	}
3449 
3450 	return rc ? 0 : 1;
3451 }
3452 
3453 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3454 {
3455 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3456 	struct amdgpu_dm_backlight_caps caps;
3457 
3458 	amdgpu_dm_update_backlight_caps(dm);
3459 	caps = dm->backlight_caps;
3460 
3461 	if (caps.aux_support) {
3462 		struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
3463 		u32 avg, peak;
3464 		bool rc;
3465 
3466 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3467 		if (!rc)
3468 			return bd->props.brightness;
3469 		return convert_brightness_to_user(&caps, avg);
3470 	} else {
3471 		int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
3472 
3473 		if (ret == DC_ERROR_UNEXPECTED)
3474 			return bd->props.brightness;
3475 		return convert_brightness_to_user(&caps, ret);
3476 	}
3477 }
3478 
3479 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3480 	.options = BL_CORE_SUSPENDRESUME,
3481 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3482 	.update_status	= amdgpu_dm_backlight_update_status,
3483 };
3484 
3485 static void
3486 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3487 {
3488 	char bl_name[16];
3489 	struct backlight_properties props = { 0 };
3490 
3491 	amdgpu_dm_update_backlight_caps(dm);
3492 
3493 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3494 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3495 	props.type = BACKLIGHT_RAW;
3496 
3497 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3498 		 adev_to_drm(dm->adev)->primary->index);
3499 
3500 	dm->backlight_dev = backlight_device_register(bl_name,
3501 						      adev_to_drm(dm->adev)->dev,
3502 						      dm,
3503 						      &amdgpu_dm_backlight_ops,
3504 						      &props);
3505 
3506 	if (IS_ERR(dm->backlight_dev))
3507 		DRM_ERROR("DM: Backlight registration failed!\n");
3508 	else
3509 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3510 }
3511 
3512 #endif
3513 
3514 static int initialize_plane(struct amdgpu_display_manager *dm,
3515 			    struct amdgpu_mode_info *mode_info, int plane_id,
3516 			    enum drm_plane_type plane_type,
3517 			    const struct dc_plane_cap *plane_cap)
3518 {
3519 	struct drm_plane *plane;
3520 	unsigned long possible_crtcs;
3521 	int ret = 0;
3522 
3523 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3524 	if (!plane) {
3525 		DRM_ERROR("KMS: Failed to allocate plane\n");
3526 		return -ENOMEM;
3527 	}
3528 	plane->type = plane_type;
3529 
3530 	/*
3531 	 * HACK: IGT tests expect that the primary plane for a CRTC
3532 	 * can only have one possible CRTC. Only expose support for
3533 	 * any CRTC if they're not going to be used as a primary plane
3534 	 * for a CRTC - like overlay or underlay planes.
3535 	 */
3536 	possible_crtcs = 1 << plane_id;
3537 	if (plane_id >= dm->dc->caps.max_streams)
3538 		possible_crtcs = 0xff;
3539 
3540 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3541 
3542 	if (ret) {
3543 		DRM_ERROR("KMS: Failed to initialize plane\n");
3544 		kfree(plane);
3545 		return ret;
3546 	}
3547 
3548 	if (mode_info)
3549 		mode_info->planes[plane_id] = plane;
3550 
3551 	return ret;
3552 }
3553 
3554 
3555 static void register_backlight_device(struct amdgpu_display_manager *dm,
3556 				      struct dc_link *link)
3557 {
3558 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3559 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3560 
3561 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3562 	    link->type != dc_connection_none) {
3563 		/*
3564 		 * Event if registration failed, we should continue with
3565 		 * DM initialization because not having a backlight control
3566 		 * is better then a black screen.
3567 		 */
3568 		if (!dm->backlight_dev)
3569 			amdgpu_dm_register_backlight_device(dm);
3570 
3571 		if (dm->backlight_dev) {
3572 			dm->backlight_link[dm->num_of_edps] = link;
3573 			dm->num_of_edps++;
3574 		}
3575 	}
3576 #endif
3577 }
3578 
3579 
3580 /*
3581  * In this architecture, the association
3582  * connector -> encoder -> crtc
3583  * id not really requried. The crtc and connector will hold the
3584  * display_index as an abstraction to use with DAL component
3585  *
3586  * Returns 0 on success
3587  */
3588 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3589 {
3590 	struct amdgpu_display_manager *dm = &adev->dm;
3591 	int32_t i;
3592 	struct amdgpu_dm_connector *aconnector = NULL;
3593 	struct amdgpu_encoder *aencoder = NULL;
3594 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3595 	uint32_t link_cnt;
3596 	int32_t primary_planes;
3597 	enum dc_connection_type new_connection_type = dc_connection_none;
3598 	const struct dc_plane_cap *plane;
3599 
3600 	dm->display_indexes_num = dm->dc->caps.max_streams;
3601 	/* Update the actual used number of crtc */
3602 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3603 
3604 	link_cnt = dm->dc->caps.max_links;
3605 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3606 		DRM_ERROR("DM: Failed to initialize mode config\n");
3607 		return -EINVAL;
3608 	}
3609 
3610 	/* There is one primary plane per CRTC */
3611 	primary_planes = dm->dc->caps.max_streams;
3612 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3613 
3614 	/*
3615 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3616 	 * Order is reversed to match iteration order in atomic check.
3617 	 */
3618 	for (i = (primary_planes - 1); i >= 0; i--) {
3619 		plane = &dm->dc->caps.planes[i];
3620 
3621 		if (initialize_plane(dm, mode_info, i,
3622 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3623 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3624 			goto fail;
3625 		}
3626 	}
3627 
3628 	/*
3629 	 * Initialize overlay planes, index starting after primary planes.
3630 	 * These planes have a higher DRM index than the primary planes since
3631 	 * they should be considered as having a higher z-order.
3632 	 * Order is reversed to match iteration order in atomic check.
3633 	 *
3634 	 * Only support DCN for now, and only expose one so we don't encourage
3635 	 * userspace to use up all the pipes.
3636 	 */
3637 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3638 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3639 
3640 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3641 			continue;
3642 
3643 		if (!plane->blends_with_above || !plane->blends_with_below)
3644 			continue;
3645 
3646 		if (!plane->pixel_format_support.argb8888)
3647 			continue;
3648 
3649 		if (initialize_plane(dm, NULL, primary_planes + i,
3650 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3651 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3652 			goto fail;
3653 		}
3654 
3655 		/* Only create one overlay plane. */
3656 		break;
3657 	}
3658 
3659 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3660 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3661 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3662 			goto fail;
3663 		}
3664 
3665 	/* loops over all connectors on the board */
3666 	for (i = 0; i < link_cnt; i++) {
3667 		struct dc_link *link = NULL;
3668 
3669 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3670 			DRM_ERROR(
3671 				"KMS: Cannot support more than %d display indexes\n",
3672 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3673 			continue;
3674 		}
3675 
3676 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3677 		if (!aconnector)
3678 			goto fail;
3679 
3680 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3681 		if (!aencoder)
3682 			goto fail;
3683 
3684 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3685 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3686 			goto fail;
3687 		}
3688 
3689 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3690 			DRM_ERROR("KMS: Failed to initialize connector\n");
3691 			goto fail;
3692 		}
3693 
3694 		link = dc_get_link_at_index(dm->dc, i);
3695 
3696 		if (!dc_link_detect_sink(link, &new_connection_type))
3697 			DRM_ERROR("KMS: Failed to detect connector\n");
3698 
3699 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3700 			emulated_link_detect(link);
3701 			amdgpu_dm_update_connector_after_detect(aconnector);
3702 
3703 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3704 			amdgpu_dm_update_connector_after_detect(aconnector);
3705 			register_backlight_device(dm, link);
3706 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3707 				amdgpu_dm_set_psr_caps(link);
3708 		}
3709 
3710 
3711 	}
3712 
3713 	/* Software is initialized. Now we can register interrupt handlers. */
3714 	switch (adev->asic_type) {
3715 #if defined(CONFIG_DRM_AMD_DC_SI)
3716 	case CHIP_TAHITI:
3717 	case CHIP_PITCAIRN:
3718 	case CHIP_VERDE:
3719 	case CHIP_OLAND:
3720 		if (dce60_register_irq_handlers(dm->adev)) {
3721 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3722 			goto fail;
3723 		}
3724 		break;
3725 #endif
3726 	case CHIP_BONAIRE:
3727 	case CHIP_HAWAII:
3728 	case CHIP_KAVERI:
3729 	case CHIP_KABINI:
3730 	case CHIP_MULLINS:
3731 	case CHIP_TONGA:
3732 	case CHIP_FIJI:
3733 	case CHIP_CARRIZO:
3734 	case CHIP_STONEY:
3735 	case CHIP_POLARIS11:
3736 	case CHIP_POLARIS10:
3737 	case CHIP_POLARIS12:
3738 	case CHIP_VEGAM:
3739 	case CHIP_VEGA10:
3740 	case CHIP_VEGA12:
3741 	case CHIP_VEGA20:
3742 		if (dce110_register_irq_handlers(dm->adev)) {
3743 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3744 			goto fail;
3745 		}
3746 		break;
3747 #if defined(CONFIG_DRM_AMD_DC_DCN)
3748 	case CHIP_RAVEN:
3749 	case CHIP_NAVI12:
3750 	case CHIP_NAVI10:
3751 	case CHIP_NAVI14:
3752 	case CHIP_RENOIR:
3753 	case CHIP_SIENNA_CICHLID:
3754 	case CHIP_NAVY_FLOUNDER:
3755 	case CHIP_DIMGREY_CAVEFISH:
3756 	case CHIP_VANGOGH:
3757 		if (dcn10_register_irq_handlers(dm->adev)) {
3758 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3759 			goto fail;
3760 		}
3761 		break;
3762 #endif
3763 	default:
3764 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3765 		goto fail;
3766 	}
3767 
3768 	return 0;
3769 fail:
3770 	kfree(aencoder);
3771 	kfree(aconnector);
3772 
3773 	return -EINVAL;
3774 }
3775 
3776 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3777 {
3778 	drm_mode_config_cleanup(dm->ddev);
3779 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3780 	return;
3781 }
3782 
3783 /******************************************************************************
3784  * amdgpu_display_funcs functions
3785  *****************************************************************************/
3786 
3787 /*
3788  * dm_bandwidth_update - program display watermarks
3789  *
3790  * @adev: amdgpu_device pointer
3791  *
3792  * Calculate and program the display watermarks and line buffer allocation.
3793  */
3794 static void dm_bandwidth_update(struct amdgpu_device *adev)
3795 {
3796 	/* TODO: implement later */
3797 }
3798 
3799 static const struct amdgpu_display_funcs dm_display_funcs = {
3800 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3801 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3802 	.backlight_set_level = NULL, /* never called for DC */
3803 	.backlight_get_level = NULL, /* never called for DC */
3804 	.hpd_sense = NULL,/* called unconditionally */
3805 	.hpd_set_polarity = NULL, /* called unconditionally */
3806 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3807 	.page_flip_get_scanoutpos =
3808 		dm_crtc_get_scanoutpos,/* called unconditionally */
3809 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3810 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3811 };
3812 
3813 #if defined(CONFIG_DEBUG_KERNEL_DC)
3814 
3815 static ssize_t s3_debug_store(struct device *device,
3816 			      struct device_attribute *attr,
3817 			      const char *buf,
3818 			      size_t count)
3819 {
3820 	int ret;
3821 	int s3_state;
3822 	struct drm_device *drm_dev = dev_get_drvdata(device);
3823 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3824 
3825 	ret = kstrtoint(buf, 0, &s3_state);
3826 
3827 	if (ret == 0) {
3828 		if (s3_state) {
3829 			dm_resume(adev);
3830 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3831 		} else
3832 			dm_suspend(adev);
3833 	}
3834 
3835 	return ret == 0 ? count : 0;
3836 }
3837 
3838 DEVICE_ATTR_WO(s3_debug);
3839 
3840 #endif
3841 
3842 static int dm_early_init(void *handle)
3843 {
3844 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3845 
3846 	switch (adev->asic_type) {
3847 #if defined(CONFIG_DRM_AMD_DC_SI)
3848 	case CHIP_TAHITI:
3849 	case CHIP_PITCAIRN:
3850 	case CHIP_VERDE:
3851 		adev->mode_info.num_crtc = 6;
3852 		adev->mode_info.num_hpd = 6;
3853 		adev->mode_info.num_dig = 6;
3854 		break;
3855 	case CHIP_OLAND:
3856 		adev->mode_info.num_crtc = 2;
3857 		adev->mode_info.num_hpd = 2;
3858 		adev->mode_info.num_dig = 2;
3859 		break;
3860 #endif
3861 	case CHIP_BONAIRE:
3862 	case CHIP_HAWAII:
3863 		adev->mode_info.num_crtc = 6;
3864 		adev->mode_info.num_hpd = 6;
3865 		adev->mode_info.num_dig = 6;
3866 		break;
3867 	case CHIP_KAVERI:
3868 		adev->mode_info.num_crtc = 4;
3869 		adev->mode_info.num_hpd = 6;
3870 		adev->mode_info.num_dig = 7;
3871 		break;
3872 	case CHIP_KABINI:
3873 	case CHIP_MULLINS:
3874 		adev->mode_info.num_crtc = 2;
3875 		adev->mode_info.num_hpd = 6;
3876 		adev->mode_info.num_dig = 6;
3877 		break;
3878 	case CHIP_FIJI:
3879 	case CHIP_TONGA:
3880 		adev->mode_info.num_crtc = 6;
3881 		adev->mode_info.num_hpd = 6;
3882 		adev->mode_info.num_dig = 7;
3883 		break;
3884 	case CHIP_CARRIZO:
3885 		adev->mode_info.num_crtc = 3;
3886 		adev->mode_info.num_hpd = 6;
3887 		adev->mode_info.num_dig = 9;
3888 		break;
3889 	case CHIP_STONEY:
3890 		adev->mode_info.num_crtc = 2;
3891 		adev->mode_info.num_hpd = 6;
3892 		adev->mode_info.num_dig = 9;
3893 		break;
3894 	case CHIP_POLARIS11:
3895 	case CHIP_POLARIS12:
3896 		adev->mode_info.num_crtc = 5;
3897 		adev->mode_info.num_hpd = 5;
3898 		adev->mode_info.num_dig = 5;
3899 		break;
3900 	case CHIP_POLARIS10:
3901 	case CHIP_VEGAM:
3902 		adev->mode_info.num_crtc = 6;
3903 		adev->mode_info.num_hpd = 6;
3904 		adev->mode_info.num_dig = 6;
3905 		break;
3906 	case CHIP_VEGA10:
3907 	case CHIP_VEGA12:
3908 	case CHIP_VEGA20:
3909 		adev->mode_info.num_crtc = 6;
3910 		adev->mode_info.num_hpd = 6;
3911 		adev->mode_info.num_dig = 6;
3912 		break;
3913 #if defined(CONFIG_DRM_AMD_DC_DCN)
3914 	case CHIP_RAVEN:
3915 	case CHIP_RENOIR:
3916 	case CHIP_VANGOGH:
3917 		adev->mode_info.num_crtc = 4;
3918 		adev->mode_info.num_hpd = 4;
3919 		adev->mode_info.num_dig = 4;
3920 		break;
3921 	case CHIP_NAVI10:
3922 	case CHIP_NAVI12:
3923 	case CHIP_SIENNA_CICHLID:
3924 	case CHIP_NAVY_FLOUNDER:
3925 		adev->mode_info.num_crtc = 6;
3926 		adev->mode_info.num_hpd = 6;
3927 		adev->mode_info.num_dig = 6;
3928 		break;
3929 	case CHIP_NAVI14:
3930 	case CHIP_DIMGREY_CAVEFISH:
3931 		adev->mode_info.num_crtc = 5;
3932 		adev->mode_info.num_hpd = 5;
3933 		adev->mode_info.num_dig = 5;
3934 		break;
3935 #endif
3936 	default:
3937 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3938 		return -EINVAL;
3939 	}
3940 
3941 	amdgpu_dm_set_irq_funcs(adev);
3942 
3943 	if (adev->mode_info.funcs == NULL)
3944 		adev->mode_info.funcs = &dm_display_funcs;
3945 
3946 	/*
3947 	 * Note: Do NOT change adev->audio_endpt_rreg and
3948 	 * adev->audio_endpt_wreg because they are initialised in
3949 	 * amdgpu_device_init()
3950 	 */
3951 #if defined(CONFIG_DEBUG_KERNEL_DC)
3952 	device_create_file(
3953 		adev_to_drm(adev)->dev,
3954 		&dev_attr_s3_debug);
3955 #endif
3956 
3957 	return 0;
3958 }
3959 
3960 static bool modeset_required(struct drm_crtc_state *crtc_state,
3961 			     struct dc_stream_state *new_stream,
3962 			     struct dc_stream_state *old_stream)
3963 {
3964 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3965 }
3966 
3967 static bool modereset_required(struct drm_crtc_state *crtc_state)
3968 {
3969 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3970 }
3971 
3972 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3973 {
3974 	drm_encoder_cleanup(encoder);
3975 	kfree(encoder);
3976 }
3977 
3978 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3979 	.destroy = amdgpu_dm_encoder_destroy,
3980 };
3981 
3982 
3983 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3984 					 struct drm_framebuffer *fb,
3985 					 int *min_downscale, int *max_upscale)
3986 {
3987 	struct amdgpu_device *adev = drm_to_adev(dev);
3988 	struct dc *dc = adev->dm.dc;
3989 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3990 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3991 
3992 	switch (fb->format->format) {
3993 	case DRM_FORMAT_P010:
3994 	case DRM_FORMAT_NV12:
3995 	case DRM_FORMAT_NV21:
3996 		*max_upscale = plane_cap->max_upscale_factor.nv12;
3997 		*min_downscale = plane_cap->max_downscale_factor.nv12;
3998 		break;
3999 
4000 	case DRM_FORMAT_XRGB16161616F:
4001 	case DRM_FORMAT_ARGB16161616F:
4002 	case DRM_FORMAT_XBGR16161616F:
4003 	case DRM_FORMAT_ABGR16161616F:
4004 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4005 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4006 		break;
4007 
4008 	default:
4009 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4010 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4011 		break;
4012 	}
4013 
4014 	/*
4015 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4016 	 * scaling factor of 1.0 == 1000 units.
4017 	 */
4018 	if (*max_upscale == 1)
4019 		*max_upscale = 1000;
4020 
4021 	if (*min_downscale == 1)
4022 		*min_downscale = 1000;
4023 }
4024 
4025 
4026 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4027 				struct dc_scaling_info *scaling_info)
4028 {
4029 	int scale_w, scale_h, min_downscale, max_upscale;
4030 
4031 	memset(scaling_info, 0, sizeof(*scaling_info));
4032 
4033 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4034 	scaling_info->src_rect.x = state->src_x >> 16;
4035 	scaling_info->src_rect.y = state->src_y >> 16;
4036 
4037 	/*
4038 	 * For reasons we don't (yet) fully understand a non-zero
4039 	 * src_y coordinate into an NV12 buffer can cause a
4040 	 * system hang. To avoid hangs (and maybe be overly cautious)
4041 	 * let's reject both non-zero src_x and src_y.
4042 	 *
4043 	 * We currently know of only one use-case to reproduce a
4044 	 * scenario with non-zero src_x and src_y for NV12, which
4045 	 * is to gesture the YouTube Android app into full screen
4046 	 * on ChromeOS.
4047 	 */
4048 	if (state->fb &&
4049 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4050 	    (scaling_info->src_rect.x != 0 ||
4051 	     scaling_info->src_rect.y != 0))
4052 		return -EINVAL;
4053 
4054 	scaling_info->src_rect.width = state->src_w >> 16;
4055 	if (scaling_info->src_rect.width == 0)
4056 		return -EINVAL;
4057 
4058 	scaling_info->src_rect.height = state->src_h >> 16;
4059 	if (scaling_info->src_rect.height == 0)
4060 		return -EINVAL;
4061 
4062 	scaling_info->dst_rect.x = state->crtc_x;
4063 	scaling_info->dst_rect.y = state->crtc_y;
4064 
4065 	if (state->crtc_w == 0)
4066 		return -EINVAL;
4067 
4068 	scaling_info->dst_rect.width = state->crtc_w;
4069 
4070 	if (state->crtc_h == 0)
4071 		return -EINVAL;
4072 
4073 	scaling_info->dst_rect.height = state->crtc_h;
4074 
4075 	/* DRM doesn't specify clipping on destination output. */
4076 	scaling_info->clip_rect = scaling_info->dst_rect;
4077 
4078 	/* Validate scaling per-format with DC plane caps */
4079 	if (state->plane && state->plane->dev && state->fb) {
4080 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4081 					     &min_downscale, &max_upscale);
4082 	} else {
4083 		min_downscale = 250;
4084 		max_upscale = 16000;
4085 	}
4086 
4087 	scale_w = scaling_info->dst_rect.width * 1000 /
4088 		  scaling_info->src_rect.width;
4089 
4090 	if (scale_w < min_downscale || scale_w > max_upscale)
4091 		return -EINVAL;
4092 
4093 	scale_h = scaling_info->dst_rect.height * 1000 /
4094 		  scaling_info->src_rect.height;
4095 
4096 	if (scale_h < min_downscale || scale_h > max_upscale)
4097 		return -EINVAL;
4098 
4099 	/*
4100 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4101 	 * assume reasonable defaults based on the format.
4102 	 */
4103 
4104 	return 0;
4105 }
4106 
4107 static void
4108 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4109 				 uint64_t tiling_flags)
4110 {
4111 	/* Fill GFX8 params */
4112 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4113 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4114 
4115 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4116 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4117 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4118 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4119 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4120 
4121 		/* XXX fix me for VI */
4122 		tiling_info->gfx8.num_banks = num_banks;
4123 		tiling_info->gfx8.array_mode =
4124 				DC_ARRAY_2D_TILED_THIN1;
4125 		tiling_info->gfx8.tile_split = tile_split;
4126 		tiling_info->gfx8.bank_width = bankw;
4127 		tiling_info->gfx8.bank_height = bankh;
4128 		tiling_info->gfx8.tile_aspect = mtaspect;
4129 		tiling_info->gfx8.tile_mode =
4130 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4131 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4132 			== DC_ARRAY_1D_TILED_THIN1) {
4133 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4134 	}
4135 
4136 	tiling_info->gfx8.pipe_config =
4137 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4138 }
4139 
4140 static void
4141 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4142 				  union dc_tiling_info *tiling_info)
4143 {
4144 	tiling_info->gfx9.num_pipes =
4145 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4146 	tiling_info->gfx9.num_banks =
4147 		adev->gfx.config.gb_addr_config_fields.num_banks;
4148 	tiling_info->gfx9.pipe_interleave =
4149 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4150 	tiling_info->gfx9.num_shader_engines =
4151 		adev->gfx.config.gb_addr_config_fields.num_se;
4152 	tiling_info->gfx9.max_compressed_frags =
4153 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4154 	tiling_info->gfx9.num_rb_per_se =
4155 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4156 	tiling_info->gfx9.shaderEnable = 1;
4157 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4158 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4159 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4160 	    adev->asic_type == CHIP_VANGOGH)
4161 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4162 }
4163 
4164 static int
4165 validate_dcc(struct amdgpu_device *adev,
4166 	     const enum surface_pixel_format format,
4167 	     const enum dc_rotation_angle rotation,
4168 	     const union dc_tiling_info *tiling_info,
4169 	     const struct dc_plane_dcc_param *dcc,
4170 	     const struct dc_plane_address *address,
4171 	     const struct plane_size *plane_size)
4172 {
4173 	struct dc *dc = adev->dm.dc;
4174 	struct dc_dcc_surface_param input;
4175 	struct dc_surface_dcc_cap output;
4176 
4177 	memset(&input, 0, sizeof(input));
4178 	memset(&output, 0, sizeof(output));
4179 
4180 	if (!dcc->enable)
4181 		return 0;
4182 
4183 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4184 	    !dc->cap_funcs.get_dcc_compression_cap)
4185 		return -EINVAL;
4186 
4187 	input.format = format;
4188 	input.surface_size.width = plane_size->surface_size.width;
4189 	input.surface_size.height = plane_size->surface_size.height;
4190 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4191 
4192 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4193 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4194 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4195 		input.scan = SCAN_DIRECTION_VERTICAL;
4196 
4197 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4198 		return -EINVAL;
4199 
4200 	if (!output.capable)
4201 		return -EINVAL;
4202 
4203 	if (dcc->independent_64b_blks == 0 &&
4204 	    output.grph.rgb.independent_64b_blks != 0)
4205 		return -EINVAL;
4206 
4207 	return 0;
4208 }
4209 
4210 static bool
4211 modifier_has_dcc(uint64_t modifier)
4212 {
4213 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4214 }
4215 
4216 static unsigned
4217 modifier_gfx9_swizzle_mode(uint64_t modifier)
4218 {
4219 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4220 		return 0;
4221 
4222 	return AMD_FMT_MOD_GET(TILE, modifier);
4223 }
4224 
4225 static const struct drm_format_info *
4226 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4227 {
4228 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4229 }
4230 
4231 static void
4232 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4233 				    union dc_tiling_info *tiling_info,
4234 				    uint64_t modifier)
4235 {
4236 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4237 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4238 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4239 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4240 
4241 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4242 
4243 	if (!IS_AMD_FMT_MOD(modifier))
4244 		return;
4245 
4246 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4247 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4248 
4249 	if (adev->family >= AMDGPU_FAMILY_NV) {
4250 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4251 	} else {
4252 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4253 
4254 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4255 	}
4256 }
4257 
4258 enum dm_micro_swizzle {
4259 	MICRO_SWIZZLE_Z = 0,
4260 	MICRO_SWIZZLE_S = 1,
4261 	MICRO_SWIZZLE_D = 2,
4262 	MICRO_SWIZZLE_R = 3
4263 };
4264 
4265 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4266 					  uint32_t format,
4267 					  uint64_t modifier)
4268 {
4269 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4270 	const struct drm_format_info *info = drm_format_info(format);
4271 	int i;
4272 
4273 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4274 
4275 	if (!info)
4276 		return false;
4277 
4278 	/*
4279 	 * We always have to allow these modifiers:
4280 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4281 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4282 	 */
4283 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4284 	    modifier == DRM_FORMAT_MOD_INVALID) {
4285 		return true;
4286 	}
4287 
4288 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4289 	for (i = 0; i < plane->modifier_count; i++) {
4290 		if (modifier == plane->modifiers[i])
4291 			break;
4292 	}
4293 	if (i == plane->modifier_count)
4294 		return false;
4295 
4296 	/*
4297 	 * For D swizzle the canonical modifier depends on the bpp, so check
4298 	 * it here.
4299 	 */
4300 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4301 	    adev->family >= AMDGPU_FAMILY_NV) {
4302 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4303 			return false;
4304 	}
4305 
4306 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4307 	    info->cpp[0] < 8)
4308 		return false;
4309 
4310 	if (modifier_has_dcc(modifier)) {
4311 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4312 		if (info->cpp[0] != 4)
4313 			return false;
4314 		/* We support multi-planar formats, but not when combined with
4315 		 * additional DCC metadata planes. */
4316 		if (info->num_planes > 1)
4317 			return false;
4318 	}
4319 
4320 	return true;
4321 }
4322 
4323 static void
4324 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4325 {
4326 	if (!*mods)
4327 		return;
4328 
4329 	if (*cap - *size < 1) {
4330 		uint64_t new_cap = *cap * 2;
4331 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4332 
4333 		if (!new_mods) {
4334 			kfree(*mods);
4335 			*mods = NULL;
4336 			return;
4337 		}
4338 
4339 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4340 		kfree(*mods);
4341 		*mods = new_mods;
4342 		*cap = new_cap;
4343 	}
4344 
4345 	(*mods)[*size] = mod;
4346 	*size += 1;
4347 }
4348 
4349 static void
4350 add_gfx9_modifiers(const struct amdgpu_device *adev,
4351 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4352 {
4353 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4354 	int pipe_xor_bits = min(8, pipes +
4355 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4356 	int bank_xor_bits = min(8 - pipe_xor_bits,
4357 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4358 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4359 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4360 
4361 
4362 	if (adev->family == AMDGPU_FAMILY_RV) {
4363 		/* Raven2 and later */
4364 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4365 
4366 		/*
4367 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4368 		 * doesn't support _D on DCN
4369 		 */
4370 
4371 		if (has_constant_encode) {
4372 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4373 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4374 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4375 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4376 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4377 				    AMD_FMT_MOD_SET(DCC, 1) |
4378 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4379 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4380 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4381 		}
4382 
4383 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4384 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4385 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4386 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4387 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4388 			    AMD_FMT_MOD_SET(DCC, 1) |
4389 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4390 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4391 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4392 
4393 		if (has_constant_encode) {
4394 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4395 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4396 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4397 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4398 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4399 				    AMD_FMT_MOD_SET(DCC, 1) |
4400 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4401 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4402 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4403 
4404 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4405 				    AMD_FMT_MOD_SET(RB, rb) |
4406 				    AMD_FMT_MOD_SET(PIPE, pipes));
4407 		}
4408 
4409 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4410 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4411 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4412 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4413 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4414 			    AMD_FMT_MOD_SET(DCC, 1) |
4415 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4416 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4417 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4418 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4419 			    AMD_FMT_MOD_SET(RB, rb) |
4420 			    AMD_FMT_MOD_SET(PIPE, pipes));
4421 	}
4422 
4423 	/*
4424 	 * Only supported for 64bpp on Raven, will be filtered on format in
4425 	 * dm_plane_format_mod_supported.
4426 	 */
4427 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4428 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4429 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4430 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4431 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4432 
4433 	if (adev->family == AMDGPU_FAMILY_RV) {
4434 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4435 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4436 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4437 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4438 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4439 	}
4440 
4441 	/*
4442 	 * Only supported for 64bpp on Raven, will be filtered on format in
4443 	 * dm_plane_format_mod_supported.
4444 	 */
4445 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4446 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4447 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4448 
4449 	if (adev->family == AMDGPU_FAMILY_RV) {
4450 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4451 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4452 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4453 	}
4454 }
4455 
4456 static void
4457 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4458 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4459 {
4460 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4461 
4462 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4463 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4464 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4465 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4466 		    AMD_FMT_MOD_SET(DCC, 1) |
4467 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4468 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4469 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4470 
4471 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4472 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4473 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4474 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4475 		    AMD_FMT_MOD_SET(DCC, 1) |
4476 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4477 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4478 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4479 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4480 
4481 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4482 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4483 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4484 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4485 
4486 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4487 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4488 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4489 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4490 
4491 
4492 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4493 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4494 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4495 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4496 
4497 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4498 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4499 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4500 }
4501 
4502 static void
4503 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4504 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4505 {
4506 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4507 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4508 
4509 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4510 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4511 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4512 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4513 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4514 		    AMD_FMT_MOD_SET(DCC, 1) |
4515 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4516 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4517 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4518 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4519 
4520 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4521 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4522 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4523 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4524 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4525 		    AMD_FMT_MOD_SET(DCC, 1) |
4526 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4527 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4528 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4529 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4530 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4531 
4532 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4533 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4534 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4535 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4536 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4537 
4538 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4539 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4540 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4541 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4542 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4543 
4544 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4545 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4546 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4547 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4548 
4549 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4550 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4551 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4552 }
4553 
4554 static int
4555 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4556 {
4557 	uint64_t size = 0, capacity = 128;
4558 	*mods = NULL;
4559 
4560 	/* We have not hooked up any pre-GFX9 modifiers. */
4561 	if (adev->family < AMDGPU_FAMILY_AI)
4562 		return 0;
4563 
4564 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4565 
4566 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4567 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4568 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4569 		return *mods ? 0 : -ENOMEM;
4570 	}
4571 
4572 	switch (adev->family) {
4573 	case AMDGPU_FAMILY_AI:
4574 	case AMDGPU_FAMILY_RV:
4575 		add_gfx9_modifiers(adev, mods, &size, &capacity);
4576 		break;
4577 	case AMDGPU_FAMILY_NV:
4578 	case AMDGPU_FAMILY_VGH:
4579 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4580 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4581 		else
4582 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4583 		break;
4584 	}
4585 
4586 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4587 
4588 	/* INVALID marks the end of the list. */
4589 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4590 
4591 	if (!*mods)
4592 		return -ENOMEM;
4593 
4594 	return 0;
4595 }
4596 
4597 static int
4598 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4599 					  const struct amdgpu_framebuffer *afb,
4600 					  const enum surface_pixel_format format,
4601 					  const enum dc_rotation_angle rotation,
4602 					  const struct plane_size *plane_size,
4603 					  union dc_tiling_info *tiling_info,
4604 					  struct dc_plane_dcc_param *dcc,
4605 					  struct dc_plane_address *address,
4606 					  const bool force_disable_dcc)
4607 {
4608 	const uint64_t modifier = afb->base.modifier;
4609 	int ret;
4610 
4611 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4612 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4613 
4614 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4615 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
4616 
4617 		dcc->enable = 1;
4618 		dcc->meta_pitch = afb->base.pitches[1];
4619 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4620 
4621 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4622 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4623 	}
4624 
4625 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4626 	if (ret)
4627 		return ret;
4628 
4629 	return 0;
4630 }
4631 
4632 static int
4633 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4634 			     const struct amdgpu_framebuffer *afb,
4635 			     const enum surface_pixel_format format,
4636 			     const enum dc_rotation_angle rotation,
4637 			     const uint64_t tiling_flags,
4638 			     union dc_tiling_info *tiling_info,
4639 			     struct plane_size *plane_size,
4640 			     struct dc_plane_dcc_param *dcc,
4641 			     struct dc_plane_address *address,
4642 			     bool tmz_surface,
4643 			     bool force_disable_dcc)
4644 {
4645 	const struct drm_framebuffer *fb = &afb->base;
4646 	int ret;
4647 
4648 	memset(tiling_info, 0, sizeof(*tiling_info));
4649 	memset(plane_size, 0, sizeof(*plane_size));
4650 	memset(dcc, 0, sizeof(*dcc));
4651 	memset(address, 0, sizeof(*address));
4652 
4653 	address->tmz_surface = tmz_surface;
4654 
4655 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4656 		uint64_t addr = afb->address + fb->offsets[0];
4657 
4658 		plane_size->surface_size.x = 0;
4659 		plane_size->surface_size.y = 0;
4660 		plane_size->surface_size.width = fb->width;
4661 		plane_size->surface_size.height = fb->height;
4662 		plane_size->surface_pitch =
4663 			fb->pitches[0] / fb->format->cpp[0];
4664 
4665 		address->type = PLN_ADDR_TYPE_GRAPHICS;
4666 		address->grph.addr.low_part = lower_32_bits(addr);
4667 		address->grph.addr.high_part = upper_32_bits(addr);
4668 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4669 		uint64_t luma_addr = afb->address + fb->offsets[0];
4670 		uint64_t chroma_addr = afb->address + fb->offsets[1];
4671 
4672 		plane_size->surface_size.x = 0;
4673 		plane_size->surface_size.y = 0;
4674 		plane_size->surface_size.width = fb->width;
4675 		plane_size->surface_size.height = fb->height;
4676 		plane_size->surface_pitch =
4677 			fb->pitches[0] / fb->format->cpp[0];
4678 
4679 		plane_size->chroma_size.x = 0;
4680 		plane_size->chroma_size.y = 0;
4681 		/* TODO: set these based on surface format */
4682 		plane_size->chroma_size.width = fb->width / 2;
4683 		plane_size->chroma_size.height = fb->height / 2;
4684 
4685 		plane_size->chroma_pitch =
4686 			fb->pitches[1] / fb->format->cpp[1];
4687 
4688 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4689 		address->video_progressive.luma_addr.low_part =
4690 			lower_32_bits(luma_addr);
4691 		address->video_progressive.luma_addr.high_part =
4692 			upper_32_bits(luma_addr);
4693 		address->video_progressive.chroma_addr.low_part =
4694 			lower_32_bits(chroma_addr);
4695 		address->video_progressive.chroma_addr.high_part =
4696 			upper_32_bits(chroma_addr);
4697 	}
4698 
4699 	if (adev->family >= AMDGPU_FAMILY_AI) {
4700 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4701 								rotation, plane_size,
4702 								tiling_info, dcc,
4703 								address,
4704 								force_disable_dcc);
4705 		if (ret)
4706 			return ret;
4707 	} else {
4708 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4709 	}
4710 
4711 	return 0;
4712 }
4713 
4714 static void
4715 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4716 			       bool *per_pixel_alpha, bool *global_alpha,
4717 			       int *global_alpha_value)
4718 {
4719 	*per_pixel_alpha = false;
4720 	*global_alpha = false;
4721 	*global_alpha_value = 0xff;
4722 
4723 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4724 		return;
4725 
4726 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4727 		static const uint32_t alpha_formats[] = {
4728 			DRM_FORMAT_ARGB8888,
4729 			DRM_FORMAT_RGBA8888,
4730 			DRM_FORMAT_ABGR8888,
4731 		};
4732 		uint32_t format = plane_state->fb->format->format;
4733 		unsigned int i;
4734 
4735 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4736 			if (format == alpha_formats[i]) {
4737 				*per_pixel_alpha = true;
4738 				break;
4739 			}
4740 		}
4741 	}
4742 
4743 	if (plane_state->alpha < 0xffff) {
4744 		*global_alpha = true;
4745 		*global_alpha_value = plane_state->alpha >> 8;
4746 	}
4747 }
4748 
4749 static int
4750 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4751 			    const enum surface_pixel_format format,
4752 			    enum dc_color_space *color_space)
4753 {
4754 	bool full_range;
4755 
4756 	*color_space = COLOR_SPACE_SRGB;
4757 
4758 	/* DRM color properties only affect non-RGB formats. */
4759 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4760 		return 0;
4761 
4762 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4763 
4764 	switch (plane_state->color_encoding) {
4765 	case DRM_COLOR_YCBCR_BT601:
4766 		if (full_range)
4767 			*color_space = COLOR_SPACE_YCBCR601;
4768 		else
4769 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4770 		break;
4771 
4772 	case DRM_COLOR_YCBCR_BT709:
4773 		if (full_range)
4774 			*color_space = COLOR_SPACE_YCBCR709;
4775 		else
4776 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4777 		break;
4778 
4779 	case DRM_COLOR_YCBCR_BT2020:
4780 		if (full_range)
4781 			*color_space = COLOR_SPACE_2020_YCBCR;
4782 		else
4783 			return -EINVAL;
4784 		break;
4785 
4786 	default:
4787 		return -EINVAL;
4788 	}
4789 
4790 	return 0;
4791 }
4792 
4793 static int
4794 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4795 			    const struct drm_plane_state *plane_state,
4796 			    const uint64_t tiling_flags,
4797 			    struct dc_plane_info *plane_info,
4798 			    struct dc_plane_address *address,
4799 			    bool tmz_surface,
4800 			    bool force_disable_dcc)
4801 {
4802 	const struct drm_framebuffer *fb = plane_state->fb;
4803 	const struct amdgpu_framebuffer *afb =
4804 		to_amdgpu_framebuffer(plane_state->fb);
4805 	int ret;
4806 
4807 	memset(plane_info, 0, sizeof(*plane_info));
4808 
4809 	switch (fb->format->format) {
4810 	case DRM_FORMAT_C8:
4811 		plane_info->format =
4812 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4813 		break;
4814 	case DRM_FORMAT_RGB565:
4815 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4816 		break;
4817 	case DRM_FORMAT_XRGB8888:
4818 	case DRM_FORMAT_ARGB8888:
4819 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4820 		break;
4821 	case DRM_FORMAT_XRGB2101010:
4822 	case DRM_FORMAT_ARGB2101010:
4823 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4824 		break;
4825 	case DRM_FORMAT_XBGR2101010:
4826 	case DRM_FORMAT_ABGR2101010:
4827 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4828 		break;
4829 	case DRM_FORMAT_XBGR8888:
4830 	case DRM_FORMAT_ABGR8888:
4831 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4832 		break;
4833 	case DRM_FORMAT_NV21:
4834 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4835 		break;
4836 	case DRM_FORMAT_NV12:
4837 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4838 		break;
4839 	case DRM_FORMAT_P010:
4840 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4841 		break;
4842 	case DRM_FORMAT_XRGB16161616F:
4843 	case DRM_FORMAT_ARGB16161616F:
4844 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4845 		break;
4846 	case DRM_FORMAT_XBGR16161616F:
4847 	case DRM_FORMAT_ABGR16161616F:
4848 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4849 		break;
4850 	default:
4851 		DRM_ERROR(
4852 			"Unsupported screen format %p4cc\n",
4853 			&fb->format->format);
4854 		return -EINVAL;
4855 	}
4856 
4857 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4858 	case DRM_MODE_ROTATE_0:
4859 		plane_info->rotation = ROTATION_ANGLE_0;
4860 		break;
4861 	case DRM_MODE_ROTATE_90:
4862 		plane_info->rotation = ROTATION_ANGLE_90;
4863 		break;
4864 	case DRM_MODE_ROTATE_180:
4865 		plane_info->rotation = ROTATION_ANGLE_180;
4866 		break;
4867 	case DRM_MODE_ROTATE_270:
4868 		plane_info->rotation = ROTATION_ANGLE_270;
4869 		break;
4870 	default:
4871 		plane_info->rotation = ROTATION_ANGLE_0;
4872 		break;
4873 	}
4874 
4875 	plane_info->visible = true;
4876 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4877 
4878 	plane_info->layer_index = 0;
4879 
4880 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
4881 					  &plane_info->color_space);
4882 	if (ret)
4883 		return ret;
4884 
4885 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4886 					   plane_info->rotation, tiling_flags,
4887 					   &plane_info->tiling_info,
4888 					   &plane_info->plane_size,
4889 					   &plane_info->dcc, address, tmz_surface,
4890 					   force_disable_dcc);
4891 	if (ret)
4892 		return ret;
4893 
4894 	fill_blending_from_plane_state(
4895 		plane_state, &plane_info->per_pixel_alpha,
4896 		&plane_info->global_alpha, &plane_info->global_alpha_value);
4897 
4898 	return 0;
4899 }
4900 
4901 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4902 				    struct dc_plane_state *dc_plane_state,
4903 				    struct drm_plane_state *plane_state,
4904 				    struct drm_crtc_state *crtc_state)
4905 {
4906 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4907 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4908 	struct dc_scaling_info scaling_info;
4909 	struct dc_plane_info plane_info;
4910 	int ret;
4911 	bool force_disable_dcc = false;
4912 
4913 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
4914 	if (ret)
4915 		return ret;
4916 
4917 	dc_plane_state->src_rect = scaling_info.src_rect;
4918 	dc_plane_state->dst_rect = scaling_info.dst_rect;
4919 	dc_plane_state->clip_rect = scaling_info.clip_rect;
4920 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4921 
4922 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4923 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
4924 					  afb->tiling_flags,
4925 					  &plane_info,
4926 					  &dc_plane_state->address,
4927 					  afb->tmz_surface,
4928 					  force_disable_dcc);
4929 	if (ret)
4930 		return ret;
4931 
4932 	dc_plane_state->format = plane_info.format;
4933 	dc_plane_state->color_space = plane_info.color_space;
4934 	dc_plane_state->format = plane_info.format;
4935 	dc_plane_state->plane_size = plane_info.plane_size;
4936 	dc_plane_state->rotation = plane_info.rotation;
4937 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4938 	dc_plane_state->stereo_format = plane_info.stereo_format;
4939 	dc_plane_state->tiling_info = plane_info.tiling_info;
4940 	dc_plane_state->visible = plane_info.visible;
4941 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4942 	dc_plane_state->global_alpha = plane_info.global_alpha;
4943 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4944 	dc_plane_state->dcc = plane_info.dcc;
4945 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4946 	dc_plane_state->flip_int_enabled = true;
4947 
4948 	/*
4949 	 * Always set input transfer function, since plane state is refreshed
4950 	 * every time.
4951 	 */
4952 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4953 	if (ret)
4954 		return ret;
4955 
4956 	return 0;
4957 }
4958 
4959 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4960 					   const struct dm_connector_state *dm_state,
4961 					   struct dc_stream_state *stream)
4962 {
4963 	enum amdgpu_rmx_type rmx_type;
4964 
4965 	struct rect src = { 0 }; /* viewport in composition space*/
4966 	struct rect dst = { 0 }; /* stream addressable area */
4967 
4968 	/* no mode. nothing to be done */
4969 	if (!mode)
4970 		return;
4971 
4972 	/* Full screen scaling by default */
4973 	src.width = mode->hdisplay;
4974 	src.height = mode->vdisplay;
4975 	dst.width = stream->timing.h_addressable;
4976 	dst.height = stream->timing.v_addressable;
4977 
4978 	if (dm_state) {
4979 		rmx_type = dm_state->scaling;
4980 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4981 			if (src.width * dst.height <
4982 					src.height * dst.width) {
4983 				/* height needs less upscaling/more downscaling */
4984 				dst.width = src.width *
4985 						dst.height / src.height;
4986 			} else {
4987 				/* width needs less upscaling/more downscaling */
4988 				dst.height = src.height *
4989 						dst.width / src.width;
4990 			}
4991 		} else if (rmx_type == RMX_CENTER) {
4992 			dst = src;
4993 		}
4994 
4995 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
4996 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
4997 
4998 		if (dm_state->underscan_enable) {
4999 			dst.x += dm_state->underscan_hborder / 2;
5000 			dst.y += dm_state->underscan_vborder / 2;
5001 			dst.width -= dm_state->underscan_hborder;
5002 			dst.height -= dm_state->underscan_vborder;
5003 		}
5004 	}
5005 
5006 	stream->src = src;
5007 	stream->dst = dst;
5008 
5009 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5010 		      dst.x, dst.y, dst.width, dst.height);
5011 
5012 }
5013 
5014 static enum dc_color_depth
5015 convert_color_depth_from_display_info(const struct drm_connector *connector,
5016 				      bool is_y420, int requested_bpc)
5017 {
5018 	uint8_t bpc;
5019 
5020 	if (is_y420) {
5021 		bpc = 8;
5022 
5023 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5024 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5025 			bpc = 16;
5026 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5027 			bpc = 12;
5028 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5029 			bpc = 10;
5030 	} else {
5031 		bpc = (uint8_t)connector->display_info.bpc;
5032 		/* Assume 8 bpc by default if no bpc is specified. */
5033 		bpc = bpc ? bpc : 8;
5034 	}
5035 
5036 	if (requested_bpc > 0) {
5037 		/*
5038 		 * Cap display bpc based on the user requested value.
5039 		 *
5040 		 * The value for state->max_bpc may not correctly updated
5041 		 * depending on when the connector gets added to the state
5042 		 * or if this was called outside of atomic check, so it
5043 		 * can't be used directly.
5044 		 */
5045 		bpc = min_t(u8, bpc, requested_bpc);
5046 
5047 		/* Round down to the nearest even number. */
5048 		bpc = bpc - (bpc & 1);
5049 	}
5050 
5051 	switch (bpc) {
5052 	case 0:
5053 		/*
5054 		 * Temporary Work around, DRM doesn't parse color depth for
5055 		 * EDID revision before 1.4
5056 		 * TODO: Fix edid parsing
5057 		 */
5058 		return COLOR_DEPTH_888;
5059 	case 6:
5060 		return COLOR_DEPTH_666;
5061 	case 8:
5062 		return COLOR_DEPTH_888;
5063 	case 10:
5064 		return COLOR_DEPTH_101010;
5065 	case 12:
5066 		return COLOR_DEPTH_121212;
5067 	case 14:
5068 		return COLOR_DEPTH_141414;
5069 	case 16:
5070 		return COLOR_DEPTH_161616;
5071 	default:
5072 		return COLOR_DEPTH_UNDEFINED;
5073 	}
5074 }
5075 
5076 static enum dc_aspect_ratio
5077 get_aspect_ratio(const struct drm_display_mode *mode_in)
5078 {
5079 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5080 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5081 }
5082 
5083 static enum dc_color_space
5084 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5085 {
5086 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5087 
5088 	switch (dc_crtc_timing->pixel_encoding)	{
5089 	case PIXEL_ENCODING_YCBCR422:
5090 	case PIXEL_ENCODING_YCBCR444:
5091 	case PIXEL_ENCODING_YCBCR420:
5092 	{
5093 		/*
5094 		 * 27030khz is the separation point between HDTV and SDTV
5095 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5096 		 * respectively
5097 		 */
5098 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5099 			if (dc_crtc_timing->flags.Y_ONLY)
5100 				color_space =
5101 					COLOR_SPACE_YCBCR709_LIMITED;
5102 			else
5103 				color_space = COLOR_SPACE_YCBCR709;
5104 		} else {
5105 			if (dc_crtc_timing->flags.Y_ONLY)
5106 				color_space =
5107 					COLOR_SPACE_YCBCR601_LIMITED;
5108 			else
5109 				color_space = COLOR_SPACE_YCBCR601;
5110 		}
5111 
5112 	}
5113 	break;
5114 	case PIXEL_ENCODING_RGB:
5115 		color_space = COLOR_SPACE_SRGB;
5116 		break;
5117 
5118 	default:
5119 		WARN_ON(1);
5120 		break;
5121 	}
5122 
5123 	return color_space;
5124 }
5125 
5126 static bool adjust_colour_depth_from_display_info(
5127 	struct dc_crtc_timing *timing_out,
5128 	const struct drm_display_info *info)
5129 {
5130 	enum dc_color_depth depth = timing_out->display_color_depth;
5131 	int normalized_clk;
5132 	do {
5133 		normalized_clk = timing_out->pix_clk_100hz / 10;
5134 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5135 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5136 			normalized_clk /= 2;
5137 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5138 		switch (depth) {
5139 		case COLOR_DEPTH_888:
5140 			break;
5141 		case COLOR_DEPTH_101010:
5142 			normalized_clk = (normalized_clk * 30) / 24;
5143 			break;
5144 		case COLOR_DEPTH_121212:
5145 			normalized_clk = (normalized_clk * 36) / 24;
5146 			break;
5147 		case COLOR_DEPTH_161616:
5148 			normalized_clk = (normalized_clk * 48) / 24;
5149 			break;
5150 		default:
5151 			/* The above depths are the only ones valid for HDMI. */
5152 			return false;
5153 		}
5154 		if (normalized_clk <= info->max_tmds_clock) {
5155 			timing_out->display_color_depth = depth;
5156 			return true;
5157 		}
5158 	} while (--depth > COLOR_DEPTH_666);
5159 	return false;
5160 }
5161 
5162 static void fill_stream_properties_from_drm_display_mode(
5163 	struct dc_stream_state *stream,
5164 	const struct drm_display_mode *mode_in,
5165 	const struct drm_connector *connector,
5166 	const struct drm_connector_state *connector_state,
5167 	const struct dc_stream_state *old_stream,
5168 	int requested_bpc)
5169 {
5170 	struct dc_crtc_timing *timing_out = &stream->timing;
5171 	const struct drm_display_info *info = &connector->display_info;
5172 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5173 	struct hdmi_vendor_infoframe hv_frame;
5174 	struct hdmi_avi_infoframe avi_frame;
5175 
5176 	memset(&hv_frame, 0, sizeof(hv_frame));
5177 	memset(&avi_frame, 0, sizeof(avi_frame));
5178 
5179 	timing_out->h_border_left = 0;
5180 	timing_out->h_border_right = 0;
5181 	timing_out->v_border_top = 0;
5182 	timing_out->v_border_bottom = 0;
5183 	/* TODO: un-hardcode */
5184 	if (drm_mode_is_420_only(info, mode_in)
5185 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5186 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5187 	else if (drm_mode_is_420_also(info, mode_in)
5188 			&& aconnector->force_yuv420_output)
5189 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5190 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5191 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5192 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5193 	else
5194 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5195 
5196 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5197 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5198 		connector,
5199 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5200 		requested_bpc);
5201 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5202 	timing_out->hdmi_vic = 0;
5203 
5204 	if(old_stream) {
5205 		timing_out->vic = old_stream->timing.vic;
5206 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5207 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5208 	} else {
5209 		timing_out->vic = drm_match_cea_mode(mode_in);
5210 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5211 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5212 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5213 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5214 	}
5215 
5216 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5217 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5218 		timing_out->vic = avi_frame.video_code;
5219 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5220 		timing_out->hdmi_vic = hv_frame.vic;
5221 	}
5222 
5223 	if (is_freesync_video_mode(mode_in, aconnector)) {
5224 		timing_out->h_addressable = mode_in->hdisplay;
5225 		timing_out->h_total = mode_in->htotal;
5226 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5227 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5228 		timing_out->v_total = mode_in->vtotal;
5229 		timing_out->v_addressable = mode_in->vdisplay;
5230 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5231 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5232 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5233 	} else {
5234 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5235 		timing_out->h_total = mode_in->crtc_htotal;
5236 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5237 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5238 		timing_out->v_total = mode_in->crtc_vtotal;
5239 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5240 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5241 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5242 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5243 	}
5244 
5245 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5246 
5247 	stream->output_color_space = get_output_color_space(timing_out);
5248 
5249 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5250 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5251 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5252 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5253 		    drm_mode_is_420_also(info, mode_in) &&
5254 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5255 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5256 			adjust_colour_depth_from_display_info(timing_out, info);
5257 		}
5258 	}
5259 }
5260 
5261 static void fill_audio_info(struct audio_info *audio_info,
5262 			    const struct drm_connector *drm_connector,
5263 			    const struct dc_sink *dc_sink)
5264 {
5265 	int i = 0;
5266 	int cea_revision = 0;
5267 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5268 
5269 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5270 	audio_info->product_id = edid_caps->product_id;
5271 
5272 	cea_revision = drm_connector->display_info.cea_rev;
5273 
5274 	strscpy(audio_info->display_name,
5275 		edid_caps->display_name,
5276 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5277 
5278 	if (cea_revision >= 3) {
5279 		audio_info->mode_count = edid_caps->audio_mode_count;
5280 
5281 		for (i = 0; i < audio_info->mode_count; ++i) {
5282 			audio_info->modes[i].format_code =
5283 					(enum audio_format_code)
5284 					(edid_caps->audio_modes[i].format_code);
5285 			audio_info->modes[i].channel_count =
5286 					edid_caps->audio_modes[i].channel_count;
5287 			audio_info->modes[i].sample_rates.all =
5288 					edid_caps->audio_modes[i].sample_rate;
5289 			audio_info->modes[i].sample_size =
5290 					edid_caps->audio_modes[i].sample_size;
5291 		}
5292 	}
5293 
5294 	audio_info->flags.all = edid_caps->speaker_flags;
5295 
5296 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5297 	if (drm_connector->latency_present[0]) {
5298 		audio_info->video_latency = drm_connector->video_latency[0];
5299 		audio_info->audio_latency = drm_connector->audio_latency[0];
5300 	}
5301 
5302 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5303 
5304 }
5305 
5306 static void
5307 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5308 				      struct drm_display_mode *dst_mode)
5309 {
5310 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5311 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5312 	dst_mode->crtc_clock = src_mode->crtc_clock;
5313 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5314 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5315 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5316 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5317 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5318 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5319 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5320 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5321 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5322 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5323 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5324 }
5325 
5326 static void
5327 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5328 					const struct drm_display_mode *native_mode,
5329 					bool scale_enabled)
5330 {
5331 	if (scale_enabled) {
5332 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5333 	} else if (native_mode->clock == drm_mode->clock &&
5334 			native_mode->htotal == drm_mode->htotal &&
5335 			native_mode->vtotal == drm_mode->vtotal) {
5336 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5337 	} else {
5338 		/* no scaling nor amdgpu inserted, no need to patch */
5339 	}
5340 }
5341 
5342 static struct dc_sink *
5343 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5344 {
5345 	struct dc_sink_init_data sink_init_data = { 0 };
5346 	struct dc_sink *sink = NULL;
5347 	sink_init_data.link = aconnector->dc_link;
5348 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5349 
5350 	sink = dc_sink_create(&sink_init_data);
5351 	if (!sink) {
5352 		DRM_ERROR("Failed to create sink!\n");
5353 		return NULL;
5354 	}
5355 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5356 
5357 	return sink;
5358 }
5359 
5360 static void set_multisync_trigger_params(
5361 		struct dc_stream_state *stream)
5362 {
5363 	struct dc_stream_state *master = NULL;
5364 
5365 	if (stream->triggered_crtc_reset.enabled) {
5366 		master = stream->triggered_crtc_reset.event_source;
5367 		stream->triggered_crtc_reset.event =
5368 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5369 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5370 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5371 	}
5372 }
5373 
5374 static void set_master_stream(struct dc_stream_state *stream_set[],
5375 			      int stream_count)
5376 {
5377 	int j, highest_rfr = 0, master_stream = 0;
5378 
5379 	for (j = 0;  j < stream_count; j++) {
5380 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5381 			int refresh_rate = 0;
5382 
5383 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5384 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5385 			if (refresh_rate > highest_rfr) {
5386 				highest_rfr = refresh_rate;
5387 				master_stream = j;
5388 			}
5389 		}
5390 	}
5391 	for (j = 0;  j < stream_count; j++) {
5392 		if (stream_set[j])
5393 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5394 	}
5395 }
5396 
5397 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5398 {
5399 	int i = 0;
5400 	struct dc_stream_state *stream;
5401 
5402 	if (context->stream_count < 2)
5403 		return;
5404 	for (i = 0; i < context->stream_count ; i++) {
5405 		if (!context->streams[i])
5406 			continue;
5407 		/*
5408 		 * TODO: add a function to read AMD VSDB bits and set
5409 		 * crtc_sync_master.multi_sync_enabled flag
5410 		 * For now it's set to false
5411 		 */
5412 	}
5413 
5414 	set_master_stream(context->streams, context->stream_count);
5415 
5416 	for (i = 0; i < context->stream_count ; i++) {
5417 		stream = context->streams[i];
5418 
5419 		if (!stream)
5420 			continue;
5421 
5422 		set_multisync_trigger_params(stream);
5423 	}
5424 }
5425 
5426 static struct drm_display_mode *
5427 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5428 			  bool use_probed_modes)
5429 {
5430 	struct drm_display_mode *m, *m_pref = NULL;
5431 	u16 current_refresh, highest_refresh;
5432 	struct list_head *list_head = use_probed_modes ?
5433 						    &aconnector->base.probed_modes :
5434 						    &aconnector->base.modes;
5435 
5436 	if (aconnector->freesync_vid_base.clock != 0)
5437 		return &aconnector->freesync_vid_base;
5438 
5439 	/* Find the preferred mode */
5440 	list_for_each_entry (m, list_head, head) {
5441 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5442 			m_pref = m;
5443 			break;
5444 		}
5445 	}
5446 
5447 	if (!m_pref) {
5448 		/* Probably an EDID with no preferred mode. Fallback to first entry */
5449 		m_pref = list_first_entry_or_null(
5450 			&aconnector->base.modes, struct drm_display_mode, head);
5451 		if (!m_pref) {
5452 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5453 			return NULL;
5454 		}
5455 	}
5456 
5457 	highest_refresh = drm_mode_vrefresh(m_pref);
5458 
5459 	/*
5460 	 * Find the mode with highest refresh rate with same resolution.
5461 	 * For some monitors, preferred mode is not the mode with highest
5462 	 * supported refresh rate.
5463 	 */
5464 	list_for_each_entry (m, list_head, head) {
5465 		current_refresh  = drm_mode_vrefresh(m);
5466 
5467 		if (m->hdisplay == m_pref->hdisplay &&
5468 		    m->vdisplay == m_pref->vdisplay &&
5469 		    highest_refresh < current_refresh) {
5470 			highest_refresh = current_refresh;
5471 			m_pref = m;
5472 		}
5473 	}
5474 
5475 	aconnector->freesync_vid_base = *m_pref;
5476 	return m_pref;
5477 }
5478 
5479 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5480 				   struct amdgpu_dm_connector *aconnector)
5481 {
5482 	struct drm_display_mode *high_mode;
5483 	int timing_diff;
5484 
5485 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5486 	if (!high_mode || !mode)
5487 		return false;
5488 
5489 	timing_diff = high_mode->vtotal - mode->vtotal;
5490 
5491 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5492 	    high_mode->hdisplay != mode->hdisplay ||
5493 	    high_mode->vdisplay != mode->vdisplay ||
5494 	    high_mode->hsync_start != mode->hsync_start ||
5495 	    high_mode->hsync_end != mode->hsync_end ||
5496 	    high_mode->htotal != mode->htotal ||
5497 	    high_mode->hskew != mode->hskew ||
5498 	    high_mode->vscan != mode->vscan ||
5499 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5500 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5501 		return false;
5502 	else
5503 		return true;
5504 }
5505 
5506 static struct dc_stream_state *
5507 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5508 		       const struct drm_display_mode *drm_mode,
5509 		       const struct dm_connector_state *dm_state,
5510 		       const struct dc_stream_state *old_stream,
5511 		       int requested_bpc)
5512 {
5513 	struct drm_display_mode *preferred_mode = NULL;
5514 	struct drm_connector *drm_connector;
5515 	const struct drm_connector_state *con_state =
5516 		dm_state ? &dm_state->base : NULL;
5517 	struct dc_stream_state *stream = NULL;
5518 	struct drm_display_mode mode = *drm_mode;
5519 	struct drm_display_mode saved_mode;
5520 	struct drm_display_mode *freesync_mode = NULL;
5521 	bool native_mode_found = false;
5522 	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5523 	int mode_refresh;
5524 	int preferred_refresh = 0;
5525 #if defined(CONFIG_DRM_AMD_DC_DCN)
5526 	struct dsc_dec_dpcd_caps dsc_caps;
5527 	uint32_t link_bandwidth_kbps;
5528 #endif
5529 	struct dc_sink *sink = NULL;
5530 
5531 	memset(&saved_mode, 0, sizeof(saved_mode));
5532 
5533 	if (aconnector == NULL) {
5534 		DRM_ERROR("aconnector is NULL!\n");
5535 		return stream;
5536 	}
5537 
5538 	drm_connector = &aconnector->base;
5539 
5540 	if (!aconnector->dc_sink) {
5541 		sink = create_fake_sink(aconnector);
5542 		if (!sink)
5543 			return stream;
5544 	} else {
5545 		sink = aconnector->dc_sink;
5546 		dc_sink_retain(sink);
5547 	}
5548 
5549 	stream = dc_create_stream_for_sink(sink);
5550 
5551 	if (stream == NULL) {
5552 		DRM_ERROR("Failed to create stream for sink!\n");
5553 		goto finish;
5554 	}
5555 
5556 	stream->dm_stream_context = aconnector;
5557 
5558 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5559 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5560 
5561 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5562 		/* Search for preferred mode */
5563 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5564 			native_mode_found = true;
5565 			break;
5566 		}
5567 	}
5568 	if (!native_mode_found)
5569 		preferred_mode = list_first_entry_or_null(
5570 				&aconnector->base.modes,
5571 				struct drm_display_mode,
5572 				head);
5573 
5574 	mode_refresh = drm_mode_vrefresh(&mode);
5575 
5576 	if (preferred_mode == NULL) {
5577 		/*
5578 		 * This may not be an error, the use case is when we have no
5579 		 * usermode calls to reset and set mode upon hotplug. In this
5580 		 * case, we call set mode ourselves to restore the previous mode
5581 		 * and the modelist may not be filled in in time.
5582 		 */
5583 		DRM_DEBUG_DRIVER("No preferred mode found\n");
5584 	} else {
5585 		recalculate_timing |= amdgpu_freesync_vid_mode &&
5586 				 is_freesync_video_mode(&mode, aconnector);
5587 		if (recalculate_timing) {
5588 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5589 			saved_mode = mode;
5590 			mode = *freesync_mode;
5591 		} else {
5592 			decide_crtc_timing_for_drm_display_mode(
5593 				&mode, preferred_mode,
5594 				dm_state ? (dm_state->scaling != RMX_OFF) : false);
5595 		}
5596 
5597 		preferred_refresh = drm_mode_vrefresh(preferred_mode);
5598 	}
5599 
5600 	if (recalculate_timing)
5601 		drm_mode_set_crtcinfo(&saved_mode, 0);
5602 	else if (!dm_state)
5603 		drm_mode_set_crtcinfo(&mode, 0);
5604 
5605        /*
5606 	* If scaling is enabled and refresh rate didn't change
5607 	* we copy the vic and polarities of the old timings
5608 	*/
5609 	if (!recalculate_timing || mode_refresh != preferred_refresh)
5610 		fill_stream_properties_from_drm_display_mode(
5611 			stream, &mode, &aconnector->base, con_state, NULL,
5612 			requested_bpc);
5613 	else
5614 		fill_stream_properties_from_drm_display_mode(
5615 			stream, &mode, &aconnector->base, con_state, old_stream,
5616 			requested_bpc);
5617 
5618 	stream->timing.flags.DSC = 0;
5619 
5620 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5621 #if defined(CONFIG_DRM_AMD_DC_DCN)
5622 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5623 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5624 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5625 				      &dsc_caps);
5626 		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5627 							     dc_link_get_link_cap(aconnector->dc_link));
5628 
5629 		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5630 			/* Set DSC policy according to dsc_clock_en */
5631 			dc_dsc_policy_set_enable_dsc_when_not_needed(
5632 				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5633 
5634 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5635 						  &dsc_caps,
5636 						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5637 						  0,
5638 						  link_bandwidth_kbps,
5639 						  &stream->timing,
5640 						  &stream->timing.dsc_cfg))
5641 				stream->timing.flags.DSC = 1;
5642 			/* Overwrite the stream flag if DSC is enabled through debugfs */
5643 			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5644 				stream->timing.flags.DSC = 1;
5645 
5646 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5647 				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5648 
5649 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5650 				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5651 
5652 			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5653 				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5654 		}
5655 #endif
5656 	}
5657 
5658 	update_stream_scaling_settings(&mode, dm_state, stream);
5659 
5660 	fill_audio_info(
5661 		&stream->audio_info,
5662 		drm_connector,
5663 		sink);
5664 
5665 	update_stream_signal(stream, sink);
5666 
5667 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5668 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5669 
5670 	if (stream->link->psr_settings.psr_feature_enabled) {
5671 		//
5672 		// should decide stream support vsc sdp colorimetry capability
5673 		// before building vsc info packet
5674 		//
5675 		stream->use_vsc_sdp_for_colorimetry = false;
5676 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5677 			stream->use_vsc_sdp_for_colorimetry =
5678 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5679 		} else {
5680 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5681 				stream->use_vsc_sdp_for_colorimetry = true;
5682 		}
5683 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5684 	}
5685 finish:
5686 	dc_sink_release(sink);
5687 
5688 	return stream;
5689 }
5690 
5691 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5692 {
5693 	drm_crtc_cleanup(crtc);
5694 	kfree(crtc);
5695 }
5696 
5697 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5698 				  struct drm_crtc_state *state)
5699 {
5700 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
5701 
5702 	/* TODO Destroy dc_stream objects are stream object is flattened */
5703 	if (cur->stream)
5704 		dc_stream_release(cur->stream);
5705 
5706 
5707 	__drm_atomic_helper_crtc_destroy_state(state);
5708 
5709 
5710 	kfree(state);
5711 }
5712 
5713 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5714 {
5715 	struct dm_crtc_state *state;
5716 
5717 	if (crtc->state)
5718 		dm_crtc_destroy_state(crtc, crtc->state);
5719 
5720 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5721 	if (WARN_ON(!state))
5722 		return;
5723 
5724 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
5725 }
5726 
5727 static struct drm_crtc_state *
5728 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5729 {
5730 	struct dm_crtc_state *state, *cur;
5731 
5732 	cur = to_dm_crtc_state(crtc->state);
5733 
5734 	if (WARN_ON(!crtc->state))
5735 		return NULL;
5736 
5737 	state = kzalloc(sizeof(*state), GFP_KERNEL);
5738 	if (!state)
5739 		return NULL;
5740 
5741 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5742 
5743 	if (cur->stream) {
5744 		state->stream = cur->stream;
5745 		dc_stream_retain(state->stream);
5746 	}
5747 
5748 	state->active_planes = cur->active_planes;
5749 	state->vrr_infopacket = cur->vrr_infopacket;
5750 	state->abm_level = cur->abm_level;
5751 	state->vrr_supported = cur->vrr_supported;
5752 	state->freesync_config = cur->freesync_config;
5753 	state->cm_has_degamma = cur->cm_has_degamma;
5754 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5755 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
5756 
5757 	return &state->base;
5758 }
5759 
5760 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5761 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5762 {
5763 	crtc_debugfs_init(crtc);
5764 
5765 	return 0;
5766 }
5767 #endif
5768 
5769 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5770 {
5771 	enum dc_irq_source irq_source;
5772 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5773 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5774 	int rc;
5775 
5776 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5777 
5778 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5779 
5780 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
5781 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
5782 	return rc;
5783 }
5784 
5785 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5786 {
5787 	enum dc_irq_source irq_source;
5788 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5789 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5790 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5791 #if defined(CONFIG_DRM_AMD_DC_DCN)
5792 	struct amdgpu_display_manager *dm = &adev->dm;
5793 	unsigned long flags;
5794 #endif
5795 	int rc = 0;
5796 
5797 	if (enable) {
5798 		/* vblank irq on -> Only need vupdate irq in vrr mode */
5799 		if (amdgpu_dm_vrr_active(acrtc_state))
5800 			rc = dm_set_vupdate_irq(crtc, true);
5801 	} else {
5802 		/* vblank irq off -> vupdate irq off */
5803 		rc = dm_set_vupdate_irq(crtc, false);
5804 	}
5805 
5806 	if (rc)
5807 		return rc;
5808 
5809 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5810 
5811 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5812 		return -EBUSY;
5813 
5814 	if (amdgpu_in_reset(adev))
5815 		return 0;
5816 
5817 #if defined(CONFIG_DRM_AMD_DC_DCN)
5818 	spin_lock_irqsave(&dm->vblank_lock, flags);
5819 	dm->vblank_workqueue->dm = dm;
5820 	dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5821 	dm->vblank_workqueue->enable = enable;
5822 	spin_unlock_irqrestore(&dm->vblank_lock, flags);
5823 	schedule_work(&dm->vblank_workqueue->mall_work);
5824 #endif
5825 
5826 	return 0;
5827 }
5828 
5829 static int dm_enable_vblank(struct drm_crtc *crtc)
5830 {
5831 	return dm_set_vblank(crtc, true);
5832 }
5833 
5834 static void dm_disable_vblank(struct drm_crtc *crtc)
5835 {
5836 	dm_set_vblank(crtc, false);
5837 }
5838 
5839 /* Implemented only the options currently availible for the driver */
5840 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5841 	.reset = dm_crtc_reset_state,
5842 	.destroy = amdgpu_dm_crtc_destroy,
5843 	.set_config = drm_atomic_helper_set_config,
5844 	.page_flip = drm_atomic_helper_page_flip,
5845 	.atomic_duplicate_state = dm_crtc_duplicate_state,
5846 	.atomic_destroy_state = dm_crtc_destroy_state,
5847 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
5848 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5849 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5850 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
5851 	.enable_vblank = dm_enable_vblank,
5852 	.disable_vblank = dm_disable_vblank,
5853 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5854 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5855 	.late_register = amdgpu_dm_crtc_late_register,
5856 #endif
5857 };
5858 
5859 static enum drm_connector_status
5860 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5861 {
5862 	bool connected;
5863 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5864 
5865 	/*
5866 	 * Notes:
5867 	 * 1. This interface is NOT called in context of HPD irq.
5868 	 * 2. This interface *is called* in context of user-mode ioctl. Which
5869 	 * makes it a bad place for *any* MST-related activity.
5870 	 */
5871 
5872 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5873 	    !aconnector->fake_enable)
5874 		connected = (aconnector->dc_sink != NULL);
5875 	else
5876 		connected = (aconnector->base.force == DRM_FORCE_ON);
5877 
5878 	update_subconnector_property(aconnector);
5879 
5880 	return (connected ? connector_status_connected :
5881 			connector_status_disconnected);
5882 }
5883 
5884 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5885 					    struct drm_connector_state *connector_state,
5886 					    struct drm_property *property,
5887 					    uint64_t val)
5888 {
5889 	struct drm_device *dev = connector->dev;
5890 	struct amdgpu_device *adev = drm_to_adev(dev);
5891 	struct dm_connector_state *dm_old_state =
5892 		to_dm_connector_state(connector->state);
5893 	struct dm_connector_state *dm_new_state =
5894 		to_dm_connector_state(connector_state);
5895 
5896 	int ret = -EINVAL;
5897 
5898 	if (property == dev->mode_config.scaling_mode_property) {
5899 		enum amdgpu_rmx_type rmx_type;
5900 
5901 		switch (val) {
5902 		case DRM_MODE_SCALE_CENTER:
5903 			rmx_type = RMX_CENTER;
5904 			break;
5905 		case DRM_MODE_SCALE_ASPECT:
5906 			rmx_type = RMX_ASPECT;
5907 			break;
5908 		case DRM_MODE_SCALE_FULLSCREEN:
5909 			rmx_type = RMX_FULL;
5910 			break;
5911 		case DRM_MODE_SCALE_NONE:
5912 		default:
5913 			rmx_type = RMX_OFF;
5914 			break;
5915 		}
5916 
5917 		if (dm_old_state->scaling == rmx_type)
5918 			return 0;
5919 
5920 		dm_new_state->scaling = rmx_type;
5921 		ret = 0;
5922 	} else if (property == adev->mode_info.underscan_hborder_property) {
5923 		dm_new_state->underscan_hborder = val;
5924 		ret = 0;
5925 	} else if (property == adev->mode_info.underscan_vborder_property) {
5926 		dm_new_state->underscan_vborder = val;
5927 		ret = 0;
5928 	} else if (property == adev->mode_info.underscan_property) {
5929 		dm_new_state->underscan_enable = val;
5930 		ret = 0;
5931 	} else if (property == adev->mode_info.abm_level_property) {
5932 		dm_new_state->abm_level = val;
5933 		ret = 0;
5934 	}
5935 
5936 	return ret;
5937 }
5938 
5939 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5940 					    const struct drm_connector_state *state,
5941 					    struct drm_property *property,
5942 					    uint64_t *val)
5943 {
5944 	struct drm_device *dev = connector->dev;
5945 	struct amdgpu_device *adev = drm_to_adev(dev);
5946 	struct dm_connector_state *dm_state =
5947 		to_dm_connector_state(state);
5948 	int ret = -EINVAL;
5949 
5950 	if (property == dev->mode_config.scaling_mode_property) {
5951 		switch (dm_state->scaling) {
5952 		case RMX_CENTER:
5953 			*val = DRM_MODE_SCALE_CENTER;
5954 			break;
5955 		case RMX_ASPECT:
5956 			*val = DRM_MODE_SCALE_ASPECT;
5957 			break;
5958 		case RMX_FULL:
5959 			*val = DRM_MODE_SCALE_FULLSCREEN;
5960 			break;
5961 		case RMX_OFF:
5962 		default:
5963 			*val = DRM_MODE_SCALE_NONE;
5964 			break;
5965 		}
5966 		ret = 0;
5967 	} else if (property == adev->mode_info.underscan_hborder_property) {
5968 		*val = dm_state->underscan_hborder;
5969 		ret = 0;
5970 	} else if (property == adev->mode_info.underscan_vborder_property) {
5971 		*val = dm_state->underscan_vborder;
5972 		ret = 0;
5973 	} else if (property == adev->mode_info.underscan_property) {
5974 		*val = dm_state->underscan_enable;
5975 		ret = 0;
5976 	} else if (property == adev->mode_info.abm_level_property) {
5977 		*val = dm_state->abm_level;
5978 		ret = 0;
5979 	}
5980 
5981 	return ret;
5982 }
5983 
5984 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5985 {
5986 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5987 
5988 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5989 }
5990 
5991 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5992 {
5993 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5994 	const struct dc_link *link = aconnector->dc_link;
5995 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
5996 	struct amdgpu_display_manager *dm = &adev->dm;
5997 
5998 	/*
5999 	 * Call only if mst_mgr was iniitalized before since it's not done
6000 	 * for all connector types.
6001 	 */
6002 	if (aconnector->mst_mgr.dev)
6003 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6004 
6005 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6006 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6007 
6008 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
6009 	    link->type != dc_connection_none &&
6010 	    dm->backlight_dev) {
6011 		backlight_device_unregister(dm->backlight_dev);
6012 		dm->backlight_dev = NULL;
6013 	}
6014 #endif
6015 
6016 	if (aconnector->dc_em_sink)
6017 		dc_sink_release(aconnector->dc_em_sink);
6018 	aconnector->dc_em_sink = NULL;
6019 	if (aconnector->dc_sink)
6020 		dc_sink_release(aconnector->dc_sink);
6021 	aconnector->dc_sink = NULL;
6022 
6023 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6024 	drm_connector_unregister(connector);
6025 	drm_connector_cleanup(connector);
6026 	if (aconnector->i2c) {
6027 		i2c_del_adapter(&aconnector->i2c->base);
6028 		kfree(aconnector->i2c);
6029 	}
6030 	kfree(aconnector->dm_dp_aux.aux.name);
6031 
6032 	kfree(connector);
6033 }
6034 
6035 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6036 {
6037 	struct dm_connector_state *state =
6038 		to_dm_connector_state(connector->state);
6039 
6040 	if (connector->state)
6041 		__drm_atomic_helper_connector_destroy_state(connector->state);
6042 
6043 	kfree(state);
6044 
6045 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6046 
6047 	if (state) {
6048 		state->scaling = RMX_OFF;
6049 		state->underscan_enable = false;
6050 		state->underscan_hborder = 0;
6051 		state->underscan_vborder = 0;
6052 		state->base.max_requested_bpc = 8;
6053 		state->vcpi_slots = 0;
6054 		state->pbn = 0;
6055 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6056 			state->abm_level = amdgpu_dm_abm_level;
6057 
6058 		__drm_atomic_helper_connector_reset(connector, &state->base);
6059 	}
6060 }
6061 
6062 struct drm_connector_state *
6063 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6064 {
6065 	struct dm_connector_state *state =
6066 		to_dm_connector_state(connector->state);
6067 
6068 	struct dm_connector_state *new_state =
6069 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6070 
6071 	if (!new_state)
6072 		return NULL;
6073 
6074 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6075 
6076 	new_state->freesync_capable = state->freesync_capable;
6077 	new_state->abm_level = state->abm_level;
6078 	new_state->scaling = state->scaling;
6079 	new_state->underscan_enable = state->underscan_enable;
6080 	new_state->underscan_hborder = state->underscan_hborder;
6081 	new_state->underscan_vborder = state->underscan_vborder;
6082 	new_state->vcpi_slots = state->vcpi_slots;
6083 	new_state->pbn = state->pbn;
6084 	return &new_state->base;
6085 }
6086 
6087 static int
6088 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6089 {
6090 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6091 		to_amdgpu_dm_connector(connector);
6092 	int r;
6093 
6094 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6095 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6096 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6097 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6098 		if (r)
6099 			return r;
6100 	}
6101 
6102 #if defined(CONFIG_DEBUG_FS)
6103 	connector_debugfs_init(amdgpu_dm_connector);
6104 #endif
6105 
6106 	return 0;
6107 }
6108 
6109 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6110 	.reset = amdgpu_dm_connector_funcs_reset,
6111 	.detect = amdgpu_dm_connector_detect,
6112 	.fill_modes = drm_helper_probe_single_connector_modes,
6113 	.destroy = amdgpu_dm_connector_destroy,
6114 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6115 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6116 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6117 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6118 	.late_register = amdgpu_dm_connector_late_register,
6119 	.early_unregister = amdgpu_dm_connector_unregister
6120 };
6121 
6122 static int get_modes(struct drm_connector *connector)
6123 {
6124 	return amdgpu_dm_connector_get_modes(connector);
6125 }
6126 
6127 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6128 {
6129 	struct dc_sink_init_data init_params = {
6130 			.link = aconnector->dc_link,
6131 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6132 	};
6133 	struct edid *edid;
6134 
6135 	if (!aconnector->base.edid_blob_ptr) {
6136 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6137 				aconnector->base.name);
6138 
6139 		aconnector->base.force = DRM_FORCE_OFF;
6140 		aconnector->base.override_edid = false;
6141 		return;
6142 	}
6143 
6144 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6145 
6146 	aconnector->edid = edid;
6147 
6148 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6149 		aconnector->dc_link,
6150 		(uint8_t *)edid,
6151 		(edid->extensions + 1) * EDID_LENGTH,
6152 		&init_params);
6153 
6154 	if (aconnector->base.force == DRM_FORCE_ON) {
6155 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6156 		aconnector->dc_link->local_sink :
6157 		aconnector->dc_em_sink;
6158 		dc_sink_retain(aconnector->dc_sink);
6159 	}
6160 }
6161 
6162 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6163 {
6164 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6165 
6166 	/*
6167 	 * In case of headless boot with force on for DP managed connector
6168 	 * Those settings have to be != 0 to get initial modeset
6169 	 */
6170 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6171 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6172 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6173 	}
6174 
6175 
6176 	aconnector->base.override_edid = true;
6177 	create_eml_sink(aconnector);
6178 }
6179 
6180 static struct dc_stream_state *
6181 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6182 				const struct drm_display_mode *drm_mode,
6183 				const struct dm_connector_state *dm_state,
6184 				const struct dc_stream_state *old_stream)
6185 {
6186 	struct drm_connector *connector = &aconnector->base;
6187 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6188 	struct dc_stream_state *stream;
6189 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6190 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6191 	enum dc_status dc_result = DC_OK;
6192 
6193 	do {
6194 		stream = create_stream_for_sink(aconnector, drm_mode,
6195 						dm_state, old_stream,
6196 						requested_bpc);
6197 		if (stream == NULL) {
6198 			DRM_ERROR("Failed to create stream for sink!\n");
6199 			break;
6200 		}
6201 
6202 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6203 
6204 		if (dc_result != DC_OK) {
6205 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6206 				      drm_mode->hdisplay,
6207 				      drm_mode->vdisplay,
6208 				      drm_mode->clock,
6209 				      dc_result,
6210 				      dc_status_to_str(dc_result));
6211 
6212 			dc_stream_release(stream);
6213 			stream = NULL;
6214 			requested_bpc -= 2; /* lower bpc to retry validation */
6215 		}
6216 
6217 	} while (stream == NULL && requested_bpc >= 6);
6218 
6219 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6220 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6221 
6222 		aconnector->force_yuv420_output = true;
6223 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6224 						dm_state, old_stream);
6225 		aconnector->force_yuv420_output = false;
6226 	}
6227 
6228 	return stream;
6229 }
6230 
6231 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6232 				   struct drm_display_mode *mode)
6233 {
6234 	int result = MODE_ERROR;
6235 	struct dc_sink *dc_sink;
6236 	/* TODO: Unhardcode stream count */
6237 	struct dc_stream_state *stream;
6238 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6239 
6240 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6241 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6242 		return result;
6243 
6244 	/*
6245 	 * Only run this the first time mode_valid is called to initilialize
6246 	 * EDID mgmt
6247 	 */
6248 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6249 		!aconnector->dc_em_sink)
6250 		handle_edid_mgmt(aconnector);
6251 
6252 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6253 
6254 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6255 				aconnector->base.force != DRM_FORCE_ON) {
6256 		DRM_ERROR("dc_sink is NULL!\n");
6257 		goto fail;
6258 	}
6259 
6260 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6261 	if (stream) {
6262 		dc_stream_release(stream);
6263 		result = MODE_OK;
6264 	}
6265 
6266 fail:
6267 	/* TODO: error handling*/
6268 	return result;
6269 }
6270 
6271 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6272 				struct dc_info_packet *out)
6273 {
6274 	struct hdmi_drm_infoframe frame;
6275 	unsigned char buf[30]; /* 26 + 4 */
6276 	ssize_t len;
6277 	int ret, i;
6278 
6279 	memset(out, 0, sizeof(*out));
6280 
6281 	if (!state->hdr_output_metadata)
6282 		return 0;
6283 
6284 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6285 	if (ret)
6286 		return ret;
6287 
6288 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6289 	if (len < 0)
6290 		return (int)len;
6291 
6292 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6293 	if (len != 30)
6294 		return -EINVAL;
6295 
6296 	/* Prepare the infopacket for DC. */
6297 	switch (state->connector->connector_type) {
6298 	case DRM_MODE_CONNECTOR_HDMIA:
6299 		out->hb0 = 0x87; /* type */
6300 		out->hb1 = 0x01; /* version */
6301 		out->hb2 = 0x1A; /* length */
6302 		out->sb[0] = buf[3]; /* checksum */
6303 		i = 1;
6304 		break;
6305 
6306 	case DRM_MODE_CONNECTOR_DisplayPort:
6307 	case DRM_MODE_CONNECTOR_eDP:
6308 		out->hb0 = 0x00; /* sdp id, zero */
6309 		out->hb1 = 0x87; /* type */
6310 		out->hb2 = 0x1D; /* payload len - 1 */
6311 		out->hb3 = (0x13 << 2); /* sdp version */
6312 		out->sb[0] = 0x01; /* version */
6313 		out->sb[1] = 0x1A; /* length */
6314 		i = 2;
6315 		break;
6316 
6317 	default:
6318 		return -EINVAL;
6319 	}
6320 
6321 	memcpy(&out->sb[i], &buf[4], 26);
6322 	out->valid = true;
6323 
6324 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6325 		       sizeof(out->sb), false);
6326 
6327 	return 0;
6328 }
6329 
6330 static bool
6331 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6332 			  const struct drm_connector_state *new_state)
6333 {
6334 	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6335 	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6336 
6337 	if (old_blob != new_blob) {
6338 		if (old_blob && new_blob &&
6339 		    old_blob->length == new_blob->length)
6340 			return memcmp(old_blob->data, new_blob->data,
6341 				      old_blob->length);
6342 
6343 		return true;
6344 	}
6345 
6346 	return false;
6347 }
6348 
6349 static int
6350 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6351 				 struct drm_atomic_state *state)
6352 {
6353 	struct drm_connector_state *new_con_state =
6354 		drm_atomic_get_new_connector_state(state, conn);
6355 	struct drm_connector_state *old_con_state =
6356 		drm_atomic_get_old_connector_state(state, conn);
6357 	struct drm_crtc *crtc = new_con_state->crtc;
6358 	struct drm_crtc_state *new_crtc_state;
6359 	int ret;
6360 
6361 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6362 
6363 	if (!crtc)
6364 		return 0;
6365 
6366 	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6367 		struct dc_info_packet hdr_infopacket;
6368 
6369 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6370 		if (ret)
6371 			return ret;
6372 
6373 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6374 		if (IS_ERR(new_crtc_state))
6375 			return PTR_ERR(new_crtc_state);
6376 
6377 		/*
6378 		 * DC considers the stream backends changed if the
6379 		 * static metadata changes. Forcing the modeset also
6380 		 * gives a simple way for userspace to switch from
6381 		 * 8bpc to 10bpc when setting the metadata to enter
6382 		 * or exit HDR.
6383 		 *
6384 		 * Changing the static metadata after it's been
6385 		 * set is permissible, however. So only force a
6386 		 * modeset if we're entering or exiting HDR.
6387 		 */
6388 		new_crtc_state->mode_changed =
6389 			!old_con_state->hdr_output_metadata ||
6390 			!new_con_state->hdr_output_metadata;
6391 	}
6392 
6393 	return 0;
6394 }
6395 
6396 static const struct drm_connector_helper_funcs
6397 amdgpu_dm_connector_helper_funcs = {
6398 	/*
6399 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
6400 	 * modes will be filtered by drm_mode_validate_size(), and those modes
6401 	 * are missing after user start lightdm. So we need to renew modes list.
6402 	 * in get_modes call back, not just return the modes count
6403 	 */
6404 	.get_modes = get_modes,
6405 	.mode_valid = amdgpu_dm_connector_mode_valid,
6406 	.atomic_check = amdgpu_dm_connector_atomic_check,
6407 };
6408 
6409 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6410 {
6411 }
6412 
6413 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6414 {
6415 	struct drm_atomic_state *state = new_crtc_state->state;
6416 	struct drm_plane *plane;
6417 	int num_active = 0;
6418 
6419 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6420 		struct drm_plane_state *new_plane_state;
6421 
6422 		/* Cursor planes are "fake". */
6423 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6424 			continue;
6425 
6426 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6427 
6428 		if (!new_plane_state) {
6429 			/*
6430 			 * The plane is enable on the CRTC and hasn't changed
6431 			 * state. This means that it previously passed
6432 			 * validation and is therefore enabled.
6433 			 */
6434 			num_active += 1;
6435 			continue;
6436 		}
6437 
6438 		/* We need a framebuffer to be considered enabled. */
6439 		num_active += (new_plane_state->fb != NULL);
6440 	}
6441 
6442 	return num_active;
6443 }
6444 
6445 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6446 					 struct drm_crtc_state *new_crtc_state)
6447 {
6448 	struct dm_crtc_state *dm_new_crtc_state =
6449 		to_dm_crtc_state(new_crtc_state);
6450 
6451 	dm_new_crtc_state->active_planes = 0;
6452 
6453 	if (!dm_new_crtc_state->stream)
6454 		return;
6455 
6456 	dm_new_crtc_state->active_planes =
6457 		count_crtc_active_planes(new_crtc_state);
6458 }
6459 
6460 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6461 				       struct drm_atomic_state *state)
6462 {
6463 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6464 									  crtc);
6465 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6466 	struct dc *dc = adev->dm.dc;
6467 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6468 	int ret = -EINVAL;
6469 
6470 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6471 
6472 	dm_update_crtc_active_planes(crtc, crtc_state);
6473 
6474 	if (unlikely(!dm_crtc_state->stream &&
6475 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6476 		WARN_ON(1);
6477 		return ret;
6478 	}
6479 
6480 	/*
6481 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6482 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6483 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6484 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6485 	 */
6486 	if (crtc_state->enable &&
6487 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6488 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6489 		return -EINVAL;
6490 	}
6491 
6492 	/* In some use cases, like reset, no stream is attached */
6493 	if (!dm_crtc_state->stream)
6494 		return 0;
6495 
6496 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6497 		return 0;
6498 
6499 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6500 	return ret;
6501 }
6502 
6503 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6504 				      const struct drm_display_mode *mode,
6505 				      struct drm_display_mode *adjusted_mode)
6506 {
6507 	return true;
6508 }
6509 
6510 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6511 	.disable = dm_crtc_helper_disable,
6512 	.atomic_check = dm_crtc_helper_atomic_check,
6513 	.mode_fixup = dm_crtc_helper_mode_fixup,
6514 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
6515 };
6516 
6517 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6518 {
6519 
6520 }
6521 
6522 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6523 {
6524 	switch (display_color_depth) {
6525 		case COLOR_DEPTH_666:
6526 			return 6;
6527 		case COLOR_DEPTH_888:
6528 			return 8;
6529 		case COLOR_DEPTH_101010:
6530 			return 10;
6531 		case COLOR_DEPTH_121212:
6532 			return 12;
6533 		case COLOR_DEPTH_141414:
6534 			return 14;
6535 		case COLOR_DEPTH_161616:
6536 			return 16;
6537 		default:
6538 			break;
6539 		}
6540 	return 0;
6541 }
6542 
6543 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6544 					  struct drm_crtc_state *crtc_state,
6545 					  struct drm_connector_state *conn_state)
6546 {
6547 	struct drm_atomic_state *state = crtc_state->state;
6548 	struct drm_connector *connector = conn_state->connector;
6549 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6550 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6551 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6552 	struct drm_dp_mst_topology_mgr *mst_mgr;
6553 	struct drm_dp_mst_port *mst_port;
6554 	enum dc_color_depth color_depth;
6555 	int clock, bpp = 0;
6556 	bool is_y420 = false;
6557 
6558 	if (!aconnector->port || !aconnector->dc_sink)
6559 		return 0;
6560 
6561 	mst_port = aconnector->port;
6562 	mst_mgr = &aconnector->mst_port->mst_mgr;
6563 
6564 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6565 		return 0;
6566 
6567 	if (!state->duplicated) {
6568 		int max_bpc = conn_state->max_requested_bpc;
6569 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6570 				aconnector->force_yuv420_output;
6571 		color_depth = convert_color_depth_from_display_info(connector,
6572 								    is_y420,
6573 								    max_bpc);
6574 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6575 		clock = adjusted_mode->clock;
6576 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6577 	}
6578 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6579 									   mst_mgr,
6580 									   mst_port,
6581 									   dm_new_connector_state->pbn,
6582 									   dm_mst_get_pbn_divider(aconnector->dc_link));
6583 	if (dm_new_connector_state->vcpi_slots < 0) {
6584 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6585 		return dm_new_connector_state->vcpi_slots;
6586 	}
6587 	return 0;
6588 }
6589 
6590 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6591 	.disable = dm_encoder_helper_disable,
6592 	.atomic_check = dm_encoder_helper_atomic_check
6593 };
6594 
6595 #if defined(CONFIG_DRM_AMD_DC_DCN)
6596 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6597 					    struct dc_state *dc_state)
6598 {
6599 	struct dc_stream_state *stream = NULL;
6600 	struct drm_connector *connector;
6601 	struct drm_connector_state *new_con_state;
6602 	struct amdgpu_dm_connector *aconnector;
6603 	struct dm_connector_state *dm_conn_state;
6604 	int i, j, clock, bpp;
6605 	int vcpi, pbn_div, pbn = 0;
6606 
6607 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6608 
6609 		aconnector = to_amdgpu_dm_connector(connector);
6610 
6611 		if (!aconnector->port)
6612 			continue;
6613 
6614 		if (!new_con_state || !new_con_state->crtc)
6615 			continue;
6616 
6617 		dm_conn_state = to_dm_connector_state(new_con_state);
6618 
6619 		for (j = 0; j < dc_state->stream_count; j++) {
6620 			stream = dc_state->streams[j];
6621 			if (!stream)
6622 				continue;
6623 
6624 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6625 				break;
6626 
6627 			stream = NULL;
6628 		}
6629 
6630 		if (!stream)
6631 			continue;
6632 
6633 		if (stream->timing.flags.DSC != 1) {
6634 			drm_dp_mst_atomic_enable_dsc(state,
6635 						     aconnector->port,
6636 						     dm_conn_state->pbn,
6637 						     0,
6638 						     false);
6639 			continue;
6640 		}
6641 
6642 		pbn_div = dm_mst_get_pbn_divider(stream->link);
6643 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
6644 		clock = stream->timing.pix_clk_100hz / 10;
6645 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6646 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
6647 						    aconnector->port,
6648 						    pbn, pbn_div,
6649 						    true);
6650 		if (vcpi < 0)
6651 			return vcpi;
6652 
6653 		dm_conn_state->pbn = pbn;
6654 		dm_conn_state->vcpi_slots = vcpi;
6655 	}
6656 	return 0;
6657 }
6658 #endif
6659 
6660 static void dm_drm_plane_reset(struct drm_plane *plane)
6661 {
6662 	struct dm_plane_state *amdgpu_state = NULL;
6663 
6664 	if (plane->state)
6665 		plane->funcs->atomic_destroy_state(plane, plane->state);
6666 
6667 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6668 	WARN_ON(amdgpu_state == NULL);
6669 
6670 	if (amdgpu_state)
6671 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6672 }
6673 
6674 static struct drm_plane_state *
6675 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6676 {
6677 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6678 
6679 	old_dm_plane_state = to_dm_plane_state(plane->state);
6680 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6681 	if (!dm_plane_state)
6682 		return NULL;
6683 
6684 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6685 
6686 	if (old_dm_plane_state->dc_state) {
6687 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6688 		dc_plane_state_retain(dm_plane_state->dc_state);
6689 	}
6690 
6691 	return &dm_plane_state->base;
6692 }
6693 
6694 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6695 				struct drm_plane_state *state)
6696 {
6697 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6698 
6699 	if (dm_plane_state->dc_state)
6700 		dc_plane_state_release(dm_plane_state->dc_state);
6701 
6702 	drm_atomic_helper_plane_destroy_state(plane, state);
6703 }
6704 
6705 static const struct drm_plane_funcs dm_plane_funcs = {
6706 	.update_plane	= drm_atomic_helper_update_plane,
6707 	.disable_plane	= drm_atomic_helper_disable_plane,
6708 	.destroy	= drm_primary_helper_destroy,
6709 	.reset = dm_drm_plane_reset,
6710 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
6711 	.atomic_destroy_state = dm_drm_plane_destroy_state,
6712 	.format_mod_supported = dm_plane_format_mod_supported,
6713 };
6714 
6715 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6716 				      struct drm_plane_state *new_state)
6717 {
6718 	struct amdgpu_framebuffer *afb;
6719 	struct drm_gem_object *obj;
6720 	struct amdgpu_device *adev;
6721 	struct amdgpu_bo *rbo;
6722 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6723 	struct list_head list;
6724 	struct ttm_validate_buffer tv;
6725 	struct ww_acquire_ctx ticket;
6726 	uint32_t domain;
6727 	int r;
6728 
6729 	if (!new_state->fb) {
6730 		DRM_DEBUG_KMS("No FB bound\n");
6731 		return 0;
6732 	}
6733 
6734 	afb = to_amdgpu_framebuffer(new_state->fb);
6735 	obj = new_state->fb->obj[0];
6736 	rbo = gem_to_amdgpu_bo(obj);
6737 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6738 	INIT_LIST_HEAD(&list);
6739 
6740 	tv.bo = &rbo->tbo;
6741 	tv.num_shared = 1;
6742 	list_add(&tv.head, &list);
6743 
6744 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6745 	if (r) {
6746 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6747 		return r;
6748 	}
6749 
6750 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6751 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
6752 	else
6753 		domain = AMDGPU_GEM_DOMAIN_VRAM;
6754 
6755 	r = amdgpu_bo_pin(rbo, domain);
6756 	if (unlikely(r != 0)) {
6757 		if (r != -ERESTARTSYS)
6758 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6759 		ttm_eu_backoff_reservation(&ticket, &list);
6760 		return r;
6761 	}
6762 
6763 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6764 	if (unlikely(r != 0)) {
6765 		amdgpu_bo_unpin(rbo);
6766 		ttm_eu_backoff_reservation(&ticket, &list);
6767 		DRM_ERROR("%p bind failed\n", rbo);
6768 		return r;
6769 	}
6770 
6771 	ttm_eu_backoff_reservation(&ticket, &list);
6772 
6773 	afb->address = amdgpu_bo_gpu_offset(rbo);
6774 
6775 	amdgpu_bo_ref(rbo);
6776 
6777 	/**
6778 	 * We don't do surface updates on planes that have been newly created,
6779 	 * but we also don't have the afb->address during atomic check.
6780 	 *
6781 	 * Fill in buffer attributes depending on the address here, but only on
6782 	 * newly created planes since they're not being used by DC yet and this
6783 	 * won't modify global state.
6784 	 */
6785 	dm_plane_state_old = to_dm_plane_state(plane->state);
6786 	dm_plane_state_new = to_dm_plane_state(new_state);
6787 
6788 	if (dm_plane_state_new->dc_state &&
6789 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6790 		struct dc_plane_state *plane_state =
6791 			dm_plane_state_new->dc_state;
6792 		bool force_disable_dcc = !plane_state->dcc.enable;
6793 
6794 		fill_plane_buffer_attributes(
6795 			adev, afb, plane_state->format, plane_state->rotation,
6796 			afb->tiling_flags,
6797 			&plane_state->tiling_info, &plane_state->plane_size,
6798 			&plane_state->dcc, &plane_state->address,
6799 			afb->tmz_surface, force_disable_dcc);
6800 	}
6801 
6802 	return 0;
6803 }
6804 
6805 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6806 				       struct drm_plane_state *old_state)
6807 {
6808 	struct amdgpu_bo *rbo;
6809 	int r;
6810 
6811 	if (!old_state->fb)
6812 		return;
6813 
6814 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6815 	r = amdgpu_bo_reserve(rbo, false);
6816 	if (unlikely(r)) {
6817 		DRM_ERROR("failed to reserve rbo before unpin\n");
6818 		return;
6819 	}
6820 
6821 	amdgpu_bo_unpin(rbo);
6822 	amdgpu_bo_unreserve(rbo);
6823 	amdgpu_bo_unref(&rbo);
6824 }
6825 
6826 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6827 				       struct drm_crtc_state *new_crtc_state)
6828 {
6829 	struct drm_framebuffer *fb = state->fb;
6830 	int min_downscale, max_upscale;
6831 	int min_scale = 0;
6832 	int max_scale = INT_MAX;
6833 
6834 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6835 	if (fb && state->crtc) {
6836 		/* Validate viewport to cover the case when only the position changes */
6837 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6838 			int viewport_width = state->crtc_w;
6839 			int viewport_height = state->crtc_h;
6840 
6841 			if (state->crtc_x < 0)
6842 				viewport_width += state->crtc_x;
6843 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6844 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6845 
6846 			if (state->crtc_y < 0)
6847 				viewport_height += state->crtc_y;
6848 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6849 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6850 
6851 			if (viewport_width < 0 || viewport_height < 0) {
6852 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6853 				return -EINVAL;
6854 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6855 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6856 				return -EINVAL;
6857 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
6858 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6859 				return -EINVAL;
6860 			}
6861 
6862 		}
6863 
6864 		/* Get min/max allowed scaling factors from plane caps. */
6865 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6866 					     &min_downscale, &max_upscale);
6867 		/*
6868 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
6869 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6870 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6871 		 */
6872 		min_scale = (1000 << 16) / max_upscale;
6873 		max_scale = (1000 << 16) / min_downscale;
6874 	}
6875 
6876 	return drm_atomic_helper_check_plane_state(
6877 		state, new_crtc_state, min_scale, max_scale, true, true);
6878 }
6879 
6880 static int dm_plane_atomic_check(struct drm_plane *plane,
6881 				 struct drm_atomic_state *state)
6882 {
6883 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6884 										 plane);
6885 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6886 	struct dc *dc = adev->dm.dc;
6887 	struct dm_plane_state *dm_plane_state;
6888 	struct dc_scaling_info scaling_info;
6889 	struct drm_crtc_state *new_crtc_state;
6890 	int ret;
6891 
6892 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6893 
6894 	dm_plane_state = to_dm_plane_state(new_plane_state);
6895 
6896 	if (!dm_plane_state->dc_state)
6897 		return 0;
6898 
6899 	new_crtc_state =
6900 		drm_atomic_get_new_crtc_state(state,
6901 					      new_plane_state->crtc);
6902 	if (!new_crtc_state)
6903 		return -EINVAL;
6904 
6905 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6906 	if (ret)
6907 		return ret;
6908 
6909 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6910 	if (ret)
6911 		return ret;
6912 
6913 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6914 		return 0;
6915 
6916 	return -EINVAL;
6917 }
6918 
6919 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6920 				       struct drm_atomic_state *state)
6921 {
6922 	/* Only support async updates on cursor planes. */
6923 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
6924 		return -EINVAL;
6925 
6926 	return 0;
6927 }
6928 
6929 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6930 					 struct drm_atomic_state *state)
6931 {
6932 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6933 									   plane);
6934 	struct drm_plane_state *old_state =
6935 		drm_atomic_get_old_plane_state(state, plane);
6936 
6937 	trace_amdgpu_dm_atomic_update_cursor(new_state);
6938 
6939 	swap(plane->state->fb, new_state->fb);
6940 
6941 	plane->state->src_x = new_state->src_x;
6942 	plane->state->src_y = new_state->src_y;
6943 	plane->state->src_w = new_state->src_w;
6944 	plane->state->src_h = new_state->src_h;
6945 	plane->state->crtc_x = new_state->crtc_x;
6946 	plane->state->crtc_y = new_state->crtc_y;
6947 	plane->state->crtc_w = new_state->crtc_w;
6948 	plane->state->crtc_h = new_state->crtc_h;
6949 
6950 	handle_cursor_update(plane, old_state);
6951 }
6952 
6953 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6954 	.prepare_fb = dm_plane_helper_prepare_fb,
6955 	.cleanup_fb = dm_plane_helper_cleanup_fb,
6956 	.atomic_check = dm_plane_atomic_check,
6957 	.atomic_async_check = dm_plane_atomic_async_check,
6958 	.atomic_async_update = dm_plane_atomic_async_update
6959 };
6960 
6961 /*
6962  * TODO: these are currently initialized to rgb formats only.
6963  * For future use cases we should either initialize them dynamically based on
6964  * plane capabilities, or initialize this array to all formats, so internal drm
6965  * check will succeed, and let DC implement proper check
6966  */
6967 static const uint32_t rgb_formats[] = {
6968 	DRM_FORMAT_XRGB8888,
6969 	DRM_FORMAT_ARGB8888,
6970 	DRM_FORMAT_RGBA8888,
6971 	DRM_FORMAT_XRGB2101010,
6972 	DRM_FORMAT_XBGR2101010,
6973 	DRM_FORMAT_ARGB2101010,
6974 	DRM_FORMAT_ABGR2101010,
6975 	DRM_FORMAT_XBGR8888,
6976 	DRM_FORMAT_ABGR8888,
6977 	DRM_FORMAT_RGB565,
6978 };
6979 
6980 static const uint32_t overlay_formats[] = {
6981 	DRM_FORMAT_XRGB8888,
6982 	DRM_FORMAT_ARGB8888,
6983 	DRM_FORMAT_RGBA8888,
6984 	DRM_FORMAT_XBGR8888,
6985 	DRM_FORMAT_ABGR8888,
6986 	DRM_FORMAT_RGB565
6987 };
6988 
6989 static const u32 cursor_formats[] = {
6990 	DRM_FORMAT_ARGB8888
6991 };
6992 
6993 static int get_plane_formats(const struct drm_plane *plane,
6994 			     const struct dc_plane_cap *plane_cap,
6995 			     uint32_t *formats, int max_formats)
6996 {
6997 	int i, num_formats = 0;
6998 
6999 	/*
7000 	 * TODO: Query support for each group of formats directly from
7001 	 * DC plane caps. This will require adding more formats to the
7002 	 * caps list.
7003 	 */
7004 
7005 	switch (plane->type) {
7006 	case DRM_PLANE_TYPE_PRIMARY:
7007 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7008 			if (num_formats >= max_formats)
7009 				break;
7010 
7011 			formats[num_formats++] = rgb_formats[i];
7012 		}
7013 
7014 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7015 			formats[num_formats++] = DRM_FORMAT_NV12;
7016 		if (plane_cap && plane_cap->pixel_format_support.p010)
7017 			formats[num_formats++] = DRM_FORMAT_P010;
7018 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7019 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7020 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7021 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7022 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7023 		}
7024 		break;
7025 
7026 	case DRM_PLANE_TYPE_OVERLAY:
7027 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7028 			if (num_formats >= max_formats)
7029 				break;
7030 
7031 			formats[num_formats++] = overlay_formats[i];
7032 		}
7033 		break;
7034 
7035 	case DRM_PLANE_TYPE_CURSOR:
7036 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7037 			if (num_formats >= max_formats)
7038 				break;
7039 
7040 			formats[num_formats++] = cursor_formats[i];
7041 		}
7042 		break;
7043 	}
7044 
7045 	return num_formats;
7046 }
7047 
7048 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7049 				struct drm_plane *plane,
7050 				unsigned long possible_crtcs,
7051 				const struct dc_plane_cap *plane_cap)
7052 {
7053 	uint32_t formats[32];
7054 	int num_formats;
7055 	int res = -EPERM;
7056 	unsigned int supported_rotations;
7057 	uint64_t *modifiers = NULL;
7058 
7059 	num_formats = get_plane_formats(plane, plane_cap, formats,
7060 					ARRAY_SIZE(formats));
7061 
7062 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7063 	if (res)
7064 		return res;
7065 
7066 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7067 				       &dm_plane_funcs, formats, num_formats,
7068 				       modifiers, plane->type, NULL);
7069 	kfree(modifiers);
7070 	if (res)
7071 		return res;
7072 
7073 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7074 	    plane_cap && plane_cap->per_pixel_alpha) {
7075 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7076 					  BIT(DRM_MODE_BLEND_PREMULTI);
7077 
7078 		drm_plane_create_alpha_property(plane);
7079 		drm_plane_create_blend_mode_property(plane, blend_caps);
7080 	}
7081 
7082 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7083 	    plane_cap &&
7084 	    (plane_cap->pixel_format_support.nv12 ||
7085 	     plane_cap->pixel_format_support.p010)) {
7086 		/* This only affects YUV formats. */
7087 		drm_plane_create_color_properties(
7088 			plane,
7089 			BIT(DRM_COLOR_YCBCR_BT601) |
7090 			BIT(DRM_COLOR_YCBCR_BT709) |
7091 			BIT(DRM_COLOR_YCBCR_BT2020),
7092 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7093 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7094 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7095 	}
7096 
7097 	supported_rotations =
7098 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7099 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7100 
7101 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7102 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7103 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7104 						   supported_rotations);
7105 
7106 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7107 
7108 	/* Create (reset) the plane state */
7109 	if (plane->funcs->reset)
7110 		plane->funcs->reset(plane);
7111 
7112 	return 0;
7113 }
7114 
7115 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7116 			       struct drm_plane *plane,
7117 			       uint32_t crtc_index)
7118 {
7119 	struct amdgpu_crtc *acrtc = NULL;
7120 	struct drm_plane *cursor_plane;
7121 
7122 	int res = -ENOMEM;
7123 
7124 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7125 	if (!cursor_plane)
7126 		goto fail;
7127 
7128 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7129 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7130 
7131 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7132 	if (!acrtc)
7133 		goto fail;
7134 
7135 	res = drm_crtc_init_with_planes(
7136 			dm->ddev,
7137 			&acrtc->base,
7138 			plane,
7139 			cursor_plane,
7140 			&amdgpu_dm_crtc_funcs, NULL);
7141 
7142 	if (res)
7143 		goto fail;
7144 
7145 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7146 
7147 	/* Create (reset) the plane state */
7148 	if (acrtc->base.funcs->reset)
7149 		acrtc->base.funcs->reset(&acrtc->base);
7150 
7151 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7152 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7153 
7154 	acrtc->crtc_id = crtc_index;
7155 	acrtc->base.enabled = false;
7156 	acrtc->otg_inst = -1;
7157 
7158 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7159 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7160 				   true, MAX_COLOR_LUT_ENTRIES);
7161 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7162 
7163 	return 0;
7164 
7165 fail:
7166 	kfree(acrtc);
7167 	kfree(cursor_plane);
7168 	return res;
7169 }
7170 
7171 
7172 static int to_drm_connector_type(enum signal_type st)
7173 {
7174 	switch (st) {
7175 	case SIGNAL_TYPE_HDMI_TYPE_A:
7176 		return DRM_MODE_CONNECTOR_HDMIA;
7177 	case SIGNAL_TYPE_EDP:
7178 		return DRM_MODE_CONNECTOR_eDP;
7179 	case SIGNAL_TYPE_LVDS:
7180 		return DRM_MODE_CONNECTOR_LVDS;
7181 	case SIGNAL_TYPE_RGB:
7182 		return DRM_MODE_CONNECTOR_VGA;
7183 	case SIGNAL_TYPE_DISPLAY_PORT:
7184 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7185 		return DRM_MODE_CONNECTOR_DisplayPort;
7186 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7187 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7188 		return DRM_MODE_CONNECTOR_DVID;
7189 	case SIGNAL_TYPE_VIRTUAL:
7190 		return DRM_MODE_CONNECTOR_VIRTUAL;
7191 
7192 	default:
7193 		return DRM_MODE_CONNECTOR_Unknown;
7194 	}
7195 }
7196 
7197 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7198 {
7199 	struct drm_encoder *encoder;
7200 
7201 	/* There is only one encoder per connector */
7202 	drm_connector_for_each_possible_encoder(connector, encoder)
7203 		return encoder;
7204 
7205 	return NULL;
7206 }
7207 
7208 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7209 {
7210 	struct drm_encoder *encoder;
7211 	struct amdgpu_encoder *amdgpu_encoder;
7212 
7213 	encoder = amdgpu_dm_connector_to_encoder(connector);
7214 
7215 	if (encoder == NULL)
7216 		return;
7217 
7218 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7219 
7220 	amdgpu_encoder->native_mode.clock = 0;
7221 
7222 	if (!list_empty(&connector->probed_modes)) {
7223 		struct drm_display_mode *preferred_mode = NULL;
7224 
7225 		list_for_each_entry(preferred_mode,
7226 				    &connector->probed_modes,
7227 				    head) {
7228 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7229 				amdgpu_encoder->native_mode = *preferred_mode;
7230 
7231 			break;
7232 		}
7233 
7234 	}
7235 }
7236 
7237 static struct drm_display_mode *
7238 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7239 			     char *name,
7240 			     int hdisplay, int vdisplay)
7241 {
7242 	struct drm_device *dev = encoder->dev;
7243 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7244 	struct drm_display_mode *mode = NULL;
7245 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7246 
7247 	mode = drm_mode_duplicate(dev, native_mode);
7248 
7249 	if (mode == NULL)
7250 		return NULL;
7251 
7252 	mode->hdisplay = hdisplay;
7253 	mode->vdisplay = vdisplay;
7254 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7255 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7256 
7257 	return mode;
7258 
7259 }
7260 
7261 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7262 						 struct drm_connector *connector)
7263 {
7264 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7265 	struct drm_display_mode *mode = NULL;
7266 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7267 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7268 				to_amdgpu_dm_connector(connector);
7269 	int i;
7270 	int n;
7271 	struct mode_size {
7272 		char name[DRM_DISPLAY_MODE_LEN];
7273 		int w;
7274 		int h;
7275 	} common_modes[] = {
7276 		{  "640x480",  640,  480},
7277 		{  "800x600",  800,  600},
7278 		{ "1024x768", 1024,  768},
7279 		{ "1280x720", 1280,  720},
7280 		{ "1280x800", 1280,  800},
7281 		{"1280x1024", 1280, 1024},
7282 		{ "1440x900", 1440,  900},
7283 		{"1680x1050", 1680, 1050},
7284 		{"1600x1200", 1600, 1200},
7285 		{"1920x1080", 1920, 1080},
7286 		{"1920x1200", 1920, 1200}
7287 	};
7288 
7289 	n = ARRAY_SIZE(common_modes);
7290 
7291 	for (i = 0; i < n; i++) {
7292 		struct drm_display_mode *curmode = NULL;
7293 		bool mode_existed = false;
7294 
7295 		if (common_modes[i].w > native_mode->hdisplay ||
7296 		    common_modes[i].h > native_mode->vdisplay ||
7297 		   (common_modes[i].w == native_mode->hdisplay &&
7298 		    common_modes[i].h == native_mode->vdisplay))
7299 			continue;
7300 
7301 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7302 			if (common_modes[i].w == curmode->hdisplay &&
7303 			    common_modes[i].h == curmode->vdisplay) {
7304 				mode_existed = true;
7305 				break;
7306 			}
7307 		}
7308 
7309 		if (mode_existed)
7310 			continue;
7311 
7312 		mode = amdgpu_dm_create_common_mode(encoder,
7313 				common_modes[i].name, common_modes[i].w,
7314 				common_modes[i].h);
7315 		drm_mode_probed_add(connector, mode);
7316 		amdgpu_dm_connector->num_modes++;
7317 	}
7318 }
7319 
7320 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7321 					      struct edid *edid)
7322 {
7323 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7324 			to_amdgpu_dm_connector(connector);
7325 
7326 	if (edid) {
7327 		/* empty probed_modes */
7328 		INIT_LIST_HEAD(&connector->probed_modes);
7329 		amdgpu_dm_connector->num_modes =
7330 				drm_add_edid_modes(connector, edid);
7331 
7332 		/* sorting the probed modes before calling function
7333 		 * amdgpu_dm_get_native_mode() since EDID can have
7334 		 * more than one preferred mode. The modes that are
7335 		 * later in the probed mode list could be of higher
7336 		 * and preferred resolution. For example, 3840x2160
7337 		 * resolution in base EDID preferred timing and 4096x2160
7338 		 * preferred resolution in DID extension block later.
7339 		 */
7340 		drm_mode_sort(&connector->probed_modes);
7341 		amdgpu_dm_get_native_mode(connector);
7342 
7343 		/* Freesync capabilities are reset by calling
7344 		 * drm_add_edid_modes() and need to be
7345 		 * restored here.
7346 		 */
7347 		amdgpu_dm_update_freesync_caps(connector, edid);
7348 	} else {
7349 		amdgpu_dm_connector->num_modes = 0;
7350 	}
7351 }
7352 
7353 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7354 			      struct drm_display_mode *mode)
7355 {
7356 	struct drm_display_mode *m;
7357 
7358 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7359 		if (drm_mode_equal(m, mode))
7360 			return true;
7361 	}
7362 
7363 	return false;
7364 }
7365 
7366 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7367 {
7368 	const struct drm_display_mode *m;
7369 	struct drm_display_mode *new_mode;
7370 	uint i;
7371 	uint32_t new_modes_count = 0;
7372 
7373 	/* Standard FPS values
7374 	 *
7375 	 * 23.976   - TV/NTSC
7376 	 * 24 	    - Cinema
7377 	 * 25 	    - TV/PAL
7378 	 * 29.97    - TV/NTSC
7379 	 * 30 	    - TV/NTSC
7380 	 * 48 	    - Cinema HFR
7381 	 * 50 	    - TV/PAL
7382 	 * 60 	    - Commonly used
7383 	 * 48,72,96 - Multiples of 24
7384 	 */
7385 	const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7386 					 48000, 50000, 60000, 72000, 96000 };
7387 
7388 	/*
7389 	 * Find mode with highest refresh rate with the same resolution
7390 	 * as the preferred mode. Some monitors report a preferred mode
7391 	 * with lower resolution than the highest refresh rate supported.
7392 	 */
7393 
7394 	m = get_highest_refresh_rate_mode(aconnector, true);
7395 	if (!m)
7396 		return 0;
7397 
7398 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7399 		uint64_t target_vtotal, target_vtotal_diff;
7400 		uint64_t num, den;
7401 
7402 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7403 			continue;
7404 
7405 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7406 		    common_rates[i] > aconnector->max_vfreq * 1000)
7407 			continue;
7408 
7409 		num = (unsigned long long)m->clock * 1000 * 1000;
7410 		den = common_rates[i] * (unsigned long long)m->htotal;
7411 		target_vtotal = div_u64(num, den);
7412 		target_vtotal_diff = target_vtotal - m->vtotal;
7413 
7414 		/* Check for illegal modes */
7415 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7416 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7417 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7418 			continue;
7419 
7420 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7421 		if (!new_mode)
7422 			goto out;
7423 
7424 		new_mode->vtotal += (u16)target_vtotal_diff;
7425 		new_mode->vsync_start += (u16)target_vtotal_diff;
7426 		new_mode->vsync_end += (u16)target_vtotal_diff;
7427 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7428 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7429 
7430 		if (!is_duplicate_mode(aconnector, new_mode)) {
7431 			drm_mode_probed_add(&aconnector->base, new_mode);
7432 			new_modes_count += 1;
7433 		} else
7434 			drm_mode_destroy(aconnector->base.dev, new_mode);
7435 	}
7436  out:
7437 	return new_modes_count;
7438 }
7439 
7440 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7441 						   struct edid *edid)
7442 {
7443 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7444 		to_amdgpu_dm_connector(connector);
7445 
7446 	if (!(amdgpu_freesync_vid_mode && edid))
7447 		return;
7448 
7449 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7450 		amdgpu_dm_connector->num_modes +=
7451 			add_fs_modes(amdgpu_dm_connector);
7452 }
7453 
7454 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7455 {
7456 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7457 			to_amdgpu_dm_connector(connector);
7458 	struct drm_encoder *encoder;
7459 	struct edid *edid = amdgpu_dm_connector->edid;
7460 
7461 	encoder = amdgpu_dm_connector_to_encoder(connector);
7462 
7463 	if (!drm_edid_is_valid(edid)) {
7464 		amdgpu_dm_connector->num_modes =
7465 				drm_add_modes_noedid(connector, 640, 480);
7466 	} else {
7467 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7468 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7469 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7470 	}
7471 	amdgpu_dm_fbc_init(connector);
7472 
7473 	return amdgpu_dm_connector->num_modes;
7474 }
7475 
7476 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7477 				     struct amdgpu_dm_connector *aconnector,
7478 				     int connector_type,
7479 				     struct dc_link *link,
7480 				     int link_index)
7481 {
7482 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7483 
7484 	/*
7485 	 * Some of the properties below require access to state, like bpc.
7486 	 * Allocate some default initial connector state with our reset helper.
7487 	 */
7488 	if (aconnector->base.funcs->reset)
7489 		aconnector->base.funcs->reset(&aconnector->base);
7490 
7491 	aconnector->connector_id = link_index;
7492 	aconnector->dc_link = link;
7493 	aconnector->base.interlace_allowed = false;
7494 	aconnector->base.doublescan_allowed = false;
7495 	aconnector->base.stereo_allowed = false;
7496 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7497 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7498 	aconnector->audio_inst = -1;
7499 	mutex_init(&aconnector->hpd_lock);
7500 
7501 	/*
7502 	 * configure support HPD hot plug connector_>polled default value is 0
7503 	 * which means HPD hot plug not supported
7504 	 */
7505 	switch (connector_type) {
7506 	case DRM_MODE_CONNECTOR_HDMIA:
7507 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7508 		aconnector->base.ycbcr_420_allowed =
7509 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7510 		break;
7511 	case DRM_MODE_CONNECTOR_DisplayPort:
7512 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7513 		aconnector->base.ycbcr_420_allowed =
7514 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7515 		break;
7516 	case DRM_MODE_CONNECTOR_DVID:
7517 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7518 		break;
7519 	default:
7520 		break;
7521 	}
7522 
7523 	drm_object_attach_property(&aconnector->base.base,
7524 				dm->ddev->mode_config.scaling_mode_property,
7525 				DRM_MODE_SCALE_NONE);
7526 
7527 	drm_object_attach_property(&aconnector->base.base,
7528 				adev->mode_info.underscan_property,
7529 				UNDERSCAN_OFF);
7530 	drm_object_attach_property(&aconnector->base.base,
7531 				adev->mode_info.underscan_hborder_property,
7532 				0);
7533 	drm_object_attach_property(&aconnector->base.base,
7534 				adev->mode_info.underscan_vborder_property,
7535 				0);
7536 
7537 	if (!aconnector->mst_port)
7538 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7539 
7540 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
7541 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7542 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7543 
7544 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7545 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7546 		drm_object_attach_property(&aconnector->base.base,
7547 				adev->mode_info.abm_level_property, 0);
7548 	}
7549 
7550 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7551 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7552 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7553 		drm_object_attach_property(
7554 			&aconnector->base.base,
7555 			dm->ddev->mode_config.hdr_output_metadata_property, 0);
7556 
7557 		if (!aconnector->mst_port)
7558 			drm_connector_attach_vrr_capable_property(&aconnector->base);
7559 
7560 #ifdef CONFIG_DRM_AMD_DC_HDCP
7561 		if (adev->dm.hdcp_workqueue)
7562 			drm_connector_attach_content_protection_property(&aconnector->base, true);
7563 #endif
7564 	}
7565 }
7566 
7567 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7568 			      struct i2c_msg *msgs, int num)
7569 {
7570 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7571 	struct ddc_service *ddc_service = i2c->ddc_service;
7572 	struct i2c_command cmd;
7573 	int i;
7574 	int result = -EIO;
7575 
7576 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7577 
7578 	if (!cmd.payloads)
7579 		return result;
7580 
7581 	cmd.number_of_payloads = num;
7582 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7583 	cmd.speed = 100;
7584 
7585 	for (i = 0; i < num; i++) {
7586 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7587 		cmd.payloads[i].address = msgs[i].addr;
7588 		cmd.payloads[i].length = msgs[i].len;
7589 		cmd.payloads[i].data = msgs[i].buf;
7590 	}
7591 
7592 	if (dc_submit_i2c(
7593 			ddc_service->ctx->dc,
7594 			ddc_service->ddc_pin->hw_info.ddc_channel,
7595 			&cmd))
7596 		result = num;
7597 
7598 	kfree(cmd.payloads);
7599 	return result;
7600 }
7601 
7602 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7603 {
7604 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7605 }
7606 
7607 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7608 	.master_xfer = amdgpu_dm_i2c_xfer,
7609 	.functionality = amdgpu_dm_i2c_func,
7610 };
7611 
7612 static struct amdgpu_i2c_adapter *
7613 create_i2c(struct ddc_service *ddc_service,
7614 	   int link_index,
7615 	   int *res)
7616 {
7617 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7618 	struct amdgpu_i2c_adapter *i2c;
7619 
7620 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7621 	if (!i2c)
7622 		return NULL;
7623 	i2c->base.owner = THIS_MODULE;
7624 	i2c->base.class = I2C_CLASS_DDC;
7625 	i2c->base.dev.parent = &adev->pdev->dev;
7626 	i2c->base.algo = &amdgpu_dm_i2c_algo;
7627 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7628 	i2c_set_adapdata(&i2c->base, i2c);
7629 	i2c->ddc_service = ddc_service;
7630 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7631 
7632 	return i2c;
7633 }
7634 
7635 
7636 /*
7637  * Note: this function assumes that dc_link_detect() was called for the
7638  * dc_link which will be represented by this aconnector.
7639  */
7640 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7641 				    struct amdgpu_dm_connector *aconnector,
7642 				    uint32_t link_index,
7643 				    struct amdgpu_encoder *aencoder)
7644 {
7645 	int res = 0;
7646 	int connector_type;
7647 	struct dc *dc = dm->dc;
7648 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7649 	struct amdgpu_i2c_adapter *i2c;
7650 
7651 	link->priv = aconnector;
7652 
7653 	DRM_DEBUG_DRIVER("%s()\n", __func__);
7654 
7655 	i2c = create_i2c(link->ddc, link->link_index, &res);
7656 	if (!i2c) {
7657 		DRM_ERROR("Failed to create i2c adapter data\n");
7658 		return -ENOMEM;
7659 	}
7660 
7661 	aconnector->i2c = i2c;
7662 	res = i2c_add_adapter(&i2c->base);
7663 
7664 	if (res) {
7665 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7666 		goto out_free;
7667 	}
7668 
7669 	connector_type = to_drm_connector_type(link->connector_signal);
7670 
7671 	res = drm_connector_init_with_ddc(
7672 			dm->ddev,
7673 			&aconnector->base,
7674 			&amdgpu_dm_connector_funcs,
7675 			connector_type,
7676 			&i2c->base);
7677 
7678 	if (res) {
7679 		DRM_ERROR("connector_init failed\n");
7680 		aconnector->connector_id = -1;
7681 		goto out_free;
7682 	}
7683 
7684 	drm_connector_helper_add(
7685 			&aconnector->base,
7686 			&amdgpu_dm_connector_helper_funcs);
7687 
7688 	amdgpu_dm_connector_init_helper(
7689 		dm,
7690 		aconnector,
7691 		connector_type,
7692 		link,
7693 		link_index);
7694 
7695 	drm_connector_attach_encoder(
7696 		&aconnector->base, &aencoder->base);
7697 
7698 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7699 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7700 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7701 
7702 out_free:
7703 	if (res) {
7704 		kfree(i2c);
7705 		aconnector->i2c = NULL;
7706 	}
7707 	return res;
7708 }
7709 
7710 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7711 {
7712 	switch (adev->mode_info.num_crtc) {
7713 	case 1:
7714 		return 0x1;
7715 	case 2:
7716 		return 0x3;
7717 	case 3:
7718 		return 0x7;
7719 	case 4:
7720 		return 0xf;
7721 	case 5:
7722 		return 0x1f;
7723 	case 6:
7724 	default:
7725 		return 0x3f;
7726 	}
7727 }
7728 
7729 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7730 				  struct amdgpu_encoder *aencoder,
7731 				  uint32_t link_index)
7732 {
7733 	struct amdgpu_device *adev = drm_to_adev(dev);
7734 
7735 	int res = drm_encoder_init(dev,
7736 				   &aencoder->base,
7737 				   &amdgpu_dm_encoder_funcs,
7738 				   DRM_MODE_ENCODER_TMDS,
7739 				   NULL);
7740 
7741 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7742 
7743 	if (!res)
7744 		aencoder->encoder_id = link_index;
7745 	else
7746 		aencoder->encoder_id = -1;
7747 
7748 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7749 
7750 	return res;
7751 }
7752 
7753 static void manage_dm_interrupts(struct amdgpu_device *adev,
7754 				 struct amdgpu_crtc *acrtc,
7755 				 bool enable)
7756 {
7757 	/*
7758 	 * We have no guarantee that the frontend index maps to the same
7759 	 * backend index - some even map to more than one.
7760 	 *
7761 	 * TODO: Use a different interrupt or check DC itself for the mapping.
7762 	 */
7763 	int irq_type =
7764 		amdgpu_display_crtc_idx_to_irq_type(
7765 			adev,
7766 			acrtc->crtc_id);
7767 
7768 	if (enable) {
7769 		drm_crtc_vblank_on(&acrtc->base);
7770 		amdgpu_irq_get(
7771 			adev,
7772 			&adev->pageflip_irq,
7773 			irq_type);
7774 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7775 		amdgpu_irq_get(
7776 			adev,
7777 			&adev->vline0_irq,
7778 			irq_type);
7779 #endif
7780 	} else {
7781 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7782 		amdgpu_irq_put(
7783 			adev,
7784 			&adev->vline0_irq,
7785 			irq_type);
7786 #endif
7787 		amdgpu_irq_put(
7788 			adev,
7789 			&adev->pageflip_irq,
7790 			irq_type);
7791 		drm_crtc_vblank_off(&acrtc->base);
7792 	}
7793 }
7794 
7795 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7796 				      struct amdgpu_crtc *acrtc)
7797 {
7798 	int irq_type =
7799 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7800 
7801 	/**
7802 	 * This reads the current state for the IRQ and force reapplies
7803 	 * the setting to hardware.
7804 	 */
7805 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7806 }
7807 
7808 static bool
7809 is_scaling_state_different(const struct dm_connector_state *dm_state,
7810 			   const struct dm_connector_state *old_dm_state)
7811 {
7812 	if (dm_state->scaling != old_dm_state->scaling)
7813 		return true;
7814 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7815 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7816 			return true;
7817 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7818 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7819 			return true;
7820 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7821 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7822 		return true;
7823 	return false;
7824 }
7825 
7826 #ifdef CONFIG_DRM_AMD_DC_HDCP
7827 static bool is_content_protection_different(struct drm_connector_state *state,
7828 					    const struct drm_connector_state *old_state,
7829 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7830 {
7831 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7832 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7833 
7834 	/* Handle: Type0/1 change */
7835 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
7836 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7837 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7838 		return true;
7839 	}
7840 
7841 	/* CP is being re enabled, ignore this
7842 	 *
7843 	 * Handles:	ENABLED -> DESIRED
7844 	 */
7845 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7846 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7847 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7848 		return false;
7849 	}
7850 
7851 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7852 	 *
7853 	 * Handles:	UNDESIRED -> ENABLED
7854 	 */
7855 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7856 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7857 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7858 
7859 	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7860 	 * hot-plug, headless s3, dpms
7861 	 *
7862 	 * Handles:	DESIRED -> DESIRED (Special case)
7863 	 */
7864 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7865 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7866 		dm_con_state->update_hdcp = false;
7867 		return true;
7868 	}
7869 
7870 	/*
7871 	 * Handles:	UNDESIRED -> UNDESIRED
7872 	 *		DESIRED -> DESIRED
7873 	 *		ENABLED -> ENABLED
7874 	 */
7875 	if (old_state->content_protection == state->content_protection)
7876 		return false;
7877 
7878 	/*
7879 	 * Handles:	UNDESIRED -> DESIRED
7880 	 *		DESIRED -> UNDESIRED
7881 	 *		ENABLED -> UNDESIRED
7882 	 */
7883 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7884 		return true;
7885 
7886 	/*
7887 	 * Handles:	DESIRED -> ENABLED
7888 	 */
7889 	return false;
7890 }
7891 
7892 #endif
7893 static void remove_stream(struct amdgpu_device *adev,
7894 			  struct amdgpu_crtc *acrtc,
7895 			  struct dc_stream_state *stream)
7896 {
7897 	/* this is the update mode case */
7898 
7899 	acrtc->otg_inst = -1;
7900 	acrtc->enabled = false;
7901 }
7902 
7903 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7904 			       struct dc_cursor_position *position)
7905 {
7906 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7907 	int x, y;
7908 	int xorigin = 0, yorigin = 0;
7909 
7910 	if (!crtc || !plane->state->fb)
7911 		return 0;
7912 
7913 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7914 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7915 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7916 			  __func__,
7917 			  plane->state->crtc_w,
7918 			  plane->state->crtc_h);
7919 		return -EINVAL;
7920 	}
7921 
7922 	x = plane->state->crtc_x;
7923 	y = plane->state->crtc_y;
7924 
7925 	if (x <= -amdgpu_crtc->max_cursor_width ||
7926 	    y <= -amdgpu_crtc->max_cursor_height)
7927 		return 0;
7928 
7929 	if (x < 0) {
7930 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7931 		x = 0;
7932 	}
7933 	if (y < 0) {
7934 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7935 		y = 0;
7936 	}
7937 	position->enable = true;
7938 	position->translate_by_source = true;
7939 	position->x = x;
7940 	position->y = y;
7941 	position->x_hotspot = xorigin;
7942 	position->y_hotspot = yorigin;
7943 
7944 	return 0;
7945 }
7946 
7947 static void handle_cursor_update(struct drm_plane *plane,
7948 				 struct drm_plane_state *old_plane_state)
7949 {
7950 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7951 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7952 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7953 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7954 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7955 	uint64_t address = afb ? afb->address : 0;
7956 	struct dc_cursor_position position = {0};
7957 	struct dc_cursor_attributes attributes;
7958 	int ret;
7959 
7960 	if (!plane->state->fb && !old_plane_state->fb)
7961 		return;
7962 
7963 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
7964 		      __func__,
7965 		      amdgpu_crtc->crtc_id,
7966 		      plane->state->crtc_w,
7967 		      plane->state->crtc_h);
7968 
7969 	ret = get_cursor_position(plane, crtc, &position);
7970 	if (ret)
7971 		return;
7972 
7973 	if (!position.enable) {
7974 		/* turn off cursor */
7975 		if (crtc_state && crtc_state->stream) {
7976 			mutex_lock(&adev->dm.dc_lock);
7977 			dc_stream_set_cursor_position(crtc_state->stream,
7978 						      &position);
7979 			mutex_unlock(&adev->dm.dc_lock);
7980 		}
7981 		return;
7982 	}
7983 
7984 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
7985 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
7986 
7987 	memset(&attributes, 0, sizeof(attributes));
7988 	attributes.address.high_part = upper_32_bits(address);
7989 	attributes.address.low_part  = lower_32_bits(address);
7990 	attributes.width             = plane->state->crtc_w;
7991 	attributes.height            = plane->state->crtc_h;
7992 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7993 	attributes.rotation_angle    = 0;
7994 	attributes.attribute_flags.value = 0;
7995 
7996 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7997 
7998 	if (crtc_state->stream) {
7999 		mutex_lock(&adev->dm.dc_lock);
8000 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8001 							 &attributes))
8002 			DRM_ERROR("DC failed to set cursor attributes\n");
8003 
8004 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8005 						   &position))
8006 			DRM_ERROR("DC failed to set cursor position\n");
8007 		mutex_unlock(&adev->dm.dc_lock);
8008 	}
8009 }
8010 
8011 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8012 {
8013 
8014 	assert_spin_locked(&acrtc->base.dev->event_lock);
8015 	WARN_ON(acrtc->event);
8016 
8017 	acrtc->event = acrtc->base.state->event;
8018 
8019 	/* Set the flip status */
8020 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8021 
8022 	/* Mark this event as consumed */
8023 	acrtc->base.state->event = NULL;
8024 
8025 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8026 		     acrtc->crtc_id);
8027 }
8028 
8029 static void update_freesync_state_on_stream(
8030 	struct amdgpu_display_manager *dm,
8031 	struct dm_crtc_state *new_crtc_state,
8032 	struct dc_stream_state *new_stream,
8033 	struct dc_plane_state *surface,
8034 	u32 flip_timestamp_in_us)
8035 {
8036 	struct mod_vrr_params vrr_params;
8037 	struct dc_info_packet vrr_infopacket = {0};
8038 	struct amdgpu_device *adev = dm->adev;
8039 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8040 	unsigned long flags;
8041 	bool pack_sdp_v1_3 = false;
8042 
8043 	if (!new_stream)
8044 		return;
8045 
8046 	/*
8047 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8048 	 * For now it's sufficient to just guard against these conditions.
8049 	 */
8050 
8051 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8052 		return;
8053 
8054 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8055         vrr_params = acrtc->dm_irq_params.vrr_params;
8056 
8057 	if (surface) {
8058 		mod_freesync_handle_preflip(
8059 			dm->freesync_module,
8060 			surface,
8061 			new_stream,
8062 			flip_timestamp_in_us,
8063 			&vrr_params);
8064 
8065 		if (adev->family < AMDGPU_FAMILY_AI &&
8066 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8067 			mod_freesync_handle_v_update(dm->freesync_module,
8068 						     new_stream, &vrr_params);
8069 
8070 			/* Need to call this before the frame ends. */
8071 			dc_stream_adjust_vmin_vmax(dm->dc,
8072 						   new_crtc_state->stream,
8073 						   &vrr_params.adjust);
8074 		}
8075 	}
8076 
8077 	mod_freesync_build_vrr_infopacket(
8078 		dm->freesync_module,
8079 		new_stream,
8080 		&vrr_params,
8081 		PACKET_TYPE_VRR,
8082 		TRANSFER_FUNC_UNKNOWN,
8083 		&vrr_infopacket,
8084 		pack_sdp_v1_3);
8085 
8086 	new_crtc_state->freesync_timing_changed |=
8087 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8088 			&vrr_params.adjust,
8089 			sizeof(vrr_params.adjust)) != 0);
8090 
8091 	new_crtc_state->freesync_vrr_info_changed |=
8092 		(memcmp(&new_crtc_state->vrr_infopacket,
8093 			&vrr_infopacket,
8094 			sizeof(vrr_infopacket)) != 0);
8095 
8096 	acrtc->dm_irq_params.vrr_params = vrr_params;
8097 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8098 
8099 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8100 	new_stream->vrr_infopacket = vrr_infopacket;
8101 
8102 	if (new_crtc_state->freesync_vrr_info_changed)
8103 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8104 			      new_crtc_state->base.crtc->base.id,
8105 			      (int)new_crtc_state->base.vrr_enabled,
8106 			      (int)vrr_params.state);
8107 
8108 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8109 }
8110 
8111 static void update_stream_irq_parameters(
8112 	struct amdgpu_display_manager *dm,
8113 	struct dm_crtc_state *new_crtc_state)
8114 {
8115 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8116 	struct mod_vrr_params vrr_params;
8117 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8118 	struct amdgpu_device *adev = dm->adev;
8119 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8120 	unsigned long flags;
8121 
8122 	if (!new_stream)
8123 		return;
8124 
8125 	/*
8126 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8127 	 * For now it's sufficient to just guard against these conditions.
8128 	 */
8129 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8130 		return;
8131 
8132 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8133 	vrr_params = acrtc->dm_irq_params.vrr_params;
8134 
8135 	if (new_crtc_state->vrr_supported &&
8136 	    config.min_refresh_in_uhz &&
8137 	    config.max_refresh_in_uhz) {
8138 		/*
8139 		 * if freesync compatible mode was set, config.state will be set
8140 		 * in atomic check
8141 		 */
8142 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8143 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8144 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8145 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8146 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8147 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8148 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8149 		} else {
8150 			config.state = new_crtc_state->base.vrr_enabled ?
8151 						     VRR_STATE_ACTIVE_VARIABLE :
8152 						     VRR_STATE_INACTIVE;
8153 		}
8154 	} else {
8155 		config.state = VRR_STATE_UNSUPPORTED;
8156 	}
8157 
8158 	mod_freesync_build_vrr_params(dm->freesync_module,
8159 				      new_stream,
8160 				      &config, &vrr_params);
8161 
8162 	new_crtc_state->freesync_timing_changed |=
8163 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8164 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8165 
8166 	new_crtc_state->freesync_config = config;
8167 	/* Copy state for access from DM IRQ handler */
8168 	acrtc->dm_irq_params.freesync_config = config;
8169 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8170 	acrtc->dm_irq_params.vrr_params = vrr_params;
8171 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8172 }
8173 
8174 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8175 					    struct dm_crtc_state *new_state)
8176 {
8177 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8178 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8179 
8180 	if (!old_vrr_active && new_vrr_active) {
8181 		/* Transition VRR inactive -> active:
8182 		 * While VRR is active, we must not disable vblank irq, as a
8183 		 * reenable after disable would compute bogus vblank/pflip
8184 		 * timestamps if it likely happened inside display front-porch.
8185 		 *
8186 		 * We also need vupdate irq for the actual core vblank handling
8187 		 * at end of vblank.
8188 		 */
8189 		dm_set_vupdate_irq(new_state->base.crtc, true);
8190 		drm_crtc_vblank_get(new_state->base.crtc);
8191 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8192 				 __func__, new_state->base.crtc->base.id);
8193 	} else if (old_vrr_active && !new_vrr_active) {
8194 		/* Transition VRR active -> inactive:
8195 		 * Allow vblank irq disable again for fixed refresh rate.
8196 		 */
8197 		dm_set_vupdate_irq(new_state->base.crtc, false);
8198 		drm_crtc_vblank_put(new_state->base.crtc);
8199 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8200 				 __func__, new_state->base.crtc->base.id);
8201 	}
8202 }
8203 
8204 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8205 {
8206 	struct drm_plane *plane;
8207 	struct drm_plane_state *old_plane_state;
8208 	int i;
8209 
8210 	/*
8211 	 * TODO: Make this per-stream so we don't issue redundant updates for
8212 	 * commits with multiple streams.
8213 	 */
8214 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8215 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8216 			handle_cursor_update(plane, old_plane_state);
8217 }
8218 
8219 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8220 				    struct dc_state *dc_state,
8221 				    struct drm_device *dev,
8222 				    struct amdgpu_display_manager *dm,
8223 				    struct drm_crtc *pcrtc,
8224 				    bool wait_for_vblank)
8225 {
8226 	uint32_t i;
8227 	uint64_t timestamp_ns;
8228 	struct drm_plane *plane;
8229 	struct drm_plane_state *old_plane_state, *new_plane_state;
8230 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8231 	struct drm_crtc_state *new_pcrtc_state =
8232 			drm_atomic_get_new_crtc_state(state, pcrtc);
8233 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8234 	struct dm_crtc_state *dm_old_crtc_state =
8235 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8236 	int planes_count = 0, vpos, hpos;
8237 	long r;
8238 	unsigned long flags;
8239 	struct amdgpu_bo *abo;
8240 	uint32_t target_vblank, last_flip_vblank;
8241 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8242 	bool pflip_present = false;
8243 	struct {
8244 		struct dc_surface_update surface_updates[MAX_SURFACES];
8245 		struct dc_plane_info plane_infos[MAX_SURFACES];
8246 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8247 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8248 		struct dc_stream_update stream_update;
8249 	} *bundle;
8250 
8251 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8252 
8253 	if (!bundle) {
8254 		dm_error("Failed to allocate update bundle\n");
8255 		goto cleanup;
8256 	}
8257 
8258 	/*
8259 	 * Disable the cursor first if we're disabling all the planes.
8260 	 * It'll remain on the screen after the planes are re-enabled
8261 	 * if we don't.
8262 	 */
8263 	if (acrtc_state->active_planes == 0)
8264 		amdgpu_dm_commit_cursors(state);
8265 
8266 	/* update planes when needed */
8267 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8268 		struct drm_crtc *crtc = new_plane_state->crtc;
8269 		struct drm_crtc_state *new_crtc_state;
8270 		struct drm_framebuffer *fb = new_plane_state->fb;
8271 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8272 		bool plane_needs_flip;
8273 		struct dc_plane_state *dc_plane;
8274 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8275 
8276 		/* Cursor plane is handled after stream updates */
8277 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8278 			continue;
8279 
8280 		if (!fb || !crtc || pcrtc != crtc)
8281 			continue;
8282 
8283 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8284 		if (!new_crtc_state->active)
8285 			continue;
8286 
8287 		dc_plane = dm_new_plane_state->dc_state;
8288 
8289 		bundle->surface_updates[planes_count].surface = dc_plane;
8290 		if (new_pcrtc_state->color_mgmt_changed) {
8291 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8292 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8293 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8294 		}
8295 
8296 		fill_dc_scaling_info(new_plane_state,
8297 				     &bundle->scaling_infos[planes_count]);
8298 
8299 		bundle->surface_updates[planes_count].scaling_info =
8300 			&bundle->scaling_infos[planes_count];
8301 
8302 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8303 
8304 		pflip_present = pflip_present || plane_needs_flip;
8305 
8306 		if (!plane_needs_flip) {
8307 			planes_count += 1;
8308 			continue;
8309 		}
8310 
8311 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8312 
8313 		/*
8314 		 * Wait for all fences on this FB. Do limited wait to avoid
8315 		 * deadlock during GPU reset when this fence will not signal
8316 		 * but we hold reservation lock for the BO.
8317 		 */
8318 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8319 							false,
8320 							msecs_to_jiffies(5000));
8321 		if (unlikely(r <= 0))
8322 			DRM_ERROR("Waiting for fences timed out!");
8323 
8324 		fill_dc_plane_info_and_addr(
8325 			dm->adev, new_plane_state,
8326 			afb->tiling_flags,
8327 			&bundle->plane_infos[planes_count],
8328 			&bundle->flip_addrs[planes_count].address,
8329 			afb->tmz_surface, false);
8330 
8331 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8332 				 new_plane_state->plane->index,
8333 				 bundle->plane_infos[planes_count].dcc.enable);
8334 
8335 		bundle->surface_updates[planes_count].plane_info =
8336 			&bundle->plane_infos[planes_count];
8337 
8338 		/*
8339 		 * Only allow immediate flips for fast updates that don't
8340 		 * change FB pitch, DCC state, rotation or mirroing.
8341 		 */
8342 		bundle->flip_addrs[planes_count].flip_immediate =
8343 			crtc->state->async_flip &&
8344 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8345 
8346 		timestamp_ns = ktime_get_ns();
8347 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8348 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8349 		bundle->surface_updates[planes_count].surface = dc_plane;
8350 
8351 		if (!bundle->surface_updates[planes_count].surface) {
8352 			DRM_ERROR("No surface for CRTC: id=%d\n",
8353 					acrtc_attach->crtc_id);
8354 			continue;
8355 		}
8356 
8357 		if (plane == pcrtc->primary)
8358 			update_freesync_state_on_stream(
8359 				dm,
8360 				acrtc_state,
8361 				acrtc_state->stream,
8362 				dc_plane,
8363 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8364 
8365 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8366 				 __func__,
8367 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8368 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8369 
8370 		planes_count += 1;
8371 
8372 	}
8373 
8374 	if (pflip_present) {
8375 		if (!vrr_active) {
8376 			/* Use old throttling in non-vrr fixed refresh rate mode
8377 			 * to keep flip scheduling based on target vblank counts
8378 			 * working in a backwards compatible way, e.g., for
8379 			 * clients using the GLX_OML_sync_control extension or
8380 			 * DRI3/Present extension with defined target_msc.
8381 			 */
8382 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8383 		}
8384 		else {
8385 			/* For variable refresh rate mode only:
8386 			 * Get vblank of last completed flip to avoid > 1 vrr
8387 			 * flips per video frame by use of throttling, but allow
8388 			 * flip programming anywhere in the possibly large
8389 			 * variable vrr vblank interval for fine-grained flip
8390 			 * timing control and more opportunity to avoid stutter
8391 			 * on late submission of flips.
8392 			 */
8393 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8394 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8395 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8396 		}
8397 
8398 		target_vblank = last_flip_vblank + wait_for_vblank;
8399 
8400 		/*
8401 		 * Wait until we're out of the vertical blank period before the one
8402 		 * targeted by the flip
8403 		 */
8404 		while ((acrtc_attach->enabled &&
8405 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8406 							    0, &vpos, &hpos, NULL,
8407 							    NULL, &pcrtc->hwmode)
8408 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8409 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8410 			(int)(target_vblank -
8411 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8412 			usleep_range(1000, 1100);
8413 		}
8414 
8415 		/**
8416 		 * Prepare the flip event for the pageflip interrupt to handle.
8417 		 *
8418 		 * This only works in the case where we've already turned on the
8419 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
8420 		 * from 0 -> n planes we have to skip a hardware generated event
8421 		 * and rely on sending it from software.
8422 		 */
8423 		if (acrtc_attach->base.state->event &&
8424 		    acrtc_state->active_planes > 0) {
8425 			drm_crtc_vblank_get(pcrtc);
8426 
8427 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8428 
8429 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8430 			prepare_flip_isr(acrtc_attach);
8431 
8432 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8433 		}
8434 
8435 		if (acrtc_state->stream) {
8436 			if (acrtc_state->freesync_vrr_info_changed)
8437 				bundle->stream_update.vrr_infopacket =
8438 					&acrtc_state->stream->vrr_infopacket;
8439 		}
8440 	}
8441 
8442 	/* Update the planes if changed or disable if we don't have any. */
8443 	if ((planes_count || acrtc_state->active_planes == 0) &&
8444 		acrtc_state->stream) {
8445 		bundle->stream_update.stream = acrtc_state->stream;
8446 		if (new_pcrtc_state->mode_changed) {
8447 			bundle->stream_update.src = acrtc_state->stream->src;
8448 			bundle->stream_update.dst = acrtc_state->stream->dst;
8449 		}
8450 
8451 		if (new_pcrtc_state->color_mgmt_changed) {
8452 			/*
8453 			 * TODO: This isn't fully correct since we've actually
8454 			 * already modified the stream in place.
8455 			 */
8456 			bundle->stream_update.gamut_remap =
8457 				&acrtc_state->stream->gamut_remap_matrix;
8458 			bundle->stream_update.output_csc_transform =
8459 				&acrtc_state->stream->csc_color_matrix;
8460 			bundle->stream_update.out_transfer_func =
8461 				acrtc_state->stream->out_transfer_func;
8462 		}
8463 
8464 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8465 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8466 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8467 
8468 		/*
8469 		 * If FreeSync state on the stream has changed then we need to
8470 		 * re-adjust the min/max bounds now that DC doesn't handle this
8471 		 * as part of commit.
8472 		 */
8473 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8474 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8475 			dc_stream_adjust_vmin_vmax(
8476 				dm->dc, acrtc_state->stream,
8477 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8478 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8479 		}
8480 		mutex_lock(&dm->dc_lock);
8481 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8482 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8483 			amdgpu_dm_psr_disable(acrtc_state->stream);
8484 
8485 		dc_commit_updates_for_stream(dm->dc,
8486 						     bundle->surface_updates,
8487 						     planes_count,
8488 						     acrtc_state->stream,
8489 						     &bundle->stream_update,
8490 						     dc_state);
8491 
8492 		/**
8493 		 * Enable or disable the interrupts on the backend.
8494 		 *
8495 		 * Most pipes are put into power gating when unused.
8496 		 *
8497 		 * When power gating is enabled on a pipe we lose the
8498 		 * interrupt enablement state when power gating is disabled.
8499 		 *
8500 		 * So we need to update the IRQ control state in hardware
8501 		 * whenever the pipe turns on (since it could be previously
8502 		 * power gated) or off (since some pipes can't be power gated
8503 		 * on some ASICs).
8504 		 */
8505 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8506 			dm_update_pflip_irq_state(drm_to_adev(dev),
8507 						  acrtc_attach);
8508 
8509 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8510 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8511 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8512 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8513 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8514 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8515 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
8516 			amdgpu_dm_psr_enable(acrtc_state->stream);
8517 		}
8518 
8519 		mutex_unlock(&dm->dc_lock);
8520 	}
8521 
8522 	/*
8523 	 * Update cursor state *after* programming all the planes.
8524 	 * This avoids redundant programming in the case where we're going
8525 	 * to be disabling a single plane - those pipes are being disabled.
8526 	 */
8527 	if (acrtc_state->active_planes)
8528 		amdgpu_dm_commit_cursors(state);
8529 
8530 cleanup:
8531 	kfree(bundle);
8532 }
8533 
8534 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8535 				   struct drm_atomic_state *state)
8536 {
8537 	struct amdgpu_device *adev = drm_to_adev(dev);
8538 	struct amdgpu_dm_connector *aconnector;
8539 	struct drm_connector *connector;
8540 	struct drm_connector_state *old_con_state, *new_con_state;
8541 	struct drm_crtc_state *new_crtc_state;
8542 	struct dm_crtc_state *new_dm_crtc_state;
8543 	const struct dc_stream_status *status;
8544 	int i, inst;
8545 
8546 	/* Notify device removals. */
8547 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8548 		if (old_con_state->crtc != new_con_state->crtc) {
8549 			/* CRTC changes require notification. */
8550 			goto notify;
8551 		}
8552 
8553 		if (!new_con_state->crtc)
8554 			continue;
8555 
8556 		new_crtc_state = drm_atomic_get_new_crtc_state(
8557 			state, new_con_state->crtc);
8558 
8559 		if (!new_crtc_state)
8560 			continue;
8561 
8562 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8563 			continue;
8564 
8565 	notify:
8566 		aconnector = to_amdgpu_dm_connector(connector);
8567 
8568 		mutex_lock(&adev->dm.audio_lock);
8569 		inst = aconnector->audio_inst;
8570 		aconnector->audio_inst = -1;
8571 		mutex_unlock(&adev->dm.audio_lock);
8572 
8573 		amdgpu_dm_audio_eld_notify(adev, inst);
8574 	}
8575 
8576 	/* Notify audio device additions. */
8577 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8578 		if (!new_con_state->crtc)
8579 			continue;
8580 
8581 		new_crtc_state = drm_atomic_get_new_crtc_state(
8582 			state, new_con_state->crtc);
8583 
8584 		if (!new_crtc_state)
8585 			continue;
8586 
8587 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8588 			continue;
8589 
8590 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8591 		if (!new_dm_crtc_state->stream)
8592 			continue;
8593 
8594 		status = dc_stream_get_status(new_dm_crtc_state->stream);
8595 		if (!status)
8596 			continue;
8597 
8598 		aconnector = to_amdgpu_dm_connector(connector);
8599 
8600 		mutex_lock(&adev->dm.audio_lock);
8601 		inst = status->audio_inst;
8602 		aconnector->audio_inst = inst;
8603 		mutex_unlock(&adev->dm.audio_lock);
8604 
8605 		amdgpu_dm_audio_eld_notify(adev, inst);
8606 	}
8607 }
8608 
8609 /*
8610  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8611  * @crtc_state: the DRM CRTC state
8612  * @stream_state: the DC stream state.
8613  *
8614  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8615  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8616  */
8617 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8618 						struct dc_stream_state *stream_state)
8619 {
8620 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8621 }
8622 
8623 /**
8624  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8625  * @state: The atomic state to commit
8626  *
8627  * This will tell DC to commit the constructed DC state from atomic_check,
8628  * programming the hardware. Any failures here implies a hardware failure, since
8629  * atomic check should have filtered anything non-kosher.
8630  */
8631 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8632 {
8633 	struct drm_device *dev = state->dev;
8634 	struct amdgpu_device *adev = drm_to_adev(dev);
8635 	struct amdgpu_display_manager *dm = &adev->dm;
8636 	struct dm_atomic_state *dm_state;
8637 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8638 	uint32_t i, j;
8639 	struct drm_crtc *crtc;
8640 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8641 	unsigned long flags;
8642 	bool wait_for_vblank = true;
8643 	struct drm_connector *connector;
8644 	struct drm_connector_state *old_con_state, *new_con_state;
8645 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8646 	int crtc_disable_count = 0;
8647 	bool mode_set_reset_required = false;
8648 
8649 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8650 
8651 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8652 
8653 	dm_state = dm_atomic_get_new_state(state);
8654 	if (dm_state && dm_state->context) {
8655 		dc_state = dm_state->context;
8656 	} else {
8657 		/* No state changes, retain current state. */
8658 		dc_state_temp = dc_create_state(dm->dc);
8659 		ASSERT(dc_state_temp);
8660 		dc_state = dc_state_temp;
8661 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
8662 	}
8663 
8664 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8665 				       new_crtc_state, i) {
8666 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8667 
8668 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8669 
8670 		if (old_crtc_state->active &&
8671 		    (!new_crtc_state->active ||
8672 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8673 			manage_dm_interrupts(adev, acrtc, false);
8674 			dc_stream_release(dm_old_crtc_state->stream);
8675 		}
8676 	}
8677 
8678 	drm_atomic_helper_calc_timestamping_constants(state);
8679 
8680 	/* update changed items */
8681 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8682 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8683 
8684 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8685 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8686 
8687 		DRM_DEBUG_ATOMIC(
8688 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8689 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
8690 			"connectors_changed:%d\n",
8691 			acrtc->crtc_id,
8692 			new_crtc_state->enable,
8693 			new_crtc_state->active,
8694 			new_crtc_state->planes_changed,
8695 			new_crtc_state->mode_changed,
8696 			new_crtc_state->active_changed,
8697 			new_crtc_state->connectors_changed);
8698 
8699 		/* Disable cursor if disabling crtc */
8700 		if (old_crtc_state->active && !new_crtc_state->active) {
8701 			struct dc_cursor_position position;
8702 
8703 			memset(&position, 0, sizeof(position));
8704 			mutex_lock(&dm->dc_lock);
8705 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8706 			mutex_unlock(&dm->dc_lock);
8707 		}
8708 
8709 		/* Copy all transient state flags into dc state */
8710 		if (dm_new_crtc_state->stream) {
8711 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8712 							    dm_new_crtc_state->stream);
8713 		}
8714 
8715 		/* handles headless hotplug case, updating new_state and
8716 		 * aconnector as needed
8717 		 */
8718 
8719 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8720 
8721 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8722 
8723 			if (!dm_new_crtc_state->stream) {
8724 				/*
8725 				 * this could happen because of issues with
8726 				 * userspace notifications delivery.
8727 				 * In this case userspace tries to set mode on
8728 				 * display which is disconnected in fact.
8729 				 * dc_sink is NULL in this case on aconnector.
8730 				 * We expect reset mode will come soon.
8731 				 *
8732 				 * This can also happen when unplug is done
8733 				 * during resume sequence ended
8734 				 *
8735 				 * In this case, we want to pretend we still
8736 				 * have a sink to keep the pipe running so that
8737 				 * hw state is consistent with the sw state
8738 				 */
8739 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8740 						__func__, acrtc->base.base.id);
8741 				continue;
8742 			}
8743 
8744 			if (dm_old_crtc_state->stream)
8745 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8746 
8747 			pm_runtime_get_noresume(dev->dev);
8748 
8749 			acrtc->enabled = true;
8750 			acrtc->hw_mode = new_crtc_state->mode;
8751 			crtc->hwmode = new_crtc_state->mode;
8752 			mode_set_reset_required = true;
8753 		} else if (modereset_required(new_crtc_state)) {
8754 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8755 			/* i.e. reset mode */
8756 			if (dm_old_crtc_state->stream)
8757 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8758 
8759 			mode_set_reset_required = true;
8760 		}
8761 	} /* for_each_crtc_in_state() */
8762 
8763 	if (dc_state) {
8764 		/* if there mode set or reset, disable eDP PSR */
8765 		if (mode_set_reset_required)
8766 			amdgpu_dm_psr_disable_all(dm);
8767 
8768 		dm_enable_per_frame_crtc_master_sync(dc_state);
8769 		mutex_lock(&dm->dc_lock);
8770 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
8771 #if defined(CONFIG_DRM_AMD_DC_DCN)
8772                /* Allow idle optimization when vblank count is 0 for display off */
8773                if (dm->active_vblank_irq_count == 0)
8774                    dc_allow_idle_optimizations(dm->dc,true);
8775 #endif
8776 		mutex_unlock(&dm->dc_lock);
8777 	}
8778 
8779 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8780 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8781 
8782 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8783 
8784 		if (dm_new_crtc_state->stream != NULL) {
8785 			const struct dc_stream_status *status =
8786 					dc_stream_get_status(dm_new_crtc_state->stream);
8787 
8788 			if (!status)
8789 				status = dc_stream_get_status_from_state(dc_state,
8790 									 dm_new_crtc_state->stream);
8791 			if (!status)
8792 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8793 			else
8794 				acrtc->otg_inst = status->primary_otg_inst;
8795 		}
8796 	}
8797 #ifdef CONFIG_DRM_AMD_DC_HDCP
8798 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8799 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8800 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8801 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8802 
8803 		new_crtc_state = NULL;
8804 
8805 		if (acrtc)
8806 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8807 
8808 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8809 
8810 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8811 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8812 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8813 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8814 			dm_new_con_state->update_hdcp = true;
8815 			continue;
8816 		}
8817 
8818 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8819 			hdcp_update_display(
8820 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8821 				new_con_state->hdcp_content_type,
8822 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8823 	}
8824 #endif
8825 
8826 	/* Handle connector state changes */
8827 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8828 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8829 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8830 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8831 		struct dc_surface_update dummy_updates[MAX_SURFACES];
8832 		struct dc_stream_update stream_update;
8833 		struct dc_info_packet hdr_packet;
8834 		struct dc_stream_status *status = NULL;
8835 		bool abm_changed, hdr_changed, scaling_changed;
8836 
8837 		memset(&dummy_updates, 0, sizeof(dummy_updates));
8838 		memset(&stream_update, 0, sizeof(stream_update));
8839 
8840 		if (acrtc) {
8841 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8842 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8843 		}
8844 
8845 		/* Skip any modesets/resets */
8846 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8847 			continue;
8848 
8849 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8850 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8851 
8852 		scaling_changed = is_scaling_state_different(dm_new_con_state,
8853 							     dm_old_con_state);
8854 
8855 		abm_changed = dm_new_crtc_state->abm_level !=
8856 			      dm_old_crtc_state->abm_level;
8857 
8858 		hdr_changed =
8859 			is_hdr_metadata_different(old_con_state, new_con_state);
8860 
8861 		if (!scaling_changed && !abm_changed && !hdr_changed)
8862 			continue;
8863 
8864 		stream_update.stream = dm_new_crtc_state->stream;
8865 		if (scaling_changed) {
8866 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8867 					dm_new_con_state, dm_new_crtc_state->stream);
8868 
8869 			stream_update.src = dm_new_crtc_state->stream->src;
8870 			stream_update.dst = dm_new_crtc_state->stream->dst;
8871 		}
8872 
8873 		if (abm_changed) {
8874 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8875 
8876 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8877 		}
8878 
8879 		if (hdr_changed) {
8880 			fill_hdr_info_packet(new_con_state, &hdr_packet);
8881 			stream_update.hdr_static_metadata = &hdr_packet;
8882 		}
8883 
8884 		status = dc_stream_get_status(dm_new_crtc_state->stream);
8885 		WARN_ON(!status);
8886 		WARN_ON(!status->plane_count);
8887 
8888 		/*
8889 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8890 		 * Here we create an empty update on each plane.
8891 		 * To fix this, DC should permit updating only stream properties.
8892 		 */
8893 		for (j = 0; j < status->plane_count; j++)
8894 			dummy_updates[j].surface = status->plane_states[0];
8895 
8896 
8897 		mutex_lock(&dm->dc_lock);
8898 		dc_commit_updates_for_stream(dm->dc,
8899 						     dummy_updates,
8900 						     status->plane_count,
8901 						     dm_new_crtc_state->stream,
8902 						     &stream_update,
8903 						     dc_state);
8904 		mutex_unlock(&dm->dc_lock);
8905 	}
8906 
8907 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
8908 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8909 				      new_crtc_state, i) {
8910 		if (old_crtc_state->active && !new_crtc_state->active)
8911 			crtc_disable_count++;
8912 
8913 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8914 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8915 
8916 		/* For freesync config update on crtc state and params for irq */
8917 		update_stream_irq_parameters(dm, dm_new_crtc_state);
8918 
8919 		/* Handle vrr on->off / off->on transitions */
8920 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8921 						dm_new_crtc_state);
8922 	}
8923 
8924 	/**
8925 	 * Enable interrupts for CRTCs that are newly enabled or went through
8926 	 * a modeset. It was intentionally deferred until after the front end
8927 	 * state was modified to wait until the OTG was on and so the IRQ
8928 	 * handlers didn't access stale or invalid state.
8929 	 */
8930 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8931 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8932 #ifdef CONFIG_DEBUG_FS
8933 		bool configure_crc = false;
8934 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8935 #endif
8936 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8937 
8938 		if (new_crtc_state->active &&
8939 		    (!old_crtc_state->active ||
8940 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8941 			dc_stream_retain(dm_new_crtc_state->stream);
8942 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8943 			manage_dm_interrupts(adev, acrtc, true);
8944 
8945 #ifdef CONFIG_DEBUG_FS
8946 			/**
8947 			 * Frontend may have changed so reapply the CRC capture
8948 			 * settings for the stream.
8949 			 */
8950 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8951 			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8952 			cur_crc_src = acrtc->dm_irq_params.crc_src;
8953 			spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8954 
8955 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8956 				configure_crc = true;
8957 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8958 				if (amdgpu_dm_crc_window_is_activated(crtc))
8959 					configure_crc = false;
8960 #endif
8961 			}
8962 
8963 			if (configure_crc)
8964 				amdgpu_dm_crtc_configure_crc_source(
8965 					crtc, dm_new_crtc_state, cur_crc_src);
8966 #endif
8967 		}
8968 	}
8969 
8970 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8971 		if (new_crtc_state->async_flip)
8972 			wait_for_vblank = false;
8973 
8974 	/* update planes when needed per crtc*/
8975 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8976 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8977 
8978 		if (dm_new_crtc_state->stream)
8979 			amdgpu_dm_commit_planes(state, dc_state, dev,
8980 						dm, crtc, wait_for_vblank);
8981 	}
8982 
8983 	/* Update audio instances for each connector. */
8984 	amdgpu_dm_commit_audio(dev, state);
8985 
8986 	/*
8987 	 * send vblank event on all events not handled in flip and
8988 	 * mark consumed event for drm_atomic_helper_commit_hw_done
8989 	 */
8990 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8991 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8992 
8993 		if (new_crtc_state->event)
8994 			drm_send_event_locked(dev, &new_crtc_state->event->base);
8995 
8996 		new_crtc_state->event = NULL;
8997 	}
8998 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8999 
9000 	/* Signal HW programming completion */
9001 	drm_atomic_helper_commit_hw_done(state);
9002 
9003 	if (wait_for_vblank)
9004 		drm_atomic_helper_wait_for_flip_done(dev, state);
9005 
9006 	drm_atomic_helper_cleanup_planes(dev, state);
9007 
9008 	/* return the stolen vga memory back to VRAM */
9009 	if (!adev->mman.keep_stolen_vga_memory)
9010 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9011 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9012 
9013 	/*
9014 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9015 	 * so we can put the GPU into runtime suspend if we're not driving any
9016 	 * displays anymore
9017 	 */
9018 	for (i = 0; i < crtc_disable_count; i++)
9019 		pm_runtime_put_autosuspend(dev->dev);
9020 	pm_runtime_mark_last_busy(dev->dev);
9021 
9022 	if (dc_state_temp)
9023 		dc_release_state(dc_state_temp);
9024 }
9025 
9026 
9027 static int dm_force_atomic_commit(struct drm_connector *connector)
9028 {
9029 	int ret = 0;
9030 	struct drm_device *ddev = connector->dev;
9031 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9032 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9033 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9034 	struct drm_connector_state *conn_state;
9035 	struct drm_crtc_state *crtc_state;
9036 	struct drm_plane_state *plane_state;
9037 
9038 	if (!state)
9039 		return -ENOMEM;
9040 
9041 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9042 
9043 	/* Construct an atomic state to restore previous display setting */
9044 
9045 	/*
9046 	 * Attach connectors to drm_atomic_state
9047 	 */
9048 	conn_state = drm_atomic_get_connector_state(state, connector);
9049 
9050 	ret = PTR_ERR_OR_ZERO(conn_state);
9051 	if (ret)
9052 		goto out;
9053 
9054 	/* Attach crtc to drm_atomic_state*/
9055 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9056 
9057 	ret = PTR_ERR_OR_ZERO(crtc_state);
9058 	if (ret)
9059 		goto out;
9060 
9061 	/* force a restore */
9062 	crtc_state->mode_changed = true;
9063 
9064 	/* Attach plane to drm_atomic_state */
9065 	plane_state = drm_atomic_get_plane_state(state, plane);
9066 
9067 	ret = PTR_ERR_OR_ZERO(plane_state);
9068 	if (ret)
9069 		goto out;
9070 
9071 	/* Call commit internally with the state we just constructed */
9072 	ret = drm_atomic_commit(state);
9073 
9074 out:
9075 	drm_atomic_state_put(state);
9076 	if (ret)
9077 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9078 
9079 	return ret;
9080 }
9081 
9082 /*
9083  * This function handles all cases when set mode does not come upon hotplug.
9084  * This includes when a display is unplugged then plugged back into the
9085  * same port and when running without usermode desktop manager supprot
9086  */
9087 void dm_restore_drm_connector_state(struct drm_device *dev,
9088 				    struct drm_connector *connector)
9089 {
9090 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9091 	struct amdgpu_crtc *disconnected_acrtc;
9092 	struct dm_crtc_state *acrtc_state;
9093 
9094 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9095 		return;
9096 
9097 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9098 	if (!disconnected_acrtc)
9099 		return;
9100 
9101 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9102 	if (!acrtc_state->stream)
9103 		return;
9104 
9105 	/*
9106 	 * If the previous sink is not released and different from the current,
9107 	 * we deduce we are in a state where we can not rely on usermode call
9108 	 * to turn on the display, so we do it here
9109 	 */
9110 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9111 		dm_force_atomic_commit(&aconnector->base);
9112 }
9113 
9114 /*
9115  * Grabs all modesetting locks to serialize against any blocking commits,
9116  * Waits for completion of all non blocking commits.
9117  */
9118 static int do_aquire_global_lock(struct drm_device *dev,
9119 				 struct drm_atomic_state *state)
9120 {
9121 	struct drm_crtc *crtc;
9122 	struct drm_crtc_commit *commit;
9123 	long ret;
9124 
9125 	/*
9126 	 * Adding all modeset locks to aquire_ctx will
9127 	 * ensure that when the framework release it the
9128 	 * extra locks we are locking here will get released to
9129 	 */
9130 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9131 	if (ret)
9132 		return ret;
9133 
9134 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9135 		spin_lock(&crtc->commit_lock);
9136 		commit = list_first_entry_or_null(&crtc->commit_list,
9137 				struct drm_crtc_commit, commit_entry);
9138 		if (commit)
9139 			drm_crtc_commit_get(commit);
9140 		spin_unlock(&crtc->commit_lock);
9141 
9142 		if (!commit)
9143 			continue;
9144 
9145 		/*
9146 		 * Make sure all pending HW programming completed and
9147 		 * page flips done
9148 		 */
9149 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9150 
9151 		if (ret > 0)
9152 			ret = wait_for_completion_interruptible_timeout(
9153 					&commit->flip_done, 10*HZ);
9154 
9155 		if (ret == 0)
9156 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9157 				  "timed out\n", crtc->base.id, crtc->name);
9158 
9159 		drm_crtc_commit_put(commit);
9160 	}
9161 
9162 	return ret < 0 ? ret : 0;
9163 }
9164 
9165 static void get_freesync_config_for_crtc(
9166 	struct dm_crtc_state *new_crtc_state,
9167 	struct dm_connector_state *new_con_state)
9168 {
9169 	struct mod_freesync_config config = {0};
9170 	struct amdgpu_dm_connector *aconnector =
9171 			to_amdgpu_dm_connector(new_con_state->base.connector);
9172 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9173 	int vrefresh = drm_mode_vrefresh(mode);
9174 	bool fs_vid_mode = false;
9175 
9176 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9177 					vrefresh >= aconnector->min_vfreq &&
9178 					vrefresh <= aconnector->max_vfreq;
9179 
9180 	if (new_crtc_state->vrr_supported) {
9181 		new_crtc_state->stream->ignore_msa_timing_param = true;
9182 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9183 
9184 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9185 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9186 		config.vsif_supported = true;
9187 		config.btr = true;
9188 
9189 		if (fs_vid_mode) {
9190 			config.state = VRR_STATE_ACTIVE_FIXED;
9191 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9192 			goto out;
9193 		} else if (new_crtc_state->base.vrr_enabled) {
9194 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9195 		} else {
9196 			config.state = VRR_STATE_INACTIVE;
9197 		}
9198 	}
9199 out:
9200 	new_crtc_state->freesync_config = config;
9201 }
9202 
9203 static void reset_freesync_config_for_crtc(
9204 	struct dm_crtc_state *new_crtc_state)
9205 {
9206 	new_crtc_state->vrr_supported = false;
9207 
9208 	memset(&new_crtc_state->vrr_infopacket, 0,
9209 	       sizeof(new_crtc_state->vrr_infopacket));
9210 }
9211 
9212 static bool
9213 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9214 				 struct drm_crtc_state *new_crtc_state)
9215 {
9216 	struct drm_display_mode old_mode, new_mode;
9217 
9218 	if (!old_crtc_state || !new_crtc_state)
9219 		return false;
9220 
9221 	old_mode = old_crtc_state->mode;
9222 	new_mode = new_crtc_state->mode;
9223 
9224 	if (old_mode.clock       == new_mode.clock &&
9225 	    old_mode.hdisplay    == new_mode.hdisplay &&
9226 	    old_mode.vdisplay    == new_mode.vdisplay &&
9227 	    old_mode.htotal      == new_mode.htotal &&
9228 	    old_mode.vtotal      != new_mode.vtotal &&
9229 	    old_mode.hsync_start == new_mode.hsync_start &&
9230 	    old_mode.vsync_start != new_mode.vsync_start &&
9231 	    old_mode.hsync_end   == new_mode.hsync_end &&
9232 	    old_mode.vsync_end   != new_mode.vsync_end &&
9233 	    old_mode.hskew       == new_mode.hskew &&
9234 	    old_mode.vscan       == new_mode.vscan &&
9235 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9236 	    (new_mode.vsync_end - new_mode.vsync_start))
9237 		return true;
9238 
9239 	return false;
9240 }
9241 
9242 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9243 	uint64_t num, den, res;
9244 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9245 
9246 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9247 
9248 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9249 	den = (unsigned long long)new_crtc_state->mode.htotal *
9250 	      (unsigned long long)new_crtc_state->mode.vtotal;
9251 
9252 	res = div_u64(num, den);
9253 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9254 }
9255 
9256 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9257 				struct drm_atomic_state *state,
9258 				struct drm_crtc *crtc,
9259 				struct drm_crtc_state *old_crtc_state,
9260 				struct drm_crtc_state *new_crtc_state,
9261 				bool enable,
9262 				bool *lock_and_validation_needed)
9263 {
9264 	struct dm_atomic_state *dm_state = NULL;
9265 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9266 	struct dc_stream_state *new_stream;
9267 	int ret = 0;
9268 
9269 	/*
9270 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9271 	 * update changed items
9272 	 */
9273 	struct amdgpu_crtc *acrtc = NULL;
9274 	struct amdgpu_dm_connector *aconnector = NULL;
9275 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9276 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9277 
9278 	new_stream = NULL;
9279 
9280 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9281 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9282 	acrtc = to_amdgpu_crtc(crtc);
9283 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9284 
9285 	/* TODO This hack should go away */
9286 	if (aconnector && enable) {
9287 		/* Make sure fake sink is created in plug-in scenario */
9288 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9289 							    &aconnector->base);
9290 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9291 							    &aconnector->base);
9292 
9293 		if (IS_ERR(drm_new_conn_state)) {
9294 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9295 			goto fail;
9296 		}
9297 
9298 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9299 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9300 
9301 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9302 			goto skip_modeset;
9303 
9304 		new_stream = create_validate_stream_for_sink(aconnector,
9305 							     &new_crtc_state->mode,
9306 							     dm_new_conn_state,
9307 							     dm_old_crtc_state->stream);
9308 
9309 		/*
9310 		 * we can have no stream on ACTION_SET if a display
9311 		 * was disconnected during S3, in this case it is not an
9312 		 * error, the OS will be updated after detection, and
9313 		 * will do the right thing on next atomic commit
9314 		 */
9315 
9316 		if (!new_stream) {
9317 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9318 					__func__, acrtc->base.base.id);
9319 			ret = -ENOMEM;
9320 			goto fail;
9321 		}
9322 
9323 		/*
9324 		 * TODO: Check VSDB bits to decide whether this should
9325 		 * be enabled or not.
9326 		 */
9327 		new_stream->triggered_crtc_reset.enabled =
9328 			dm->force_timing_sync;
9329 
9330 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9331 
9332 		ret = fill_hdr_info_packet(drm_new_conn_state,
9333 					   &new_stream->hdr_static_metadata);
9334 		if (ret)
9335 			goto fail;
9336 
9337 		/*
9338 		 * If we already removed the old stream from the context
9339 		 * (and set the new stream to NULL) then we can't reuse
9340 		 * the old stream even if the stream and scaling are unchanged.
9341 		 * We'll hit the BUG_ON and black screen.
9342 		 *
9343 		 * TODO: Refactor this function to allow this check to work
9344 		 * in all conditions.
9345 		 */
9346 		if (amdgpu_freesync_vid_mode &&
9347 		    dm_new_crtc_state->stream &&
9348 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9349 			goto skip_modeset;
9350 
9351 		if (dm_new_crtc_state->stream &&
9352 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9353 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9354 			new_crtc_state->mode_changed = false;
9355 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9356 					 new_crtc_state->mode_changed);
9357 		}
9358 	}
9359 
9360 	/* mode_changed flag may get updated above, need to check again */
9361 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9362 		goto skip_modeset;
9363 
9364 	DRM_DEBUG_ATOMIC(
9365 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9366 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9367 		"connectors_changed:%d\n",
9368 		acrtc->crtc_id,
9369 		new_crtc_state->enable,
9370 		new_crtc_state->active,
9371 		new_crtc_state->planes_changed,
9372 		new_crtc_state->mode_changed,
9373 		new_crtc_state->active_changed,
9374 		new_crtc_state->connectors_changed);
9375 
9376 	/* Remove stream for any changed/disabled CRTC */
9377 	if (!enable) {
9378 
9379 		if (!dm_old_crtc_state->stream)
9380 			goto skip_modeset;
9381 
9382 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9383 		    is_timing_unchanged_for_freesync(new_crtc_state,
9384 						     old_crtc_state)) {
9385 			new_crtc_state->mode_changed = false;
9386 			DRM_DEBUG_DRIVER(
9387 				"Mode change not required for front porch change, "
9388 				"setting mode_changed to %d",
9389 				new_crtc_state->mode_changed);
9390 
9391 			set_freesync_fixed_config(dm_new_crtc_state);
9392 
9393 			goto skip_modeset;
9394 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9395 			   is_freesync_video_mode(&new_crtc_state->mode,
9396 						  aconnector)) {
9397 			set_freesync_fixed_config(dm_new_crtc_state);
9398 		}
9399 
9400 		ret = dm_atomic_get_state(state, &dm_state);
9401 		if (ret)
9402 			goto fail;
9403 
9404 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9405 				crtc->base.id);
9406 
9407 		/* i.e. reset mode */
9408 		if (dc_remove_stream_from_ctx(
9409 				dm->dc,
9410 				dm_state->context,
9411 				dm_old_crtc_state->stream) != DC_OK) {
9412 			ret = -EINVAL;
9413 			goto fail;
9414 		}
9415 
9416 		dc_stream_release(dm_old_crtc_state->stream);
9417 		dm_new_crtc_state->stream = NULL;
9418 
9419 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9420 
9421 		*lock_and_validation_needed = true;
9422 
9423 	} else {/* Add stream for any updated/enabled CRTC */
9424 		/*
9425 		 * Quick fix to prevent NULL pointer on new_stream when
9426 		 * added MST connectors not found in existing crtc_state in the chained mode
9427 		 * TODO: need to dig out the root cause of that
9428 		 */
9429 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9430 			goto skip_modeset;
9431 
9432 		if (modereset_required(new_crtc_state))
9433 			goto skip_modeset;
9434 
9435 		if (modeset_required(new_crtc_state, new_stream,
9436 				     dm_old_crtc_state->stream)) {
9437 
9438 			WARN_ON(dm_new_crtc_state->stream);
9439 
9440 			ret = dm_atomic_get_state(state, &dm_state);
9441 			if (ret)
9442 				goto fail;
9443 
9444 			dm_new_crtc_state->stream = new_stream;
9445 
9446 			dc_stream_retain(new_stream);
9447 
9448 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9449 					 crtc->base.id);
9450 
9451 			if (dc_add_stream_to_ctx(
9452 					dm->dc,
9453 					dm_state->context,
9454 					dm_new_crtc_state->stream) != DC_OK) {
9455 				ret = -EINVAL;
9456 				goto fail;
9457 			}
9458 
9459 			*lock_and_validation_needed = true;
9460 		}
9461 	}
9462 
9463 skip_modeset:
9464 	/* Release extra reference */
9465 	if (new_stream)
9466 		 dc_stream_release(new_stream);
9467 
9468 	/*
9469 	 * We want to do dc stream updates that do not require a
9470 	 * full modeset below.
9471 	 */
9472 	if (!(enable && aconnector && new_crtc_state->active))
9473 		return 0;
9474 	/*
9475 	 * Given above conditions, the dc state cannot be NULL because:
9476 	 * 1. We're in the process of enabling CRTCs (just been added
9477 	 *    to the dc context, or already is on the context)
9478 	 * 2. Has a valid connector attached, and
9479 	 * 3. Is currently active and enabled.
9480 	 * => The dc stream state currently exists.
9481 	 */
9482 	BUG_ON(dm_new_crtc_state->stream == NULL);
9483 
9484 	/* Scaling or underscan settings */
9485 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9486 		update_stream_scaling_settings(
9487 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9488 
9489 	/* ABM settings */
9490 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9491 
9492 	/*
9493 	 * Color management settings. We also update color properties
9494 	 * when a modeset is needed, to ensure it gets reprogrammed.
9495 	 */
9496 	if (dm_new_crtc_state->base.color_mgmt_changed ||
9497 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9498 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9499 		if (ret)
9500 			goto fail;
9501 	}
9502 
9503 	/* Update Freesync settings. */
9504 	get_freesync_config_for_crtc(dm_new_crtc_state,
9505 				     dm_new_conn_state);
9506 
9507 	return ret;
9508 
9509 fail:
9510 	if (new_stream)
9511 		dc_stream_release(new_stream);
9512 	return ret;
9513 }
9514 
9515 static bool should_reset_plane(struct drm_atomic_state *state,
9516 			       struct drm_plane *plane,
9517 			       struct drm_plane_state *old_plane_state,
9518 			       struct drm_plane_state *new_plane_state)
9519 {
9520 	struct drm_plane *other;
9521 	struct drm_plane_state *old_other_state, *new_other_state;
9522 	struct drm_crtc_state *new_crtc_state;
9523 	int i;
9524 
9525 	/*
9526 	 * TODO: Remove this hack once the checks below are sufficient
9527 	 * enough to determine when we need to reset all the planes on
9528 	 * the stream.
9529 	 */
9530 	if (state->allow_modeset)
9531 		return true;
9532 
9533 	/* Exit early if we know that we're adding or removing the plane. */
9534 	if (old_plane_state->crtc != new_plane_state->crtc)
9535 		return true;
9536 
9537 	/* old crtc == new_crtc == NULL, plane not in context. */
9538 	if (!new_plane_state->crtc)
9539 		return false;
9540 
9541 	new_crtc_state =
9542 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9543 
9544 	if (!new_crtc_state)
9545 		return true;
9546 
9547 	/* CRTC Degamma changes currently require us to recreate planes. */
9548 	if (new_crtc_state->color_mgmt_changed)
9549 		return true;
9550 
9551 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9552 		return true;
9553 
9554 	/*
9555 	 * If there are any new primary or overlay planes being added or
9556 	 * removed then the z-order can potentially change. To ensure
9557 	 * correct z-order and pipe acquisition the current DC architecture
9558 	 * requires us to remove and recreate all existing planes.
9559 	 *
9560 	 * TODO: Come up with a more elegant solution for this.
9561 	 */
9562 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9563 		struct amdgpu_framebuffer *old_afb, *new_afb;
9564 		if (other->type == DRM_PLANE_TYPE_CURSOR)
9565 			continue;
9566 
9567 		if (old_other_state->crtc != new_plane_state->crtc &&
9568 		    new_other_state->crtc != new_plane_state->crtc)
9569 			continue;
9570 
9571 		if (old_other_state->crtc != new_other_state->crtc)
9572 			return true;
9573 
9574 		/* Src/dst size and scaling updates. */
9575 		if (old_other_state->src_w != new_other_state->src_w ||
9576 		    old_other_state->src_h != new_other_state->src_h ||
9577 		    old_other_state->crtc_w != new_other_state->crtc_w ||
9578 		    old_other_state->crtc_h != new_other_state->crtc_h)
9579 			return true;
9580 
9581 		/* Rotation / mirroring updates. */
9582 		if (old_other_state->rotation != new_other_state->rotation)
9583 			return true;
9584 
9585 		/* Blending updates. */
9586 		if (old_other_state->pixel_blend_mode !=
9587 		    new_other_state->pixel_blend_mode)
9588 			return true;
9589 
9590 		/* Alpha updates. */
9591 		if (old_other_state->alpha != new_other_state->alpha)
9592 			return true;
9593 
9594 		/* Colorspace changes. */
9595 		if (old_other_state->color_range != new_other_state->color_range ||
9596 		    old_other_state->color_encoding != new_other_state->color_encoding)
9597 			return true;
9598 
9599 		/* Framebuffer checks fall at the end. */
9600 		if (!old_other_state->fb || !new_other_state->fb)
9601 			continue;
9602 
9603 		/* Pixel format changes can require bandwidth updates. */
9604 		if (old_other_state->fb->format != new_other_state->fb->format)
9605 			return true;
9606 
9607 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9608 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9609 
9610 		/* Tiling and DCC changes also require bandwidth updates. */
9611 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9612 		    old_afb->base.modifier != new_afb->base.modifier)
9613 			return true;
9614 	}
9615 
9616 	return false;
9617 }
9618 
9619 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9620 			      struct drm_plane_state *new_plane_state,
9621 			      struct drm_framebuffer *fb)
9622 {
9623 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9624 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9625 	unsigned int pitch;
9626 	bool linear;
9627 
9628 	if (fb->width > new_acrtc->max_cursor_width ||
9629 	    fb->height > new_acrtc->max_cursor_height) {
9630 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9631 				 new_plane_state->fb->width,
9632 				 new_plane_state->fb->height);
9633 		return -EINVAL;
9634 	}
9635 	if (new_plane_state->src_w != fb->width << 16 ||
9636 	    new_plane_state->src_h != fb->height << 16) {
9637 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9638 		return -EINVAL;
9639 	}
9640 
9641 	/* Pitch in pixels */
9642 	pitch = fb->pitches[0] / fb->format->cpp[0];
9643 
9644 	if (fb->width != pitch) {
9645 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9646 				 fb->width, pitch);
9647 		return -EINVAL;
9648 	}
9649 
9650 	switch (pitch) {
9651 	case 64:
9652 	case 128:
9653 	case 256:
9654 		/* FB pitch is supported by cursor plane */
9655 		break;
9656 	default:
9657 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9658 		return -EINVAL;
9659 	}
9660 
9661 	/* Core DRM takes care of checking FB modifiers, so we only need to
9662 	 * check tiling flags when the FB doesn't have a modifier. */
9663 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9664 		if (adev->family < AMDGPU_FAMILY_AI) {
9665 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9666 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9667 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9668 		} else {
9669 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9670 		}
9671 		if (!linear) {
9672 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9673 			return -EINVAL;
9674 		}
9675 	}
9676 
9677 	return 0;
9678 }
9679 
9680 static int dm_update_plane_state(struct dc *dc,
9681 				 struct drm_atomic_state *state,
9682 				 struct drm_plane *plane,
9683 				 struct drm_plane_state *old_plane_state,
9684 				 struct drm_plane_state *new_plane_state,
9685 				 bool enable,
9686 				 bool *lock_and_validation_needed)
9687 {
9688 
9689 	struct dm_atomic_state *dm_state = NULL;
9690 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9691 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9692 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9693 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9694 	struct amdgpu_crtc *new_acrtc;
9695 	bool needs_reset;
9696 	int ret = 0;
9697 
9698 
9699 	new_plane_crtc = new_plane_state->crtc;
9700 	old_plane_crtc = old_plane_state->crtc;
9701 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9702 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9703 
9704 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9705 		if (!enable || !new_plane_crtc ||
9706 			drm_atomic_plane_disabling(plane->state, new_plane_state))
9707 			return 0;
9708 
9709 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9710 
9711 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9712 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9713 			return -EINVAL;
9714 		}
9715 
9716 		if (new_plane_state->fb) {
9717 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9718 						 new_plane_state->fb);
9719 			if (ret)
9720 				return ret;
9721 		}
9722 
9723 		return 0;
9724 	}
9725 
9726 	needs_reset = should_reset_plane(state, plane, old_plane_state,
9727 					 new_plane_state);
9728 
9729 	/* Remove any changed/removed planes */
9730 	if (!enable) {
9731 		if (!needs_reset)
9732 			return 0;
9733 
9734 		if (!old_plane_crtc)
9735 			return 0;
9736 
9737 		old_crtc_state = drm_atomic_get_old_crtc_state(
9738 				state, old_plane_crtc);
9739 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9740 
9741 		if (!dm_old_crtc_state->stream)
9742 			return 0;
9743 
9744 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9745 				plane->base.id, old_plane_crtc->base.id);
9746 
9747 		ret = dm_atomic_get_state(state, &dm_state);
9748 		if (ret)
9749 			return ret;
9750 
9751 		if (!dc_remove_plane_from_context(
9752 				dc,
9753 				dm_old_crtc_state->stream,
9754 				dm_old_plane_state->dc_state,
9755 				dm_state->context)) {
9756 
9757 			return -EINVAL;
9758 		}
9759 
9760 
9761 		dc_plane_state_release(dm_old_plane_state->dc_state);
9762 		dm_new_plane_state->dc_state = NULL;
9763 
9764 		*lock_and_validation_needed = true;
9765 
9766 	} else { /* Add new planes */
9767 		struct dc_plane_state *dc_new_plane_state;
9768 
9769 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9770 			return 0;
9771 
9772 		if (!new_plane_crtc)
9773 			return 0;
9774 
9775 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9776 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9777 
9778 		if (!dm_new_crtc_state->stream)
9779 			return 0;
9780 
9781 		if (!needs_reset)
9782 			return 0;
9783 
9784 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9785 		if (ret)
9786 			return ret;
9787 
9788 		WARN_ON(dm_new_plane_state->dc_state);
9789 
9790 		dc_new_plane_state = dc_create_plane_state(dc);
9791 		if (!dc_new_plane_state)
9792 			return -ENOMEM;
9793 
9794 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9795 				 plane->base.id, new_plane_crtc->base.id);
9796 
9797 		ret = fill_dc_plane_attributes(
9798 			drm_to_adev(new_plane_crtc->dev),
9799 			dc_new_plane_state,
9800 			new_plane_state,
9801 			new_crtc_state);
9802 		if (ret) {
9803 			dc_plane_state_release(dc_new_plane_state);
9804 			return ret;
9805 		}
9806 
9807 		ret = dm_atomic_get_state(state, &dm_state);
9808 		if (ret) {
9809 			dc_plane_state_release(dc_new_plane_state);
9810 			return ret;
9811 		}
9812 
9813 		/*
9814 		 * Any atomic check errors that occur after this will
9815 		 * not need a release. The plane state will be attached
9816 		 * to the stream, and therefore part of the atomic
9817 		 * state. It'll be released when the atomic state is
9818 		 * cleaned.
9819 		 */
9820 		if (!dc_add_plane_to_context(
9821 				dc,
9822 				dm_new_crtc_state->stream,
9823 				dc_new_plane_state,
9824 				dm_state->context)) {
9825 
9826 			dc_plane_state_release(dc_new_plane_state);
9827 			return -EINVAL;
9828 		}
9829 
9830 		dm_new_plane_state->dc_state = dc_new_plane_state;
9831 
9832 		/* Tell DC to do a full surface update every time there
9833 		 * is a plane change. Inefficient, but works for now.
9834 		 */
9835 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9836 
9837 		*lock_and_validation_needed = true;
9838 	}
9839 
9840 
9841 	return ret;
9842 }
9843 
9844 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9845 				struct drm_crtc *crtc,
9846 				struct drm_crtc_state *new_crtc_state)
9847 {
9848 	struct drm_plane_state *new_cursor_state, *new_primary_state;
9849 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9850 
9851 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9852 	 * cursor per pipe but it's going to inherit the scaling and
9853 	 * positioning from the underlying pipe. Check the cursor plane's
9854 	 * blending properties match the primary plane's. */
9855 
9856 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9857 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9858 	if (!new_cursor_state || !new_primary_state ||
9859 	    !new_cursor_state->fb || !new_primary_state->fb) {
9860 		return 0;
9861 	}
9862 
9863 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9864 			 (new_cursor_state->src_w >> 16);
9865 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9866 			 (new_cursor_state->src_h >> 16);
9867 
9868 	primary_scale_w = new_primary_state->crtc_w * 1000 /
9869 			 (new_primary_state->src_w >> 16);
9870 	primary_scale_h = new_primary_state->crtc_h * 1000 /
9871 			 (new_primary_state->src_h >> 16);
9872 
9873 	if (cursor_scale_w != primary_scale_w ||
9874 	    cursor_scale_h != primary_scale_h) {
9875 		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9876 		return -EINVAL;
9877 	}
9878 
9879 	return 0;
9880 }
9881 
9882 #if defined(CONFIG_DRM_AMD_DC_DCN)
9883 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9884 {
9885 	struct drm_connector *connector;
9886 	struct drm_connector_state *conn_state;
9887 	struct amdgpu_dm_connector *aconnector = NULL;
9888 	int i;
9889 	for_each_new_connector_in_state(state, connector, conn_state, i) {
9890 		if (conn_state->crtc != crtc)
9891 			continue;
9892 
9893 		aconnector = to_amdgpu_dm_connector(connector);
9894 		if (!aconnector->port || !aconnector->mst_port)
9895 			aconnector = NULL;
9896 		else
9897 			break;
9898 	}
9899 
9900 	if (!aconnector)
9901 		return 0;
9902 
9903 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9904 }
9905 #endif
9906 
9907 static int validate_overlay(struct drm_atomic_state *state)
9908 {
9909 	int i;
9910 	struct drm_plane *plane;
9911 	struct drm_plane_state *old_plane_state, *new_plane_state;
9912 	struct drm_plane_state *primary_state, *overlay_state = NULL;
9913 
9914 	/* Check if primary plane is contained inside overlay */
9915 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9916 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9917 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9918 				return 0;
9919 
9920 			overlay_state = new_plane_state;
9921 			continue;
9922 		}
9923 	}
9924 
9925 	/* check if we're making changes to the overlay plane */
9926 	if (!overlay_state)
9927 		return 0;
9928 
9929 	/* check if overlay plane is enabled */
9930 	if (!overlay_state->crtc)
9931 		return 0;
9932 
9933 	/* find the primary plane for the CRTC that the overlay is enabled on */
9934 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
9935 	if (IS_ERR(primary_state))
9936 		return PTR_ERR(primary_state);
9937 
9938 	/* check if primary plane is enabled */
9939 	if (!primary_state->crtc)
9940 		return 0;
9941 
9942 	/* Perform the bounds check to ensure the overlay plane covers the primary */
9943 	if (primary_state->crtc_x < overlay_state->crtc_x ||
9944 	    primary_state->crtc_y < overlay_state->crtc_y ||
9945 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
9946 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
9947 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
9948 		return -EINVAL;
9949 	}
9950 
9951 	return 0;
9952 }
9953 
9954 /**
9955  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9956  * @dev: The DRM device
9957  * @state: The atomic state to commit
9958  *
9959  * Validate that the given atomic state is programmable by DC into hardware.
9960  * This involves constructing a &struct dc_state reflecting the new hardware
9961  * state we wish to commit, then querying DC to see if it is programmable. It's
9962  * important not to modify the existing DC state. Otherwise, atomic_check
9963  * may unexpectedly commit hardware changes.
9964  *
9965  * When validating the DC state, it's important that the right locks are
9966  * acquired. For full updates case which removes/adds/updates streams on one
9967  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9968  * that any such full update commit will wait for completion of any outstanding
9969  * flip using DRMs synchronization events.
9970  *
9971  * Note that DM adds the affected connectors for all CRTCs in state, when that
9972  * might not seem necessary. This is because DC stream creation requires the
9973  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9974  * be possible but non-trivial - a possible TODO item.
9975  *
9976  * Return: -Error code if validation failed.
9977  */
9978 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9979 				  struct drm_atomic_state *state)
9980 {
9981 	struct amdgpu_device *adev = drm_to_adev(dev);
9982 	struct dm_atomic_state *dm_state = NULL;
9983 	struct dc *dc = adev->dm.dc;
9984 	struct drm_connector *connector;
9985 	struct drm_connector_state *old_con_state, *new_con_state;
9986 	struct drm_crtc *crtc;
9987 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9988 	struct drm_plane *plane;
9989 	struct drm_plane_state *old_plane_state, *new_plane_state;
9990 	enum dc_status status;
9991 	int ret, i;
9992 	bool lock_and_validation_needed = false;
9993 	struct dm_crtc_state *dm_old_crtc_state;
9994 
9995 	trace_amdgpu_dm_atomic_check_begin(state);
9996 
9997 	ret = drm_atomic_helper_check_modeset(dev, state);
9998 	if (ret)
9999 		goto fail;
10000 
10001 	/* Check connector changes */
10002 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10003 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10004 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10005 
10006 		/* Skip connectors that are disabled or part of modeset already. */
10007 		if (!old_con_state->crtc && !new_con_state->crtc)
10008 			continue;
10009 
10010 		if (!new_con_state->crtc)
10011 			continue;
10012 
10013 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10014 		if (IS_ERR(new_crtc_state)) {
10015 			ret = PTR_ERR(new_crtc_state);
10016 			goto fail;
10017 		}
10018 
10019 		if (dm_old_con_state->abm_level !=
10020 		    dm_new_con_state->abm_level)
10021 			new_crtc_state->connectors_changed = true;
10022 	}
10023 
10024 #if defined(CONFIG_DRM_AMD_DC_DCN)
10025 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10026 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10027 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10028 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10029 				if (ret)
10030 					goto fail;
10031 			}
10032 		}
10033 	}
10034 #endif
10035 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10036 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10037 
10038 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10039 		    !new_crtc_state->color_mgmt_changed &&
10040 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10041 			dm_old_crtc_state->dsc_force_changed == false)
10042 			continue;
10043 
10044 		if (!new_crtc_state->enable)
10045 			continue;
10046 
10047 		ret = drm_atomic_add_affected_connectors(state, crtc);
10048 		if (ret)
10049 			return ret;
10050 
10051 		ret = drm_atomic_add_affected_planes(state, crtc);
10052 		if (ret)
10053 			goto fail;
10054 
10055 		if (dm_old_crtc_state->dsc_force_changed)
10056 			new_crtc_state->mode_changed = true;
10057 	}
10058 
10059 	/*
10060 	 * Add all primary and overlay planes on the CRTC to the state
10061 	 * whenever a plane is enabled to maintain correct z-ordering
10062 	 * and to enable fast surface updates.
10063 	 */
10064 	drm_for_each_crtc(crtc, dev) {
10065 		bool modified = false;
10066 
10067 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10068 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10069 				continue;
10070 
10071 			if (new_plane_state->crtc == crtc ||
10072 			    old_plane_state->crtc == crtc) {
10073 				modified = true;
10074 				break;
10075 			}
10076 		}
10077 
10078 		if (!modified)
10079 			continue;
10080 
10081 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10082 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10083 				continue;
10084 
10085 			new_plane_state =
10086 				drm_atomic_get_plane_state(state, plane);
10087 
10088 			if (IS_ERR(new_plane_state)) {
10089 				ret = PTR_ERR(new_plane_state);
10090 				goto fail;
10091 			}
10092 		}
10093 	}
10094 
10095 	/* Remove exiting planes if they are modified */
10096 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10097 		ret = dm_update_plane_state(dc, state, plane,
10098 					    old_plane_state,
10099 					    new_plane_state,
10100 					    false,
10101 					    &lock_and_validation_needed);
10102 		if (ret)
10103 			goto fail;
10104 	}
10105 
10106 	/* Disable all crtcs which require disable */
10107 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10108 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10109 					   old_crtc_state,
10110 					   new_crtc_state,
10111 					   false,
10112 					   &lock_and_validation_needed);
10113 		if (ret)
10114 			goto fail;
10115 	}
10116 
10117 	/* Enable all crtcs which require enable */
10118 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10119 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10120 					   old_crtc_state,
10121 					   new_crtc_state,
10122 					   true,
10123 					   &lock_and_validation_needed);
10124 		if (ret)
10125 			goto fail;
10126 	}
10127 
10128 	ret = validate_overlay(state);
10129 	if (ret)
10130 		goto fail;
10131 
10132 	/* Add new/modified planes */
10133 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10134 		ret = dm_update_plane_state(dc, state, plane,
10135 					    old_plane_state,
10136 					    new_plane_state,
10137 					    true,
10138 					    &lock_and_validation_needed);
10139 		if (ret)
10140 			goto fail;
10141 	}
10142 
10143 	/* Run this here since we want to validate the streams we created */
10144 	ret = drm_atomic_helper_check_planes(dev, state);
10145 	if (ret)
10146 		goto fail;
10147 
10148 	/* Check cursor planes scaling */
10149 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10150 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10151 		if (ret)
10152 			goto fail;
10153 	}
10154 
10155 	if (state->legacy_cursor_update) {
10156 		/*
10157 		 * This is a fast cursor update coming from the plane update
10158 		 * helper, check if it can be done asynchronously for better
10159 		 * performance.
10160 		 */
10161 		state->async_update =
10162 			!drm_atomic_helper_async_check(dev, state);
10163 
10164 		/*
10165 		 * Skip the remaining global validation if this is an async
10166 		 * update. Cursor updates can be done without affecting
10167 		 * state or bandwidth calcs and this avoids the performance
10168 		 * penalty of locking the private state object and
10169 		 * allocating a new dc_state.
10170 		 */
10171 		if (state->async_update)
10172 			return 0;
10173 	}
10174 
10175 	/* Check scaling and underscan changes*/
10176 	/* TODO Removed scaling changes validation due to inability to commit
10177 	 * new stream into context w\o causing full reset. Need to
10178 	 * decide how to handle.
10179 	 */
10180 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10181 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10182 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10183 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10184 
10185 		/* Skip any modesets/resets */
10186 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10187 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10188 			continue;
10189 
10190 		/* Skip any thing not scale or underscan changes */
10191 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10192 			continue;
10193 
10194 		lock_and_validation_needed = true;
10195 	}
10196 
10197 	/**
10198 	 * Streams and planes are reset when there are changes that affect
10199 	 * bandwidth. Anything that affects bandwidth needs to go through
10200 	 * DC global validation to ensure that the configuration can be applied
10201 	 * to hardware.
10202 	 *
10203 	 * We have to currently stall out here in atomic_check for outstanding
10204 	 * commits to finish in this case because our IRQ handlers reference
10205 	 * DRM state directly - we can end up disabling interrupts too early
10206 	 * if we don't.
10207 	 *
10208 	 * TODO: Remove this stall and drop DM state private objects.
10209 	 */
10210 	if (lock_and_validation_needed) {
10211 		ret = dm_atomic_get_state(state, &dm_state);
10212 		if (ret)
10213 			goto fail;
10214 
10215 		ret = do_aquire_global_lock(dev, state);
10216 		if (ret)
10217 			goto fail;
10218 
10219 #if defined(CONFIG_DRM_AMD_DC_DCN)
10220 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10221 			goto fail;
10222 
10223 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10224 		if (ret)
10225 			goto fail;
10226 #endif
10227 
10228 		/*
10229 		 * Perform validation of MST topology in the state:
10230 		 * We need to perform MST atomic check before calling
10231 		 * dc_validate_global_state(), or there is a chance
10232 		 * to get stuck in an infinite loop and hang eventually.
10233 		 */
10234 		ret = drm_dp_mst_atomic_check(state);
10235 		if (ret)
10236 			goto fail;
10237 		status = dc_validate_global_state(dc, dm_state->context, false);
10238 		if (status != DC_OK) {
10239 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
10240 				       dc_status_to_str(status), status);
10241 			ret = -EINVAL;
10242 			goto fail;
10243 		}
10244 	} else {
10245 		/*
10246 		 * The commit is a fast update. Fast updates shouldn't change
10247 		 * the DC context, affect global validation, and can have their
10248 		 * commit work done in parallel with other commits not touching
10249 		 * the same resource. If we have a new DC context as part of
10250 		 * the DM atomic state from validation we need to free it and
10251 		 * retain the existing one instead.
10252 		 *
10253 		 * Furthermore, since the DM atomic state only contains the DC
10254 		 * context and can safely be annulled, we can free the state
10255 		 * and clear the associated private object now to free
10256 		 * some memory and avoid a possible use-after-free later.
10257 		 */
10258 
10259 		for (i = 0; i < state->num_private_objs; i++) {
10260 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10261 
10262 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10263 				int j = state->num_private_objs-1;
10264 
10265 				dm_atomic_destroy_state(obj,
10266 						state->private_objs[i].state);
10267 
10268 				/* If i is not at the end of the array then the
10269 				 * last element needs to be moved to where i was
10270 				 * before the array can safely be truncated.
10271 				 */
10272 				if (i != j)
10273 					state->private_objs[i] =
10274 						state->private_objs[j];
10275 
10276 				state->private_objs[j].ptr = NULL;
10277 				state->private_objs[j].state = NULL;
10278 				state->private_objs[j].old_state = NULL;
10279 				state->private_objs[j].new_state = NULL;
10280 
10281 				state->num_private_objs = j;
10282 				break;
10283 			}
10284 		}
10285 	}
10286 
10287 	/* Store the overall update type for use later in atomic check. */
10288 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
10289 		struct dm_crtc_state *dm_new_crtc_state =
10290 			to_dm_crtc_state(new_crtc_state);
10291 
10292 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10293 							 UPDATE_TYPE_FULL :
10294 							 UPDATE_TYPE_FAST;
10295 	}
10296 
10297 	/* Must be success */
10298 	WARN_ON(ret);
10299 
10300 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10301 
10302 	return ret;
10303 
10304 fail:
10305 	if (ret == -EDEADLK)
10306 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10307 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10308 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10309 	else
10310 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10311 
10312 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10313 
10314 	return ret;
10315 }
10316 
10317 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10318 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10319 {
10320 	uint8_t dpcd_data;
10321 	bool capable = false;
10322 
10323 	if (amdgpu_dm_connector->dc_link &&
10324 		dm_helpers_dp_read_dpcd(
10325 				NULL,
10326 				amdgpu_dm_connector->dc_link,
10327 				DP_DOWN_STREAM_PORT_COUNT,
10328 				&dpcd_data,
10329 				sizeof(dpcd_data))) {
10330 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10331 	}
10332 
10333 	return capable;
10334 }
10335 
10336 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10337 		uint8_t *edid_ext, int len,
10338 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10339 {
10340 	int i;
10341 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10342 	struct dc *dc = adev->dm.dc;
10343 
10344 	/* send extension block to DMCU for parsing */
10345 	for (i = 0; i < len; i += 8) {
10346 		bool res;
10347 		int offset;
10348 
10349 		/* send 8 bytes a time */
10350 		if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10351 			return false;
10352 
10353 		if (i+8 == len) {
10354 			/* EDID block sent completed, expect result */
10355 			int version, min_rate, max_rate;
10356 
10357 			res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10358 			if (res) {
10359 				/* amd vsdb found */
10360 				vsdb_info->freesync_supported = 1;
10361 				vsdb_info->amd_vsdb_version = version;
10362 				vsdb_info->min_refresh_rate_hz = min_rate;
10363 				vsdb_info->max_refresh_rate_hz = max_rate;
10364 				return true;
10365 			}
10366 			/* not amd vsdb */
10367 			return false;
10368 		}
10369 
10370 		/* check for ack*/
10371 		res = dc_edid_parser_recv_cea_ack(dc, &offset);
10372 		if (!res)
10373 			return false;
10374 	}
10375 
10376 	return false;
10377 }
10378 
10379 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10380 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10381 {
10382 	uint8_t *edid_ext = NULL;
10383 	int i;
10384 	bool valid_vsdb_found = false;
10385 
10386 	/*----- drm_find_cea_extension() -----*/
10387 	/* No EDID or EDID extensions */
10388 	if (edid == NULL || edid->extensions == 0)
10389 		return -ENODEV;
10390 
10391 	/* Find CEA extension */
10392 	for (i = 0; i < edid->extensions; i++) {
10393 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10394 		if (edid_ext[0] == CEA_EXT)
10395 			break;
10396 	}
10397 
10398 	if (i == edid->extensions)
10399 		return -ENODEV;
10400 
10401 	/*----- cea_db_offsets() -----*/
10402 	if (edid_ext[0] != CEA_EXT)
10403 		return -ENODEV;
10404 
10405 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10406 
10407 	return valid_vsdb_found ? i : -ENODEV;
10408 }
10409 
10410 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10411 					struct edid *edid)
10412 {
10413 	int i = 0;
10414 	struct detailed_timing *timing;
10415 	struct detailed_non_pixel *data;
10416 	struct detailed_data_monitor_range *range;
10417 	struct amdgpu_dm_connector *amdgpu_dm_connector =
10418 			to_amdgpu_dm_connector(connector);
10419 	struct dm_connector_state *dm_con_state = NULL;
10420 
10421 	struct drm_device *dev = connector->dev;
10422 	struct amdgpu_device *adev = drm_to_adev(dev);
10423 	bool freesync_capable = false;
10424 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10425 
10426 	if (!connector->state) {
10427 		DRM_ERROR("%s - Connector has no state", __func__);
10428 		goto update;
10429 	}
10430 
10431 	if (!edid) {
10432 		dm_con_state = to_dm_connector_state(connector->state);
10433 
10434 		amdgpu_dm_connector->min_vfreq = 0;
10435 		amdgpu_dm_connector->max_vfreq = 0;
10436 		amdgpu_dm_connector->pixel_clock_mhz = 0;
10437 
10438 		goto update;
10439 	}
10440 
10441 	dm_con_state = to_dm_connector_state(connector->state);
10442 
10443 	if (!amdgpu_dm_connector->dc_sink) {
10444 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
10445 		goto update;
10446 	}
10447 	if (!adev->dm.freesync_module)
10448 		goto update;
10449 
10450 
10451 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10452 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10453 		bool edid_check_required = false;
10454 
10455 		if (edid) {
10456 			edid_check_required = is_dp_capable_without_timing_msa(
10457 						adev->dm.dc,
10458 						amdgpu_dm_connector);
10459 		}
10460 
10461 		if (edid_check_required == true && (edid->version > 1 ||
10462 		   (edid->version == 1 && edid->revision > 1))) {
10463 			for (i = 0; i < 4; i++) {
10464 
10465 				timing	= &edid->detailed_timings[i];
10466 				data	= &timing->data.other_data;
10467 				range	= &data->data.range;
10468 				/*
10469 				 * Check if monitor has continuous frequency mode
10470 				 */
10471 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10472 					continue;
10473 				/*
10474 				 * Check for flag range limits only. If flag == 1 then
10475 				 * no additional timing information provided.
10476 				 * Default GTF, GTF Secondary curve and CVT are not
10477 				 * supported
10478 				 */
10479 				if (range->flags != 1)
10480 					continue;
10481 
10482 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10483 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10484 				amdgpu_dm_connector->pixel_clock_mhz =
10485 					range->pixel_clock_mhz * 10;
10486 
10487 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10488 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10489 
10490 				break;
10491 			}
10492 
10493 			if (amdgpu_dm_connector->max_vfreq -
10494 			    amdgpu_dm_connector->min_vfreq > 10) {
10495 
10496 				freesync_capable = true;
10497 			}
10498 		}
10499 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10500 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10501 		if (i >= 0 && vsdb_info.freesync_supported) {
10502 			timing  = &edid->detailed_timings[i];
10503 			data    = &timing->data.other_data;
10504 
10505 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10506 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10507 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10508 				freesync_capable = true;
10509 
10510 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10511 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10512 		}
10513 	}
10514 
10515 update:
10516 	if (dm_con_state)
10517 		dm_con_state->freesync_capable = freesync_capable;
10518 
10519 	if (connector->vrr_capable_property)
10520 		drm_connector_set_vrr_capable_property(connector,
10521 						       freesync_capable);
10522 }
10523 
10524 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10525 {
10526 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10527 
10528 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10529 		return;
10530 	if (link->type == dc_connection_none)
10531 		return;
10532 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10533 					dpcd_data, sizeof(dpcd_data))) {
10534 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10535 
10536 		if (dpcd_data[0] == 0) {
10537 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10538 			link->psr_settings.psr_feature_enabled = false;
10539 		} else {
10540 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
10541 			link->psr_settings.psr_feature_enabled = true;
10542 		}
10543 
10544 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10545 	}
10546 }
10547 
10548 /*
10549  * amdgpu_dm_link_setup_psr() - configure psr link
10550  * @stream: stream state
10551  *
10552  * Return: true if success
10553  */
10554 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10555 {
10556 	struct dc_link *link = NULL;
10557 	struct psr_config psr_config = {0};
10558 	struct psr_context psr_context = {0};
10559 	bool ret = false;
10560 
10561 	if (stream == NULL)
10562 		return false;
10563 
10564 	link = stream->link;
10565 
10566 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10567 
10568 	if (psr_config.psr_version > 0) {
10569 		psr_config.psr_exit_link_training_required = 0x1;
10570 		psr_config.psr_frame_capture_indication_req = 0;
10571 		psr_config.psr_rfb_setup_time = 0x37;
10572 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10573 		psr_config.allow_smu_optimizations = 0x0;
10574 
10575 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10576 
10577 	}
10578 	DRM_DEBUG_DRIVER("PSR link: %d\n",	link->psr_settings.psr_feature_enabled);
10579 
10580 	return ret;
10581 }
10582 
10583 /*
10584  * amdgpu_dm_psr_enable() - enable psr f/w
10585  * @stream: stream state
10586  *
10587  * Return: true if success
10588  */
10589 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10590 {
10591 	struct dc_link *link = stream->link;
10592 	unsigned int vsync_rate_hz = 0;
10593 	struct dc_static_screen_params params = {0};
10594 	/* Calculate number of static frames before generating interrupt to
10595 	 * enter PSR.
10596 	 */
10597 	// Init fail safe of 2 frames static
10598 	unsigned int num_frames_static = 2;
10599 
10600 	DRM_DEBUG_DRIVER("Enabling psr...\n");
10601 
10602 	vsync_rate_hz = div64_u64(div64_u64((
10603 			stream->timing.pix_clk_100hz * 100),
10604 			stream->timing.v_total),
10605 			stream->timing.h_total);
10606 
10607 	/* Round up
10608 	 * Calculate number of frames such that at least 30 ms of time has
10609 	 * passed.
10610 	 */
10611 	if (vsync_rate_hz != 0) {
10612 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10613 		num_frames_static = (30000 / frame_time_microsec) + 1;
10614 	}
10615 
10616 	params.triggers.cursor_update = true;
10617 	params.triggers.overlay_update = true;
10618 	params.triggers.surface_update = true;
10619 	params.num_frames = num_frames_static;
10620 
10621 	dc_stream_set_static_screen_params(link->ctx->dc,
10622 					   &stream, 1,
10623 					   &params);
10624 
10625 	return dc_link_set_psr_allow_active(link, true, false, false);
10626 }
10627 
10628 /*
10629  * amdgpu_dm_psr_disable() - disable psr f/w
10630  * @stream:  stream state
10631  *
10632  * Return: true if success
10633  */
10634 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10635 {
10636 
10637 	DRM_DEBUG_DRIVER("Disabling psr...\n");
10638 
10639 	return dc_link_set_psr_allow_active(stream->link, false, true, false);
10640 }
10641 
10642 /*
10643  * amdgpu_dm_psr_disable() - disable psr f/w
10644  * if psr is enabled on any stream
10645  *
10646  * Return: true if success
10647  */
10648 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10649 {
10650 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10651 	return dc_set_psr_allow_active(dm->dc, false);
10652 }
10653 
10654 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10655 {
10656 	struct amdgpu_device *adev = drm_to_adev(dev);
10657 	struct dc *dc = adev->dm.dc;
10658 	int i;
10659 
10660 	mutex_lock(&adev->dm.dc_lock);
10661 	if (dc->current_state) {
10662 		for (i = 0; i < dc->current_state->stream_count; ++i)
10663 			dc->current_state->streams[i]
10664 				->triggered_crtc_reset.enabled =
10665 				adev->dm.force_timing_sync;
10666 
10667 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10668 		dc_trigger_sync(dc, dc->current_state);
10669 	}
10670 	mutex_unlock(&adev->dm.dc_lock);
10671 }
10672 
10673 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10674 		       uint32_t value, const char *func_name)
10675 {
10676 #ifdef DM_CHECK_ADDR_0
10677 	if (address == 0) {
10678 		DC_ERR("invalid register write. address = 0");
10679 		return;
10680 	}
10681 #endif
10682 	cgs_write_register(ctx->cgs_device, address, value);
10683 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10684 }
10685 
10686 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10687 			  const char *func_name)
10688 {
10689 	uint32_t value;
10690 #ifdef DM_CHECK_ADDR_0
10691 	if (address == 0) {
10692 		DC_ERR("invalid register read; address = 0\n");
10693 		return 0;
10694 	}
10695 #endif
10696 
10697 	if (ctx->dmub_srv &&
10698 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10699 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10700 		ASSERT(false);
10701 		return 0;
10702 	}
10703 
10704 	value = cgs_read_register(ctx->cgs_device, address);
10705 
10706 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10707 
10708 	return value;
10709 }
10710