1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 
74 #include <drm/drm_atomic.h>
75 #include <drm/drm_atomic_uapi.h>
76 #include <drm/drm_atomic_helper.h>
77 #include <drm/drm_dp_mst_helper.h>
78 #include <drm/drm_fb_helper.h>
79 #include <drm/drm_fourcc.h>
80 #include <drm/drm_edid.h>
81 #include <drm/drm_vblank.h>
82 #include <drm/drm_audio_component.h>
83 
84 #if defined(CONFIG_DRM_AMD_DC_DCN)
85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
86 
87 #include "dcn/dcn_1_0_offset.h"
88 #include "dcn/dcn_1_0_sh_mask.h"
89 #include "soc15_hw_ip.h"
90 #include "vega10_ip_offset.h"
91 
92 #include "soc15_common.h"
93 #endif
94 
95 #include "modules/inc/mod_freesync.h"
96 #include "modules/power/power_helpers.h"
97 #include "modules/inc/mod_info_packet.h"
98 
99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
105 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
115 
116 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
118 
119 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
121 
122 /* Number of bytes in PSP header for firmware. */
123 #define PSP_HEADER_BYTES 0x100
124 
125 /* Number of bytes in PSP footer for firmware. */
126 #define PSP_FOOTER_BYTES 0x100
127 
128 /**
129  * DOC: overview
130  *
131  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
132  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
133  * requests into DC requests, and DC responses into DRM responses.
134  *
135  * The root control structure is &struct amdgpu_display_manager.
136  */
137 
138 /* basic init/fini API */
139 static int amdgpu_dm_init(struct amdgpu_device *adev);
140 static void amdgpu_dm_fini(struct amdgpu_device *adev);
141 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
142 
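/* Map the DP dongle type reported in the link's DPCD caps to the DRM subconnector type. */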
143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
144 {
145 	switch (link->dpcd_caps.dongle_type) {
146 	case DISPLAY_DONGLE_NONE:
147 		return DRM_MODE_SUBCONNECTOR_Native;
148 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
149 		return DRM_MODE_SUBCONNECTOR_VGA;
150 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
151 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
152 		return DRM_MODE_SUBCONNECTOR_DVID;
153 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
154 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
155 		return DRM_MODE_SUBCONNECTOR_HDMIA;
156 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
157 	default:
158 		return DRM_MODE_SUBCONNECTOR_Unknown;
159 	}
160 }
161 
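/* Update the connector's DP subconnector property based on the attached dongle/sink. */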
162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
163 {
164 	struct dc_link *link = aconnector->dc_link;
165 	struct drm_connector *connector = &aconnector->base;
166 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
167 
168 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
169 		return;
170 
171 	if (aconnector->dc_sink)
172 		subconnector = get_subconnector_type(link);
173 
174 	drm_object_property_set_value(&connector->base,
175 			connector->dev->mode_config.dp_subconnector_property,
176 			subconnector);
177 }
178 
179 /*
180  * initializes drm_device display related structures, based on the information
181  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
182  * drm_encoder, drm_mode_config
183  *
184  * Returns 0 on success
185  */
186 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
187 /* removes and deallocates the drm structures, created by the above function */
188 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
189 
190 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
191 				struct drm_plane *plane,
192 				unsigned long possible_crtcs,
193 				const struct dc_plane_cap *plane_cap);
194 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
195 			       struct drm_plane *plane,
196 			       uint32_t link_index);
197 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
198 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
199 				    uint32_t link_index,
200 				    struct amdgpu_encoder *amdgpu_encoder);
201 static int amdgpu_dm_encoder_init(struct drm_device *dev,
202 				  struct amdgpu_encoder *aencoder,
203 				  uint32_t link_index);
204 
205 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
206 
207 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
208 
209 static int amdgpu_dm_atomic_check(struct drm_device *dev,
210 				  struct drm_atomic_state *state);
211 
212 static void handle_cursor_update(struct drm_plane *plane,
213 				 struct drm_plane_state *old_plane_state);
214 
215 static const struct drm_format_info *
216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 
218 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
219 
220 static bool
221 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
222 				 struct drm_crtc_state *new_crtc_state);
223 /*
224  * dm_vblank_get_counter
225  *
226  * @brief
227  * Get counter for number of vertical blanks
228  *
229  * @param
230  * struct amdgpu_device *adev - [in] desired amdgpu device
231  * int crtc - [in] which CRTC to get the counter from
232  *
233  * @return
234  * Counter for vertical blanks
235  */
236 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
237 {
238 	if (crtc >= adev->mode_info.num_crtc)
239 		return 0;
240 	else {
241 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
242 
243 		if (acrtc->dm_irq_params.stream == NULL) {
244 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
245 				  crtc);
246 			return 0;
247 		}
248 
249 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
250 	}
251 }
252 
253 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
254 				  u32 *vbl, u32 *position)
255 {
256 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
257 
258 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
259 		return -EINVAL;
260 	else {
261 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
262 
263 		if (acrtc->dm_irq_params.stream == NULL) {
264 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
265 				  crtc);
266 			return 0;
267 		}
268 
269 		/*
270 		 * TODO rework base driver to use values directly.
271 		 * for now parse it back into reg-format
272 		 */
273 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
274 					 &v_blank_start,
275 					 &v_blank_end,
276 					 &h_position,
277 					 &v_position);
278 
279 		*position = v_position | (h_position << 16);
280 		*vbl = v_blank_start | (v_blank_end << 16);
281 	}
282 
283 	return 0;
284 }
285 
286 static bool dm_is_idle(void *handle)
287 {
288 	/* XXX todo */
289 	return true;
290 }
291 
292 static int dm_wait_for_idle(void *handle)
293 {
294 	/* XXX todo */
295 	return 0;
296 }
297 
298 static bool dm_check_soft_reset(void *handle)
299 {
300 	return false;
301 }
302 
303 static int dm_soft_reset(void *handle)
304 {
305 	/* XXX todo */
306 	return 0;
307 }
308 
309 static struct amdgpu_crtc *
310 get_crtc_by_otg_inst(struct amdgpu_device *adev,
311 		     int otg_inst)
312 {
313 	struct drm_device *dev = adev_to_drm(adev);
314 	struct drm_crtc *crtc;
315 	struct amdgpu_crtc *amdgpu_crtc;
316 
317 	if (WARN_ON(otg_inst == -1))
318 		return adev->mode_info.crtcs[0];
319 
320 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
321 		amdgpu_crtc = to_amdgpu_crtc(crtc);
322 
323 		if (amdgpu_crtc->otg_inst == otg_inst)
324 			return amdgpu_crtc;
325 	}
326 
327 	return NULL;
328 }
329 
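/* Check whether VRR is active, using the freesync state cached in the CRTC's IRQ parameters. */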
330 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
331 {
332 	return acrtc->dm_irq_params.freesync_config.state ==
333 		       VRR_STATE_ACTIVE_VARIABLE ||
334 	       acrtc->dm_irq_params.freesync_config.state ==
335 		       VRR_STATE_ACTIVE_FIXED;
336 }
337 
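/* Check whether VRR is active for the given DM CRTC state. */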
338 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
339 {
340 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
341 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
342 }
343 
344 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
345 					      struct dm_crtc_state *new_state)
346 {
347 	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
348 		return true;
349 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
350 		return true;
351 	else
352 		return false;
353 }
354 
355 /**
356  * dm_pflip_high_irq() - Handle pageflip interrupt
357  * @interrupt_params: ignored
358  *
359  * Handles the pageflip interrupt by notifying all interested parties
360  * that the pageflip has been completed.
361  */
362 static void dm_pflip_high_irq(void *interrupt_params)
363 {
364 	struct amdgpu_crtc *amdgpu_crtc;
365 	struct common_irq_params *irq_params = interrupt_params;
366 	struct amdgpu_device *adev = irq_params->adev;
367 	unsigned long flags;
368 	struct drm_pending_vblank_event *e;
369 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
370 	bool vrr_active;
371 
372 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
373 
374 	/* IRQ could occur when in initial stage */
375 	/* TODO work and BO cleanup */
376 	if (amdgpu_crtc == NULL) {
377 		DC_LOG_PFLIP("CRTC is null, returning.\n");
378 		return;
379 	}
380 
381 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
382 
383 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
384 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
385 						 amdgpu_crtc->pflip_status,
386 						 AMDGPU_FLIP_SUBMITTED,
387 						 amdgpu_crtc->crtc_id,
388 						 amdgpu_crtc);
389 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
390 		return;
391 	}
392 
393 	/* page flip completed. */
394 	e = amdgpu_crtc->event;
395 	amdgpu_crtc->event = NULL;
396 
397 	WARN_ON(!e);
398 
399 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
400 
401 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
402 	if (!vrr_active ||
403 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
404 				      &v_blank_end, &hpos, &vpos) ||
405 	    (vpos < v_blank_start)) {
406 		/* Update to correct count and vblank timestamp if racing with
407 		 * vblank irq. This also updates to the correct vblank timestamp
408 		 * even in VRR mode, as scanout is past the front-porch atm.
409 		 */
410 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
411 
412 		/* Wake up userspace by sending the pageflip event with proper
413 		 * count and timestamp of vblank of flip completion.
414 		 */
415 		if (e) {
416 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
417 
418 			/* Event sent, so done with vblank for this flip */
419 			drm_crtc_vblank_put(&amdgpu_crtc->base);
420 		}
421 	} else if (e) {
422 		/* VRR active and inside front-porch: vblank count and
423 		 * timestamp for pageflip event will only be up to date after
424 		 * drm_crtc_handle_vblank() has been executed from late vblank
425 		 * irq handler after start of back-porch (vline 0). We queue the
426 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
427 		 * updated timestamp and count, once it runs after us.
428 		 *
429 		 * We need to open-code this instead of using the helper
430 		 * drm_crtc_arm_vblank_event(), as that helper would
431 		 * call drm_crtc_accurate_vblank_count(), which we must
432 		 * not call in VRR mode while we are in front-porch!
433 		 */
434 
435 		/* sequence will be replaced by real count during send-out. */
436 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
437 		e->pipe = amdgpu_crtc->crtc_id;
438 
439 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
440 		e = NULL;
441 	}
442 
443 	/* Keep track of vblank of this flip for flip throttling. We use the
444 	 * cooked hw counter, as that one is incremented at the start of the
445 	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
446 	 * count for queueing new pageflips if vsync + VRR is enabled.
447 	 */
448 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
449 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
450 
451 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
452 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
453 
454 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
455 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
456 		     vrr_active, (int) !e);
457 }
458 
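/**
 * dm_vupdate_high_irq() - Handles the VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, in VRR mode, performs core vblank
 * handling after the end of front-porch, including below-the-range (BTR)
 * processing on pre-DCE12 ASICs.
 */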
459 static void dm_vupdate_high_irq(void *interrupt_params)
460 {
461 	struct common_irq_params *irq_params = interrupt_params;
462 	struct amdgpu_device *adev = irq_params->adev;
463 	struct amdgpu_crtc *acrtc;
464 	struct drm_device *drm_dev;
465 	struct drm_vblank_crtc *vblank;
466 	ktime_t frame_duration_ns, previous_timestamp;
467 	unsigned long flags;
468 	int vrr_active;
469 
470 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
471 
472 	if (acrtc) {
473 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
474 		drm_dev = acrtc->base.dev;
475 		vblank = &drm_dev->vblank[acrtc->base.index];
476 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
477 		frame_duration_ns = vblank->time - previous_timestamp;
478 
479 		if (frame_duration_ns > 0) {
480 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
481 						frame_duration_ns,
482 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
483 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
484 		}
485 
486 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
487 			      acrtc->crtc_id,
488 			      vrr_active);
489 
490 		/* Core vblank handling is done here after the end of front-porch in
491 		 * VRR mode, as vblank timestamping only gives valid results once
492 		 * scanout is past the front-porch. This will also deliver
493 		 * page-flip completion events that have been queued to us
494 		 * if a pageflip happened inside front-porch.
495 		 */
496 		if (vrr_active) {
497 			drm_crtc_handle_vblank(&acrtc->base);
498 
499 			/* BTR processing for pre-DCE12 ASICs */
500 			if (acrtc->dm_irq_params.stream &&
501 			    adev->family < AMDGPU_FAMILY_AI) {
502 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
503 				mod_freesync_handle_v_update(
504 				    adev->dm.freesync_module,
505 				    acrtc->dm_irq_params.stream,
506 				    &acrtc->dm_irq_params.vrr_params);
507 
508 				dc_stream_adjust_vmin_vmax(
509 				    adev->dm.dc,
510 				    acrtc->dm_irq_params.stream,
511 				    &acrtc->dm_irq_params.vrr_params.adjust);
512 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
513 			}
514 		}
515 	}
516 }
517 
518 /**
519  * dm_crtc_high_irq() - Handles CRTC interrupt
520  * @interrupt_params: used for determining the CRTC instance
521  *
522  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
523  * event handler.
524  */
525 static void dm_crtc_high_irq(void *interrupt_params)
526 {
527 	struct common_irq_params *irq_params = interrupt_params;
528 	struct amdgpu_device *adev = irq_params->adev;
529 	struct amdgpu_crtc *acrtc;
530 	unsigned long flags;
531 	int vrr_active;
532 
533 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
534 	if (!acrtc)
535 		return;
536 
537 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
538 
539 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
540 		      vrr_active, acrtc->dm_irq_params.active_planes);
541 
542 	/*
543 	 * Core vblank handling at the start of front-porch is only possible
544 	 * in non-VRR mode, as only there vblank timestamping gives valid
545 	 * results while inside the front-porch. Otherwise defer it to
546 	 * dm_vupdate_high_irq after the end of front-porch.
547 	 */
548 	if (!vrr_active)
549 		drm_crtc_handle_vblank(&acrtc->base);
550 
551 	/*
552 	 * The following must happen at the start of vblank, for CRC
553 	 * computation and below-the-range (BTR) support in VRR mode.
554 	 */
555 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
556 
557 	/* BTR updates need to happen before VUPDATE on Vega and above. */
558 	if (adev->family < AMDGPU_FAMILY_AI)
559 		return;
560 
561 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
562 
563 	if (acrtc->dm_irq_params.stream &&
564 	    acrtc->dm_irq_params.vrr_params.supported &&
565 	    acrtc->dm_irq_params.freesync_config.state ==
566 		    VRR_STATE_ACTIVE_VARIABLE) {
567 		mod_freesync_handle_v_update(adev->dm.freesync_module,
568 					     acrtc->dm_irq_params.stream,
569 					     &acrtc->dm_irq_params.vrr_params);
570 
571 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
572 					   &acrtc->dm_irq_params.vrr_params.adjust);
573 	}
574 
575 	/*
576 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
577 	 * In that case, pageflip completion interrupts won't fire and pageflip
578 	 * completion events won't get delivered. Prevent this by sending
579 	 * pending pageflip events from here if a flip is still pending.
580 	 *
581 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
582 	 * avoid race conditions between flip programming and completion,
583 	 * which could cause too early flip completion events.
584 	 */
585 	if (adev->family >= AMDGPU_FAMILY_RV &&
586 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
587 	    acrtc->dm_irq_params.active_planes == 0) {
588 		if (acrtc->event) {
589 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
590 			acrtc->event = NULL;
591 			drm_crtc_vblank_put(&acrtc->base);
592 		}
593 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
594 	}
595 
596 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
597 }
598 
599 #if defined(CONFIG_DRM_AMD_DC_DCN)
600 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
601 /**
602  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
603  * DCN generation ASICs
604  * @interrupt_params: interrupt parameters
605  *
606  * Used to set crc window/read out crc value at vertical line 0 position
607  */
608 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
609 {
610 	struct common_irq_params *irq_params = interrupt_params;
611 	struct amdgpu_device *adev = irq_params->adev;
612 	struct amdgpu_crtc *acrtc;
613 
614 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
615 
616 	if (!acrtc)
617 		return;
618 
619 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
620 }
621 #endif
622 
623 /**
624  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
625  * @adev: amdgpu_device pointer
626  * @notify: dmub notification structure
627  *
628  * Dmub AUX or SET_CONFIG command completion processing callback.
629  * Copies the dmub notification to DM, where it is read by the AUX command
630  * issuing thread, and signals the event to wake up that thread.
631  */
632 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
633 {
634 	if (adev->dm.dmub_notify)
635 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
636 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
637 		complete(&adev->dm.dmub_aux_transfer_done);
638 }
639 
640 /**
641  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
642  * @adev: amdgpu_device pointer
643  * @notify: dmub notification structure
644  *
645  * Dmub Hpd interrupt processing callback. Gets the display index through the
646  * link index and calls a helper to do the processing.
647  */
648 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
649 {
650 	struct amdgpu_dm_connector *aconnector;
651 	struct drm_connector *connector;
652 	struct drm_connector_list_iter iter;
653 	struct dc_link *link;
654 	uint8_t link_index = 0;
655 	struct drm_device *dev = adev->dm.ddev;
656 
657 	if (adev == NULL)
658 		return;
659 
660 	if (notify == NULL) {
661 		DRM_ERROR("DMUB HPD callback notification was NULL");
662 		return;
663 	}
664 
665 	if (notify->link_index >= adev->dm.dc->link_count) {
666 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
667 		return;
668 	}
669 
670 	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
671 
672 	link_index = notify->link_index;
673 
674 	link = adev->dm.dc->links[link_index];
675 
676 	drm_connector_list_iter_begin(dev, &iter);
677 	drm_for_each_connector_iter(connector, &iter) {
678 		aconnector = to_amdgpu_dm_connector(connector);
679 		if (link && aconnector->dc_link == link) {
680 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
681 			handle_hpd_irq_helper(aconnector);
682 			break;
683 		}
684 	}
685 	drm_connector_list_iter_end(&iter);
686 	drm_modeset_unlock(&dev->mode_config.connection_mutex);
687 
688 }
689 
690 /**
691  * register_dmub_notify_callback - Sets callback for DMUB notify
692  * @adev: amdgpu_device pointer
693  * @type: Type of dmub notification
694  * @callback: Dmub interrupt callback function
695  * @dmub_int_thread_offload: offload indicator
696  *
697  * API to register a dmub callback handler for a dmub notification
698  * Also sets an indicator whether the callback processing is to be offloaded
699  * to the dmub interrupt handling thread.
700  * Return: true if successfully registered, false if the type is invalid or the callback is NULL
701  */
702 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
703 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
704 {
705 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
706 		adev->dm.dmub_callback[type] = callback;
707 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
708 	} else
709 		return false;
710 
711 	return true;
712 }
713 
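/* Worker that runs the registered DMUB notification callback outside of interrupt context. */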
714 static void dm_handle_hpd_work(struct work_struct *work)
715 {
716 	struct dmub_hpd_work *dmub_hpd_wrk;
717 
718 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
719 
720 	if (!dmub_hpd_wrk->dmub_notify) {
721 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
722 		return;
723 	}
724 
725 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
726 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
727 		dmub_hpd_wrk->dmub_notify);
728 	}
729 	kfree(dmub_hpd_wrk);
730 
731 }
732 
733 #define DMUB_TRACE_MAX_READ 64
734 /**
735  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
736  * @interrupt_params: used for determining the Outbox instance
737  *
738  * Handles the Outbox interrupt by processing pending DMUB notifications and
739  * draining the DMCUB trace buffer.
740  */
741 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
742 {
743 	struct dmub_notification notify;
744 	struct common_irq_params *irq_params = interrupt_params;
745 	struct amdgpu_device *adev = irq_params->adev;
746 	struct amdgpu_display_manager *dm = &adev->dm;
747 	struct dmcub_trace_buf_entry entry = { 0 };
748 	uint32_t count = 0;
749 	struct dmub_hpd_work *dmub_hpd_wrk;
750 
751 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
752 		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
753 		if (!dmub_hpd_wrk) {
754 			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
755 			return;
756 		}
757 		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
758 
759 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
760 			do {
761 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
762 				if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
763 					DRM_ERROR("DM: notify type %d exceeds the array size %zu!", notify.type,
764 						  ARRAY_SIZE(dm->dmub_thread_offload));
765 					continue;
766 				}
767 				if (dm->dmub_thread_offload[notify.type]) {
768 					dmub_hpd_wrk->dmub_notify = &notify;
769 					dmub_hpd_wrk->adev = adev;
770 					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
771 				} else {
772 					dm->dmub_callback[notify.type](adev, &notify);
773 				}
774 
775 			} while (notify.pending_notification);
776 
777 		} else {
778 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
779 		}
780 	}
781 
782 
783 	do {
784 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
785 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
786 							entry.param0, entry.param1);
787 
788 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
789 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
790 		} else
791 			break;
792 
793 		count++;
794 
795 	} while (count <= DMUB_TRACE_MAX_READ);
796 
797 	ASSERT(count <= DMUB_TRACE_MAX_READ);
798 }
799 #endif
800 
801 static int dm_set_clockgating_state(void *handle,
802 		  enum amd_clockgating_state state)
803 {
804 	return 0;
805 }
806 
807 static int dm_set_powergating_state(void *handle,
808 		  enum amd_powergating_state state)
809 {
810 	return 0;
811 }
812 
813 /* Prototypes of private functions */
814 static int dm_early_init(void *handle);
815 
816 /* Allocate memory for FBC compressed data  */
817 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
818 {
819 	struct drm_device *dev = connector->dev;
820 	struct amdgpu_device *adev = drm_to_adev(dev);
821 	struct dm_compressor_info *compressor = &adev->dm.compressor;
822 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
823 	struct drm_display_mode *mode;
824 	unsigned long max_size = 0;
825 
826 	if (adev->dm.dc->fbc_compressor == NULL)
827 		return;
828 
829 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
830 		return;
831 
832 	if (compressor->bo_ptr)
833 		return;
834 
835 
836 	list_for_each_entry(mode, &connector->modes, head) {
837 		if (max_size < mode->htotal * mode->vtotal)
838 			max_size = mode->htotal * mode->vtotal;
839 	}
840 
841 	if (max_size) {
842 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
843 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
844 			    &compressor->gpu_addr, &compressor->cpu_addr);
845 
846 		if (r)
847 			DRM_ERROR("DM: Failed to initialize FBC\n");
848 		else {
849 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
850 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
851 		}
852 
853 	}
854 
855 }
856 
857 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
858 					  int pipe, bool *enabled,
859 					  unsigned char *buf, int max_bytes)
860 {
861 	struct drm_device *dev = dev_get_drvdata(kdev);
862 	struct amdgpu_device *adev = drm_to_adev(dev);
863 	struct drm_connector *connector;
864 	struct drm_connector_list_iter conn_iter;
865 	struct amdgpu_dm_connector *aconnector;
866 	int ret = 0;
867 
868 	*enabled = false;
869 
870 	mutex_lock(&adev->dm.audio_lock);
871 
872 	drm_connector_list_iter_begin(dev, &conn_iter);
873 	drm_for_each_connector_iter(connector, &conn_iter) {
874 		aconnector = to_amdgpu_dm_connector(connector);
875 		if (aconnector->audio_inst != port)
876 			continue;
877 
878 		*enabled = true;
879 		ret = drm_eld_size(connector->eld);
880 		memcpy(buf, connector->eld, min(max_bytes, ret));
881 
882 		break;
883 	}
884 	drm_connector_list_iter_end(&conn_iter);
885 
886 	mutex_unlock(&adev->dm.audio_lock);
887 
888 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
889 
890 	return ret;
891 }
892 
893 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
894 	.get_eld = amdgpu_dm_audio_component_get_eld,
895 };
896 
897 static int amdgpu_dm_audio_component_bind(struct device *kdev,
898 				       struct device *hda_kdev, void *data)
899 {
900 	struct drm_device *dev = dev_get_drvdata(kdev);
901 	struct amdgpu_device *adev = drm_to_adev(dev);
902 	struct drm_audio_component *acomp = data;
903 
904 	acomp->ops = &amdgpu_dm_audio_component_ops;
905 	acomp->dev = kdev;
906 	adev->dm.audio_component = acomp;
907 
908 	return 0;
909 }
910 
911 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
912 					  struct device *hda_kdev, void *data)
913 {
914 	struct drm_device *dev = dev_get_drvdata(kdev);
915 	struct amdgpu_device *adev = drm_to_adev(dev);
916 	struct drm_audio_component *acomp = data;
917 
918 	acomp->ops = NULL;
919 	acomp->dev = NULL;
920 	adev->dm.audio_component = NULL;
921 }
922 
923 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
924 	.bind	= amdgpu_dm_audio_component_bind,
925 	.unbind	= amdgpu_dm_audio_component_unbind,
926 };
927 
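/* Register the DM audio component and initialize the state of each audio pin exposed by DC. */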
928 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
929 {
930 	int i, ret;
931 
932 	if (!amdgpu_audio)
933 		return 0;
934 
935 	adev->mode_info.audio.enabled = true;
936 
937 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
938 
939 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
940 		adev->mode_info.audio.pin[i].channels = -1;
941 		adev->mode_info.audio.pin[i].rate = -1;
942 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
943 		adev->mode_info.audio.pin[i].status_bits = 0;
944 		adev->mode_info.audio.pin[i].category_code = 0;
945 		adev->mode_info.audio.pin[i].connected = false;
946 		adev->mode_info.audio.pin[i].id =
947 			adev->dm.dc->res_pool->audios[i]->inst;
948 		adev->mode_info.audio.pin[i].offset = 0;
949 	}
950 
951 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
952 	if (ret < 0)
953 		return ret;
954 
955 	adev->dm.audio_registered = true;
956 
957 	return 0;
958 }
959 
960 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
961 {
962 	if (!amdgpu_audio)
963 		return;
964 
965 	if (!adev->mode_info.audio.enabled)
966 		return;
967 
968 	if (adev->dm.audio_registered) {
969 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
970 		adev->dm.audio_registered = false;
971 	}
972 
973 	/* TODO: Disable audio? */
974 
975 	adev->mode_info.audio.enabled = false;
976 }
977 
978 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
979 {
980 	struct drm_audio_component *acomp = adev->dm.audio_component;
981 
982 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
983 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
984 
985 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
986 						 pin, -1);
987 	}
988 }
989 
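/*
 * Copy the DMUB firmware, BIOS data and scratch regions into framebuffer memory,
 * initialize the DMUB hardware and hook the resulting DMUB service into DC.
 */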
990 static int dm_dmub_hw_init(struct amdgpu_device *adev)
991 {
992 	const struct dmcub_firmware_header_v1_0 *hdr;
993 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
994 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
995 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
996 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
997 	struct abm *abm = adev->dm.dc->res_pool->abm;
998 	struct dmub_srv_hw_params hw_params;
999 	enum dmub_status status;
1000 	const unsigned char *fw_inst_const, *fw_bss_data;
1001 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1002 	bool has_hw_support;
1003 
1004 	if (!dmub_srv)
1005 		/* DMUB isn't supported on the ASIC. */
1006 		return 0;
1007 
1008 	if (!fb_info) {
1009 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1010 		return -EINVAL;
1011 	}
1012 
1013 	if (!dmub_fw) {
1014 		/* Firmware required for DMUB support. */
1015 		DRM_ERROR("No firmware provided for DMUB.\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1020 	if (status != DMUB_STATUS_OK) {
1021 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1022 		return -EINVAL;
1023 	}
1024 
1025 	if (!has_hw_support) {
1026 		DRM_INFO("DMUB unsupported on ASIC\n");
1027 		return 0;
1028 	}
1029 
1030 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1031 
1032 	fw_inst_const = dmub_fw->data +
1033 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1034 			PSP_HEADER_BYTES;
1035 
1036 	fw_bss_data = dmub_fw->data +
1037 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1038 		      le32_to_cpu(hdr->inst_const_bytes);
1039 
1040 	/* Copy firmware and bios info into FB memory. */
1041 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1042 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1043 
1044 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1045 
1046 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1047 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1048 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1049 	 * will be done by dm_dmub_hw_init
1050 	 */
1051 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1052 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1053 				fw_inst_const_size);
1054 	}
1055 
1056 	if (fw_bss_data_size)
1057 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1058 		       fw_bss_data, fw_bss_data_size);
1059 
1060 	/* Copy firmware bios info into FB memory. */
1061 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1062 	       adev->bios_size);
1063 
1064 	/* Reset regions that need to be reset. */
1065 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1066 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1067 
1068 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1069 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1070 
1071 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1072 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1073 
1074 	/* Initialize hardware. */
1075 	memset(&hw_params, 0, sizeof(hw_params));
1076 	hw_params.fb_base = adev->gmc.fb_start;
1077 	hw_params.fb_offset = adev->gmc.aper_base;
1078 
1079 	/* backdoor load firmware and trigger dmub running */
1080 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1081 		hw_params.load_inst_const = true;
1082 
1083 	if (dmcu)
1084 		hw_params.psp_version = dmcu->psp_version;
1085 
1086 	for (i = 0; i < fb_info->num_fb; ++i)
1087 		hw_params.fb[i] = &fb_info->fb[i];
1088 
1089 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1090 	if (status != DMUB_STATUS_OK) {
1091 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1092 		return -EINVAL;
1093 	}
1094 
1095 	/* Wait for firmware load to finish. */
1096 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1097 	if (status != DMUB_STATUS_OK)
1098 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1099 
1100 	/* Init DMCU and ABM if available. */
1101 	if (dmcu && abm) {
1102 		dmcu->funcs->dmcu_init(dmcu);
1103 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1104 	}
1105 
1106 	if (!adev->dm.dc->ctx->dmub_srv)
1107 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1108 	if (!adev->dm.dc->ctx->dmub_srv) {
1109 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1110 		return -ENOMEM;
1111 	}
1112 
1113 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1114 		 adev->dm.dmcub_fw_version);
1115 
1116 	return 0;
1117 }
1118 
1119 #if defined(CONFIG_DRM_AMD_DC_DCN)
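/*
 * Translate the GMC's system aperture, frame buffer and GART page table
 * addresses into the DC physical address space configuration.
 */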
1120 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1121 {
1122 	uint64_t pt_base;
1123 	uint32_t logical_addr_low;
1124 	uint32_t logical_addr_high;
1125 	uint32_t agp_base, agp_bot, agp_top;
1126 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1127 
1128 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1129 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1130 
1131 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1132 		/*
1133 		 * Raven2 has a HW issue that prevents it from using the VRAM which
1134 		 * lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
1135 		 * raise the system aperture high address (add 1) to get rid of
1136 		 * the VM fault and hardware hang.
1137 		 */
1138 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1139 	else
1140 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1141 
1142 	agp_base = 0;
1143 	agp_bot = adev->gmc.agp_start >> 24;
1144 	agp_top = adev->gmc.agp_end >> 24;
1145 
1146 
1147 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1148 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1149 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1150 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1151 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1152 	page_table_base.low_part = lower_32_bits(pt_base);
1153 
1154 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1155 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1156 
1157 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1158 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1159 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1160 
1161 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1162 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1163 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1164 
1165 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1166 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1167 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1168 
1169 	pa_config->is_hvm_enabled = 0;
1170 
1171 }
1172 #endif
1173 #if defined(CONFIG_DRM_AMD_DC_DCN)
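/*
 * Deferred work that tracks how many CRTCs have vblank interrupts enabled and
 * toggles idle optimizations (MALL) and PSR accordingly.
 */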
1174 static void vblank_control_worker(struct work_struct *work)
1175 {
1176 	struct vblank_control_work *vblank_work =
1177 		container_of(work, struct vblank_control_work, work);
1178 	struct amdgpu_display_manager *dm = vblank_work->dm;
1179 
1180 	mutex_lock(&dm->dc_lock);
1181 
1182 	if (vblank_work->enable)
1183 		dm->active_vblank_irq_count++;
1184 	else if (dm->active_vblank_irq_count)
1185 		dm->active_vblank_irq_count--;
1186 
1187 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1188 
1189 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1190 
1191 	/* Control PSR based on vblank requirements from OS */
1192 	if (vblank_work->stream && vblank_work->stream->link) {
1193 		if (vblank_work->enable) {
1194 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1195 				amdgpu_dm_psr_disable(vblank_work->stream);
1196 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1197 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1198 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1199 			amdgpu_dm_psr_enable(vblank_work->stream);
1200 		}
1201 	}
1202 
1203 	mutex_unlock(&dm->dc_lock);
1204 
1205 	dc_stream_release(vblank_work->stream);
1206 
1207 	kfree(vblank_work);
1208 }
1209 
1210 #endif
1211 
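/*
 * Deferred HPD RX IRQ work: re-detects the sink and, outside of GPU reset,
 * handles automated test requests and DP link-loss recovery for the link.
 */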
1212 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1213 {
1214 	struct hpd_rx_irq_offload_work *offload_work;
1215 	struct amdgpu_dm_connector *aconnector;
1216 	struct dc_link *dc_link;
1217 	struct amdgpu_device *adev;
1218 	enum dc_connection_type new_connection_type = dc_connection_none;
1219 	unsigned long flags;
1220 
1221 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1222 	aconnector = offload_work->offload_wq->aconnector;
1223 
1224 	if (!aconnector) {
1225 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1226 		goto skip;
1227 	}
1228 
1229 	adev = drm_to_adev(aconnector->base.dev);
1230 	dc_link = aconnector->dc_link;
1231 
1232 	mutex_lock(&aconnector->hpd_lock);
1233 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1234 		DRM_ERROR("KMS: Failed to detect connector\n");
1235 	mutex_unlock(&aconnector->hpd_lock);
1236 
1237 	if (new_connection_type == dc_connection_none)
1238 		goto skip;
1239 
1240 	if (amdgpu_in_reset(adev))
1241 		goto skip;
1242 
1243 	mutex_lock(&adev->dm.dc_lock);
1244 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1245 		dc_link_dp_handle_automated_test(dc_link);
1246 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1247 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1248 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1249 		dc_link_dp_handle_link_loss(dc_link);
1250 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1251 		offload_work->offload_wq->is_handling_link_loss = false;
1252 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1253 	}
1254 	mutex_unlock(&adev->dm.dc_lock);
1255 
1256 skip:
1257 	kfree(offload_work);
1258 
1259 }
1260 
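/* Allocate one single-threaded HPD RX IRQ offload workqueue per link. */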
1261 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1262 {
1263 	int max_caps = dc->caps.max_links;
1264 	int i = 0;
1265 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1266 
1267 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1268 
1269 	if (!hpd_rx_offload_wq)
1270 		return NULL;
1271 
1272 
1273 	for (i = 0; i < max_caps; i++) {
1274 		hpd_rx_offload_wq[i].wq =
1275 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1276 
1277 		if (hpd_rx_offload_wq[i].wq == NULL) {
1278 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1279 			return NULL;
1280 		}
1281 
1282 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1283 	}
1284 
1285 	return hpd_rx_offload_wq;
1286 }
1287 
1288 static int amdgpu_dm_init(struct amdgpu_device *adev)
1289 {
1290 	struct dc_init_data init_data;
1291 #ifdef CONFIG_DRM_AMD_DC_HDCP
1292 	struct dc_callback_init init_params;
1293 #endif
1294 	int r;
1295 
1296 	adev->dm.ddev = adev_to_drm(adev);
1297 	adev->dm.adev = adev;
1298 
1299 	/* Zero all the fields */
1300 	memset(&init_data, 0, sizeof(init_data));
1301 #ifdef CONFIG_DRM_AMD_DC_HDCP
1302 	memset(&init_params, 0, sizeof(init_params));
1303 #endif
1304 
1305 	mutex_init(&adev->dm.dc_lock);
1306 	mutex_init(&adev->dm.audio_lock);
1307 #if defined(CONFIG_DRM_AMD_DC_DCN)
1308 	spin_lock_init(&adev->dm.vblank_lock);
1309 #endif
1310 
1311 	if (amdgpu_dm_irq_init(adev)) {
1312 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1313 		goto error;
1314 	}
1315 
1316 	init_data.asic_id.chip_family = adev->family;
1317 
1318 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1319 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1320 
1321 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1322 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1323 	init_data.asic_id.atombios_base_address =
1324 		adev->mode_info.atom_context->bios;
1325 
1326 	init_data.driver = adev;
1327 
1328 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1329 
1330 	if (!adev->dm.cgs_device) {
1331 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1332 		goto error;
1333 	}
1334 
1335 	init_data.cgs_device = adev->dm.cgs_device;
1336 
1337 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1338 
1339 	switch (adev->asic_type) {
1340 	case CHIP_CARRIZO:
1341 	case CHIP_STONEY:
1342 	case CHIP_RAVEN:
1343 	case CHIP_RENOIR:
1344 		init_data.flags.gpu_vm_support = true;
1345 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1346 			init_data.flags.disable_dmcu = true;
1347 		break;
1348 	case CHIP_VANGOGH:
1349 	case CHIP_YELLOW_CARP:
1350 		init_data.flags.gpu_vm_support = true;
1351 		break;
1352 	default:
1353 		break;
1354 	}
1355 
1356 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1357 		init_data.flags.fbc_support = true;
1358 
1359 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1360 		init_data.flags.multi_mon_pp_mclk_switch = true;
1361 
1362 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1363 		init_data.flags.disable_fractional_pwm = true;
1364 
1365 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1366 		init_data.flags.edp_no_power_sequencing = true;
1367 
1368 	init_data.flags.power_down_display_on_boot = true;
1369 
1370 	INIT_LIST_HEAD(&adev->dm.da_list);
1371 	/* Display Core create. */
1372 	adev->dm.dc = dc_create(&init_data);
1373 
1374 	if (adev->dm.dc) {
1375 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1376 	} else {
1377 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1378 		goto error;
1379 	}
1380 
1381 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1382 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1383 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1384 	}
1385 
1386 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1387 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1388 
1389 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1390 		adev->dm.dc->debug.disable_stutter = true;
1391 
1392 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1393 		adev->dm.dc->debug.disable_dsc = true;
1394 
1395 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1396 		adev->dm.dc->debug.disable_clock_gate = true;
1397 
1398 	r = dm_dmub_hw_init(adev);
1399 	if (r) {
1400 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1401 		goto error;
1402 	}
1403 
1404 	dc_hardware_init(adev->dm.dc);
1405 
1406 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1407 	if (!adev->dm.hpd_rx_offload_wq) {
1408 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1409 		goto error;
1410 	}
1411 
1412 #if defined(CONFIG_DRM_AMD_DC_DCN)
1413 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1414 		struct dc_phy_addr_space_config pa_config;
1415 
1416 		mmhub_read_system_context(adev, &pa_config);
1417 
1418 		// Call the DC init_memory func
1419 		dc_setup_system_context(adev->dm.dc, &pa_config);
1420 	}
1421 #endif
1422 
1423 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1424 	if (!adev->dm.freesync_module) {
1425 		DRM_ERROR(
1426 		"amdgpu: failed to initialize freesync_module.\n");
1427 	} else
1428 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1429 				adev->dm.freesync_module);
1430 
1431 	amdgpu_dm_init_color_mod();
1432 
1433 #if defined(CONFIG_DRM_AMD_DC_DCN)
1434 	if (adev->dm.dc->caps.max_links > 0) {
1435 		adev->dm.vblank_control_workqueue =
1436 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1437 		if (!adev->dm.vblank_control_workqueue)
1438 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1439 	}
1440 #endif
1441 
1442 #ifdef CONFIG_DRM_AMD_DC_HDCP
1443 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1444 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1445 
1446 		if (!adev->dm.hdcp_workqueue)
1447 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1448 		else
1449 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1450 
1451 		dc_init_callbacks(adev->dm.dc, &init_params);
1452 	}
1453 #endif
1454 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1455 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1456 #endif
1457 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1458 		init_completion(&adev->dm.dmub_aux_transfer_done);
1459 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1460 		if (!adev->dm.dmub_notify) {
1461 			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
1462 			goto error;
1463 		}
1464 
1465 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1466 		if (!adev->dm.delayed_hpd_wq) {
1467 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1468 			goto error;
1469 		}
1470 
1471 		amdgpu_dm_outbox_init(adev);
1472 #if defined(CONFIG_DRM_AMD_DC_DCN)
1473 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1474 			dmub_aux_setconfig_callback, false)) {
1475 			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1476 			goto error;
1477 		}
1478 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1479 			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1480 			goto error;
1481 		}
1482 #endif
1483 	}
1484 
1485 	if (amdgpu_dm_initialize_drm_device(adev)) {
1486 		DRM_ERROR(
1487 		"amdgpu: failed to initialize sw for display support.\n");
1488 		goto error;
1489 	}
1490 
1491 	/* create fake encoders for MST */
1492 	dm_dp_create_fake_mst_encoders(adev);
1493 
1494 	/* TODO: Add_display_info? */
1495 
1496 	/* TODO use dynamic cursor width */
1497 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1498 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1499 
1500 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1501 		DRM_ERROR(
1502 		"amdgpu: failed to initialize sw for display support.\n");
1503 		goto error;
1504 	}
1505 
1506 
1507 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1508 
1509 	return 0;
1510 error:
1511 	amdgpu_dm_fini(adev);
1512 
1513 	return -EINVAL;
1514 }
1515 
1516 static int amdgpu_dm_early_fini(void *handle)
1517 {
1518 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1519 
1520 	amdgpu_dm_audio_fini(adev);
1521 
1522 	return 0;
1523 }
1524 
1525 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1526 {
1527 	int i;
1528 
1529 #if defined(CONFIG_DRM_AMD_DC_DCN)
1530 	if (adev->dm.vblank_control_workqueue) {
1531 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1532 		adev->dm.vblank_control_workqueue = NULL;
1533 	}
1534 #endif
1535 
1536 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1537 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1538 	}
1539 
1540 	amdgpu_dm_destroy_drm_device(&adev->dm);
1541 
1542 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1543 	if (adev->dm.crc_rd_wrk) {
1544 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1545 		kfree(adev->dm.crc_rd_wrk);
1546 		adev->dm.crc_rd_wrk = NULL;
1547 	}
1548 #endif
1549 #ifdef CONFIG_DRM_AMD_DC_HDCP
1550 	if (adev->dm.hdcp_workqueue) {
1551 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1552 		adev->dm.hdcp_workqueue = NULL;
1553 	}
1554 
1555 	if (adev->dm.dc)
1556 		dc_deinit_callbacks(adev->dm.dc);
1557 #endif
1558 
1559 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1560 
1561 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1562 		kfree(adev->dm.dmub_notify);
1563 		adev->dm.dmub_notify = NULL;
1564 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1565 		adev->dm.delayed_hpd_wq = NULL;
1566 	}
1567 
1568 	if (adev->dm.dmub_bo)
1569 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1570 				      &adev->dm.dmub_bo_gpu_addr,
1571 				      &adev->dm.dmub_bo_cpu_addr);
1572 
1573 	/* DC Destroy TODO: Replace destroy DAL */
1574 	if (adev->dm.dc)
1575 		dc_destroy(&adev->dm.dc);
1576 	/*
1577 	 * TODO: pageflip, vblank interrupt
1578 	 *
1579 	 * amdgpu_dm_irq_fini(adev);
1580 	 */
1581 
1582 	if (adev->dm.cgs_device) {
1583 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1584 		adev->dm.cgs_device = NULL;
1585 	}
1586 	if (adev->dm.freesync_module) {
1587 		mod_freesync_destroy(adev->dm.freesync_module);
1588 		adev->dm.freesync_module = NULL;
1589 	}
1590 
1591 	if (adev->dm.hpd_rx_offload_wq) {
1592 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1593 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1594 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1595 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1596 			}
1597 		}
1598 
1599 		kfree(adev->dm.hpd_rx_offload_wq);
1600 		adev->dm.hpd_rx_offload_wq = NULL;
1601 	}
1602 
1603 	mutex_destroy(&adev->dm.audio_lock);
1604 	mutex_destroy(&adev->dm.dc_lock);
1605 
1606 	return;
1607 }
1608 
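/* Request and validate the DMCU firmware for ASICs that need it and register it for PSP loading. */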
1609 static int load_dmcu_fw(struct amdgpu_device *adev)
1610 {
1611 	const char *fw_name_dmcu = NULL;
1612 	int r;
1613 	const struct dmcu_firmware_header_v1_0 *hdr;
1614 
1615 	switch (adev->asic_type) {
1616 #if defined(CONFIG_DRM_AMD_DC_SI)
1617 	case CHIP_TAHITI:
1618 	case CHIP_PITCAIRN:
1619 	case CHIP_VERDE:
1620 	case CHIP_OLAND:
1621 #endif
1622 	case CHIP_BONAIRE:
1623 	case CHIP_HAWAII:
1624 	case CHIP_KAVERI:
1625 	case CHIP_KABINI:
1626 	case CHIP_MULLINS:
1627 	case CHIP_TONGA:
1628 	case CHIP_FIJI:
1629 	case CHIP_CARRIZO:
1630 	case CHIP_STONEY:
1631 	case CHIP_POLARIS11:
1632 	case CHIP_POLARIS10:
1633 	case CHIP_POLARIS12:
1634 	case CHIP_VEGAM:
1635 	case CHIP_VEGA10:
1636 	case CHIP_VEGA12:
1637 	case CHIP_VEGA20:
1638 	case CHIP_NAVI10:
1639 	case CHIP_NAVI14:
1640 	case CHIP_RENOIR:
1641 	case CHIP_SIENNA_CICHLID:
1642 	case CHIP_NAVY_FLOUNDER:
1643 	case CHIP_DIMGREY_CAVEFISH:
1644 	case CHIP_BEIGE_GOBY:
1645 	case CHIP_VANGOGH:
1646 	case CHIP_YELLOW_CARP:
1647 		return 0;
1648 	case CHIP_NAVI12:
1649 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1650 		break;
1651 	case CHIP_RAVEN:
1652 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1653 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1654 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1655 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1656 		else
1657 			return 0;
1658 		break;
1659 	default:
1660 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1661 		return -EINVAL;
1662 	}
1663 
1664 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1665 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1666 		return 0;
1667 	}
1668 
1669 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1670 	if (r == -ENOENT) {
1671 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1672 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1673 		adev->dm.fw_dmcu = NULL;
1674 		return 0;
1675 	}
1676 	if (r) {
1677 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1678 			fw_name_dmcu);
1679 		return r;
1680 	}
1681 
1682 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1683 	if (r) {
1684 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1685 			fw_name_dmcu);
1686 		release_firmware(adev->dm.fw_dmcu);
1687 		adev->dm.fw_dmcu = NULL;
1688 		return r;
1689 	}
1690 
1691 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1692 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1693 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1694 	adev->firmware.fw_size +=
1695 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1696 
1697 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1698 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1699 	adev->firmware.fw_size +=
1700 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1701 
1702 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1703 
1704 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1705 
1706 	return 0;
1707 }
1708 
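/*
 * Register access helpers handed to the DMUB service so that it can read and
 * write DC registers through the DM's dc context.
 */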
1709 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1710 {
1711 	struct amdgpu_device *adev = ctx;
1712 
1713 	return dm_read_reg(adev->dm.dc->ctx, address);
1714 }
1715 
1716 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1717 				     uint32_t value)
1718 {
1719 	struct amdgpu_device *adev = ctx;
1720 
1721 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1722 }
1723 
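/*
 * dm_dmub_sw_init() - Software init for the DMUB service.
 *
 * Selects and validates the DMUB firmware for the current ASIC, registers it
 * for PSP loading when applicable, creates the DMUB service, and allocates the
 * framebuffer memory backing the DMUB regions.
 */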
1724 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1725 {
1726 	struct dmub_srv_create_params create_params;
1727 	struct dmub_srv_region_params region_params;
1728 	struct dmub_srv_region_info region_info;
1729 	struct dmub_srv_fb_params fb_params;
1730 	struct dmub_srv_fb_info *fb_info;
1731 	struct dmub_srv *dmub_srv;
1732 	const struct dmcub_firmware_header_v1_0 *hdr;
1733 	const char *fw_name_dmub;
1734 	enum dmub_asic dmub_asic;
1735 	enum dmub_status status;
1736 	int r;
1737 
1738 	switch (adev->asic_type) {
1739 	case CHIP_RENOIR:
1740 		dmub_asic = DMUB_ASIC_DCN21;
1741 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1742 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1743 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1744 		break;
1745 	case CHIP_SIENNA_CICHLID:
1746 		dmub_asic = DMUB_ASIC_DCN30;
1747 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1748 		break;
1749 	case CHIP_NAVY_FLOUNDER:
1750 		dmub_asic = DMUB_ASIC_DCN30;
1751 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1752 		break;
1753 	case CHIP_VANGOGH:
1754 		dmub_asic = DMUB_ASIC_DCN301;
1755 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1756 		break;
1757 	case CHIP_DIMGREY_CAVEFISH:
1758 		dmub_asic = DMUB_ASIC_DCN302;
1759 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1760 		break;
1761 	case CHIP_BEIGE_GOBY:
1762 		dmub_asic = DMUB_ASIC_DCN303;
1763 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1764 		break;
1765 	case CHIP_YELLOW_CARP:
1766 		dmub_asic = DMUB_ASIC_DCN31;
1767 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1768 		break;
1769 
1770 	default:
1771 		/* ASIC doesn't support DMUB. */
1772 		return 0;
1773 	}
1774 
1775 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1776 	if (r) {
1777 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
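		/* Carry on without DMUB rather than failing device init. */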
1778 		return 0;
1779 	}
1780 
1781 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1782 	if (r) {
1783 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1784 		return 0;
1785 	}
1786 
1787 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1788 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1789 
1790 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1791 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1792 			AMDGPU_UCODE_ID_DMCUB;
1793 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1794 			adev->dm.dmub_fw;
1795 		adev->firmware.fw_size +=
1796 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1797 
1798 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1799 			 adev->dm.dmcub_fw_version);
1800 	}
1801 
1802 
1803 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1804 	dmub_srv = adev->dm.dmub_srv;
1805 
1806 	if (!dmub_srv) {
1807 		DRM_ERROR("Failed to allocate DMUB service!\n");
1808 		return -ENOMEM;
1809 	}
1810 
1811 	memset(&create_params, 0, sizeof(create_params));
1812 	create_params.user_ctx = adev;
1813 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1814 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1815 	create_params.asic = dmub_asic;
1816 
1817 	/* Create the DMUB service. */
1818 	status = dmub_srv_create(dmub_srv, &create_params);
1819 	if (status != DMUB_STATUS_OK) {
1820 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1821 		return -EINVAL;
1822 	}
1823 
1824 	/* Calculate the size of all the regions for the DMUB service. */
1825 	memset(&region_params, 0, sizeof(region_params));
1826 
1827 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1828 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1829 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1830 	region_params.vbios_size = adev->bios_size;
1831 	region_params.fw_bss_data = region_params.bss_data_size ?
1832 		adev->dm.dmub_fw->data +
1833 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1834 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1835 	region_params.fw_inst_const =
1836 		adev->dm.dmub_fw->data +
1837 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1838 		PSP_HEADER_BYTES;
1839 
1840 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1841 					   &region_info);
1842 
1843 	if (status != DMUB_STATUS_OK) {
1844 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1845 		return -EINVAL;
1846 	}
1847 
1848 	/*
1849 	 * Allocate a framebuffer based on the total size of all the regions.
1850 	 * TODO: Move this into GART.
1851 	 */
1852 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1853 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1854 				    &adev->dm.dmub_bo_gpu_addr,
1855 				    &adev->dm.dmub_bo_cpu_addr);
1856 	if (r)
1857 		return r;
1858 
1859 	/* Rebase the regions on the framebuffer address. */
1860 	memset(&fb_params, 0, sizeof(fb_params));
1861 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1862 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1863 	fb_params.region_info = &region_info;
1864 
1865 	adev->dm.dmub_fb_info =
1866 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1867 	fb_info = adev->dm.dmub_fb_info;
1868 
1869 	if (!fb_info) {
1870 		DRM_ERROR(
1871 			"Failed to allocate framebuffer info for DMUB service!\n");
1872 		return -ENOMEM;
1873 	}
1874 
1875 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1876 	if (status != DMUB_STATUS_OK) {
1877 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1878 		return -EINVAL;
1879 	}
1880 
1881 	return 0;
1882 }
1883 
1884 static int dm_sw_init(void *handle)
1885 {
1886 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1887 	int r;
1888 
1889 	r = dm_dmub_sw_init(adev);
1890 	if (r)
1891 		return r;
1892 
1893 	return load_dmcu_fw(adev);
1894 }
1895 
1896 static int dm_sw_fini(void *handle)
1897 {
1898 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1899 
1900 	kfree(adev->dm.dmub_fb_info);
1901 	adev->dm.dmub_fb_info = NULL;
1902 
1903 	if (adev->dm.dmub_srv) {
1904 		dmub_srv_destroy(adev->dm.dmub_srv);
1905 		adev->dm.dmub_srv = NULL;
1906 	}
1907 
1908 	release_firmware(adev->dm.dmub_fw);
1909 	adev->dm.dmub_fw = NULL;
1910 
1911 	release_firmware(adev->dm.fw_dmcu);
1912 	adev->dm.fw_dmcu = NULL;
1913 
1914 	return 0;
1915 }
1916 
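/*
 * Start MST topology management on every connector attached to an MST branch
 * device. Called from dm_late_init() once the links have been detected.
 */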
1917 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1918 {
1919 	struct amdgpu_dm_connector *aconnector;
1920 	struct drm_connector *connector;
1921 	struct drm_connector_list_iter iter;
1922 	int ret = 0;
1923 
1924 	drm_connector_list_iter_begin(dev, &iter);
1925 	drm_for_each_connector_iter(connector, &iter) {
1926 		aconnector = to_amdgpu_dm_connector(connector);
1927 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
1928 		    aconnector->mst_mgr.aux) {
1929 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1930 					 aconnector,
1931 					 aconnector->base.base.id);
1932 
1933 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1934 			if (ret < 0) {
1935 				DRM_ERROR("DM_MST: Failed to start MST\n");
1936 				aconnector->dc_link->type =
1937 					dc_connection_single;
1938 				break;
1939 			}
1940 		}
1941 	}
1942 	drm_connector_list_iter_end(&iter);
1943 
1944 	return ret;
1945 }
1946 
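/*
 * dm_late_init() - Program the ABM/backlight ramping parameters into the DMCU
 * IRAM (or into the DMUB-based ABM for ASICs without a DMCU), then kick off
 * MST link detection on all connectors.
 */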
1947 static int dm_late_init(void *handle)
1948 {
1949 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1950 
1951 	struct dmcu_iram_parameters params;
1952 	unsigned int linear_lut[16];
1953 	int i;
1954 	struct dmcu *dmcu = NULL;
1955 
1956 	dmcu = adev->dm.dc->res_pool->dmcu;
1957 
1958 	for (i = 0; i < 16; i++)
1959 		linear_lut[i] = 0xFFFF * i / 15;
1960 
1961 	params.set = 0;
1962 	params.backlight_ramping_start = 0xCCCC;
1963 	params.backlight_ramping_reduction = 0xCCCCCCCC;
1964 	params.backlight_lut_array_size = 16;
1965 	params.backlight_lut_array = linear_lut;
1966 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
1970 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on dmcub,
	 * the dmcu object will be NULL.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
1975 	if (dmcu) {
1976 		if (!dmcu_load_iram(dmcu, params))
1977 			return -EINVAL;
1978 	} else if (adev->dm.dc->ctx->dmub_srv) {
1979 		struct dc_link *edp_links[MAX_NUM_EDP];
1980 		int edp_num;
1981 
1982 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
1983 		for (i = 0; i < edp_num; i++) {
1984 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
1985 				return -EINVAL;
1986 		}
1987 	}
1988 
1989 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1990 }
1991 
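/*
 * Suspend or resume the DP MST topology managers of all root MST connectors.
 * If a topology fails to resume, MST is torn down on that connector and a
 * hotplug event is generated so userspace can re-probe.
 */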
1992 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1993 {
1994 	struct amdgpu_dm_connector *aconnector;
1995 	struct drm_connector *connector;
1996 	struct drm_connector_list_iter iter;
1997 	struct drm_dp_mst_topology_mgr *mgr;
1998 	int ret;
1999 	bool need_hotplug = false;
2000 
2001 	drm_connector_list_iter_begin(dev, &iter);
2002 	drm_for_each_connector_iter(connector, &iter) {
2003 		aconnector = to_amdgpu_dm_connector(connector);
2004 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2005 		    aconnector->mst_port)
2006 			continue;
2007 
2008 		mgr = &aconnector->mst_mgr;
2009 
2010 		if (suspend) {
2011 			drm_dp_mst_topology_mgr_suspend(mgr);
2012 		} else {
2013 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2014 			if (ret < 0) {
2015 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2016 				need_hotplug = true;
2017 			}
2018 		}
2019 	}
2020 	drm_connector_list_iter_end(&iter);
2021 
2022 	if (need_hotplug)
2023 		drm_kms_helper_hotplug_event(dev);
2024 }
2025 
2026 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2027 {
2028 	struct smu_context *smu = &adev->smu;
2029 	int ret = 0;
2030 
2031 	if (!is_support_sw_smu(adev))
2032 		return 0;
2033 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed; the settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to Navi10/12/14 but not to Renoir.
	 */
	switch (adev->asic_type) {
2065 	case CHIP_NAVI10:
2066 	case CHIP_NAVI14:
2067 	case CHIP_NAVI12:
2068 		break;
2069 	default:
2070 		return 0;
2071 	}
2072 
2073 	ret = smu_write_watermarks_table(smu);
2074 	if (ret) {
2075 		DRM_ERROR("Failed to update WMTABLE!\n");
2076 		return ret;
2077 	}
2078 
2079 	return 0;
2080 }
2081 
2082 /**
2083  * dm_hw_init() - Initialize DC device
2084  * @handle: The base driver device containing the amdgpu_dm device.
2085  *
2086  * Initialize the &struct amdgpu_display_manager device. This involves calling
2087  * the initializers of each DM component, then populating the struct with them.
2088  *
2089  * Although the function implies hardware initialization, both hardware and
2090  * software are initialized here. Splitting them out to their relevant init
2091  * hooks is a future TODO item.
2092  *
2093  * Some notable things that are initialized here:
2094  *
2095  * - Display Core, both software and hardware
2096  * - DC modules that we need (freesync and color management)
2097  * - DRM software states
2098  * - Interrupt sources and handlers
2099  * - Vblank support
2100  * - Debug FS entries, if enabled
2101  */
2102 static int dm_hw_init(void *handle)
2103 {
2104 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2105 	/* Create DAL display manager */
2106 	amdgpu_dm_init(adev);
2107 	amdgpu_dm_hpd_init(adev);
2108 
2109 	return 0;
2110 }
2111 
2112 /**
2113  * dm_hw_fini() - Teardown DC device
2114  * @handle: The base driver device containing the amdgpu_dm device.
2115  *
2116  * Teardown components within &struct amdgpu_display_manager that require
2117  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2118  * were loaded. Also flush IRQ workqueues and disable them.
2119  */
2120 static int dm_hw_fini(void *handle)
2121 {
2122 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2123 
2124 	amdgpu_dm_hpd_fini(adev);
2125 
2126 	amdgpu_dm_irq_fini(adev);
2127 	amdgpu_dm_fini(adev);
2128 	return 0;
2129 }
2130 
2131 
2132 static int dm_enable_vblank(struct drm_crtc *crtc);
2133 static void dm_disable_vblank(struct drm_crtc *crtc);
2134 
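/*
 * Enable or disable the pageflip and vblank interrupts for every stream in the
 * given DC state. Used to quiesce and restore display interrupts around GPU
 * reset.
 */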
2135 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2136 				 struct dc_state *state, bool enable)
2137 {
2138 	enum dc_irq_source irq_source;
2139 	struct amdgpu_crtc *acrtc;
2140 	int rc = -EBUSY;
2141 	int i = 0;
2142 
2143 	for (i = 0; i < state->stream_count; i++) {
2144 		acrtc = get_crtc_by_otg_inst(
2145 				adev, state->stream_status[i].primary_otg_inst);
2146 
2147 		if (acrtc && state->stream_status[i].plane_count != 0) {
2148 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2149 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2151 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2152 			if (rc)
2153 				DRM_WARN("Failed to %s pflip interrupts\n",
2154 					 enable ? "enable" : "disable");
2155 
2156 			if (enable) {
2157 				rc = dm_enable_vblank(&acrtc->base);
2158 				if (rc)
2159 					DRM_WARN("Failed to enable vblank interrupts\n");
2160 			} else {
2161 				dm_disable_vblank(&acrtc->base);
2162 			}
2163 
2164 		}
2165 	}
2166 
2167 }
2168 
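/*
 * Build a copy of the current DC state with every stream (and its planes)
 * removed, validate it and commit it, effectively blanking all displays.
 */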
2169 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2170 {
2171 	struct dc_state *context = NULL;
2172 	enum dc_status res = DC_ERROR_UNEXPECTED;
2173 	int i;
2174 	struct dc_stream_state *del_streams[MAX_PIPES];
2175 	int del_streams_count = 0;
2176 
2177 	memset(del_streams, 0, sizeof(del_streams));
2178 
2179 	context = dc_create_state(dc);
2180 	if (context == NULL)
2181 		goto context_alloc_fail;
2182 
2183 	dc_resource_state_copy_construct_current(dc, context);
2184 
2185 	/* First remove from context all streams */
2186 	for (i = 0; i < context->stream_count; i++) {
2187 		struct dc_stream_state *stream = context->streams[i];
2188 
2189 		del_streams[del_streams_count++] = stream;
2190 	}
2191 
2192 	/* Remove all planes for removed streams and then remove the streams */
2193 	for (i = 0; i < del_streams_count; i++) {
2194 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2195 			res = DC_FAIL_DETACH_SURFACES;
2196 			goto fail;
2197 		}
2198 
2199 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2200 		if (res != DC_OK)
2201 			goto fail;
2202 	}
2203 
2204 
2205 	res = dc_validate_global_state(dc, context, false);
2206 
2207 	if (res != DC_OK) {
		DRM_ERROR("%s: resource validation failed, dc_status:%d\n", __func__, res);
2209 		goto fail;
2210 	}
2211 
2212 	res = dc_commit_state(dc, context);
2213 
2214 fail:
2215 	dc_release_state(context);
2216 
2217 context_alloc_fail:
2218 	return res;
2219 }
2220 
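/* Flush any pending HPD RX offload work before suspending. */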
2221 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2222 {
2223 	int i;
2224 
2225 	if (dm->hpd_rx_offload_wq) {
2226 		for (i = 0; i < dm->dc->caps.max_links; i++)
2227 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2228 	}
2229 }
2230 
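/*
 * dm_suspend() - During GPU reset, cache the current DC state, commit zero
 * streams and keep dc_lock held until dm_resume() restores the cached state.
 * For a regular suspend, cache the DRM atomic state, suspend MST and put DC
 * into D3.
 */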
2231 static int dm_suspend(void *handle)
2232 {
2233 	struct amdgpu_device *adev = handle;
2234 	struct amdgpu_display_manager *dm = &adev->dm;
2235 	int ret = 0;
2236 
2237 	if (amdgpu_in_reset(adev)) {
2238 		mutex_lock(&dm->dc_lock);
2239 
2240 #if defined(CONFIG_DRM_AMD_DC_DCN)
2241 		dc_allow_idle_optimizations(adev->dm.dc, false);
2242 #endif
2243 
2244 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2245 
2246 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2247 
2248 		amdgpu_dm_commit_zero_streams(dm->dc);
2249 
2250 		amdgpu_dm_irq_suspend(adev);
2251 
2252 		hpd_rx_irq_work_suspend(dm);
2253 
2254 		return ret;
2255 	}
2256 
2257 	WARN_ON(adev->dm.cached_state);
2258 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2259 
2260 	s3_handle_mst(adev_to_drm(adev), true);
2261 
2262 	amdgpu_dm_irq_suspend(adev);
2263 
2264 	hpd_rx_irq_work_suspend(dm);
2265 
2266 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2267 
2268 	return 0;
2269 }
2270 
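/* Return the first connector in the atomic state whose new state targets @crtc. */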
2271 static struct amdgpu_dm_connector *
2272 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2273 					     struct drm_crtc *crtc)
2274 {
2275 	uint32_t i;
2276 	struct drm_connector_state *new_con_state;
2277 	struct drm_connector *connector;
2278 	struct drm_crtc *crtc_from_state;
2279 
2280 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2281 		crtc_from_state = new_con_state->crtc;
2282 
2283 		if (crtc_from_state == crtc)
2284 			return to_amdgpu_dm_connector(connector);
2285 	}
2286 
2287 	return NULL;
2288 }
2289 
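/*
 * Fake a link detection for connectors that are forced on while no physical
 * sink is detected: create a sink matching the connector's signal type and
 * read the local EDID into it.
 */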
2290 static void emulated_link_detect(struct dc_link *link)
2291 {
2292 	struct dc_sink_init_data sink_init_data = { 0 };
2293 	struct display_sink_capability sink_caps = { 0 };
2294 	enum dc_edid_status edid_status;
2295 	struct dc_context *dc_ctx = link->ctx;
2296 	struct dc_sink *sink = NULL;
2297 	struct dc_sink *prev_sink = NULL;
2298 
2299 	link->type = dc_connection_none;
2300 	prev_sink = link->local_sink;
2301 
2302 	if (prev_sink)
2303 		dc_sink_release(prev_sink);
2304 
2305 	switch (link->connector_signal) {
2306 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2307 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2308 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2309 		break;
2310 	}
2311 
2312 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2313 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2314 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2315 		break;
2316 	}
2317 
2318 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2319 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2320 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2321 		break;
2322 	}
2323 
2324 	case SIGNAL_TYPE_LVDS: {
2325 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2326 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2327 		break;
2328 	}
2329 
2330 	case SIGNAL_TYPE_EDP: {
2331 		sink_caps.transaction_type =
2332 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2333 		sink_caps.signal = SIGNAL_TYPE_EDP;
2334 		break;
2335 	}
2336 
2337 	case SIGNAL_TYPE_DISPLAY_PORT: {
2338 		sink_caps.transaction_type =
2339 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2340 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2341 		break;
2342 	}
2343 
2344 	default:
2345 		DC_ERROR("Invalid connector type! signal:%d\n",
2346 			link->connector_signal);
2347 		return;
2348 	}
2349 
2350 	sink_init_data.link = link;
2351 	sink_init_data.sink_signal = sink_caps.signal;
2352 
2353 	sink = dc_sink_create(&sink_init_data);
2354 	if (!sink) {
2355 		DC_ERROR("Failed to create sink!\n");
2356 		return;
2357 	}
2358 
2359 	/* dc_sink_create returns a new reference */
2360 	link->local_sink = sink;
2361 
2362 	edid_status = dm_helpers_read_local_edid(
2363 			link->ctx,
2364 			link,
2365 			sink);
2366 
2367 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
2369 
2370 }
2371 
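/*
 * After GPU reset, resubmit every surface of the cached DC state with
 * force_full_update set so that the hardware gets fully reprogrammed.
 */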
2372 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2373 				     struct amdgpu_display_manager *dm)
2374 {
2375 	struct {
2376 		struct dc_surface_update surface_updates[MAX_SURFACES];
2377 		struct dc_plane_info plane_infos[MAX_SURFACES];
2378 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2379 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2380 		struct dc_stream_update stream_update;
	} *bundle;
2382 	int k, m;
2383 
2384 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2385 
2386 	if (!bundle) {
2387 		dm_error("Failed to allocate update bundle\n");
2388 		goto cleanup;
2389 	}
2390 
2391 	for (k = 0; k < dc_state->stream_count; k++) {
2392 		bundle->stream_update.stream = dc_state->streams[k];
2393 
2394 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2395 			bundle->surface_updates[m].surface =
2396 				dc_state->stream_status->plane_states[m];
2397 			bundle->surface_updates[m].surface->force_full_update =
2398 				true;
2399 		}
2400 		dc_commit_updates_for_stream(
2401 			dm->dc, bundle->surface_updates,
2402 			dc_state->stream_status->plane_count,
2403 			dc_state->streams[k], &bundle->stream_update, dc_state);
2404 	}
2405 
2406 cleanup:
2407 	kfree(bundle);
2408 
2409 	return;
2410 }
2411 
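/*
 * Force dpms_off on the stream currently driven by @link. Used from the HPD
 * handler when the link reports no connection but a CRTC is still configured
 * on the connector.
 */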
2412 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2413 {
2414 	struct dc_stream_state *stream_state;
2415 	struct amdgpu_dm_connector *aconnector = link->priv;
2416 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2417 	struct dc_stream_update stream_update;
2418 	bool dpms_off = true;
2419 
2420 	memset(&stream_update, 0, sizeof(stream_update));
2421 	stream_update.dpms_off = &dpms_off;
2422 
2423 	mutex_lock(&adev->dm.dc_lock);
2424 	stream_state = dc_stream_find_from_link(link);
2425 
2426 	if (stream_state == NULL) {
2427 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2428 		mutex_unlock(&adev->dm.dc_lock);
2429 		return;
2430 	}
2431 
2432 	stream_update.stream = stream_state;
2433 	acrtc_state->force_dpms_off = true;
2434 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2435 				     stream_state, &stream_update,
2436 				     stream_state->ctx->dc->current_state);
2437 	mutex_unlock(&adev->dm.dc_lock);
2438 }
2439 
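/*
 * dm_resume() - Re-initialize DMUB and power DC back on, then either restore
 * the cached DC state (GPU reset path) or re-detect all links and replay the
 * cached DRM atomic state (S3 resume path).
 */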
2440 static int dm_resume(void *handle)
2441 {
2442 	struct amdgpu_device *adev = handle;
2443 	struct drm_device *ddev = adev_to_drm(adev);
2444 	struct amdgpu_display_manager *dm = &adev->dm;
2445 	struct amdgpu_dm_connector *aconnector;
2446 	struct drm_connector *connector;
2447 	struct drm_connector_list_iter iter;
2448 	struct drm_crtc *crtc;
2449 	struct drm_crtc_state *new_crtc_state;
2450 	struct dm_crtc_state *dm_new_crtc_state;
2451 	struct drm_plane *plane;
2452 	struct drm_plane_state *new_plane_state;
2453 	struct dm_plane_state *dm_new_plane_state;
2454 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2455 	enum dc_connection_type new_connection_type = dc_connection_none;
2456 	struct dc_state *dc_state;
2457 	int i, r, j;
2458 
2459 	if (amdgpu_in_reset(adev)) {
2460 		dc_state = dm->cached_dc_state;
2461 
2462 		r = dm_dmub_hw_init(adev);
2463 		if (r)
2464 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2465 
2466 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2467 		dc_resume(dm->dc);
2468 
2469 		amdgpu_dm_irq_resume_early(adev);
2470 
2471 		for (i = 0; i < dc_state->stream_count; i++) {
2472 			dc_state->streams[i]->mode_changed = true;
2473 			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2474 				dc_state->stream_status->plane_states[j]->update_flags.raw
2475 					= 0xffffffff;
2476 			}
2477 		}
2478 #if defined(CONFIG_DRM_AMD_DC_DCN)
2479 		/*
2480 		 * Resource allocation happens for link encoders for newer ASIC in
2481 		 * dc_validate_global_state, so we need to revalidate it.
2482 		 *
2483 		 * This shouldn't fail (it passed once before), so warn if it does.
2484 		 */
2485 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2486 #endif
2487 
2488 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2489 
2490 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2491 
2492 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2493 
2494 		dc_release_state(dm->cached_dc_state);
2495 		dm->cached_dc_state = NULL;
2496 
2497 		amdgpu_dm_irq_resume_late(adev);
2498 
2499 		mutex_unlock(&dm->dc_lock);
2500 
2501 		return 0;
2502 	}
2503 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2504 	dc_release_state(dm_state->context);
2505 	dm_state->context = dc_create_state(dm->dc);
2506 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2507 	dc_resource_state_construct(dm->dc, dm_state->context);
2508 
2509 	/* Before powering on DC we need to re-initialize DMUB. */
2510 	r = dm_dmub_hw_init(adev);
2511 	if (r)
2512 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2513 
2514 	/* power on hardware */
2515 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2516 
2517 	/* program HPD filter */
2518 	dc_resume(dm->dc);
2519 
	/*
	 * Enable HPD Rx IRQ early; this must be done before modeset since
	 * short-pulse interrupts are used for MST.
	 */
2524 	amdgpu_dm_irq_resume_early(adev);
2525 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2527 	s3_handle_mst(ddev, false);
2528 
	/* Do detection */
2530 	drm_connector_list_iter_begin(ddev, &iter);
2531 	drm_for_each_connector_iter(connector, &iter) {
2532 		aconnector = to_amdgpu_dm_connector(connector);
2533 
		/*
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
		 */
2538 		if (aconnector->mst_port)
2539 			continue;
2540 
2541 		mutex_lock(&aconnector->hpd_lock);
2542 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2543 			DRM_ERROR("KMS: Failed to detect connector\n");
2544 
2545 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2546 			emulated_link_detect(aconnector->dc_link);
2547 		else
2548 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2549 
2550 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2551 			aconnector->fake_enable = false;
2552 
2553 		if (aconnector->dc_sink)
2554 			dc_sink_release(aconnector->dc_sink);
2555 		aconnector->dc_sink = NULL;
2556 		amdgpu_dm_update_connector_after_detect(aconnector);
2557 		mutex_unlock(&aconnector->hpd_lock);
2558 	}
2559 	drm_connector_list_iter_end(&iter);
2560 
2561 	/* Force mode set in atomic commit */
2562 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2563 		new_crtc_state->active_changed = true;
2564 
2565 	/*
2566 	 * atomic_check is expected to create the dc states. We need to release
2567 	 * them here, since they were duplicated as part of the suspend
2568 	 * procedure.
2569 	 */
2570 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2571 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2572 		if (dm_new_crtc_state->stream) {
2573 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2574 			dc_stream_release(dm_new_crtc_state->stream);
2575 			dm_new_crtc_state->stream = NULL;
2576 		}
2577 	}
2578 
2579 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2580 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2581 		if (dm_new_plane_state->dc_state) {
2582 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2583 			dc_plane_state_release(dm_new_plane_state->dc_state);
2584 			dm_new_plane_state->dc_state = NULL;
2585 		}
2586 	}
2587 
2588 	drm_atomic_helper_resume(ddev, dm->cached_state);
2589 
2590 	dm->cached_state = NULL;
2591 
2592 	amdgpu_dm_irq_resume_late(adev);
2593 
2594 	amdgpu_dm_smu_write_watermarks_table(adev);
2595 
2596 	return 0;
2597 }
2598 
2599 /**
2600  * DOC: DM Lifecycle
2601  *
2602  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2603  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2604  * the base driver's device list to be initialized and torn down accordingly.
2605  *
2606  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2607  */
2608 
2609 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2610 	.name = "dm",
2611 	.early_init = dm_early_init,
2612 	.late_init = dm_late_init,
2613 	.sw_init = dm_sw_init,
2614 	.sw_fini = dm_sw_fini,
2615 	.early_fini = amdgpu_dm_early_fini,
2616 	.hw_init = dm_hw_init,
2617 	.hw_fini = dm_hw_fini,
2618 	.suspend = dm_suspend,
2619 	.resume = dm_resume,
2620 	.is_idle = dm_is_idle,
2621 	.wait_for_idle = dm_wait_for_idle,
2622 	.check_soft_reset = dm_check_soft_reset,
2623 	.soft_reset = dm_soft_reset,
2624 	.set_clockgating_state = dm_set_clockgating_state,
2625 	.set_powergating_state = dm_set_powergating_state,
2626 };
2627 
const struct amdgpu_ip_block_version dm_ip_block = {
2630 	.type = AMD_IP_BLOCK_TYPE_DCE,
2631 	.major = 1,
2632 	.minor = 0,
2633 	.rev = 0,
2634 	.funcs = &amdgpu_dm_funcs,
2635 };
2636 
2637 
2638 /**
2639  * DOC: atomic
2640  *
2641  * *WIP*
2642  */
2643 
2644 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2645 	.fb_create = amdgpu_display_user_framebuffer_create,
2646 	.get_format_info = amd_get_format_info,
2647 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2648 	.atomic_check = amdgpu_dm_atomic_check,
2649 	.atomic_commit = drm_atomic_helper_commit,
2650 };
2651 
2652 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2653 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2654 };
2655 
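/*
 * Refresh the eDP backlight capabilities for the connector from the sink's
 * extended DPCD caps and HDR metadata, deriving the AUX min/max input signal
 * from the reported content light levels.
 */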
2656 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2657 {
2658 	u32 max_cll, min_cll, max, min, q, r;
2659 	struct amdgpu_dm_backlight_caps *caps;
2660 	struct amdgpu_display_manager *dm;
2661 	struct drm_connector *conn_base;
2662 	struct amdgpu_device *adev;
2663 	struct dc_link *link = NULL;
2664 	static const u8 pre_computed_values[] = {
2665 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2666 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2667 	int i;
2668 
2669 	if (!aconnector || !aconnector->dc_link)
2670 		return;
2671 
2672 	link = aconnector->dc_link;
2673 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2674 		return;
2675 
2676 	conn_base = &aconnector->base;
2677 	adev = drm_to_adev(conn_base->dev);
2678 	dm = &adev->dm;
2679 	for (i = 0; i < dm->num_of_edps; i++) {
2680 		if (link == dm->backlight_link[i])
2681 			break;
2682 	}
2683 	if (i >= dm->num_of_edps)
2684 		return;
2685 	caps = &dm->backlight_caps[i];
2686 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2687 	caps->aux_support = false;
2688 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2689 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2690 
2691 	if (caps->ext_caps->bits.oled == 1 /*||
2692 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2693 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2694 		caps->aux_support = true;
2695 
2696 	if (amdgpu_backlight == 0)
2697 		caps->aux_support = false;
2698 	else if (amdgpu_backlight == 1)
2699 		caps->aux_support = true;
2700 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need floating-point precision;
	 * to avoid this complexity level, we take advantage of the fact that CV
	 * is divided by a constant. From Euclid's division algorithm, we know
	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * we just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expression can be verified at
	 * pre_computed_values.
	 */
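	/* Example: max_cll = 65 gives q = 2 and r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
	 * which matches 50*2**(65/32) ~= 204.4.
	 */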
2716 	q = max_cll >> 5;
2717 	r = max_cll % 32;
2718 	max = (1 << q) * pre_computed_values[r];
2719 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
2721 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2722 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2723 
2724 	caps->aux_max_input_signal = max;
2725 	caps->aux_min_input_signal = min;
2726 }
2727 
2728 void amdgpu_dm_update_connector_after_detect(
2729 		struct amdgpu_dm_connector *aconnector)
2730 {
2731 	struct drm_connector *connector = &aconnector->base;
2732 	struct drm_device *dev = connector->dev;
2733 	struct dc_sink *sink;
2734 
2735 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2737 		return;
2738 
2739 	sink = aconnector->dc_link->local_sink;
2740 	if (sink)
2741 		dc_sink_retain(sink);
2742 
	/*
	 * An EDID-managed connector gets its first update only in the mode_valid
	 * hook, and then the connector sink is set to either the fake or the
	 * physical sink, depending on link status. Skip if already done during boot.
	 */
2748 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2749 			&& aconnector->dc_em_sink) {
2750 
		/*
		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
		 * fake the stream, because on resume connector->sink is set to NULL.
		 */
2755 		mutex_lock(&dev->mode_config.mutex);
2756 
2757 		if (sink) {
2758 			if (aconnector->dc_sink) {
2759 				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the sink's
				 * refcount because the link no longer points to it
				 * after disconnect; otherwise, on the next crtc to
				 * connector reshuffle by the UMD, we would get an
				 * unwanted dc_sink release.
				 */
2766 				dc_sink_release(aconnector->dc_sink);
2767 			}
2768 			aconnector->dc_sink = sink;
2769 			dc_sink_retain(aconnector->dc_sink);
2770 			amdgpu_dm_update_freesync_caps(connector,
2771 					aconnector->edid);
2772 		} else {
2773 			amdgpu_dm_update_freesync_caps(connector, NULL);
2774 			if (!aconnector->dc_sink) {
2775 				aconnector->dc_sink = aconnector->dc_em_sink;
2776 				dc_sink_retain(aconnector->dc_sink);
2777 			}
2778 		}
2779 
2780 		mutex_unlock(&dev->mode_config.mutex);
2781 
2782 		if (sink)
2783 			dc_sink_release(sink);
2784 		return;
2785 	}
2786 
	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
	 */
2791 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2792 		dc_sink_release(sink);
2793 		return;
2794 	}
2795 
2796 	if (aconnector->dc_sink == sink) {
2797 		/*
2798 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2799 		 * Do nothing!!
2800 		 */
2801 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2802 				aconnector->connector_id);
2803 		if (sink)
2804 			dc_sink_release(sink);
2805 		return;
2806 	}
2807 
2808 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2809 		aconnector->connector_id, aconnector->dc_sink, sink);
2810 
2811 	mutex_lock(&dev->mode_config.mutex);
2812 
2813 	/*
2814 	 * 1. Update status of the drm connector
2815 	 * 2. Send an event and let userspace tell us what to do
2816 	 */
2817 	if (sink) {
2818 		/*
2819 		 * TODO: check if we still need the S3 mode update workaround.
2820 		 * If yes, put it here.
2821 		 */
2822 		if (aconnector->dc_sink) {
2823 			amdgpu_dm_update_freesync_caps(connector, NULL);
2824 			dc_sink_release(aconnector->dc_sink);
2825 		}
2826 
2827 		aconnector->dc_sink = sink;
2828 		dc_sink_retain(aconnector->dc_sink);
2829 		if (sink->dc_edid.length == 0) {
2830 			aconnector->edid = NULL;
2831 			if (aconnector->dc_link->aux_mode) {
2832 				drm_dp_cec_unset_edid(
2833 					&aconnector->dm_dp_aux.aux);
2834 			}
2835 		} else {
2836 			aconnector->edid =
2837 				(struct edid *)sink->dc_edid.raw_edid;
2838 
2839 			drm_connector_update_edid_property(connector,
2840 							   aconnector->edid);
2841 			if (aconnector->dc_link->aux_mode)
2842 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2843 						    aconnector->edid);
2844 		}
2845 
2846 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2847 		update_connector_ext_caps(aconnector);
2848 	} else {
2849 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2850 		amdgpu_dm_update_freesync_caps(connector, NULL);
2851 		drm_connector_update_edid_property(connector, NULL);
2852 		aconnector->num_modes = 0;
2853 		dc_sink_release(aconnector->dc_sink);
2854 		aconnector->dc_sink = NULL;
2855 		aconnector->edid = NULL;
2856 #ifdef CONFIG_DRM_AMD_DC_HDCP
2857 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2858 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2859 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2860 #endif
2861 	}
2862 
2863 	mutex_unlock(&dev->mode_config.mutex);
2864 
2865 	update_subconnector_property(aconnector);
2866 
2867 	if (sink)
2868 		dc_sink_release(sink);
2869 }
2870 
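/*
 * Handle a long HPD pulse for a single connector: re-run sink detection on the
 * link (or emulate it for forced connectors), update the connector state and,
 * for non-forced connectors, send a hotplug event to userspace.
 */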
2871 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2872 {
2873 	struct drm_connector *connector = &aconnector->base;
2874 	struct drm_device *dev = connector->dev;
2875 	enum dc_connection_type new_connection_type = dc_connection_none;
2876 	struct amdgpu_device *adev = drm_to_adev(dev);
2877 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2878 	struct dm_crtc_state *dm_crtc_state = NULL;
2879 
2880 	if (adev->dm.disable_hpd_irq)
2881 		return;
2882 
2883 	if (dm_con_state->base.state && dm_con_state->base.crtc)
2884 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
2885 					dm_con_state->base.state,
2886 					dm_con_state->base.crtc));
	/*
	 * In case of failure, or in the MST case, there is no need to update the
	 * connector status or notify the OS, since MST does this in its own context.
	 */
2891 	mutex_lock(&aconnector->hpd_lock);
2892 
2893 #ifdef CONFIG_DRM_AMD_DC_HDCP
2894 	if (adev->dm.hdcp_workqueue) {
2895 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2896 		dm_con_state->update_hdcp = true;
2897 	}
2898 #endif
2899 	if (aconnector->fake_enable)
2900 		aconnector->fake_enable = false;
2901 
2902 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2903 		DRM_ERROR("KMS: Failed to detect connector\n");
2904 
2905 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2906 		emulated_link_detect(aconnector->dc_link);
2907 
2908 
2909 		drm_modeset_lock_all(dev);
2910 		dm_restore_drm_connector_state(dev, connector);
2911 		drm_modeset_unlock_all(dev);
2912 
2913 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2914 			drm_kms_helper_hotplug_event(dev);
2915 
2916 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2917 		if (new_connection_type == dc_connection_none &&
2918 		    aconnector->dc_link->type == dc_connection_none &&
2919 		    dm_crtc_state)
2920 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
2921 
2922 		amdgpu_dm_update_connector_after_detect(aconnector);
2923 
2924 		drm_modeset_lock_all(dev);
2925 		dm_restore_drm_connector_state(dev, connector);
2926 		drm_modeset_unlock_all(dev);
2927 
2928 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2929 			drm_kms_helper_hotplug_event(dev);
2930 	}
2931 	mutex_unlock(&aconnector->hpd_lock);
2932 
2933 }
2934 
2935 static void handle_hpd_irq(void *param)
2936 {
2937 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2938 
2939 	handle_hpd_irq_helper(aconnector);
2940 
2941 }
2942 
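/*
 * Drain pending MST sideband messages: read the ESI/sink-count DPCD range, let
 * the MST manager handle the IRQ, ACK it back to the sink and repeat until no
 * new IRQ is reported (bounded by max_process_count).
 */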
2943 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
2944 {
2945 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2946 	uint8_t dret;
2947 	bool new_irq_handled = false;
2948 	int dpcd_addr;
2949 	int dpcd_bytes_to_read;
2950 
2951 	const int max_process_count = 30;
2952 	int process_count = 0;
2953 
2954 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2955 
2956 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2957 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2958 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2959 		dpcd_addr = DP_SINK_COUNT;
2960 	} else {
2961 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2962 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2963 		dpcd_addr = DP_SINK_COUNT_ESI;
2964 	}
2965 
2966 	dret = drm_dp_dpcd_read(
2967 		&aconnector->dm_dp_aux.aux,
2968 		dpcd_addr,
2969 		esi,
2970 		dpcd_bytes_to_read);
2971 
2972 	while (dret == dpcd_bytes_to_read &&
2973 		process_count < max_process_count) {
2974 		uint8_t retry;
2975 		dret = 0;
2976 
2977 		process_count++;
2978 
2979 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2980 		/* handle HPD short pulse irq */
2981 		if (aconnector->mst_mgr.mst_state)
2982 			drm_dp_mst_hpd_irq(
2983 				&aconnector->mst_mgr,
2984 				esi,
2985 				&new_irq_handled);
2986 
2987 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
2989 			const int ack_dpcd_bytes_to_write =
2990 				dpcd_bytes_to_read - 1;
2991 
2992 			for (retry = 0; retry < 3; retry++) {
2993 				uint8_t wret;
2994 
2995 				wret = drm_dp_dpcd_write(
2996 					&aconnector->dm_dp_aux.aux,
2997 					dpcd_addr + 1,
2998 					&esi[1],
2999 					ack_dpcd_bytes_to_write);
3000 				if (wret == ack_dpcd_bytes_to_write)
3001 					break;
3002 			}
3003 
3004 			/* check if there is new irq to be handled */
3005 			dret = drm_dp_dpcd_read(
3006 				&aconnector->dm_dp_aux.aux,
3007 				dpcd_addr,
3008 				esi,
3009 				dpcd_bytes_to_read);
3010 
3011 			new_irq_handled = false;
3012 		} else {
3013 			break;
3014 		}
3015 	}
3016 
3017 	if (process_count == max_process_count)
3018 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3019 }
3020 
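/*
 * Queue a work item on the per-link HPD RX offload workqueue so the IRQ data
 * can be handled outside of the HPD RX handler itself.
 */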
3021 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3022 							union hpd_irq_data hpd_irq_data)
3023 {
3024 	struct hpd_rx_irq_offload_work *offload_work =
3025 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3026 
3027 	if (!offload_work) {
3028 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3029 		return;
3030 	}
3031 
3032 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3033 	offload_work->data = hpd_irq_data;
3034 	offload_work->offload_wq = offload_wq;
3035 
3036 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3038 }
3039 
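/*
 * Handle a DP short pulse (HPD RX) interrupt: let DC process the IRQ data,
 * offload automated-test and link-loss handling to the per-link workqueue,
 * service MST sideband messages, and re-detect the downstream port when its
 * status has changed.
 */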
3040 static void handle_hpd_rx_irq(void *param)
3041 {
3042 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3043 	struct drm_connector *connector = &aconnector->base;
3044 	struct drm_device *dev = connector->dev;
3045 	struct dc_link *dc_link = aconnector->dc_link;
3046 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3047 	bool result = false;
3048 	enum dc_connection_type new_connection_type = dc_connection_none;
3049 	struct amdgpu_device *adev = drm_to_adev(dev);
3050 	union hpd_irq_data hpd_irq_data;
3051 	bool link_loss = false;
3052 	bool has_left_work = false;
3053 	int idx = aconnector->base.index;
3054 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3055 
3056 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3057 
3058 	if (adev->dm.disable_hpd_irq)
3059 		return;
3060 
	/*
	 * TODO: Temporarily take a mutex so the HPD interrupt does not run into a
	 * GPIO conflict; once the i2c helper is implemented, this mutex should be
	 * retired.
	 */
3066 	mutex_lock(&aconnector->hpd_lock);
3067 
3068 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3069 						&link_loss, true, &has_left_work);
3070 
3071 	if (!has_left_work)
3072 		goto out;
3073 
3074 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3075 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3076 		goto out;
3077 	}
3078 
3079 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3080 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3081 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3082 			dm_handle_mst_sideband_msg(aconnector);
3083 			goto out;
3084 		}
3085 
3086 		if (link_loss) {
3087 			bool skip = false;
3088 
3089 			spin_lock(&offload_wq->offload_lock);
3090 			skip = offload_wq->is_handling_link_loss;
3091 
3092 			if (!skip)
3093 				offload_wq->is_handling_link_loss = true;
3094 
3095 			spin_unlock(&offload_wq->offload_lock);
3096 
3097 			if (!skip)
3098 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3099 
3100 			goto out;
3101 		}
3102 	}
3103 
3104 out:
3105 	if (result && !is_mst_root_connector) {
3106 		/* Downstream Port status changed. */
3107 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3108 			DRM_ERROR("KMS: Failed to detect connector\n");
3109 
3110 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3111 			emulated_link_detect(dc_link);
3112 
3113 			if (aconnector->fake_enable)
3114 				aconnector->fake_enable = false;
3115 
3116 			amdgpu_dm_update_connector_after_detect(aconnector);
3117 
3118 
3119 			drm_modeset_lock_all(dev);
3120 			dm_restore_drm_connector_state(dev, connector);
3121 			drm_modeset_unlock_all(dev);
3122 
3123 			drm_kms_helper_hotplug_event(dev);
3124 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3125 
3126 			if (aconnector->fake_enable)
3127 				aconnector->fake_enable = false;
3128 
3129 			amdgpu_dm_update_connector_after_detect(aconnector);
3130 
3131 
3132 			drm_modeset_lock_all(dev);
3133 			dm_restore_drm_connector_state(dev, connector);
3134 			drm_modeset_unlock_all(dev);
3135 
3136 			drm_kms_helper_hotplug_event(dev);
3137 		}
3138 	}
3139 #ifdef CONFIG_DRM_AMD_DC_HDCP
3140 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3141 		if (adev->dm.hdcp_workqueue)
3142 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3143 	}
3144 #endif
3145 
3146 	if (dc_link->type != dc_connection_mst_branch)
3147 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3148 
3149 	mutex_unlock(&aconnector->hpd_lock);
3150 }
3151 
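/*
 * Register the HPD and HPD RX (DP short pulse) interrupt handlers for every
 * connector that exposes a valid HPD interrupt source.
 */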
3152 static void register_hpd_handlers(struct amdgpu_device *adev)
3153 {
3154 	struct drm_device *dev = adev_to_drm(adev);
3155 	struct drm_connector *connector;
3156 	struct amdgpu_dm_connector *aconnector;
3157 	const struct dc_link *dc_link;
3158 	struct dc_interrupt_params int_params = {0};
3159 
3160 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3161 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3162 
3163 	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
3165 
3166 		aconnector = to_amdgpu_dm_connector(connector);
3167 		dc_link = aconnector->dc_link;
3168 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3170 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3171 			int_params.irq_source = dc_link->irq_source_hpd;
3172 
3173 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3174 					handle_hpd_irq,
3175 					(void *) aconnector);
3176 		}
3177 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3179 
3180 			/* Also register for DP short pulse (hpd_rx). */
3181 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3183 
3184 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3185 					handle_hpd_rx_irq,
3186 					(void *) aconnector);
3187 
3188 			if (adev->dm.hpd_rx_offload_wq)
3189 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3190 					aconnector;
3191 		}
3192 	}
3193 }
3194 
3195 #if defined(CONFIG_DRM_AMD_DC_SI)
3196 /* Register IRQ sources and initialize IRQ callbacks */
3197 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3198 {
3199 	struct dc *dc = adev->dm.dc;
3200 	struct common_irq_params *c_irq_params;
3201 	struct dc_interrupt_params int_params = {0};
3202 	int r;
3203 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3205 
3206 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3207 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3208 
3209 	/*
3210 	 * Actions of amdgpu_irq_add_id():
3211 	 * 1. Register a set() function with base driver.
3212 	 *    Base driver will call set() function to enable/disable an
3213 	 *    interrupt in DC hardware.
3214 	 * 2. Register amdgpu_dm_irq_handler().
3215 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3216 	 *    coming from DC hardware.
3217 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3219 
3220 	/* Use VBLANK interrupt */
3221 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3223 		if (r) {
3224 			DRM_ERROR("Failed to add crtc irq id!\n");
3225 			return r;
3226 		}
3227 
3228 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3229 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3231 
3232 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3233 
3234 		c_irq_params->adev = adev;
3235 		c_irq_params->irq_src = int_params.irq_source;
3236 
3237 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3238 				dm_crtc_high_irq, c_irq_params);
3239 	}
3240 
3241 	/* Use GRPH_PFLIP interrupt */
3242 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3243 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3244 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3245 		if (r) {
3246 			DRM_ERROR("Failed to add page flip irq id!\n");
3247 			return r;
3248 		}
3249 
3250 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3251 		int_params.irq_source =
3252 			dc_interrupt_to_irq_source(dc, i, 0);
3253 
3254 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3255 
3256 		c_irq_params->adev = adev;
3257 		c_irq_params->irq_src = int_params.irq_source;
3258 
3259 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3260 				dm_pflip_high_irq, c_irq_params);
3261 
3262 	}
3263 
3264 	/* HPD */
3265 	r = amdgpu_irq_add_id(adev, client_id,
3266 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3267 	if (r) {
3268 		DRM_ERROR("Failed to add hpd irq id!\n");
3269 		return r;
3270 	}
3271 
3272 	register_hpd_handlers(adev);
3273 
3274 	return 0;
3275 }
3276 #endif
3277 
3278 /* Register IRQ sources and initialize IRQ callbacks */
3279 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3280 {
3281 	struct dc *dc = adev->dm.dc;
3282 	struct common_irq_params *c_irq_params;
3283 	struct dc_interrupt_params int_params = {0};
3284 	int r;
3285 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3287 
3288 	if (adev->asic_type >= CHIP_VEGA10)
3289 		client_id = SOC15_IH_CLIENTID_DCE;
3290 
3291 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3292 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3293 
3294 	/*
3295 	 * Actions of amdgpu_irq_add_id():
3296 	 * 1. Register a set() function with base driver.
3297 	 *    Base driver will call set() function to enable/disable an
3298 	 *    interrupt in DC hardware.
3299 	 * 2. Register amdgpu_dm_irq_handler().
3300 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3301 	 *    coming from DC hardware.
3302 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3304 
3305 	/* Use VBLANK interrupt */
3306 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3307 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3308 		if (r) {
3309 			DRM_ERROR("Failed to add crtc irq id!\n");
3310 			return r;
3311 		}
3312 
3313 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3314 		int_params.irq_source =
3315 			dc_interrupt_to_irq_source(dc, i, 0);
3316 
3317 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3318 
3319 		c_irq_params->adev = adev;
3320 		c_irq_params->irq_src = int_params.irq_source;
3321 
3322 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3323 				dm_crtc_high_irq, c_irq_params);
3324 	}
3325 
3326 	/* Use VUPDATE interrupt */
3327 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3328 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3329 		if (r) {
3330 			DRM_ERROR("Failed to add vupdate irq id!\n");
3331 			return r;
3332 		}
3333 
3334 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3335 		int_params.irq_source =
3336 			dc_interrupt_to_irq_source(dc, i, 0);
3337 
3338 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3339 
3340 		c_irq_params->adev = adev;
3341 		c_irq_params->irq_src = int_params.irq_source;
3342 
3343 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3344 				dm_vupdate_high_irq, c_irq_params);
3345 	}
3346 
3347 	/* Use GRPH_PFLIP interrupt */
3348 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3349 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3350 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3351 		if (r) {
3352 			DRM_ERROR("Failed to add page flip irq id!\n");
3353 			return r;
3354 		}
3355 
3356 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3357 		int_params.irq_source =
3358 			dc_interrupt_to_irq_source(dc, i, 0);
3359 
3360 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3361 
3362 		c_irq_params->adev = adev;
3363 		c_irq_params->irq_src = int_params.irq_source;
3364 
3365 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3366 				dm_pflip_high_irq, c_irq_params);
3367 
3368 	}
3369 
3370 	/* HPD */
3371 	r = amdgpu_irq_add_id(adev, client_id,
3372 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3373 	if (r) {
3374 		DRM_ERROR("Failed to add hpd irq id!\n");
3375 		return r;
3376 	}
3377 
3378 	register_hpd_handlers(adev);
3379 
3380 	return 0;
3381 }
3382 
3383 #if defined(CONFIG_DRM_AMD_DC_DCN)
3384 /* Register IRQ sources and initialize IRQ callbacks */
3385 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3386 {
3387 	struct dc *dc = adev->dm.dc;
3388 	struct common_irq_params *c_irq_params;
3389 	struct dc_interrupt_params int_params = {0};
3390 	int r;
3391 	int i;
3392 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3393 	static const unsigned int vrtl_int_srcid[] = {
3394 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3395 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3396 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3397 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3398 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3399 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3400 	};
3401 #endif
3402 
3403 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3404 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3405 
3406 	/*
3407 	 * Actions of amdgpu_irq_add_id():
3408 	 * 1. Register a set() function with base driver.
3409 	 *    Base driver will call set() function to enable/disable an
3410 	 *    interrupt in DC hardware.
3411 	 * 2. Register amdgpu_dm_irq_handler().
3412 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3413 	 *    coming from DC hardware.
3414 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3415 	 *    for acknowledging and handling.
3416 	 */
3417 
3418 	/* Use VSTARTUP interrupt */
3419 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3420 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3421 			i++) {
3422 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3423 
3424 		if (r) {
3425 			DRM_ERROR("Failed to add crtc irq id!\n");
3426 			return r;
3427 		}
3428 
3429 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3430 		int_params.irq_source =
3431 			dc_interrupt_to_irq_source(dc, i, 0);
3432 
3433 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3434 
3435 		c_irq_params->adev = adev;
3436 		c_irq_params->irq_src = int_params.irq_source;
3437 
3438 		amdgpu_dm_irq_register_interrupt(
3439 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3440 	}
3441 
3442 	/* Use otg vertical line interrupt */
3443 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
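	/*
	 * These OTG VERTICAL_INTERRUPT0 handlers are presumably only needed by
	 * the secure display/CRC path, hence the CONFIG_DRM_AMD_SECURE_DISPLAY
	 * guard around this block.
	 */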
3444 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3445 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3446 				vrtl_int_srcid[i], &adev->vline0_irq);
3447 
3448 		if (r) {
3449 			DRM_ERROR("Failed to add vline0 irq id!\n");
3450 			return r;
3451 		}
3452 
3453 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3454 		int_params.irq_source =
3455 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3456 
3457 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3458 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3459 			break;
3460 		}
3461 
3462 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3463 					- DC_IRQ_SOURCE_DC1_VLINE0];
3464 
3465 		c_irq_params->adev = adev;
3466 		c_irq_params->irq_src = int_params.irq_source;
3467 
3468 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3469 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3470 	}
3471 #endif
3472 
3473 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3474 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3475 	 * to trigger at end of each vblank, regardless of state of the lock,
3476 	 * matching DCE behaviour.
3477 	 */
3478 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3479 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3480 	     i++) {
3481 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3482 
3483 		if (r) {
3484 			DRM_ERROR("Failed to add vupdate irq id!\n");
3485 			return r;
3486 		}
3487 
3488 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3489 		int_params.irq_source =
3490 			dc_interrupt_to_irq_source(dc, i, 0);
3491 
3492 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3493 
3494 		c_irq_params->adev = adev;
3495 		c_irq_params->irq_src = int_params.irq_source;
3496 
3497 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3498 				dm_vupdate_high_irq, c_irq_params);
3499 	}
3500 
3501 	/* Use GRPH_PFLIP interrupt */
3502 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3503 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3504 			i++) {
3505 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3506 		if (r) {
3507 			DRM_ERROR("Failed to add page flip irq id!\n");
3508 			return r;
3509 		}
3510 
3511 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3512 		int_params.irq_source =
3513 			dc_interrupt_to_irq_source(dc, i, 0);
3514 
3515 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3516 
3517 		c_irq_params->adev = adev;
3518 		c_irq_params->irq_src = int_params.irq_source;
3519 
3520 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3521 				dm_pflip_high_irq, c_irq_params);
3522 
3523 	}
3524 
3525 	/* HPD */
3526 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3527 			&adev->hpd_irq);
3528 	if (r) {
3529 		DRM_ERROR("Failed to add hpd irq id!\n");
3530 		return r;
3531 	}
3532 
3533 	register_hpd_handlers(adev);
3534 
3535 	return 0;
3536 }
3537 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3538 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3539 {
3540 	struct dc *dc = adev->dm.dc;
3541 	struct common_irq_params *c_irq_params;
3542 	struct dc_interrupt_params int_params = {0};
3543 	int r, i;
3544 
3545 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3546 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3547 
3548 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3549 			&adev->dmub_outbox_irq);
3550 	if (r) {
3551 		DRM_ERROR("Failed to add outbox irq id!\n");
3552 		return r;
3553 	}
3554 
3555 	if (dc->ctx->dmub_srv) {
3556 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
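		/*
		 * Outbox messages are parsed outside of hard-IRQ context, so the
		 * handler is registered with the low IRQ context; the DM IRQ layer
		 * is expected to defer it to a work handler.
		 */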
3557 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3558 		int_params.irq_source =
3559 		dc_interrupt_to_irq_source(dc, i, 0);
3560 
3561 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3562 
3563 		c_irq_params->adev = adev;
3564 		c_irq_params->irq_src = int_params.irq_source;
3565 
3566 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3567 				dm_dmub_outbox1_low_irq, c_irq_params);
3568 	}
3569 
3570 	return 0;
3571 }
3572 #endif
3573 
3574 /*
3575  * Acquires the lock for the atomic state object and returns
3576  * the new atomic state.
3577  *
3578  * This should only be called during atomic check.
3579  */
3580 static int dm_atomic_get_state(struct drm_atomic_state *state,
3581 			       struct dm_atomic_state **dm_state)
3582 {
3583 	struct drm_device *dev = state->dev;
3584 	struct amdgpu_device *adev = drm_to_adev(dev);
3585 	struct amdgpu_display_manager *dm = &adev->dm;
3586 	struct drm_private_state *priv_state;
3587 
3588 	if (*dm_state)
3589 		return 0;
3590 
3591 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3592 	if (IS_ERR(priv_state))
3593 		return PTR_ERR(priv_state);
3594 
3595 	*dm_state = to_dm_atomic_state(priv_state);
3596 
3597 	return 0;
3598 }
3599 
3600 static struct dm_atomic_state *
3601 dm_atomic_get_new_state(struct drm_atomic_state *state)
3602 {
3603 	struct drm_device *dev = state->dev;
3604 	struct amdgpu_device *adev = drm_to_adev(dev);
3605 	struct amdgpu_display_manager *dm = &adev->dm;
3606 	struct drm_private_obj *obj;
3607 	struct drm_private_state *new_obj_state;
3608 	int i;
3609 
3610 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3611 		if (obj->funcs == dm->atomic_obj.funcs)
3612 			return to_dm_atomic_state(new_obj_state);
3613 	}
3614 
3615 	return NULL;
3616 }
3617 
3618 static struct drm_private_state *
3619 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3620 {
3621 	struct dm_atomic_state *old_state, *new_state;
3622 
3623 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3624 	if (!new_state)
3625 		return NULL;
3626 
3627 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3628 
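	/*
	 * The DC validation context is deep-copied below so that atomic check
	 * can mutate the duplicated state freely without touching the currently
	 * committed context.
	 */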
3629 	old_state = to_dm_atomic_state(obj->state);
3630 
3631 	if (old_state && old_state->context)
3632 		new_state->context = dc_copy_state(old_state->context);
3633 
3634 	if (!new_state->context) {
3635 		kfree(new_state);
3636 		return NULL;
3637 	}
3638 
3639 	return &new_state->base;
3640 }
3641 
3642 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3643 				    struct drm_private_state *state)
3644 {
3645 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3646 
3647 	if (dm_state && dm_state->context)
3648 		dc_release_state(dm_state->context);
3649 
3650 	kfree(dm_state);
3651 }
3652 
3653 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3654 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3655 	.atomic_destroy_state = dm_atomic_destroy_state,
3656 };
3657 
3658 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3659 {
3660 	struct dm_atomic_state *state;
3661 	int r;
3662 
3663 	adev->mode_info.mode_config_initialized = true;
3664 
3665 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3666 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3667 
3668 	adev_to_drm(adev)->mode_config.max_width = 16384;
3669 	adev_to_drm(adev)->mode_config.max_height = 16384;
3670 
3671 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3672 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3673 	/* indicates support for immediate flip */
3674 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3675 
3676 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3677 
3678 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3679 	if (!state)
3680 		return -ENOMEM;
3681 
3682 	state->context = dc_create_state(adev->dm.dc);
3683 	if (!state->context) {
3684 		kfree(state);
3685 		return -ENOMEM;
3686 	}
3687 
3688 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3689 
3690 	drm_atomic_private_obj_init(adev_to_drm(adev),
3691 				    &adev->dm.atomic_obj,
3692 				    &state->base,
3693 				    &dm_atomic_state_funcs);
3694 
3695 	r = amdgpu_display_modeset_create_props(adev);
3696 	if (r) {
3697 		dc_release_state(state->context);
3698 		kfree(state);
3699 		return r;
3700 	}
3701 
3702 	r = amdgpu_dm_audio_init(adev);
3703 	if (r) {
3704 		dc_release_state(state->context);
3705 		kfree(state);
3706 		return r;
3707 	}
3708 
3709 	return 0;
3710 }
3711 
3712 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3713 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3714 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3715 
3716 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3717 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3718 
3719 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3720 					    int bl_idx)
3721 {
3722 #if defined(CONFIG_ACPI)
3723 	struct amdgpu_dm_backlight_caps caps;
3724 
3725 	memset(&caps, 0, sizeof(caps));
3726 
3727 	if (dm->backlight_caps[bl_idx].caps_valid)
3728 		return;
3729 
3730 	amdgpu_acpi_get_backlight_caps(&caps);
3731 	if (caps.caps_valid) {
3732 		dm->backlight_caps[bl_idx].caps_valid = true;
3733 		if (caps.aux_support)
3734 			return;
3735 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3736 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3737 	} else {
3738 		dm->backlight_caps[bl_idx].min_input_signal =
3739 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3740 		dm->backlight_caps[bl_idx].max_input_signal =
3741 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3742 	}
3743 #else
3744 	if (dm->backlight_caps[bl_idx].aux_support)
3745 		return;
3746 
3747 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3748 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3749 #endif
3750 }
3751 
3752 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3753 				unsigned *min, unsigned *max)
3754 {
3755 	if (!caps)
3756 		return 0;
3757 
3758 	if (caps->aux_support) {
3759 		// Firmware limits are in nits, DC API wants millinits.
3760 		*max = 1000 * caps->aux_max_input_signal;
3761 		*min = 1000 * caps->aux_min_input_signal;
3762 	} else {
3763 		// Firmware limits are 8-bit, PWM control is 16-bit.
3764 		*max = 0x101 * caps->max_input_signal;
3765 		*min = 0x101 * caps->min_input_signal;
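		// 0x101 * 0xFF == 0xFFFF, so an 8-bit firmware maximum maps exactly
		// onto the full 16-bit PWM range.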
3766 	}
3767 	return 1;
3768 }
3769 
3770 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3771 					uint32_t brightness)
3772 {
3773 	unsigned min, max;
3774 
3775 	if (!get_brightness_range(caps, &min, &max))
3776 		return brightness;
3777 
3778 	// Rescale 0..255 to min..max
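	// For example, with the default 12..255 firmware caps this gives
	// min = 12 * 0x101 = 3084 and max = 255 * 0x101 = 65535, so user
	// brightness 0 maps to 3084 and 255 maps to 65535.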
3779 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3780 				       AMDGPU_MAX_BL_LEVEL);
3781 }
3782 
3783 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3784 				      uint32_t brightness)
3785 {
3786 	unsigned min, max;
3787 
3788 	if (!get_brightness_range(caps, &min, &max))
3789 		return brightness;
3790 
3791 	if (brightness < min)
3792 		return 0;
3793 	// Rescale min..max to 0..255
3794 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3795 				 max - min);
3796 }
3797 
3798 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3799 					 int bl_idx,
3800 					 u32 user_brightness)
3801 {
3802 	struct amdgpu_dm_backlight_caps caps;
3803 	struct dc_link *link;
3804 	u32 brightness;
3805 	bool rc;
3806 
3807 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3808 	caps = dm->backlight_caps[bl_idx];
3809 
3810 	dm->brightness[bl_idx] = user_brightness;
3811 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3812 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3813 
3814 	/* Change brightness based on AUX property */
3815 	if (caps.aux_support) {
3816 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3817 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3818 		if (!rc)
3819 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3820 	} else {
3821 		rc = dc_link_set_backlight_level(link, brightness, 0);
3822 		if (!rc)
3823 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3824 	}
3825 
3826 	return rc ? 0 : 1;
3827 }
3828 
3829 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3830 {
3831 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3832 	int i;
3833 
3834 	for (i = 0; i < dm->num_of_edps; i++) {
3835 		if (bd == dm->backlight_dev[i])
3836 			break;
3837 	}
3838 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3839 		i = 0;
3840 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3841 
3842 	return 0;
3843 }
3844 
3845 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3846 					 int bl_idx)
3847 {
3848 	struct amdgpu_dm_backlight_caps caps;
3849 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3850 
3851 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3852 	caps = dm->backlight_caps[bl_idx];
3853 
3854 	if (caps.aux_support) {
3855 		u32 avg, peak;
3856 		bool rc;
3857 
3858 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3859 		if (!rc)
3860 			return dm->brightness[bl_idx];
3861 		return convert_brightness_to_user(&caps, avg);
3862 	} else {
3863 		int ret = dc_link_get_backlight_level(link);
3864 
3865 		if (ret == DC_ERROR_UNEXPECTED)
3866 			return dm->brightness[bl_idx];
3867 		return convert_brightness_to_user(&caps, ret);
3868 	}
3869 }
3870 
3871 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3872 {
3873 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3874 	int i;
3875 
3876 	for (i = 0; i < dm->num_of_edps; i++) {
3877 		if (bd == dm->backlight_dev[i])
3878 			break;
3879 	}
3880 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3881 		i = 0;
3882 	return amdgpu_dm_backlight_get_level(dm, i);
3883 }
3884 
3885 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3886 	.options = BL_CORE_SUSPENDRESUME,
3887 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3888 	.update_status	= amdgpu_dm_backlight_update_status,
3889 };
3890 
3891 static void
3892 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3893 {
3894 	char bl_name[16];
3895 	struct backlight_properties props = { 0 };
3896 
3897 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
3898 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
3899 
3900 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3901 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3902 	props.type = BACKLIGHT_RAW;
3903 
3904 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3905 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
3906 
3907 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
3908 								       adev_to_drm(dm->adev)->dev,
3909 								       dm,
3910 								       &amdgpu_dm_backlight_ops,
3911 								       &props);
3912 
3913 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
3914 		DRM_ERROR("DM: Backlight registration failed!\n");
3915 	else
3916 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3917 }
3918 #endif
3919 
3920 static int initialize_plane(struct amdgpu_display_manager *dm,
3921 			    struct amdgpu_mode_info *mode_info, int plane_id,
3922 			    enum drm_plane_type plane_type,
3923 			    const struct dc_plane_cap *plane_cap)
3924 {
3925 	struct drm_plane *plane;
3926 	unsigned long possible_crtcs;
3927 	int ret = 0;
3928 
3929 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3930 	if (!plane) {
3931 		DRM_ERROR("KMS: Failed to allocate plane\n");
3932 		return -ENOMEM;
3933 	}
3934 	plane->type = plane_type;
3935 
3936 	/*
3937 	 * HACK: IGT tests expect that the primary plane for a CRTC
3938 	 * can only have one possible CRTC. Only expose support for
3939 	 * any CRTC if the plane is not going to be used as a primary
3940 	 * plane for a CRTC - i.e. for overlay or underlay planes.
3941 	 */
3942 	possible_crtcs = 1 << plane_id;
3943 	if (plane_id >= dm->dc->caps.max_streams)
3944 		possible_crtcs = 0xff;
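	/*
	 * possible_crtcs is a bitmask of CRTC indices: 1 << plane_id ties the
	 * plane to a single CRTC, while 0xff advertises all (up to 8) CRTCs.
	 */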
3945 
3946 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3947 
3948 	if (ret) {
3949 		DRM_ERROR("KMS: Failed to initialize plane\n");
3950 		kfree(plane);
3951 		return ret;
3952 	}
3953 
3954 	if (mode_info)
3955 		mode_info->planes[plane_id] = plane;
3956 
3957 	return ret;
3958 }
3959 
3960 
3961 static void register_backlight_device(struct amdgpu_display_manager *dm,
3962 				      struct dc_link *link)
3963 {
3964 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3965 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3966 
3967 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3968 	    link->type != dc_connection_none) {
3969 		/*
3970 		 * Even if registration fails, we should continue with
3971 		 * DM initialization, because not having backlight control
3972 		 * is better than a black screen.
3973 		 */
3974 		if (!dm->backlight_dev[dm->num_of_edps])
3975 			amdgpu_dm_register_backlight_device(dm);
3976 
3977 		if (dm->backlight_dev[dm->num_of_edps]) {
3978 			dm->backlight_link[dm->num_of_edps] = link;
3979 			dm->num_of_edps++;
3980 		}
3981 	}
3982 #endif
3983 }
3984 
3985 
3986 /*
3987  * In this architecture, the association
3988  * connector -> encoder -> crtc
3989  * is not really required. The crtc and connector will hold the
3990  * display_index as an abstraction to use with the DAL component.
3991  *
3992  * Returns 0 on success
3993  */
3994 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3995 {
3996 	struct amdgpu_display_manager *dm = &adev->dm;
3997 	int32_t i;
3998 	struct amdgpu_dm_connector *aconnector = NULL;
3999 	struct amdgpu_encoder *aencoder = NULL;
4000 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4001 	uint32_t link_cnt;
4002 	int32_t primary_planes;
4003 	enum dc_connection_type new_connection_type = dc_connection_none;
4004 	const struct dc_plane_cap *plane;
4005 
4006 	dm->display_indexes_num = dm->dc->caps.max_streams;
4007 	/* Update the actual number of CRTCs in use */
4008 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4009 
4010 	link_cnt = dm->dc->caps.max_links;
4011 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4012 		DRM_ERROR("DM: Failed to initialize mode config\n");
4013 		return -EINVAL;
4014 	}
4015 
4016 	/* There is one primary plane per CRTC */
4017 	primary_planes = dm->dc->caps.max_streams;
4018 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4019 
4020 	/*
4021 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4022 	 * Order is reversed to match iteration order in atomic check.
4023 	 */
4024 	for (i = (primary_planes - 1); i >= 0; i--) {
4025 		plane = &dm->dc->caps.planes[i];
4026 
4027 		if (initialize_plane(dm, mode_info, i,
4028 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4029 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4030 			goto fail;
4031 		}
4032 	}
4033 
4034 	/*
4035 	 * Initialize overlay planes, index starting after primary planes.
4036 	 * These planes have a higher DRM index than the primary planes since
4037 	 * they should be considered as having a higher z-order.
4038 	 * Order is reversed to match iteration order in atomic check.
4039 	 *
4040 	 * Only support DCN for now, and only expose one so we don't encourage
4041 	 * userspace to use up all the pipes.
4042 	 */
4043 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4044 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4045 
4046 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4047 			continue;
4048 
4049 		if (!plane->blends_with_above || !plane->blends_with_below)
4050 			continue;
4051 
4052 		if (!plane->pixel_format_support.argb8888)
4053 			continue;
4054 
4055 		if (initialize_plane(dm, NULL, primary_planes + i,
4056 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4057 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4058 			goto fail;
4059 		}
4060 
4061 		/* Only create one overlay plane. */
4062 		break;
4063 	}
4064 
4065 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4066 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4067 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4068 			goto fail;
4069 		}
4070 
4071 #if defined(CONFIG_DRM_AMD_DC_DCN)
4072 	/* Use Outbox interrupt */
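	/*
	 * The DMUB outbox carries notifications from the DMCUB firmware back to
	 * the driver (AUX replies, for example), so it is presumably only wired
	 * up on ASICs whose DMUB exposes the outbox.
	 */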
4073 	switch (adev->asic_type) {
4074 	case CHIP_SIENNA_CICHLID:
4075 	case CHIP_NAVY_FLOUNDER:
4076 	case CHIP_YELLOW_CARP:
4077 	case CHIP_RENOIR:
4078 		if (register_outbox_irq_handlers(dm->adev)) {
4079 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4080 			goto fail;
4081 		}
4082 		break;
4083 	default:
4084 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
4085 	}
4086 #endif
4087 
4088 	/* loops over all connectors on the board */
4089 	for (i = 0; i < link_cnt; i++) {
4090 		struct dc_link *link = NULL;
4091 
4092 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4093 			DRM_ERROR(
4094 				"KMS: Cannot support more than %d display indexes\n",
4095 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4096 			continue;
4097 		}
4098 
4099 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4100 		if (!aconnector)
4101 			goto fail;
4102 
4103 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4104 		if (!aencoder)
4105 			goto fail;
4106 
4107 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4108 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4109 			goto fail;
4110 		}
4111 
4112 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4113 			DRM_ERROR("KMS: Failed to initialize connector\n");
4114 			goto fail;
4115 		}
4116 
4117 		link = dc_get_link_at_index(dm->dc, i);
4118 
4119 		if (!dc_link_detect_sink(link, &new_connection_type))
4120 			DRM_ERROR("KMS: Failed to detect connector\n");
4121 
4122 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4123 			emulated_link_detect(link);
4124 			amdgpu_dm_update_connector_after_detect(aconnector);
4125 
4126 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4127 			amdgpu_dm_update_connector_after_detect(aconnector);
4128 			register_backlight_device(dm, link);
4129 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
4130 				amdgpu_dm_set_psr_caps(link);
4131 		}
4132 
4133 
4134 	}
4135 
4136 	/* Software is initialized. Now we can register interrupt handlers. */
4137 	switch (adev->asic_type) {
4138 #if defined(CONFIG_DRM_AMD_DC_SI)
4139 	case CHIP_TAHITI:
4140 	case CHIP_PITCAIRN:
4141 	case CHIP_VERDE:
4142 	case CHIP_OLAND:
4143 		if (dce60_register_irq_handlers(dm->adev)) {
4144 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4145 			goto fail;
4146 		}
4147 		break;
4148 #endif
4149 	case CHIP_BONAIRE:
4150 	case CHIP_HAWAII:
4151 	case CHIP_KAVERI:
4152 	case CHIP_KABINI:
4153 	case CHIP_MULLINS:
4154 	case CHIP_TONGA:
4155 	case CHIP_FIJI:
4156 	case CHIP_CARRIZO:
4157 	case CHIP_STONEY:
4158 	case CHIP_POLARIS11:
4159 	case CHIP_POLARIS10:
4160 	case CHIP_POLARIS12:
4161 	case CHIP_VEGAM:
4162 	case CHIP_VEGA10:
4163 	case CHIP_VEGA12:
4164 	case CHIP_VEGA20:
4165 		if (dce110_register_irq_handlers(dm->adev)) {
4166 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4167 			goto fail;
4168 		}
4169 		break;
4170 #if defined(CONFIG_DRM_AMD_DC_DCN)
4171 	case CHIP_RAVEN:
4172 	case CHIP_NAVI12:
4173 	case CHIP_NAVI10:
4174 	case CHIP_NAVI14:
4175 	case CHIP_RENOIR:
4176 	case CHIP_SIENNA_CICHLID:
4177 	case CHIP_NAVY_FLOUNDER:
4178 	case CHIP_DIMGREY_CAVEFISH:
4179 	case CHIP_BEIGE_GOBY:
4180 	case CHIP_VANGOGH:
4181 	case CHIP_YELLOW_CARP:
4182 		if (dcn10_register_irq_handlers(dm->adev)) {
4183 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4184 			goto fail;
4185 		}
4186 		break;
4187 #endif
4188 	default:
4189 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4190 		goto fail;
4191 	}
4192 
4193 	return 0;
4194 fail:
4195 	kfree(aencoder);
4196 	kfree(aconnector);
4197 
4198 	return -EINVAL;
4199 }
4200 
4201 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4202 {
4203 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4205 }
4206 
4207 /******************************************************************************
4208  * amdgpu_display_funcs functions
4209  *****************************************************************************/
4210 
4211 /*
4212  * dm_bandwidth_update - program display watermarks
4213  *
4214  * @adev: amdgpu_device pointer
4215  *
4216  * Calculate and program the display watermarks and line buffer allocation.
4217  */
4218 static void dm_bandwidth_update(struct amdgpu_device *adev)
4219 {
4220 	/* TODO: implement later */
4221 }
4222 
4223 static const struct amdgpu_display_funcs dm_display_funcs = {
4224 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4225 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4226 	.backlight_set_level = NULL, /* never called for DC */
4227 	.backlight_get_level = NULL, /* never called for DC */
4228 	.hpd_sense = NULL,/* called unconditionally */
4229 	.hpd_set_polarity = NULL, /* called unconditionally */
4230 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4231 	.page_flip_get_scanoutpos =
4232 		dm_crtc_get_scanoutpos,/* called unconditionally */
4233 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4234 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4235 };
4236 
4237 #if defined(CONFIG_DEBUG_KERNEL_DC)
4238 
4239 static ssize_t s3_debug_store(struct device *device,
4240 			      struct device_attribute *attr,
4241 			      const char *buf,
4242 			      size_t count)
4243 {
4244 	int ret;
4245 	int s3_state;
4246 	struct drm_device *drm_dev = dev_get_drvdata(device);
4247 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4248 
4249 	ret = kstrtoint(buf, 0, &s3_state);
4250 
4251 	if (ret == 0) {
4252 		if (s3_state) {
4253 			dm_resume(adev);
4254 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4255 		} else
4256 			dm_suspend(adev);
4257 	}
4258 
4259 	return ret == 0 ? count : 0;
4260 }
4261 
4262 DEVICE_ATTR_WO(s3_debug);
4263 
4264 #endif
4265 
4266 static int dm_early_init(void *handle)
4267 {
4268 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4269 
4270 	switch (adev->asic_type) {
4271 #if defined(CONFIG_DRM_AMD_DC_SI)
4272 	case CHIP_TAHITI:
4273 	case CHIP_PITCAIRN:
4274 	case CHIP_VERDE:
4275 		adev->mode_info.num_crtc = 6;
4276 		adev->mode_info.num_hpd = 6;
4277 		adev->mode_info.num_dig = 6;
4278 		break;
4279 	case CHIP_OLAND:
4280 		adev->mode_info.num_crtc = 2;
4281 		adev->mode_info.num_hpd = 2;
4282 		adev->mode_info.num_dig = 2;
4283 		break;
4284 #endif
4285 	case CHIP_BONAIRE:
4286 	case CHIP_HAWAII:
4287 		adev->mode_info.num_crtc = 6;
4288 		adev->mode_info.num_hpd = 6;
4289 		adev->mode_info.num_dig = 6;
4290 		break;
4291 	case CHIP_KAVERI:
4292 		adev->mode_info.num_crtc = 4;
4293 		adev->mode_info.num_hpd = 6;
4294 		adev->mode_info.num_dig = 7;
4295 		break;
4296 	case CHIP_KABINI:
4297 	case CHIP_MULLINS:
4298 		adev->mode_info.num_crtc = 2;
4299 		adev->mode_info.num_hpd = 6;
4300 		adev->mode_info.num_dig = 6;
4301 		break;
4302 	case CHIP_FIJI:
4303 	case CHIP_TONGA:
4304 		adev->mode_info.num_crtc = 6;
4305 		adev->mode_info.num_hpd = 6;
4306 		adev->mode_info.num_dig = 7;
4307 		break;
4308 	case CHIP_CARRIZO:
4309 		adev->mode_info.num_crtc = 3;
4310 		adev->mode_info.num_hpd = 6;
4311 		adev->mode_info.num_dig = 9;
4312 		break;
4313 	case CHIP_STONEY:
4314 		adev->mode_info.num_crtc = 2;
4315 		adev->mode_info.num_hpd = 6;
4316 		adev->mode_info.num_dig = 9;
4317 		break;
4318 	case CHIP_POLARIS11:
4319 	case CHIP_POLARIS12:
4320 		adev->mode_info.num_crtc = 5;
4321 		adev->mode_info.num_hpd = 5;
4322 		adev->mode_info.num_dig = 5;
4323 		break;
4324 	case CHIP_POLARIS10:
4325 	case CHIP_VEGAM:
4326 		adev->mode_info.num_crtc = 6;
4327 		adev->mode_info.num_hpd = 6;
4328 		adev->mode_info.num_dig = 6;
4329 		break;
4330 	case CHIP_VEGA10:
4331 	case CHIP_VEGA12:
4332 	case CHIP_VEGA20:
4333 		adev->mode_info.num_crtc = 6;
4334 		adev->mode_info.num_hpd = 6;
4335 		adev->mode_info.num_dig = 6;
4336 		break;
4337 #if defined(CONFIG_DRM_AMD_DC_DCN)
4338 	case CHIP_RAVEN:
4339 	case CHIP_RENOIR:
4340 	case CHIP_VANGOGH:
4341 		adev->mode_info.num_crtc = 4;
4342 		adev->mode_info.num_hpd = 4;
4343 		adev->mode_info.num_dig = 4;
4344 		break;
4345 	case CHIP_NAVI10:
4346 	case CHIP_NAVI12:
4347 	case CHIP_SIENNA_CICHLID:
4348 	case CHIP_NAVY_FLOUNDER:
4349 		adev->mode_info.num_crtc = 6;
4350 		adev->mode_info.num_hpd = 6;
4351 		adev->mode_info.num_dig = 6;
4352 		break;
4353 	case CHIP_YELLOW_CARP:
4354 		adev->mode_info.num_crtc = 4;
4355 		adev->mode_info.num_hpd = 4;
4356 		adev->mode_info.num_dig = 4;
4357 		break;
4358 	case CHIP_NAVI14:
4359 	case CHIP_DIMGREY_CAVEFISH:
4360 		adev->mode_info.num_crtc = 5;
4361 		adev->mode_info.num_hpd = 5;
4362 		adev->mode_info.num_dig = 5;
4363 		break;
4364 	case CHIP_BEIGE_GOBY:
4365 		adev->mode_info.num_crtc = 2;
4366 		adev->mode_info.num_hpd = 2;
4367 		adev->mode_info.num_dig = 2;
4368 		break;
4369 #endif
4370 	default:
4371 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4372 		return -EINVAL;
4373 	}
4374 
4375 	amdgpu_dm_set_irq_funcs(adev);
4376 
4377 	if (adev->mode_info.funcs == NULL)
4378 		adev->mode_info.funcs = &dm_display_funcs;
4379 
4380 	/*
4381 	 * Note: Do NOT change adev->audio_endpt_rreg and
4382 	 * adev->audio_endpt_wreg because they are initialised in
4383 	 * amdgpu_device_init()
4384 	 */
4385 #if defined(CONFIG_DEBUG_KERNEL_DC)
4386 	device_create_file(
4387 		adev_to_drm(adev)->dev,
4388 		&dev_attr_s3_debug);
4389 #endif
4390 
4391 	return 0;
4392 }
4393 
4394 static bool modeset_required(struct drm_crtc_state *crtc_state,
4395 			     struct dc_stream_state *new_stream,
4396 			     struct dc_stream_state *old_stream)
4397 {
4398 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4399 }
4400 
4401 static bool modereset_required(struct drm_crtc_state *crtc_state)
4402 {
4403 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4404 }
4405 
4406 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4407 {
4408 	drm_encoder_cleanup(encoder);
4409 	kfree(encoder);
4410 }
4411 
4412 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4413 	.destroy = amdgpu_dm_encoder_destroy,
4414 };
4415 
4416 
4417 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4418 					 struct drm_framebuffer *fb,
4419 					 int *min_downscale, int *max_upscale)
4420 {
4421 	struct amdgpu_device *adev = drm_to_adev(dev);
4422 	struct dc *dc = adev->dm.dc;
4423 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4424 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4425 
4426 	switch (fb->format->format) {
4427 	case DRM_FORMAT_P010:
4428 	case DRM_FORMAT_NV12:
4429 	case DRM_FORMAT_NV21:
4430 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4431 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4432 		break;
4433 
4434 	case DRM_FORMAT_XRGB16161616F:
4435 	case DRM_FORMAT_ARGB16161616F:
4436 	case DRM_FORMAT_XBGR16161616F:
4437 	case DRM_FORMAT_ABGR16161616F:
4438 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4439 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4440 		break;
4441 
4442 	default:
4443 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4444 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4445 		break;
4446 	}
4447 
4448 	/*
4449 	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
4450 	 * scaling factor of 1.0 == 1000 units.
4451 	 */
4452 	if (*max_upscale == 1)
4453 		*max_upscale = 1000;
4454 
4455 	if (*min_downscale == 1)
4456 		*min_downscale = 1000;
4457 }
4458 
4459 
4460 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4461 				struct dc_scaling_info *scaling_info)
4462 {
4463 	int scale_w, scale_h, min_downscale, max_upscale;
4464 
4465 	memset(scaling_info, 0, sizeof(*scaling_info));
4466 
4467 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4468 	scaling_info->src_rect.x = state->src_x >> 16;
4469 	scaling_info->src_rect.y = state->src_y >> 16;
4470 
4471 	/*
4472 	 * For reasons we don't (yet) fully understand, a non-zero
4473 	 * src_y coordinate into an NV12 buffer can cause a
4474 	 * system hang. To avoid hangs (and maybe be overly cautious)
4475 	 * let's reject both non-zero src_x and src_y.
4476 	 *
4477 	 * We currently know of only one use-case to reproduce a
4478 	 * scenario with non-zero src_x and src_y for NV12, which
4479 	 * is to gesture the YouTube Android app into full screen
4480 	 * on ChromeOS.
4481 	 */
4482 	if (state->fb &&
4483 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4484 	    (scaling_info->src_rect.x != 0 ||
4485 	     scaling_info->src_rect.y != 0))
4486 		return -EINVAL;
4487 
4488 	scaling_info->src_rect.width = state->src_w >> 16;
4489 	if (scaling_info->src_rect.width == 0)
4490 		return -EINVAL;
4491 
4492 	scaling_info->src_rect.height = state->src_h >> 16;
4493 	if (scaling_info->src_rect.height == 0)
4494 		return -EINVAL;
4495 
4496 	scaling_info->dst_rect.x = state->crtc_x;
4497 	scaling_info->dst_rect.y = state->crtc_y;
4498 
4499 	if (state->crtc_w == 0)
4500 		return -EINVAL;
4501 
4502 	scaling_info->dst_rect.width = state->crtc_w;
4503 
4504 	if (state->crtc_h == 0)
4505 		return -EINVAL;
4506 
4507 	scaling_info->dst_rect.height = state->crtc_h;
4508 
4509 	/* DRM doesn't specify clipping on destination output. */
4510 	scaling_info->clip_rect = scaling_info->dst_rect;
4511 
4512 	/* Validate scaling per-format with DC plane caps */
4513 	if (state->plane && state->plane->dev && state->fb) {
4514 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4515 					     &min_downscale, &max_upscale);
4516 	} else {
4517 		min_downscale = 250;
4518 		max_upscale = 16000;
4519 	}
4520 
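	/*
	 * Scaling ratios are expressed in 0.001 units: e.g. a 960-wide source
	 * stretched to a 1920-wide destination gives scale_w = 2000 (2.0x),
	 * and the fallback limits above allow 0.25x to 16x.
	 */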
4521 	scale_w = scaling_info->dst_rect.width * 1000 /
4522 		  scaling_info->src_rect.width;
4523 
4524 	if (scale_w < min_downscale || scale_w > max_upscale)
4525 		return -EINVAL;
4526 
4527 	scale_h = scaling_info->dst_rect.height * 1000 /
4528 		  scaling_info->src_rect.height;
4529 
4530 	if (scale_h < min_downscale || scale_h > max_upscale)
4531 		return -EINVAL;
4532 
4533 	/*
4534 	 * The "scaling_quality" can be ignored for now; quality = 0 lets DC
4535 	 * assume reasonable defaults based on the format.
4536 	 */
4537 
4538 	return 0;
4539 }
4540 
4541 static void
4542 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4543 				 uint64_t tiling_flags)
4544 {
4545 	/* Fill GFX8 params */
4546 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4547 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4548 
4549 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4550 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4551 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4552 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4553 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4554 
4555 		/* XXX fix me for VI */
4556 		tiling_info->gfx8.num_banks = num_banks;
4557 		tiling_info->gfx8.array_mode =
4558 				DC_ARRAY_2D_TILED_THIN1;
4559 		tiling_info->gfx8.tile_split = tile_split;
4560 		tiling_info->gfx8.bank_width = bankw;
4561 		tiling_info->gfx8.bank_height = bankh;
4562 		tiling_info->gfx8.tile_aspect = mtaspect;
4563 		tiling_info->gfx8.tile_mode =
4564 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4565 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4566 			== DC_ARRAY_1D_TILED_THIN1) {
4567 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4568 	}
4569 
4570 	tiling_info->gfx8.pipe_config =
4571 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4572 }
4573 
4574 static void
4575 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4576 				  union dc_tiling_info *tiling_info)
4577 {
4578 	tiling_info->gfx9.num_pipes =
4579 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4580 	tiling_info->gfx9.num_banks =
4581 		adev->gfx.config.gb_addr_config_fields.num_banks;
4582 	tiling_info->gfx9.pipe_interleave =
4583 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4584 	tiling_info->gfx9.num_shader_engines =
4585 		adev->gfx.config.gb_addr_config_fields.num_se;
4586 	tiling_info->gfx9.max_compressed_frags =
4587 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4588 	tiling_info->gfx9.num_rb_per_se =
4589 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4590 	tiling_info->gfx9.shaderEnable = 1;
4591 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4592 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4593 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4594 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4595 	    adev->asic_type == CHIP_YELLOW_CARP ||
4596 	    adev->asic_type == CHIP_VANGOGH)
4597 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4598 }
4599 
4600 static int
4601 validate_dcc(struct amdgpu_device *adev,
4602 	     const enum surface_pixel_format format,
4603 	     const enum dc_rotation_angle rotation,
4604 	     const union dc_tiling_info *tiling_info,
4605 	     const struct dc_plane_dcc_param *dcc,
4606 	     const struct dc_plane_address *address,
4607 	     const struct plane_size *plane_size)
4608 {
4609 	struct dc *dc = adev->dm.dc;
4610 	struct dc_dcc_surface_param input;
4611 	struct dc_surface_dcc_cap output;
4612 
4613 	memset(&input, 0, sizeof(input));
4614 	memset(&output, 0, sizeof(output));
4615 
4616 	if (!dcc->enable)
4617 		return 0;
4618 
4619 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4620 	    !dc->cap_funcs.get_dcc_compression_cap)
4621 		return -EINVAL;
4622 
4623 	input.format = format;
4624 	input.surface_size.width = plane_size->surface_size.width;
4625 	input.surface_size.height = plane_size->surface_size.height;
4626 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4627 
4628 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4629 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4630 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4631 		input.scan = SCAN_DIRECTION_VERTICAL;
4632 
4633 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4634 		return -EINVAL;
4635 
4636 	if (!output.capable)
4637 		return -EINVAL;
4638 
4639 	if (dcc->independent_64b_blks == 0 &&
4640 	    output.grph.rgb.independent_64b_blks != 0)
4641 		return -EINVAL;
4642 
4643 	return 0;
4644 }
4645 
4646 static bool
4647 modifier_has_dcc(uint64_t modifier)
4648 {
4649 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4650 }
4651 
4652 static unsigned
4653 modifier_gfx9_swizzle_mode(uint64_t modifier)
4654 {
4655 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4656 		return 0;
4657 
4658 	return AMD_FMT_MOD_GET(TILE, modifier);
4659 }
4660 
4661 static const struct drm_format_info *
4662 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4663 {
4664 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4665 }
4666 
4667 static void
4668 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4669 				    union dc_tiling_info *tiling_info,
4670 				    uint64_t modifier)
4671 {
4672 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4673 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4674 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4675 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4676 
4677 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4678 
4679 	if (!IS_AMD_FMT_MOD(modifier))
4680 		return;
4681 
4682 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4683 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4684 
4685 	if (adev->family >= AMDGPU_FAMILY_NV) {
4686 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4687 	} else {
4688 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4689 
4690 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4691 	}
4692 }
4693 
4694 enum dm_micro_swizzle {
4695 	MICRO_SWIZZLE_Z = 0,
4696 	MICRO_SWIZZLE_S = 1,
4697 	MICRO_SWIZZLE_D = 2,
4698 	MICRO_SWIZZLE_R = 3
4699 };
4700 
4701 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4702 					  uint32_t format,
4703 					  uint64_t modifier)
4704 {
4705 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4706 	const struct drm_format_info *info = drm_format_info(format);
4707 	int i;
4708 
4709 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
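	/*
	 * The low two bits of the GFX9+ swizzle mode appear to encode the micro
	 * tile ordering (Z/S/D/R), which is all this check needs to look at.
	 */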
4710 
4711 	if (!info)
4712 		return false;
4713 
4714 	/*
4715 	 * We always have to allow these modifiers:
4716 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4717 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4718 	 */
4719 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4720 	    modifier == DRM_FORMAT_MOD_INVALID) {
4721 		return true;
4722 	}
4723 
4724 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4725 	for (i = 0; i < plane->modifier_count; i++) {
4726 		if (modifier == plane->modifiers[i])
4727 			break;
4728 	}
4729 	if (i == plane->modifier_count)
4730 		return false;
4731 
4732 	/*
4733 	 * For D swizzle the canonical modifier depends on the bpp, so check
4734 	 * it here.
4735 	 */
4736 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4737 	    adev->family >= AMDGPU_FAMILY_NV) {
4738 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4739 			return false;
4740 	}
4741 
4742 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4743 	    info->cpp[0] < 8)
4744 		return false;
4745 
4746 	if (modifier_has_dcc(modifier)) {
4747 		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4748 		if (info->cpp[0] != 4)
4749 			return false;
4750 		/* We support multi-planar formats, but not when combined with
4751 		 * additional DCC metadata planes. */
4752 		if (info->num_planes > 1)
4753 			return false;
4754 	}
4755 
4756 	return true;
4757 }
4758 
4759 static void
4760 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4761 {
4762 	if (!*mods)
4763 		return;
4764 
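	/*
	 * Grow the array by doubling when it is full. On allocation failure the
	 * existing list is freed and *mods is set to NULL, which makes the early
	 * return above a no-op for subsequent calls and lets the caller detect
	 * the failure with a NULL check.
	 */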
4765 	if (*cap - *size < 1) {
4766 		uint64_t new_cap = *cap * 2;
4767 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4768 
4769 		if (!new_mods) {
4770 			kfree(*mods);
4771 			*mods = NULL;
4772 			return;
4773 		}
4774 
4775 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4776 		kfree(*mods);
4777 		*mods = new_mods;
4778 		*cap = new_cap;
4779 	}
4780 
4781 	(*mods)[*size] = mod;
4782 	*size += 1;
4783 }
4784 
4785 static void
4786 add_gfx9_modifiers(const struct amdgpu_device *adev,
4787 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4788 {
4789 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4790 	int pipe_xor_bits = min(8, pipes +
4791 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4792 	int bank_xor_bits = min(8 - pipe_xor_bits,
4793 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4794 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4795 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
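	/*
	 * The pipe and bank XOR bit counts are derived from GB_ADDR_CONFIG and
	 * clamped so that together they never exceed 8, which appears to be all
	 * the GFX9 _X swizzle modes can make use of.
	 */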
4796 
4797 
4798 	if (adev->family == AMDGPU_FAMILY_RV) {
4799 		/* Raven2 and later */
4800 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4801 
4802 		/*
4803 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4804 		 * doesn't support _D on DCN
4805 		 */
4806 
4807 		if (has_constant_encode) {
4808 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4809 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4810 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4811 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4812 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4813 				    AMD_FMT_MOD_SET(DCC, 1) |
4814 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4815 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4816 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4817 		}
4818 
4819 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4820 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4821 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4822 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4823 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4824 			    AMD_FMT_MOD_SET(DCC, 1) |
4825 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4826 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4827 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4828 
4829 		if (has_constant_encode) {
4830 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4831 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4832 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4833 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4834 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4835 				    AMD_FMT_MOD_SET(DCC, 1) |
4836 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4837 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4838 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4839 
4840 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4841 				    AMD_FMT_MOD_SET(RB, rb) |
4842 				    AMD_FMT_MOD_SET(PIPE, pipes));
4843 		}
4844 
4845 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4846 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4847 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4848 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4849 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4850 			    AMD_FMT_MOD_SET(DCC, 1) |
4851 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4852 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4853 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4854 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4855 			    AMD_FMT_MOD_SET(RB, rb) |
4856 			    AMD_FMT_MOD_SET(PIPE, pipes));
4857 	}
4858 
4859 	/*
4860 	 * Only supported for 64bpp on Raven, will be filtered on format in
4861 	 * dm_plane_format_mod_supported.
4862 	 */
4863 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4864 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4865 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4866 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4867 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4868 
4869 	if (adev->family == AMDGPU_FAMILY_RV) {
4870 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4871 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4872 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4873 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4874 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4875 	}
4876 
4877 	/*
4878 	 * Only supported for 64bpp on Raven, will be filtered on format in
4879 	 * dm_plane_format_mod_supported.
4880 	 */
4881 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4882 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4883 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4884 
4885 	if (adev->family == AMDGPU_FAMILY_RV) {
4886 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4887 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4888 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4889 	}
4890 }
4891 
4892 static void
4893 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4894 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4895 {
4896 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4897 
4898 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4899 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4900 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4901 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4902 		    AMD_FMT_MOD_SET(DCC, 1) |
4903 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4904 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4905 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4906 
4907 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4908 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4909 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4910 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4911 		    AMD_FMT_MOD_SET(DCC, 1) |
4912 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4913 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4914 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4915 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4916 
4917 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4918 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4919 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4920 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4921 
4922 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4923 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4924 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4925 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4926 
4927 
4928 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4929 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4930 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4931 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4932 
4933 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4934 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4935 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4936 }
4937 
4938 static void
4939 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4940 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
4941 {
4942 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4943 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4944 
4945 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4946 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4947 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4948 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4949 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4950 		    AMD_FMT_MOD_SET(DCC, 1) |
4951 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4952 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4953 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4954 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4955 
4956 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4957 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4958 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4959 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4960 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
4961 		    AMD_FMT_MOD_SET(DCC, 1) |
4962 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4963 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4964 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4965 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4966 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4967 
4968 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4969 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4970 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4971 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4972 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4973 
4974 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4975 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4976 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4977 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4978 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
4979 
4980 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4981 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4982 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4983 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4984 
4985 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4986 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4987 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4988 }
4989 
4990 static int
4991 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4992 {
4993 	uint64_t size = 0, capacity = 128;
4994 	*mods = NULL;
4995 
4996 	/* We have not hooked up any pre-GFX9 modifiers. */
4997 	if (adev->family < AMDGPU_FAMILY_AI)
4998 		return 0;
4999 
5000 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5001 
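	/*
	 * Cursor surfaces are expected to be linear, so cursor planes only
	 * advertise LINEAR (plus the INVALID terminator).
	 */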
5002 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5003 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5004 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5005 		return *mods ? 0 : -ENOMEM;
5006 	}
5007 
5008 	switch (adev->family) {
5009 	case AMDGPU_FAMILY_AI:
5010 	case AMDGPU_FAMILY_RV:
5011 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5012 		break;
5013 	case AMDGPU_FAMILY_NV:
5014 	case AMDGPU_FAMILY_VGH:
5015 	case AMDGPU_FAMILY_YC:
5016 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
5017 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5018 		else
5019 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5020 		break;
5021 	}
5022 
5023 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5024 
5025 	/* INVALID marks the end of the list. */
5026 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5027 
5028 	if (!*mods)
5029 		return -ENOMEM;
5030 
5031 	return 0;
5032 }
5033 
5034 static int
5035 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5036 					  const struct amdgpu_framebuffer *afb,
5037 					  const enum surface_pixel_format format,
5038 					  const enum dc_rotation_angle rotation,
5039 					  const struct plane_size *plane_size,
5040 					  union dc_tiling_info *tiling_info,
5041 					  struct dc_plane_dcc_param *dcc,
5042 					  struct dc_plane_address *address,
5043 					  const bool force_disable_dcc)
5044 {
5045 	const uint64_t modifier = afb->base.modifier;
5046 	int ret = 0;
5047 
5048 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5049 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5050 
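	/*
	 * For DCC-capable modifiers the displayable DCC metadata is carried as an
	 * extra framebuffer plane, so its address and pitch are taken from
	 * offsets[1]/pitches[1] below.
	 */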
5051 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5052 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5053 
5054 		dcc->enable = 1;
5055 		dcc->meta_pitch = afb->base.pitches[1];
5056 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5057 
5058 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5059 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5060 	}
5061 
5062 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5063 	if (ret)
5064 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5065 
5066 	return ret;
5067 }
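
/*
 * Illustrative note on the DCC path above (example numbers, not taken
 * from any real surface): a DCC-enabled framebuffer carries its metadata
 * as a second plane of the same GEM object, e.g.
 *
 *   fb->offsets[0] = 0x000000  ->  color surface at afb->address
 *   fb->offsets[1] = 0x800000  ->  DCC metadata at afb->address + 0x800000
 *   fb->pitches[1]             ->  becomes dcc->meta_pitch
 *
 * Whether 64B-independent blocks are used comes straight from the
 * framebuffer modifier via AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, ...).
 */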
5068 
5069 static int
5070 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5071 			     const struct amdgpu_framebuffer *afb,
5072 			     const enum surface_pixel_format format,
5073 			     const enum dc_rotation_angle rotation,
5074 			     const uint64_t tiling_flags,
5075 			     union dc_tiling_info *tiling_info,
5076 			     struct plane_size *plane_size,
5077 			     struct dc_plane_dcc_param *dcc,
5078 			     struct dc_plane_address *address,
5079 			     bool tmz_surface,
5080 			     bool force_disable_dcc)
5081 {
5082 	const struct drm_framebuffer *fb = &afb->base;
5083 	int ret;
5084 
5085 	memset(tiling_info, 0, sizeof(*tiling_info));
5086 	memset(plane_size, 0, sizeof(*plane_size));
5087 	memset(dcc, 0, sizeof(*dcc));
5088 	memset(address, 0, sizeof(*address));
5089 
5090 	address->tmz_surface = tmz_surface;
5091 
5092 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5093 		uint64_t addr = afb->address + fb->offsets[0];
5094 
5095 		plane_size->surface_size.x = 0;
5096 		plane_size->surface_size.y = 0;
5097 		plane_size->surface_size.width = fb->width;
5098 		plane_size->surface_size.height = fb->height;
5099 		plane_size->surface_pitch =
5100 			fb->pitches[0] / fb->format->cpp[0];
5101 
5102 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5103 		address->grph.addr.low_part = lower_32_bits(addr);
5104 		address->grph.addr.high_part = upper_32_bits(addr);
5105 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5106 		uint64_t luma_addr = afb->address + fb->offsets[0];
5107 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5108 
5109 		plane_size->surface_size.x = 0;
5110 		plane_size->surface_size.y = 0;
5111 		plane_size->surface_size.width = fb->width;
5112 		plane_size->surface_size.height = fb->height;
5113 		plane_size->surface_pitch =
5114 			fb->pitches[0] / fb->format->cpp[0];
5115 
5116 		plane_size->chroma_size.x = 0;
5117 		plane_size->chroma_size.y = 0;
5118 		/* TODO: set these based on surface format */
5119 		plane_size->chroma_size.width = fb->width / 2;
5120 		plane_size->chroma_size.height = fb->height / 2;
5121 
5122 		plane_size->chroma_pitch =
5123 			fb->pitches[1] / fb->format->cpp[1];
5124 
5125 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5126 		address->video_progressive.luma_addr.low_part =
5127 			lower_32_bits(luma_addr);
5128 		address->video_progressive.luma_addr.high_part =
5129 			upper_32_bits(luma_addr);
5130 		address->video_progressive.chroma_addr.low_part =
5131 			lower_32_bits(chroma_addr);
5132 		address->video_progressive.chroma_addr.high_part =
5133 			upper_32_bits(chroma_addr);
5134 	}
5135 
5136 	if (adev->family >= AMDGPU_FAMILY_AI) {
5137 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5138 								rotation, plane_size,
5139 								tiling_info, dcc,
5140 								address,
5141 								force_disable_dcc);
5142 		if (ret)
5143 			return ret;
5144 	} else {
5145 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5146 	}
5147 
5148 	return 0;
5149 }
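
/*
 * Worked example for the video path above, assuming a 1920x1080 NV12
 * framebuffer with tightly packed pitches (illustrative values only):
 *
 *   pitches[0] = 1920, cpp[0] = 1  ->  surface_pitch = 1920
 *   pitches[1] = 1920, cpp[1] = 2  ->  chroma_pitch  = 960
 *   chroma_size = 960x540 (half the luma dimensions, see the TODO above)
 *
 * The luma plane starts at afb->address + offsets[0] and the interleaved
 * CbCr plane at afb->address + offsets[1].
 */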
5150 
5151 static void
5152 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5153 			       bool *per_pixel_alpha, bool *global_alpha,
5154 			       int *global_alpha_value)
5155 {
5156 	*per_pixel_alpha = false;
5157 	*global_alpha = false;
5158 	*global_alpha_value = 0xff;
5159 
5160 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5161 		return;
5162 
5163 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5164 		static const uint32_t alpha_formats[] = {
5165 			DRM_FORMAT_ARGB8888,
5166 			DRM_FORMAT_RGBA8888,
5167 			DRM_FORMAT_ABGR8888,
5168 		};
5169 		uint32_t format = plane_state->fb->format->format;
5170 		unsigned int i;
5171 
5172 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5173 			if (format == alpha_formats[i]) {
5174 				*per_pixel_alpha = true;
5175 				break;
5176 			}
5177 		}
5178 	}
5179 
5180 	if (plane_state->alpha < 0xffff) {
5181 		*global_alpha = true;
5182 		*global_alpha_value = plane_state->alpha >> 8;
5183 	}
5184 }
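
/*
 * The DRM "alpha" property is 16 bit (0x0000..0xffff) while DC takes an
 * 8 bit global alpha, hence the ">> 8" above. As a hypothetical example,
 * a plane alpha of 0x8000 yields global_alpha = true with
 * global_alpha_value = 0x80; the default of 0xffff leaves global alpha
 * disabled.
 */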
5185 
5186 static int
5187 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5188 			    const enum surface_pixel_format format,
5189 			    enum dc_color_space *color_space)
5190 {
5191 	bool full_range;
5192 
5193 	*color_space = COLOR_SPACE_SRGB;
5194 
5195 	/* DRM color properties only affect non-RGB formats. */
5196 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5197 		return 0;
5198 
5199 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5200 
5201 	switch (plane_state->color_encoding) {
5202 	case DRM_COLOR_YCBCR_BT601:
5203 		if (full_range)
5204 			*color_space = COLOR_SPACE_YCBCR601;
5205 		else
5206 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5207 		break;
5208 
5209 	case DRM_COLOR_YCBCR_BT709:
5210 		if (full_range)
5211 			*color_space = COLOR_SPACE_YCBCR709;
5212 		else
5213 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5214 		break;
5215 
5216 	case DRM_COLOR_YCBCR_BT2020:
5217 		if (full_range)
5218 			*color_space = COLOR_SPACE_2020_YCBCR;
5219 		else
5220 			return -EINVAL;
5221 		break;
5222 
5223 	default:
5224 		return -EINVAL;
5225 	}
5226 
5227 	return 0;
5228 }
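
/*
 * Example mappings of the above (standard DRM color properties): a
 * BT.709 limited-range NV12 plane maps to COLOR_SPACE_YCBCR709_LIMITED,
 * a full-range BT.601 plane to COLOR_SPACE_YCBCR601, and BT.2020 is only
 * accepted in full range; anything else is rejected with -EINVAL.
 */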
5229 
5230 static int
5231 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5232 			    const struct drm_plane_state *plane_state,
5233 			    const uint64_t tiling_flags,
5234 			    struct dc_plane_info *plane_info,
5235 			    struct dc_plane_address *address,
5236 			    bool tmz_surface,
5237 			    bool force_disable_dcc)
5238 {
5239 	const struct drm_framebuffer *fb = plane_state->fb;
5240 	const struct amdgpu_framebuffer *afb =
5241 		to_amdgpu_framebuffer(plane_state->fb);
5242 	int ret;
5243 
5244 	memset(plane_info, 0, sizeof(*plane_info));
5245 
5246 	switch (fb->format->format) {
5247 	case DRM_FORMAT_C8:
5248 		plane_info->format =
5249 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5250 		break;
5251 	case DRM_FORMAT_RGB565:
5252 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5253 		break;
5254 	case DRM_FORMAT_XRGB8888:
5255 	case DRM_FORMAT_ARGB8888:
5256 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5257 		break;
5258 	case DRM_FORMAT_XRGB2101010:
5259 	case DRM_FORMAT_ARGB2101010:
5260 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5261 		break;
5262 	case DRM_FORMAT_XBGR2101010:
5263 	case DRM_FORMAT_ABGR2101010:
5264 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5265 		break;
5266 	case DRM_FORMAT_XBGR8888:
5267 	case DRM_FORMAT_ABGR8888:
5268 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5269 		break;
5270 	case DRM_FORMAT_NV21:
5271 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5272 		break;
5273 	case DRM_FORMAT_NV12:
5274 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5275 		break;
5276 	case DRM_FORMAT_P010:
5277 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5278 		break;
5279 	case DRM_FORMAT_XRGB16161616F:
5280 	case DRM_FORMAT_ARGB16161616F:
5281 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5282 		break;
5283 	case DRM_FORMAT_XBGR16161616F:
5284 	case DRM_FORMAT_ABGR16161616F:
5285 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5286 		break;
5287 	case DRM_FORMAT_XRGB16161616:
5288 	case DRM_FORMAT_ARGB16161616:
5289 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5290 		break;
5291 	case DRM_FORMAT_XBGR16161616:
5292 	case DRM_FORMAT_ABGR16161616:
5293 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5294 		break;
5295 	default:
5296 		DRM_ERROR(
5297 			"Unsupported screen format %p4cc\n",
5298 			&fb->format->format);
5299 		return -EINVAL;
5300 	}
5301 
5302 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5303 	case DRM_MODE_ROTATE_0:
5304 		plane_info->rotation = ROTATION_ANGLE_0;
5305 		break;
5306 	case DRM_MODE_ROTATE_90:
5307 		plane_info->rotation = ROTATION_ANGLE_90;
5308 		break;
5309 	case DRM_MODE_ROTATE_180:
5310 		plane_info->rotation = ROTATION_ANGLE_180;
5311 		break;
5312 	case DRM_MODE_ROTATE_270:
5313 		plane_info->rotation = ROTATION_ANGLE_270;
5314 		break;
5315 	default:
5316 		plane_info->rotation = ROTATION_ANGLE_0;
5317 		break;
5318 	}
5319 
5320 	plane_info->visible = true;
5321 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5322 
5323 	plane_info->layer_index = 0;
5324 
5325 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5326 					  &plane_info->color_space);
5327 	if (ret)
5328 		return ret;
5329 
5330 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5331 					   plane_info->rotation, tiling_flags,
5332 					   &plane_info->tiling_info,
5333 					   &plane_info->plane_size,
5334 					   &plane_info->dcc, address, tmz_surface,
5335 					   force_disable_dcc);
5336 	if (ret)
5337 		return ret;
5338 
5339 	fill_blending_from_plane_state(
5340 		plane_state, &plane_info->per_pixel_alpha,
5341 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5342 
5343 	return 0;
5344 }
5345 
5346 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5347 				    struct dc_plane_state *dc_plane_state,
5348 				    struct drm_plane_state *plane_state,
5349 				    struct drm_crtc_state *crtc_state)
5350 {
5351 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5352 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5353 	struct dc_scaling_info scaling_info;
5354 	struct dc_plane_info plane_info;
5355 	int ret;
5356 	bool force_disable_dcc = false;
5357 
5358 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5359 	if (ret)
5360 		return ret;
5361 
5362 	dc_plane_state->src_rect = scaling_info.src_rect;
5363 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5364 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5365 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5366 
5367 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5368 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5369 					  afb->tiling_flags,
5370 					  &plane_info,
5371 					  &dc_plane_state->address,
5372 					  afb->tmz_surface,
5373 					  force_disable_dcc);
5374 	if (ret)
5375 		return ret;
5376 
5377 	dc_plane_state->format = plane_info.format;
5378 	dc_plane_state->color_space = plane_info.color_space;
5380 	dc_plane_state->plane_size = plane_info.plane_size;
5381 	dc_plane_state->rotation = plane_info.rotation;
5382 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5383 	dc_plane_state->stereo_format = plane_info.stereo_format;
5384 	dc_plane_state->tiling_info = plane_info.tiling_info;
5385 	dc_plane_state->visible = plane_info.visible;
5386 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5387 	dc_plane_state->global_alpha = plane_info.global_alpha;
5388 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5389 	dc_plane_state->dcc = plane_info.dcc;
5390 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5391 	dc_plane_state->flip_int_enabled = true;
5392 
5393 	/*
5394 	 * Always set input transfer function, since plane state is refreshed
5395 	 * every time.
5396 	 */
5397 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5398 	if (ret)
5399 		return ret;
5400 
5401 	return 0;
5402 }
5403 
5404 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5405 					   const struct dm_connector_state *dm_state,
5406 					   struct dc_stream_state *stream)
5407 {
5408 	enum amdgpu_rmx_type rmx_type;
5409 
5410 	struct rect src = { 0 }; /* viewport in composition space */
5411 	struct rect dst = { 0 }; /* stream addressable area */
5412 
5413 	/* no mode. nothing to be done */
5414 	if (!mode)
5415 		return;
5416 
5417 	/* Full screen scaling by default */
5418 	src.width = mode->hdisplay;
5419 	src.height = mode->vdisplay;
5420 	dst.width = stream->timing.h_addressable;
5421 	dst.height = stream->timing.v_addressable;
5422 
5423 	if (dm_state) {
5424 		rmx_type = dm_state->scaling;
5425 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5426 			if (src.width * dst.height <
5427 					src.height * dst.width) {
5428 				/* height needs less upscaling/more downscaling */
5429 				dst.width = src.width *
5430 						dst.height / src.height;
5431 			} else {
5432 				/* width needs less upscaling/more downscaling */
5433 				dst.height = src.height *
5434 						dst.width / src.width;
5435 			}
5436 		} else if (rmx_type == RMX_CENTER) {
5437 			dst = src;
5438 		}
5439 
5440 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5441 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5442 
5443 		if (dm_state->underscan_enable) {
5444 			dst.x += dm_state->underscan_hborder / 2;
5445 			dst.y += dm_state->underscan_vborder / 2;
5446 			dst.width -= dm_state->underscan_hborder;
5447 			dst.height -= dm_state->underscan_vborder;
5448 		}
5449 	}
5450 
5451 	stream->src = src;
5452 	stream->dst = dst;
5453 
5454 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5455 		      dst.x, dst.y, dst.width, dst.height);
5456 
5457 }
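
/*
 * Worked example for RMX_ASPECT above (illustrative numbers): a
 * 1920x1080 source on a 2560x1600 native timing gives
 *
 *   src.width * dst.height = 3072000 >= src.height * dst.width = 2764800
 *   ->  dst.height = 1080 * 2560 / 1920 = 1440
 *
 * so the stream is scaled to 2560x1440 and centered with
 * dst.y = (1600 - 1440) / 2 = 80, leaving borders at the top and bottom.
 */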
5458 
5459 static enum dc_color_depth
5460 convert_color_depth_from_display_info(const struct drm_connector *connector,
5461 				      bool is_y420, int requested_bpc)
5462 {
5463 	uint8_t bpc;
5464 
5465 	if (is_y420) {
5466 		bpc = 8;
5467 
5468 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5469 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5470 			bpc = 16;
5471 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5472 			bpc = 12;
5473 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5474 			bpc = 10;
5475 	} else {
5476 		bpc = (uint8_t)connector->display_info.bpc;
5477 		/* Assume 8 bpc by default if no bpc is specified. */
5478 		bpc = bpc ? bpc : 8;
5479 	}
5480 
5481 	if (requested_bpc > 0) {
5482 		/*
5483 		 * Cap display bpc based on the user requested value.
5484 		 *
5485 		 * The value for state->max_bpc may not be correctly updated
5486 		 * depending on when the connector gets added to the state
5487 		 * or if this was called outside of atomic check, so it
5488 		 * can't be used directly.
5489 		 */
5490 		bpc = min_t(u8, bpc, requested_bpc);
5491 
5492 		/* Round down to the nearest even number. */
5493 		bpc = bpc - (bpc & 1);
5494 	}
5495 
5496 	switch (bpc) {
5497 	case 0:
5498 		/*
5499 		 * Temporary workaround: DRM doesn't parse color depth for
5500 		 * EDID revisions before 1.4.
5501 		 * TODO: Fix EDID parsing
5502 		 */
5503 		return COLOR_DEPTH_888;
5504 	case 6:
5505 		return COLOR_DEPTH_666;
5506 	case 8:
5507 		return COLOR_DEPTH_888;
5508 	case 10:
5509 		return COLOR_DEPTH_101010;
5510 	case 12:
5511 		return COLOR_DEPTH_121212;
5512 	case 14:
5513 		return COLOR_DEPTH_141414;
5514 	case 16:
5515 		return COLOR_DEPTH_161616;
5516 	default:
5517 		return COLOR_DEPTH_UNDEFINED;
5518 	}
5519 }
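
/*
 * Example of the capping above (hypothetical values): an EDID reporting
 * 12 bpc combined with a requested_bpc of 11 gives min(12, 11) = 11,
 * which is then rounded down to the even value 10 and returned as
 * COLOR_DEPTH_101010.
 */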
5520 
5521 static enum dc_aspect_ratio
5522 get_aspect_ratio(const struct drm_display_mode *mode_in)
5523 {
5524 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5525 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5526 }
5527 
5528 static enum dc_color_space
5529 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5530 {
5531 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5532 
5533 	switch (dc_crtc_timing->pixel_encoding)	{
5534 	case PIXEL_ENCODING_YCBCR422:
5535 	case PIXEL_ENCODING_YCBCR444:
5536 	case PIXEL_ENCODING_YCBCR420:
5537 	{
5538 		/*
5539 		 * Per the HDMI spec, 27.03 MHz is the pixel clock separating
5540 		 * HDTV from SDTV, so use YCbCr709 above that point and
5541 		 * YCbCr601 below it.
5542 		 */
5543 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5544 			if (dc_crtc_timing->flags.Y_ONLY)
5545 				color_space =
5546 					COLOR_SPACE_YCBCR709_LIMITED;
5547 			else
5548 				color_space = COLOR_SPACE_YCBCR709;
5549 		} else {
5550 			if (dc_crtc_timing->flags.Y_ONLY)
5551 				color_space =
5552 					COLOR_SPACE_YCBCR601_LIMITED;
5553 			else
5554 				color_space = COLOR_SPACE_YCBCR601;
5555 		}
5556 
5557 	}
5558 	break;
5559 	case PIXEL_ENCODING_RGB:
5560 		color_space = COLOR_SPACE_SRGB;
5561 		break;
5562 
5563 	default:
5564 		WARN_ON(1);
5565 		break;
5566 	}
5567 
5568 	return color_space;
5569 }
5570 
5571 static bool adjust_colour_depth_from_display_info(
5572 	struct dc_crtc_timing *timing_out,
5573 	const struct drm_display_info *info)
5574 {
5575 	enum dc_color_depth depth = timing_out->display_color_depth;
5576 	int normalized_clk;
5577 	do {
5578 		normalized_clk = timing_out->pix_clk_100hz / 10;
5579 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5580 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5581 			normalized_clk /= 2;
5582 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5583 		switch (depth) {
5584 		case COLOR_DEPTH_888:
5585 			break;
5586 		case COLOR_DEPTH_101010:
5587 			normalized_clk = (normalized_clk * 30) / 24;
5588 			break;
5589 		case COLOR_DEPTH_121212:
5590 			normalized_clk = (normalized_clk * 36) / 24;
5591 			break;
5592 		case COLOR_DEPTH_161616:
5593 			normalized_clk = (normalized_clk * 48) / 24;
5594 			break;
5595 		default:
5596 			/* The above depths are the only ones valid for HDMI. */
5597 			return false;
5598 		}
5599 		if (normalized_clk <= info->max_tmds_clock) {
5600 			timing_out->display_color_depth = depth;
5601 			return true;
5602 		}
5603 	} while (--depth > COLOR_DEPTH_666);
5604 	return false;
5605 }
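
/*
 * Worked example for the loop above (illustrative numbers): a 594 MHz
 * YCbCr 4:2:0 timing at 10 bpc normalizes to 594000 / 2 * 30 / 24 =
 * 371250 kHz. Against a sink max_tmds_clock of 340000 kHz that fails,
 * so the loop retries at 8 bpc (594000 / 2 = 297000 kHz), which fits,
 * and the colour depth is lowered to COLOR_DEPTH_888.
 */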
5606 
5607 static void fill_stream_properties_from_drm_display_mode(
5608 	struct dc_stream_state *stream,
5609 	const struct drm_display_mode *mode_in,
5610 	const struct drm_connector *connector,
5611 	const struct drm_connector_state *connector_state,
5612 	const struct dc_stream_state *old_stream,
5613 	int requested_bpc)
5614 {
5615 	struct dc_crtc_timing *timing_out = &stream->timing;
5616 	const struct drm_display_info *info = &connector->display_info;
5617 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5618 	struct hdmi_vendor_infoframe hv_frame;
5619 	struct hdmi_avi_infoframe avi_frame;
5620 
5621 	memset(&hv_frame, 0, sizeof(hv_frame));
5622 	memset(&avi_frame, 0, sizeof(avi_frame));
5623 
5624 	timing_out->h_border_left = 0;
5625 	timing_out->h_border_right = 0;
5626 	timing_out->v_border_top = 0;
5627 	timing_out->v_border_bottom = 0;
5628 	/* TODO: un-hardcode */
5629 	if (drm_mode_is_420_only(info, mode_in)
5630 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5631 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5632 	else if (drm_mode_is_420_also(info, mode_in)
5633 			&& aconnector->force_yuv420_output)
5634 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5635 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5636 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5637 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5638 	else
5639 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5640 
5641 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5642 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5643 		connector,
5644 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5645 		requested_bpc);
5646 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5647 	timing_out->hdmi_vic = 0;
5648 
5649 	if (old_stream) {
5650 		timing_out->vic = old_stream->timing.vic;
5651 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5652 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5653 	} else {
5654 		timing_out->vic = drm_match_cea_mode(mode_in);
5655 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5656 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5657 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5658 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5659 	}
5660 
5661 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5662 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5663 		timing_out->vic = avi_frame.video_code;
5664 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5665 		timing_out->hdmi_vic = hv_frame.vic;
5666 	}
5667 
5668 	if (is_freesync_video_mode(mode_in, aconnector)) {
5669 		timing_out->h_addressable = mode_in->hdisplay;
5670 		timing_out->h_total = mode_in->htotal;
5671 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5672 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5673 		timing_out->v_total = mode_in->vtotal;
5674 		timing_out->v_addressable = mode_in->vdisplay;
5675 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5676 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5677 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5678 	} else {
5679 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5680 		timing_out->h_total = mode_in->crtc_htotal;
5681 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5682 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5683 		timing_out->v_total = mode_in->crtc_vtotal;
5684 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5685 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5686 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5687 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5688 	}
5689 
5690 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5691 
5692 	stream->output_color_space = get_output_color_space(timing_out);
5693 
5694 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5695 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5696 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5697 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5698 		    drm_mode_is_420_also(info, mode_in) &&
5699 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5700 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5701 			adjust_colour_depth_from_display_info(timing_out, info);
5702 		}
5703 	}
5704 }
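
/*
 * Note on the "* 10" conversions above: drm_display_mode clocks are in
 * kHz while dc_crtc_timing::pix_clk_100hz is in units of 100 Hz, so e.g.
 * a 148500 kHz (148.5 MHz) pixel clock becomes pix_clk_100hz = 1485000.
 */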
5705 
5706 static void fill_audio_info(struct audio_info *audio_info,
5707 			    const struct drm_connector *drm_connector,
5708 			    const struct dc_sink *dc_sink)
5709 {
5710 	int i = 0;
5711 	int cea_revision = 0;
5712 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5713 
5714 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5715 	audio_info->product_id = edid_caps->product_id;
5716 
5717 	cea_revision = drm_connector->display_info.cea_rev;
5718 
5719 	strscpy(audio_info->display_name,
5720 		edid_caps->display_name,
5721 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5722 
5723 	if (cea_revision >= 3) {
5724 		audio_info->mode_count = edid_caps->audio_mode_count;
5725 
5726 		for (i = 0; i < audio_info->mode_count; ++i) {
5727 			audio_info->modes[i].format_code =
5728 					(enum audio_format_code)
5729 					(edid_caps->audio_modes[i].format_code);
5730 			audio_info->modes[i].channel_count =
5731 					edid_caps->audio_modes[i].channel_count;
5732 			audio_info->modes[i].sample_rates.all =
5733 					edid_caps->audio_modes[i].sample_rate;
5734 			audio_info->modes[i].sample_size =
5735 					edid_caps->audio_modes[i].sample_size;
5736 		}
5737 	}
5738 
5739 	audio_info->flags.all = edid_caps->speaker_flags;
5740 
5741 	/* TODO: We only check for progressive mode; check for interlaced mode too */
5742 	if (drm_connector->latency_present[0]) {
5743 		audio_info->video_latency = drm_connector->video_latency[0];
5744 		audio_info->audio_latency = drm_connector->audio_latency[0];
5745 	}
5746 
5747 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5748 
5749 }
5750 
5751 static void
5752 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5753 				      struct drm_display_mode *dst_mode)
5754 {
5755 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5756 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5757 	dst_mode->crtc_clock = src_mode->crtc_clock;
5758 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5759 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5760 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5761 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5762 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5763 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5764 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5765 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5766 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5767 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5768 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5769 }
5770 
5771 static void
5772 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5773 					const struct drm_display_mode *native_mode,
5774 					bool scale_enabled)
5775 {
5776 	if (scale_enabled) {
5777 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5778 	} else if (native_mode->clock == drm_mode->clock &&
5779 			native_mode->htotal == drm_mode->htotal &&
5780 			native_mode->vtotal == drm_mode->vtotal) {
5781 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5782 	} else {
5783 		/* no scaling and no amdgpu-inserted mode, no need to patch */
5784 	}
5785 }
5786 
5787 static struct dc_sink *
5788 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5789 {
5790 	struct dc_sink_init_data sink_init_data = { 0 };
5791 	struct dc_sink *sink = NULL;
5792 	sink_init_data.link = aconnector->dc_link;
5793 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5794 
5795 	sink = dc_sink_create(&sink_init_data);
5796 	if (!sink) {
5797 		DRM_ERROR("Failed to create sink!\n");
5798 		return NULL;
5799 	}
5800 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5801 
5802 	return sink;
5803 }
5804 
5805 static void set_multisync_trigger_params(
5806 		struct dc_stream_state *stream)
5807 {
5808 	struct dc_stream_state *master = NULL;
5809 
5810 	if (stream->triggered_crtc_reset.enabled) {
5811 		master = stream->triggered_crtc_reset.event_source;
5812 		stream->triggered_crtc_reset.event =
5813 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5814 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5815 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5816 	}
5817 }
5818 
5819 static void set_master_stream(struct dc_stream_state *stream_set[],
5820 			      int stream_count)
5821 {
5822 	int j, highest_rfr = 0, master_stream = 0;
5823 
5824 	for (j = 0;  j < stream_count; j++) {
5825 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5826 			int refresh_rate = 0;
5827 
5828 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5829 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5830 			if (refresh_rate > highest_rfr) {
5831 				highest_rfr = refresh_rate;
5832 				master_stream = j;
5833 			}
5834 		}
5835 	}
5836 	for (j = 0;  j < stream_count; j++) {
5837 		if (stream_set[j])
5838 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5839 	}
5840 }
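
/*
 * The refresh rate computed above is pix_clk_100hz * 100 / (h_total *
 * v_total), i.e. the pixel clock in Hz divided by the pixel count of one
 * full frame. For a standard 1080p60 timing (148.5 MHz, 2200x1125 total)
 * that is 148500000 / 2475000 = 60 Hz; the stream with the highest such
 * rate becomes the trigger source for the others.
 */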
5841 
5842 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5843 {
5844 	int i = 0;
5845 	struct dc_stream_state *stream;
5846 
5847 	if (context->stream_count < 2)
5848 		return;
5849 	for (i = 0; i < context->stream_count ; i++) {
5850 		if (!context->streams[i])
5851 			continue;
5852 		/*
5853 		 * TODO: add a function to read AMD VSDB bits and set
5854 		 * crtc_sync_master.multi_sync_enabled flag
5855 		 * For now it's set to false
5856 		 */
5857 	}
5858 
5859 	set_master_stream(context->streams, context->stream_count);
5860 
5861 	for (i = 0; i < context->stream_count ; i++) {
5862 		stream = context->streams[i];
5863 
5864 		if (!stream)
5865 			continue;
5866 
5867 		set_multisync_trigger_params(stream);
5868 	}
5869 }
5870 
5871 #if defined(CONFIG_DRM_AMD_DC_DCN)
5872 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5873 							struct dc_sink *sink, struct dc_stream_state *stream,
5874 							struct dsc_dec_dpcd_caps *dsc_caps)
5875 {
5876 	stream->timing.flags.DSC = 0;
5877 
5878 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5879 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5880 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5881 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5882 				      dsc_caps);
5883 	}
5884 }
5885 
5886 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5887 										struct dc_sink *sink, struct dc_stream_state *stream,
5888 										struct dsc_dec_dpcd_caps *dsc_caps)
5889 {
5890 	struct drm_connector *drm_connector = &aconnector->base;
5891 	uint32_t link_bandwidth_kbps;
5892 	uint32_t max_dsc_target_bpp_limit_override = 0;
5893 
5894 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5895 							dc_link_get_link_cap(aconnector->dc_link));
5896 
5897 	if (stream->link && stream->link->local_sink)
5898 		max_dsc_target_bpp_limit_override =
5899 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
5900 
5901 	/* Set DSC policy according to dsc_clock_en */
5902 	dc_dsc_policy_set_enable_dsc_when_not_needed(
5903 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5904 
5905 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5907 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5908 						dsc_caps,
5909 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5910 						max_dsc_target_bpp_limit_override,
5911 						link_bandwidth_kbps,
5912 						&stream->timing,
5913 						&stream->timing.dsc_cfg)) {
5914 			stream->timing.flags.DSC = 1;
5915 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5916 		}
5917 	}
5918 
5919 	/* Overwrite the stream flag if DSC is enabled through debugfs */
5920 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5921 		stream->timing.flags.DSC = 1;
5922 
5923 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5924 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5925 
5926 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5927 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5928 
5929 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5930 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5931 }
5932 #endif
5933 
5934 /**
5935  * DOC: FreeSync Video
5936  *
5937  * When a userspace application wants to play a video, the content follows a
5938  * standard format definition that usually specifies the FPS for that format.
5939  * The below list illustrates some video format and the expected FPS,
5940  * respectively:
5941  *
5942  * - TV/NTSC (23.976 FPS)
5943  * - Cinema (24 FPS)
5944  * - TV/PAL (25 FPS)
5945  * - TV/NTSC (29.97 FPS)
5946  * - TV/NTSC (30 FPS)
5947  * - Cinema HFR (48 FPS)
5948  * - TV/PAL (50 FPS)
5949  * - Commonly used (60 FPS)
5950  * - Multiples of 24 (48,72,96 FPS)
5951  *
5952  * The list of standard video formats is not huge and can be added to the
5953  * connector's modeset list beforehand. With that, userspace can leverage
5954  * FreeSync to extend the front porch in order to attain the target refresh
5955  * rate. Such a switch happens seamlessly, without screen blanking or
5956  * reprogramming of the output in any other way. If userspace requests a
5957  * modesetting change that is compatible with a FreeSync mode and only
5958  * differs in refresh rate, DC will skip the full update and avoid any
5959  * visible blink during the transition. For example, a video player can
5960  * switch from 60Hz to 30Hz for playing TV/NTSC content when it goes full
5961  * screen without causing any display blink. The same applies to any other
5962  * mode set that differs only in refresh rate.
5963  */
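
/*
 * Since the refresh rate of a timing is pix_clk / (h_total * v_total),
 * the seamless switch described above only has to grow v_total (via the
 * vertical front porch) while everything else stays fixed. Illustrative
 * example: a 1920x1080 mode with h_total = 2200, v_total = 1125 and a
 * 148.5 MHz pixel clock runs at 60 Hz; stretching v_total to 2250 halves
 * that to 30 Hz without touching the pixel clock or the active area.
 */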
5964 static struct drm_display_mode *
5965 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5966 			  bool use_probed_modes)
5967 {
5968 	struct drm_display_mode *m, *m_pref = NULL;
5969 	u16 current_refresh, highest_refresh;
5970 	struct list_head *list_head = use_probed_modes ?
5971 						    &aconnector->base.probed_modes :
5972 						    &aconnector->base.modes;
5973 
5974 	if (aconnector->freesync_vid_base.clock != 0)
5975 		return &aconnector->freesync_vid_base;
5976 
5977 	/* Find the preferred mode */
5978 	list_for_each_entry (m, list_head, head) {
5979 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5980 			m_pref = m;
5981 			break;
5982 		}
5983 	}
5984 
5985 	if (!m_pref) {
5986 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
5987 		m_pref = list_first_entry_or_null(
5988 			&aconnector->base.modes, struct drm_display_mode, head);
5989 		if (!m_pref) {
5990 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5991 			return NULL;
5992 		}
5993 	}
5994 
5995 	highest_refresh = drm_mode_vrefresh(m_pref);
5996 
5997 	/*
5998 	 * Find the mode with highest refresh rate with same resolution.
5999 	 * For some monitors, preferred mode is not the mode with highest
6000 	 * supported refresh rate.
6001 	 */
6002 	list_for_each_entry (m, list_head, head) {
6003 		current_refresh  = drm_mode_vrefresh(m);
6004 
6005 		if (m->hdisplay == m_pref->hdisplay &&
6006 		    m->vdisplay == m_pref->vdisplay &&
6007 		    highest_refresh < current_refresh) {
6008 			highest_refresh = current_refresh;
6009 			m_pref = m;
6010 		}
6011 	}
6012 
6013 	aconnector->freesync_vid_base = *m_pref;
6014 	return m_pref;
6015 }
6016 
6017 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6018 				   struct amdgpu_dm_connector *aconnector)
6019 {
6020 	struct drm_display_mode *high_mode;
6021 	int timing_diff;
6022 
6023 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6024 	if (!high_mode || !mode)
6025 		return false;
6026 
6027 	timing_diff = high_mode->vtotal - mode->vtotal;
6028 
6029 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6030 	    high_mode->hdisplay != mode->hdisplay ||
6031 	    high_mode->vdisplay != mode->vdisplay ||
6032 	    high_mode->hsync_start != mode->hsync_start ||
6033 	    high_mode->hsync_end != mode->hsync_end ||
6034 	    high_mode->htotal != mode->htotal ||
6035 	    high_mode->hskew != mode->hskew ||
6036 	    high_mode->vscan != mode->vscan ||
6037 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6038 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6039 		return false;
6040 	else
6041 		return true;
6042 }
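
/*
 * Put differently, a mode qualifies as a FreeSync video mode above when
 * it matches the highest-refresh base mode in everything except the
 * vertical front porch: vtotal, vsync_start and vsync_end may all shift
 * by the same timing_diff, the rest of the timing must be identical.
 */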
6043 
6044 static struct dc_stream_state *
6045 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6046 		       const struct drm_display_mode *drm_mode,
6047 		       const struct dm_connector_state *dm_state,
6048 		       const struct dc_stream_state *old_stream,
6049 		       int requested_bpc)
6050 {
6051 	struct drm_display_mode *preferred_mode = NULL;
6052 	struct drm_connector *drm_connector;
6053 	const struct drm_connector_state *con_state =
6054 		dm_state ? &dm_state->base : NULL;
6055 	struct dc_stream_state *stream = NULL;
6056 	struct drm_display_mode mode = *drm_mode;
6057 	struct drm_display_mode saved_mode;
6058 	struct drm_display_mode *freesync_mode = NULL;
6059 	bool native_mode_found = false;
6060 	bool recalculate_timing = false;
6061 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6062 	int mode_refresh;
6063 	int preferred_refresh = 0;
6064 #if defined(CONFIG_DRM_AMD_DC_DCN)
6065 	struct dsc_dec_dpcd_caps dsc_caps;
6066 #endif
6067 	struct dc_sink *sink = NULL;
6068 
6069 	memset(&saved_mode, 0, sizeof(saved_mode));
6070 
6071 	if (aconnector == NULL) {
6072 		DRM_ERROR("aconnector is NULL!\n");
6073 		return stream;
6074 	}
6075 
6076 	drm_connector = &aconnector->base;
6077 
6078 	if (!aconnector->dc_sink) {
6079 		sink = create_fake_sink(aconnector);
6080 		if (!sink)
6081 			return stream;
6082 	} else {
6083 		sink = aconnector->dc_sink;
6084 		dc_sink_retain(sink);
6085 	}
6086 
6087 	stream = dc_create_stream_for_sink(sink);
6088 
6089 	if (stream == NULL) {
6090 		DRM_ERROR("Failed to create stream for sink!\n");
6091 		goto finish;
6092 	}
6093 
6094 	stream->dm_stream_context = aconnector;
6095 
6096 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6097 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6098 
6099 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6100 		/* Search for preferred mode */
6101 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6102 			native_mode_found = true;
6103 			break;
6104 		}
6105 	}
6106 	if (!native_mode_found)
6107 		preferred_mode = list_first_entry_or_null(
6108 				&aconnector->base.modes,
6109 				struct drm_display_mode,
6110 				head);
6111 
6112 	mode_refresh = drm_mode_vrefresh(&mode);
6113 
6114 	if (preferred_mode == NULL) {
6115 		/*
6116 		 * This may not be an error: the use case is a hotplug with no
6117 		 * usermode call to reset and set the mode. In that case we set
6118 		 * the mode ourselves to restore the previous one, and the mode
6119 		 * list may not have been filled in yet.
6120 		 */
6121 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6122 	} else {
6123 		recalculate_timing = amdgpu_freesync_vid_mode &&
6124 				 is_freesync_video_mode(&mode, aconnector);
6125 		if (recalculate_timing) {
6126 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6127 			saved_mode = mode;
6128 			mode = *freesync_mode;
6129 		} else {
6130 			decide_crtc_timing_for_drm_display_mode(
6131 				&mode, preferred_mode, scale);
6132 
6133 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6134 		}
6135 	}
6136 
6137 	if (recalculate_timing)
6138 		drm_mode_set_crtcinfo(&saved_mode, 0);
6139 	else if (!dm_state)
6140 		drm_mode_set_crtcinfo(&mode, 0);
6141 
6142 	/*
6143 	 * If scaling is enabled and the refresh rate didn't change,
6144 	 * copy the VIC and sync polarities from the old timing.
6145 	 */
6146 	if (!scale || mode_refresh != preferred_refresh)
6147 		fill_stream_properties_from_drm_display_mode(
6148 			stream, &mode, &aconnector->base, con_state, NULL,
6149 			requested_bpc);
6150 	else
6151 		fill_stream_properties_from_drm_display_mode(
6152 			stream, &mode, &aconnector->base, con_state, old_stream,
6153 			requested_bpc);
6154 
6155 #if defined(CONFIG_DRM_AMD_DC_DCN)
6156 	/* SST DSC determination policy */
6157 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6158 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6159 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6160 #endif
6161 
6162 	update_stream_scaling_settings(&mode, dm_state, stream);
6163 
6164 	fill_audio_info(
6165 		&stream->audio_info,
6166 		drm_connector,
6167 		sink);
6168 
6169 	update_stream_signal(stream, sink);
6170 
6171 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6172 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6173 
6174 	if (stream->link->psr_settings.psr_feature_enabled) {
6175 		//
6176 		// Decide whether the stream supports VSC SDP colorimetry
6177 		// before building the VSC info packet.
6178 		//
6179 		stream->use_vsc_sdp_for_colorimetry = false;
6180 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6181 			stream->use_vsc_sdp_for_colorimetry =
6182 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6183 		} else {
6184 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6185 				stream->use_vsc_sdp_for_colorimetry = true;
6186 		}
6187 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6188 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6189 
6190 	}
6191 finish:
6192 	dc_sink_release(sink);
6193 
6194 	return stream;
6195 }
6196 
6197 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6198 {
6199 	drm_crtc_cleanup(crtc);
6200 	kfree(crtc);
6201 }
6202 
6203 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6204 				  struct drm_crtc_state *state)
6205 {
6206 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6207 
6208 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6209 	if (cur->stream)
6210 		dc_stream_release(cur->stream);
6211 
6213 	__drm_atomic_helper_crtc_destroy_state(state);
6214 
6216 	kfree(state);
6217 }
6218 
6219 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6220 {
6221 	struct dm_crtc_state *state;
6222 
6223 	if (crtc->state)
6224 		dm_crtc_destroy_state(crtc, crtc->state);
6225 
6226 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6227 	if (WARN_ON(!state))
6228 		return;
6229 
6230 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6231 }
6232 
6233 static struct drm_crtc_state *
6234 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6235 {
6236 	struct dm_crtc_state *state, *cur;
6237 
6238 	if (WARN_ON(!crtc->state))
6239 		return NULL;
6240 
6241 	cur = to_dm_crtc_state(crtc->state);
6242 
6243 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6244 	if (!state)
6245 		return NULL;
6246 
6247 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6248 
6249 	if (cur->stream) {
6250 		state->stream = cur->stream;
6251 		dc_stream_retain(state->stream);
6252 	}
6253 
6254 	state->active_planes = cur->active_planes;
6255 	state->vrr_infopacket = cur->vrr_infopacket;
6256 	state->abm_level = cur->abm_level;
6257 	state->vrr_supported = cur->vrr_supported;
6258 	state->freesync_config = cur->freesync_config;
6259 	state->cm_has_degamma = cur->cm_has_degamma;
6260 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6261 	state->force_dpms_off = cur->force_dpms_off;
6262 	/* TODO: Duplicate dc_stream after the stream object is flattened */
6263 
6264 	return &state->base;
6265 }
6266 
6267 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6268 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6269 {
6270 	crtc_debugfs_init(crtc);
6271 
6272 	return 0;
6273 }
6274 #endif
6275 
6276 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6277 {
6278 	enum dc_irq_source irq_source;
6279 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6280 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6281 	int rc;
6282 
6283 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6284 
6285 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6286 
6287 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6288 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6289 	return rc;
6290 }
6291 
6292 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6293 {
6294 	enum dc_irq_source irq_source;
6295 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6296 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6297 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6298 #if defined(CONFIG_DRM_AMD_DC_DCN)
6299 	struct amdgpu_display_manager *dm = &adev->dm;
6300 	struct vblank_control_work *work;
6301 #endif
6302 	int rc = 0;
6303 
6304 	if (enable) {
6305 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6306 		if (amdgpu_dm_vrr_active(acrtc_state))
6307 			rc = dm_set_vupdate_irq(crtc, true);
6308 	} else {
6309 		/* vblank irq off -> vupdate irq off */
6310 		rc = dm_set_vupdate_irq(crtc, false);
6311 	}
6312 
6313 	if (rc)
6314 		return rc;
6315 
6316 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6317 
6318 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6319 		return -EBUSY;
6320 
6321 	if (amdgpu_in_reset(adev))
6322 		return 0;
6323 
6324 #if defined(CONFIG_DRM_AMD_DC_DCN)
6325 	if (dm->vblank_control_workqueue) {
6326 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6327 		if (!work)
6328 			return -ENOMEM;
6329 
6330 		INIT_WORK(&work->work, vblank_control_worker);
6331 		work->dm = dm;
6332 		work->acrtc = acrtc;
6333 		work->enable = enable;
6334 
6335 		if (acrtc_state->stream) {
6336 			dc_stream_retain(acrtc_state->stream);
6337 			work->stream = acrtc_state->stream;
6338 		}
6339 
6340 		queue_work(dm->vblank_control_workqueue, &work->work);
6341 	}
6342 #endif
6343 
6344 	return 0;
6345 }
6346 
6347 static int dm_enable_vblank(struct drm_crtc *crtc)
6348 {
6349 	return dm_set_vblank(crtc, true);
6350 }
6351 
6352 static void dm_disable_vblank(struct drm_crtc *crtc)
6353 {
6354 	dm_set_vblank(crtc, false);
6355 }
6356 
6357 /* Only the options currently available to the driver are implemented */
6358 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6359 	.reset = dm_crtc_reset_state,
6360 	.destroy = amdgpu_dm_crtc_destroy,
6361 	.set_config = drm_atomic_helper_set_config,
6362 	.page_flip = drm_atomic_helper_page_flip,
6363 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6364 	.atomic_destroy_state = dm_crtc_destroy_state,
6365 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6366 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6367 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6368 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6369 	.enable_vblank = dm_enable_vblank,
6370 	.disable_vblank = dm_disable_vblank,
6371 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6372 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6373 	.late_register = amdgpu_dm_crtc_late_register,
6374 #endif
6375 };
6376 
6377 static enum drm_connector_status
6378 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6379 {
6380 	bool connected;
6381 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6382 
6383 	/*
6384 	 * Notes:
6385 	 * 1. This interface is NOT called in context of HPD irq.
6386 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6387 	 * makes it a bad place for *any* MST-related activity.
6388 	 */
6389 
6390 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6391 	    !aconnector->fake_enable)
6392 		connected = (aconnector->dc_sink != NULL);
6393 	else
6394 		connected = (aconnector->base.force == DRM_FORCE_ON);
6395 
6396 	update_subconnector_property(aconnector);
6397 
6398 	return (connected ? connector_status_connected :
6399 			connector_status_disconnected);
6400 }
6401 
6402 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6403 					    struct drm_connector_state *connector_state,
6404 					    struct drm_property *property,
6405 					    uint64_t val)
6406 {
6407 	struct drm_device *dev = connector->dev;
6408 	struct amdgpu_device *adev = drm_to_adev(dev);
6409 	struct dm_connector_state *dm_old_state =
6410 		to_dm_connector_state(connector->state);
6411 	struct dm_connector_state *dm_new_state =
6412 		to_dm_connector_state(connector_state);
6413 
6414 	int ret = -EINVAL;
6415 
6416 	if (property == dev->mode_config.scaling_mode_property) {
6417 		enum amdgpu_rmx_type rmx_type;
6418 
6419 		switch (val) {
6420 		case DRM_MODE_SCALE_CENTER:
6421 			rmx_type = RMX_CENTER;
6422 			break;
6423 		case DRM_MODE_SCALE_ASPECT:
6424 			rmx_type = RMX_ASPECT;
6425 			break;
6426 		case DRM_MODE_SCALE_FULLSCREEN:
6427 			rmx_type = RMX_FULL;
6428 			break;
6429 		case DRM_MODE_SCALE_NONE:
6430 		default:
6431 			rmx_type = RMX_OFF;
6432 			break;
6433 		}
6434 
6435 		if (dm_old_state->scaling == rmx_type)
6436 			return 0;
6437 
6438 		dm_new_state->scaling = rmx_type;
6439 		ret = 0;
6440 	} else if (property == adev->mode_info.underscan_hborder_property) {
6441 		dm_new_state->underscan_hborder = val;
6442 		ret = 0;
6443 	} else if (property == adev->mode_info.underscan_vborder_property) {
6444 		dm_new_state->underscan_vborder = val;
6445 		ret = 0;
6446 	} else if (property == adev->mode_info.underscan_property) {
6447 		dm_new_state->underscan_enable = val;
6448 		ret = 0;
6449 	} else if (property == adev->mode_info.abm_level_property) {
6450 		dm_new_state->abm_level = val;
6451 		ret = 0;
6452 	}
6453 
6454 	return ret;
6455 }
6456 
6457 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6458 					    const struct drm_connector_state *state,
6459 					    struct drm_property *property,
6460 					    uint64_t *val)
6461 {
6462 	struct drm_device *dev = connector->dev;
6463 	struct amdgpu_device *adev = drm_to_adev(dev);
6464 	struct dm_connector_state *dm_state =
6465 		to_dm_connector_state(state);
6466 	int ret = -EINVAL;
6467 
6468 	if (property == dev->mode_config.scaling_mode_property) {
6469 		switch (dm_state->scaling) {
6470 		case RMX_CENTER:
6471 			*val = DRM_MODE_SCALE_CENTER;
6472 			break;
6473 		case RMX_ASPECT:
6474 			*val = DRM_MODE_SCALE_ASPECT;
6475 			break;
6476 		case RMX_FULL:
6477 			*val = DRM_MODE_SCALE_FULLSCREEN;
6478 			break;
6479 		case RMX_OFF:
6480 		default:
6481 			*val = DRM_MODE_SCALE_NONE;
6482 			break;
6483 		}
6484 		ret = 0;
6485 	} else if (property == adev->mode_info.underscan_hborder_property) {
6486 		*val = dm_state->underscan_hborder;
6487 		ret = 0;
6488 	} else if (property == adev->mode_info.underscan_vborder_property) {
6489 		*val = dm_state->underscan_vborder;
6490 		ret = 0;
6491 	} else if (property == adev->mode_info.underscan_property) {
6492 		*val = dm_state->underscan_enable;
6493 		ret = 0;
6494 	} else if (property == adev->mode_info.abm_level_property) {
6495 		*val = dm_state->abm_level;
6496 		ret = 0;
6497 	}
6498 
6499 	return ret;
6500 }
6501 
6502 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6503 {
6504 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6505 
6506 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6507 }
6508 
6509 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6510 {
6511 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6512 	const struct dc_link *link = aconnector->dc_link;
6513 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6514 	struct amdgpu_display_manager *dm = &adev->dm;
6515 	int i;
6516 
6517 	/*
6518 	 * Call only if mst_mgr was initialized before, since it's not done
6519 	 * for all connector types.
6520 	 */
6521 	if (aconnector->mst_mgr.dev)
6522 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6523 
6524 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6525 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6526 	for (i = 0; i < dm->num_of_edps; i++) {
6527 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6528 			backlight_device_unregister(dm->backlight_dev[i]);
6529 			dm->backlight_dev[i] = NULL;
6530 		}
6531 	}
6532 #endif
6533 
6534 	if (aconnector->dc_em_sink)
6535 		dc_sink_release(aconnector->dc_em_sink);
6536 	aconnector->dc_em_sink = NULL;
6537 	if (aconnector->dc_sink)
6538 		dc_sink_release(aconnector->dc_sink);
6539 	aconnector->dc_sink = NULL;
6540 
6541 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6542 	drm_connector_unregister(connector);
6543 	drm_connector_cleanup(connector);
6544 	if (aconnector->i2c) {
6545 		i2c_del_adapter(&aconnector->i2c->base);
6546 		kfree(aconnector->i2c);
6547 	}
6548 	kfree(aconnector->dm_dp_aux.aux.name);
6549 
6550 	kfree(connector);
6551 }
6552 
6553 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6554 {
6555 	struct dm_connector_state *state =
6556 		to_dm_connector_state(connector->state);
6557 
6558 	if (connector->state)
6559 		__drm_atomic_helper_connector_destroy_state(connector->state);
6560 
6561 	kfree(state);
6562 
6563 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6564 
6565 	if (state) {
6566 		state->scaling = RMX_OFF;
6567 		state->underscan_enable = false;
6568 		state->underscan_hborder = 0;
6569 		state->underscan_vborder = 0;
6570 		state->base.max_requested_bpc = 8;
6571 		state->vcpi_slots = 0;
6572 		state->pbn = 0;
6573 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6574 			state->abm_level = amdgpu_dm_abm_level;
6575 
6576 		__drm_atomic_helper_connector_reset(connector, &state->base);
6577 	}
6578 }
6579 
6580 struct drm_connector_state *
6581 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6582 {
6583 	struct dm_connector_state *state =
6584 		to_dm_connector_state(connector->state);
6585 
6586 	struct dm_connector_state *new_state =
6587 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6588 
6589 	if (!new_state)
6590 		return NULL;
6591 
6592 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6593 
6594 	new_state->freesync_capable = state->freesync_capable;
6595 	new_state->abm_level = state->abm_level;
6596 	new_state->scaling = state->scaling;
6597 	new_state->underscan_enable = state->underscan_enable;
6598 	new_state->underscan_hborder = state->underscan_hborder;
6599 	new_state->underscan_vborder = state->underscan_vborder;
6600 	new_state->vcpi_slots = state->vcpi_slots;
6601 	new_state->pbn = state->pbn;
6602 	return &new_state->base;
6603 }
6604 
6605 static int
6606 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6607 {
6608 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6609 		to_amdgpu_dm_connector(connector);
6610 	int r;
6611 
6612 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6613 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6614 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6615 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6616 		if (r)
6617 			return r;
6618 	}
6619 
6620 #if defined(CONFIG_DEBUG_FS)
6621 	connector_debugfs_init(amdgpu_dm_connector);
6622 #endif
6623 
6624 	return 0;
6625 }
6626 
6627 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6628 	.reset = amdgpu_dm_connector_funcs_reset,
6629 	.detect = amdgpu_dm_connector_detect,
6630 	.fill_modes = drm_helper_probe_single_connector_modes,
6631 	.destroy = amdgpu_dm_connector_destroy,
6632 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6633 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6634 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6635 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6636 	.late_register = amdgpu_dm_connector_late_register,
6637 	.early_unregister = amdgpu_dm_connector_unregister
6638 };
6639 
6640 static int get_modes(struct drm_connector *connector)
6641 {
6642 	return amdgpu_dm_connector_get_modes(connector);
6643 }
6644 
6645 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6646 {
6647 	struct dc_sink_init_data init_params = {
6648 			.link = aconnector->dc_link,
6649 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6650 	};
6651 	struct edid *edid;
6652 
6653 	if (!aconnector->base.edid_blob_ptr) {
6654 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6655 				aconnector->base.name);
6656 
6657 		aconnector->base.force = DRM_FORCE_OFF;
6658 		aconnector->base.override_edid = false;
6659 		return;
6660 	}
6661 
6662 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6663 
6664 	aconnector->edid = edid;
6665 
6666 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6667 		aconnector->dc_link,
6668 		(uint8_t *)edid,
6669 		(edid->extensions + 1) * EDID_LENGTH,
6670 		&init_params);
6671 
6672 	if (aconnector->base.force == DRM_FORCE_ON) {
6673 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6674 		aconnector->dc_link->local_sink :
6675 		aconnector->dc_em_sink;
6676 		dc_sink_retain(aconnector->dc_sink);
6677 	}
6678 }
6679 
6680 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6681 {
6682 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6683 
6684 	/*
6685 	 * In case of headless boot with force on for a DP-managed connector,
6686 	 * these settings have to be != 0 to get an initial modeset.
6687 	 */
6688 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6689 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6690 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6691 	}
6692 
6694 	aconnector->base.override_edid = true;
6695 	create_eml_sink(aconnector);
6696 }
6697 
6698 static struct dc_stream_state *
6699 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6700 				const struct drm_display_mode *drm_mode,
6701 				const struct dm_connector_state *dm_state,
6702 				const struct dc_stream_state *old_stream)
6703 {
6704 	struct drm_connector *connector = &aconnector->base;
6705 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6706 	struct dc_stream_state *stream;
6707 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6708 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6709 	enum dc_status dc_result = DC_OK;
6710 
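	/*
	 * Start from the highest requested bpc and step down by 2 on each
	 * failed validation (e.g. 10 -> 8 -> 6) until DC accepts the stream
	 * or we run out of supported depths.
	 */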
6711 	do {
6712 		stream = create_stream_for_sink(aconnector, drm_mode,
6713 						dm_state, old_stream,
6714 						requested_bpc);
6715 		if (stream == NULL) {
6716 			DRM_ERROR("Failed to create stream for sink!\n");
6717 			break;
6718 		}
6719 
6720 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6721 
6722 		if (dc_result != DC_OK) {
6723 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6724 				      drm_mode->hdisplay,
6725 				      drm_mode->vdisplay,
6726 				      drm_mode->clock,
6727 				      dc_result,
6728 				      dc_status_to_str(dc_result));
6729 
6730 			dc_stream_release(stream);
6731 			stream = NULL;
6732 			requested_bpc -= 2; /* lower bpc to retry validation */
6733 		}
6734 
6735 	} while (stream == NULL && requested_bpc >= 6);
6736 
6737 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6738 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6739 
6740 		aconnector->force_yuv420_output = true;
6741 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6742 						dm_state, old_stream);
6743 		aconnector->force_yuv420_output = false;
6744 	}
6745 
6746 	return stream;
6747 }
6748 
6749 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6750 				   struct drm_display_mode *mode)
6751 {
6752 	int result = MODE_ERROR;
6753 	struct dc_sink *dc_sink;
6754 	/* TODO: Unhardcode stream count */
6755 	struct dc_stream_state *stream;
6756 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6757 
6758 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6759 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6760 		return result;
6761 
	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
6766 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6767 		!aconnector->dc_em_sink)
6768 		handle_edid_mgmt(aconnector);
6769 
6770 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6771 
6772 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6773 				aconnector->base.force != DRM_FORCE_ON) {
6774 		DRM_ERROR("dc_sink is NULL!\n");
6775 		goto fail;
6776 	}
6777 
6778 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6779 	if (stream) {
6780 		dc_stream_release(stream);
6781 		result = MODE_OK;
6782 	}
6783 
6784 fail:
	/* TODO: error handling */
6786 	return result;
6787 }
6788 
6789 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6790 				struct dc_info_packet *out)
6791 {
6792 	struct hdmi_drm_infoframe frame;
6793 	unsigned char buf[30]; /* 26 + 4 */
6794 	ssize_t len;
6795 	int ret, i;
6796 
6797 	memset(out, 0, sizeof(*out));
6798 
6799 	if (!state->hdr_output_metadata)
6800 		return 0;
6801 
6802 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6803 	if (ret)
6804 		return ret;
6805 
6806 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6807 	if (len < 0)
6808 		return (int)len;
6809 
6810 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6811 	if (len != 30)
6812 		return -EINVAL;
6813 
6814 	/* Prepare the infopacket for DC. */
6815 	switch (state->connector->connector_type) {
6816 	case DRM_MODE_CONNECTOR_HDMIA:
6817 		out->hb0 = 0x87; /* type */
6818 		out->hb1 = 0x01; /* version */
6819 		out->hb2 = 0x1A; /* length */
6820 		out->sb[0] = buf[3]; /* checksum */
6821 		i = 1;
6822 		break;
6823 
6824 	case DRM_MODE_CONNECTOR_DisplayPort:
6825 	case DRM_MODE_CONNECTOR_eDP:
6826 		out->hb0 = 0x00; /* sdp id, zero */
6827 		out->hb1 = 0x87; /* type */
6828 		out->hb2 = 0x1D; /* payload len - 1 */
6829 		out->hb3 = (0x13 << 2); /* sdp version */
6830 		out->sb[0] = 0x01; /* version */
6831 		out->sb[1] = 0x1A; /* length */
6832 		i = 2;
6833 		break;
6834 
6835 	default:
6836 		return -EINVAL;
6837 	}
6838 
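	/*
	 * buf[0..3] hold the packed infoframe header (type, version, length
	 * and checksum); the 26 bytes of static metadata payload start at
	 * buf[4].
	 */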
6839 	memcpy(&out->sb[i], &buf[4], 26);
6840 	out->valid = true;
6841 
6842 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6843 		       sizeof(out->sb), false);
6844 
6845 	return 0;
6846 }
6847 
6848 static int
6849 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6850 				 struct drm_atomic_state *state)
6851 {
6852 	struct drm_connector_state *new_con_state =
6853 		drm_atomic_get_new_connector_state(state, conn);
6854 	struct drm_connector_state *old_con_state =
6855 		drm_atomic_get_old_connector_state(state, conn);
6856 	struct drm_crtc *crtc = new_con_state->crtc;
6857 	struct drm_crtc_state *new_crtc_state;
6858 	int ret;
6859 
6860 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6861 
6862 	if (!crtc)
6863 		return 0;
6864 
6865 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6866 		struct dc_info_packet hdr_infopacket;
6867 
6868 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6869 		if (ret)
6870 			return ret;
6871 
6872 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6873 		if (IS_ERR(new_crtc_state))
6874 			return PTR_ERR(new_crtc_state);
6875 
6876 		/*
6877 		 * DC considers the stream backends changed if the
6878 		 * static metadata changes. Forcing the modeset also
6879 		 * gives a simple way for userspace to switch from
6880 		 * 8bpc to 10bpc when setting the metadata to enter
6881 		 * or exit HDR.
6882 		 *
6883 		 * Changing the static metadata after it's been
6884 		 * set is permissible, however. So only force a
6885 		 * modeset if we're entering or exiting HDR.
6886 		 */
6887 		new_crtc_state->mode_changed =
6888 			!old_con_state->hdr_output_metadata ||
6889 			!new_con_state->hdr_output_metadata;
6890 	}
6891 
6892 	return 0;
6893 }
6894 
6895 static const struct drm_connector_helper_funcs
6896 amdgpu_dm_connector_helper_funcs = {
	/*
	 * When hotplugging a second, larger display in fbcon mode, the higher
	 * resolution modes are filtered out by drm_mode_validate_size() and go
	 * missing once the user starts lightdm. So the modes list needs to be
	 * rebuilt in the get_modes callback, not just returned as a count.
	 */
6903 	.get_modes = get_modes,
6904 	.mode_valid = amdgpu_dm_connector_mode_valid,
6905 	.atomic_check = amdgpu_dm_connector_atomic_check,
6906 };
6907 
6908 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6909 {
6910 }
6911 
6912 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6913 {
6914 	struct drm_atomic_state *state = new_crtc_state->state;
6915 	struct drm_plane *plane;
6916 	int num_active = 0;
6917 
6918 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6919 		struct drm_plane_state *new_plane_state;
6920 
6921 		/* Cursor planes are "fake". */
6922 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
6923 			continue;
6924 
6925 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6926 
6927 		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
6933 			num_active += 1;
6934 			continue;
6935 		}
6936 
6937 		/* We need a framebuffer to be considered enabled. */
6938 		num_active += (new_plane_state->fb != NULL);
6939 	}
6940 
6941 	return num_active;
6942 }
6943 
6944 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6945 					 struct drm_crtc_state *new_crtc_state)
6946 {
6947 	struct dm_crtc_state *dm_new_crtc_state =
6948 		to_dm_crtc_state(new_crtc_state);
6949 
6950 	dm_new_crtc_state->active_planes = 0;
6951 
6952 	if (!dm_new_crtc_state->stream)
6953 		return;
6954 
6955 	dm_new_crtc_state->active_planes =
6956 		count_crtc_active_planes(new_crtc_state);
6957 }
6958 
6959 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6960 				       struct drm_atomic_state *state)
6961 {
6962 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6963 									  crtc);
6964 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6965 	struct dc *dc = adev->dm.dc;
6966 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6967 	int ret = -EINVAL;
6968 
6969 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6970 
6971 	dm_update_crtc_active_planes(crtc, crtc_state);
6972 
6973 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
6974 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
6975 		return ret;
6976 	}
6977 
6978 	/*
6979 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6980 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6981 	 * planes are disabled, which is not supported by the hardware. And there is legacy
6982 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6983 	 */
6984 	if (crtc_state->enable &&
6985 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6986 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6987 		return -EINVAL;
6988 	}
6989 
6990 	/* In some use cases, like reset, no stream is attached */
6991 	if (!dm_crtc_state->stream)
6992 		return 0;
6993 
6994 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6995 		return 0;
6996 
6997 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6998 	return ret;
6999 }
7000 
7001 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7002 				      const struct drm_display_mode *mode,
7003 				      struct drm_display_mode *adjusted_mode)
7004 {
7005 	return true;
7006 }
7007 
7008 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7009 	.disable = dm_crtc_helper_disable,
7010 	.atomic_check = dm_crtc_helper_atomic_check,
7011 	.mode_fixup = dm_crtc_helper_mode_fixup,
7012 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7013 };
7014 
7015 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7016 {
7017 
7018 }
7019 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7040 
7041 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7042 					  struct drm_crtc_state *crtc_state,
7043 					  struct drm_connector_state *conn_state)
7044 {
7045 	struct drm_atomic_state *state = crtc_state->state;
7046 	struct drm_connector *connector = conn_state->connector;
7047 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7048 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7049 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7050 	struct drm_dp_mst_topology_mgr *mst_mgr;
7051 	struct drm_dp_mst_port *mst_port;
7052 	enum dc_color_depth color_depth;
7053 	int clock, bpp = 0;
7054 	bool is_y420 = false;
7055 
7056 	if (!aconnector->port || !aconnector->dc_sink)
7057 		return 0;
7058 
7059 	mst_port = aconnector->port;
7060 	mst_mgr = &aconnector->mst_port->mst_mgr;
7061 
7062 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7063 		return 0;
7064 
7065 	if (!state->duplicated) {
7066 		int max_bpc = conn_state->max_requested_bpc;
7067 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7068 				aconnector->force_yuv420_output;
7069 		color_depth = convert_color_depth_from_display_info(connector,
7070 								    is_y420,
7071 								    max_bpc);
7072 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7073 		clock = adjusted_mode->clock;
7074 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7075 	}
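	/*
	 * The PBN (payload bandwidth number) expresses the stream bandwidth
	 * in MST link units; drm_dp_atomic_find_vcpi_slots() converts it into
	 * the number of 64ths-of-the-link (VCPI time slots) that have to be
	 * reserved on this port.
	 */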
7076 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7077 									   mst_mgr,
7078 									   mst_port,
7079 									   dm_new_connector_state->pbn,
7080 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7081 	if (dm_new_connector_state->vcpi_slots < 0) {
7082 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7083 		return dm_new_connector_state->vcpi_slots;
7084 	}
7085 	return 0;
7086 }
7087 
7088 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7089 	.disable = dm_encoder_helper_disable,
7090 	.atomic_check = dm_encoder_helper_atomic_check
7091 };
7092 
7093 #if defined(CONFIG_DRM_AMD_DC_DCN)
7094 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7095 					    struct dc_state *dc_state)
7096 {
7097 	struct dc_stream_state *stream = NULL;
7098 	struct drm_connector *connector;
7099 	struct drm_connector_state *new_con_state;
7100 	struct amdgpu_dm_connector *aconnector;
7101 	struct dm_connector_state *dm_conn_state;
7102 	int i, j, clock, bpp;
7103 	int vcpi, pbn_div, pbn = 0;
7104 
7105 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7106 
7107 		aconnector = to_amdgpu_dm_connector(connector);
7108 
7109 		if (!aconnector->port)
7110 			continue;
7111 
7112 		if (!new_con_state || !new_con_state->crtc)
7113 			continue;
7114 
7115 		dm_conn_state = to_dm_connector_state(new_con_state);
7116 
7117 		for (j = 0; j < dc_state->stream_count; j++) {
7118 			stream = dc_state->streams[j];
7119 			if (!stream)
7120 				continue;
7121 
			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7123 				break;
7124 
7125 			stream = NULL;
7126 		}
7127 
7128 		if (!stream)
7129 			continue;
7130 
7131 		if (stream->timing.flags.DSC != 1) {
7132 			drm_dp_mst_atomic_enable_dsc(state,
7133 						     aconnector->port,
7134 						     dm_conn_state->pbn,
7135 						     0,
7136 						     false);
7137 			continue;
7138 		}
7139 
7140 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7141 		bpp = stream->timing.dsc_cfg.bits_per_pixel;
7142 		clock = stream->timing.pix_clk_100hz / 10;
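		/*
		 * dsc_cfg.bits_per_pixel is in units of 1/16th of a bit;
		 * passing true for the dsc argument lets
		 * drm_dp_calc_pbn_mode() account for that scaling.
		 */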
7143 		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
7144 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7145 						    aconnector->port,
7146 						    pbn, pbn_div,
7147 						    true);
7148 		if (vcpi < 0)
7149 			return vcpi;
7150 
7151 		dm_conn_state->pbn = pbn;
7152 		dm_conn_state->vcpi_slots = vcpi;
7153 	}
7154 	return 0;
7155 }
7156 #endif
7157 
7158 static void dm_drm_plane_reset(struct drm_plane *plane)
7159 {
7160 	struct dm_plane_state *amdgpu_state = NULL;
7161 
7162 	if (plane->state)
7163 		plane->funcs->atomic_destroy_state(plane, plane->state);
7164 
7165 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7166 	WARN_ON(amdgpu_state == NULL);
7167 
7168 	if (amdgpu_state)
7169 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7170 }
7171 
7172 static struct drm_plane_state *
7173 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7174 {
7175 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7176 
7177 	old_dm_plane_state = to_dm_plane_state(plane->state);
7178 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7179 	if (!dm_plane_state)
7180 		return NULL;
7181 
7182 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7183 
7184 	if (old_dm_plane_state->dc_state) {
7185 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7186 		dc_plane_state_retain(dm_plane_state->dc_state);
7187 	}
7188 
7189 	return &dm_plane_state->base;
7190 }
7191 
7192 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7193 				struct drm_plane_state *state)
7194 {
7195 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7196 
7197 	if (dm_plane_state->dc_state)
7198 		dc_plane_state_release(dm_plane_state->dc_state);
7199 
7200 	drm_atomic_helper_plane_destroy_state(plane, state);
7201 }
7202 
7203 static const struct drm_plane_funcs dm_plane_funcs = {
7204 	.update_plane	= drm_atomic_helper_update_plane,
7205 	.disable_plane	= drm_atomic_helper_disable_plane,
7206 	.destroy	= drm_primary_helper_destroy,
7207 	.reset = dm_drm_plane_reset,
7208 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7209 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7210 	.format_mod_supported = dm_plane_format_mod_supported,
7211 };
7212 
7213 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7214 				      struct drm_plane_state *new_state)
7215 {
7216 	struct amdgpu_framebuffer *afb;
7217 	struct drm_gem_object *obj;
7218 	struct amdgpu_device *adev;
7219 	struct amdgpu_bo *rbo;
7220 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7221 	struct list_head list;
7222 	struct ttm_validate_buffer tv;
7223 	struct ww_acquire_ctx ticket;
7224 	uint32_t domain;
7225 	int r;
7226 
7227 	if (!new_state->fb) {
7228 		DRM_DEBUG_KMS("No FB bound\n");
7229 		return 0;
7230 	}
7231 
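	/*
	 * Scanout path: reserve the BO through TTM, pin it in a domain the
	 * display hardware can scan out from and make sure a GART binding
	 * exists, so that afb->address below is a valid address for DC.
	 */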
7232 	afb = to_amdgpu_framebuffer(new_state->fb);
7233 	obj = new_state->fb->obj[0];
7234 	rbo = gem_to_amdgpu_bo(obj);
7235 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7236 	INIT_LIST_HEAD(&list);
7237 
7238 	tv.bo = &rbo->tbo;
7239 	tv.num_shared = 1;
7240 	list_add(&tv.head, &list);
7241 
7242 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7243 	if (r) {
7244 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7245 		return r;
7246 	}
7247 
7248 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7249 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7250 	else
7251 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7252 
7253 	r = amdgpu_bo_pin(rbo, domain);
7254 	if (unlikely(r != 0)) {
7255 		if (r != -ERESTARTSYS)
7256 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7257 		ttm_eu_backoff_reservation(&ticket, &list);
7258 		return r;
7259 	}
7260 
7261 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7262 	if (unlikely(r != 0)) {
7263 		amdgpu_bo_unpin(rbo);
7264 		ttm_eu_backoff_reservation(&ticket, &list);
7265 		DRM_ERROR("%p bind failed\n", rbo);
7266 		return r;
7267 	}
7268 
7269 	ttm_eu_backoff_reservation(&ticket, &list);
7270 
7271 	afb->address = amdgpu_bo_gpu_offset(rbo);
7272 
7273 	amdgpu_bo_ref(rbo);
7274 
7275 	/**
7276 	 * We don't do surface updates on planes that have been newly created,
7277 	 * but we also don't have the afb->address during atomic check.
7278 	 *
7279 	 * Fill in buffer attributes depending on the address here, but only on
7280 	 * newly created planes since they're not being used by DC yet and this
7281 	 * won't modify global state.
7282 	 */
7283 	dm_plane_state_old = to_dm_plane_state(plane->state);
7284 	dm_plane_state_new = to_dm_plane_state(new_state);
7285 
7286 	if (dm_plane_state_new->dc_state &&
7287 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7288 		struct dc_plane_state *plane_state =
7289 			dm_plane_state_new->dc_state;
7290 		bool force_disable_dcc = !plane_state->dcc.enable;
7291 
7292 		fill_plane_buffer_attributes(
7293 			adev, afb, plane_state->format, plane_state->rotation,
7294 			afb->tiling_flags,
7295 			&plane_state->tiling_info, &plane_state->plane_size,
7296 			&plane_state->dcc, &plane_state->address,
7297 			afb->tmz_surface, force_disable_dcc);
7298 	}
7299 
7300 	return 0;
7301 }
7302 
7303 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7304 				       struct drm_plane_state *old_state)
7305 {
7306 	struct amdgpu_bo *rbo;
7307 	int r;
7308 
7309 	if (!old_state->fb)
7310 		return;
7311 
7312 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7313 	r = amdgpu_bo_reserve(rbo, false);
7314 	if (unlikely(r)) {
7315 		DRM_ERROR("failed to reserve rbo before unpin\n");
7316 		return;
7317 	}
7318 
7319 	amdgpu_bo_unpin(rbo);
7320 	amdgpu_bo_unreserve(rbo);
7321 	amdgpu_bo_unref(&rbo);
7322 }
7323 
7324 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7325 				       struct drm_crtc_state *new_crtc_state)
7326 {
7327 	struct drm_framebuffer *fb = state->fb;
7328 	int min_downscale, max_upscale;
7329 	int min_scale = 0;
7330 	int max_scale = INT_MAX;
7331 
7332 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7333 	if (fb && state->crtc) {
7334 		/* Validate viewport to cover the case when only the position changes */
7335 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7336 			int viewport_width = state->crtc_w;
7337 			int viewport_height = state->crtc_h;
7338 
7339 			if (state->crtc_x < 0)
7340 				viewport_width += state->crtc_x;
7341 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7342 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7343 
7344 			if (state->crtc_y < 0)
7345 				viewport_height += state->crtc_y;
7346 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7347 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7348 
7349 			if (viewport_width < 0 || viewport_height < 0) {
7350 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7351 				return -EINVAL;
7352 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7353 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7354 				return -EINVAL;
7355 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7356 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7357 				return -EINVAL;
7358 			}
7359 
7360 		}
7361 
7362 		/* Get min/max allowed scaling factors from plane caps. */
7363 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7364 					     &min_downscale, &max_upscale);
7365 		/*
7366 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7367 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7368 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7369 		 */
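		/*
		 * For example (hypothetical caps): max_upscale = 16000 (16x)
		 * and min_downscale = 250 (1/4x) give
		 * min_scale = (1000 << 16) / 16000 = 0x1000  (1/16 in 16.16)
		 * max_scale = (1000 << 16) / 250   = 0x40000 (4x in 16.16)
		 */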
7370 		min_scale = (1000 << 16) / max_upscale;
7371 		max_scale = (1000 << 16) / min_downscale;
7372 	}
7373 
7374 	return drm_atomic_helper_check_plane_state(
7375 		state, new_crtc_state, min_scale, max_scale, true, true);
7376 }
7377 
7378 static int dm_plane_atomic_check(struct drm_plane *plane,
7379 				 struct drm_atomic_state *state)
7380 {
7381 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7382 										 plane);
7383 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7384 	struct dc *dc = adev->dm.dc;
7385 	struct dm_plane_state *dm_plane_state;
7386 	struct dc_scaling_info scaling_info;
7387 	struct drm_crtc_state *new_crtc_state;
7388 	int ret;
7389 
7390 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7391 
7392 	dm_plane_state = to_dm_plane_state(new_plane_state);
7393 
7394 	if (!dm_plane_state->dc_state)
7395 		return 0;
7396 
7397 	new_crtc_state =
7398 		drm_atomic_get_new_crtc_state(state,
7399 					      new_plane_state->crtc);
7400 	if (!new_crtc_state)
7401 		return -EINVAL;
7402 
7403 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7404 	if (ret)
7405 		return ret;
7406 
7407 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7408 	if (ret)
7409 		return ret;
7410 
7411 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7412 		return 0;
7413 
7414 	return -EINVAL;
7415 }
7416 
7417 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7418 				       struct drm_atomic_state *state)
7419 {
7420 	/* Only support async updates on cursor planes. */
7421 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7422 		return -EINVAL;
7423 
7424 	return 0;
7425 }
7426 
7427 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7428 					 struct drm_atomic_state *state)
7429 {
7430 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7431 									   plane);
7432 	struct drm_plane_state *old_state =
7433 		drm_atomic_get_old_plane_state(state, plane);
7434 
7435 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7436 
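	/*
	 * Async cursor update: bypass the full atomic commit by folding the
	 * new framebuffer and position into the current plane state and
	 * programming the cursor hardware directly.
	 */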
7437 	swap(plane->state->fb, new_state->fb);
7438 
7439 	plane->state->src_x = new_state->src_x;
7440 	plane->state->src_y = new_state->src_y;
7441 	plane->state->src_w = new_state->src_w;
7442 	plane->state->src_h = new_state->src_h;
7443 	plane->state->crtc_x = new_state->crtc_x;
7444 	plane->state->crtc_y = new_state->crtc_y;
7445 	plane->state->crtc_w = new_state->crtc_w;
7446 	plane->state->crtc_h = new_state->crtc_h;
7447 
7448 	handle_cursor_update(plane, old_state);
7449 }
7450 
7451 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7452 	.prepare_fb = dm_plane_helper_prepare_fb,
7453 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7454 	.atomic_check = dm_plane_atomic_check,
7455 	.atomic_async_check = dm_plane_atomic_async_check,
7456 	.atomic_async_update = dm_plane_atomic_async_update
7457 };
7458 
/*
 * TODO: these are currently initialized to rgb formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper check.
 */
7465 static const uint32_t rgb_formats[] = {
7466 	DRM_FORMAT_XRGB8888,
7467 	DRM_FORMAT_ARGB8888,
7468 	DRM_FORMAT_RGBA8888,
7469 	DRM_FORMAT_XRGB2101010,
7470 	DRM_FORMAT_XBGR2101010,
7471 	DRM_FORMAT_ARGB2101010,
7472 	DRM_FORMAT_ABGR2101010,
7473 	DRM_FORMAT_XRGB16161616,
7474 	DRM_FORMAT_XBGR16161616,
7475 	DRM_FORMAT_ARGB16161616,
7476 	DRM_FORMAT_ABGR16161616,
7477 	DRM_FORMAT_XBGR8888,
7478 	DRM_FORMAT_ABGR8888,
7479 	DRM_FORMAT_RGB565,
7480 };
7481 
7482 static const uint32_t overlay_formats[] = {
7483 	DRM_FORMAT_XRGB8888,
7484 	DRM_FORMAT_ARGB8888,
7485 	DRM_FORMAT_RGBA8888,
7486 	DRM_FORMAT_XBGR8888,
7487 	DRM_FORMAT_ABGR8888,
7488 	DRM_FORMAT_RGB565
7489 };
7490 
7491 static const u32 cursor_formats[] = {
7492 	DRM_FORMAT_ARGB8888
7493 };
7494 
7495 static int get_plane_formats(const struct drm_plane *plane,
7496 			     const struct dc_plane_cap *plane_cap,
7497 			     uint32_t *formats, int max_formats)
7498 {
7499 	int i, num_formats = 0;
7500 
7501 	/*
7502 	 * TODO: Query support for each group of formats directly from
7503 	 * DC plane caps. This will require adding more formats to the
7504 	 * caps list.
7505 	 */
7506 
7507 	switch (plane->type) {
7508 	case DRM_PLANE_TYPE_PRIMARY:
7509 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7510 			if (num_formats >= max_formats)
7511 				break;
7512 
7513 			formats[num_formats++] = rgb_formats[i];
7514 		}
7515 
7516 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7517 			formats[num_formats++] = DRM_FORMAT_NV12;
7518 		if (plane_cap && plane_cap->pixel_format_support.p010)
7519 			formats[num_formats++] = DRM_FORMAT_P010;
7520 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7521 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7522 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7523 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7524 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7525 		}
7526 		break;
7527 
7528 	case DRM_PLANE_TYPE_OVERLAY:
7529 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7530 			if (num_formats >= max_formats)
7531 				break;
7532 
7533 			formats[num_formats++] = overlay_formats[i];
7534 		}
7535 		break;
7536 
7537 	case DRM_PLANE_TYPE_CURSOR:
7538 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7539 			if (num_formats >= max_formats)
7540 				break;
7541 
7542 			formats[num_formats++] = cursor_formats[i];
7543 		}
7544 		break;
7545 	}
7546 
7547 	return num_formats;
7548 }
7549 
7550 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7551 				struct drm_plane *plane,
7552 				unsigned long possible_crtcs,
7553 				const struct dc_plane_cap *plane_cap)
7554 {
7555 	uint32_t formats[32];
7556 	int num_formats;
7557 	int res = -EPERM;
7558 	unsigned int supported_rotations;
7559 	uint64_t *modifiers = NULL;
7560 
7561 	num_formats = get_plane_formats(plane, plane_cap, formats,
7562 					ARRAY_SIZE(formats));
7563 
7564 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7565 	if (res)
7566 		return res;
7567 
7568 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7569 				       &dm_plane_funcs, formats, num_formats,
7570 				       modifiers, plane->type, NULL);
7571 	kfree(modifiers);
7572 	if (res)
7573 		return res;
7574 
7575 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7576 	    plane_cap && plane_cap->per_pixel_alpha) {
7577 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7578 					  BIT(DRM_MODE_BLEND_PREMULTI);
7579 
7580 		drm_plane_create_alpha_property(plane);
7581 		drm_plane_create_blend_mode_property(plane, blend_caps);
7582 	}
7583 
7584 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7585 	    plane_cap &&
7586 	    (plane_cap->pixel_format_support.nv12 ||
7587 	     plane_cap->pixel_format_support.p010)) {
7588 		/* This only affects YUV formats. */
7589 		drm_plane_create_color_properties(
7590 			plane,
7591 			BIT(DRM_COLOR_YCBCR_BT601) |
7592 			BIT(DRM_COLOR_YCBCR_BT709) |
7593 			BIT(DRM_COLOR_YCBCR_BT2020),
7594 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7595 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7596 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7597 	}
7598 
7599 	supported_rotations =
7600 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7601 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7602 
7603 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7604 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7605 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7606 						   supported_rotations);
7607 
7608 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7609 
7610 	/* Create (reset) the plane state */
7611 	if (plane->funcs->reset)
7612 		plane->funcs->reset(plane);
7613 
7614 	return 0;
7615 }
7616 
7617 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7618 			       struct drm_plane *plane,
7619 			       uint32_t crtc_index)
7620 {
7621 	struct amdgpu_crtc *acrtc = NULL;
7622 	struct drm_plane *cursor_plane;
7623 
7624 	int res = -ENOMEM;
7625 
7626 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7627 	if (!cursor_plane)
7628 		goto fail;
7629 
7630 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7631 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7632 
7633 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7634 	if (!acrtc)
7635 		goto fail;
7636 
7637 	res = drm_crtc_init_with_planes(
7638 			dm->ddev,
7639 			&acrtc->base,
7640 			plane,
7641 			cursor_plane,
7642 			&amdgpu_dm_crtc_funcs, NULL);
7643 
7644 	if (res)
7645 		goto fail;
7646 
7647 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7648 
7649 	/* Create (reset) the plane state */
7650 	if (acrtc->base.funcs->reset)
7651 		acrtc->base.funcs->reset(&acrtc->base);
7652 
7653 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7654 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7655 
7656 	acrtc->crtc_id = crtc_index;
7657 	acrtc->base.enabled = false;
7658 	acrtc->otg_inst = -1;
7659 
7660 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7661 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7662 				   true, MAX_COLOR_LUT_ENTRIES);
7663 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7664 
7665 	return 0;
7666 
7667 fail:
7668 	kfree(acrtc);
7669 	kfree(cursor_plane);
7670 	return res;
7671 }
7672 
7673 
7674 static int to_drm_connector_type(enum signal_type st)
7675 {
7676 	switch (st) {
7677 	case SIGNAL_TYPE_HDMI_TYPE_A:
7678 		return DRM_MODE_CONNECTOR_HDMIA;
7679 	case SIGNAL_TYPE_EDP:
7680 		return DRM_MODE_CONNECTOR_eDP;
7681 	case SIGNAL_TYPE_LVDS:
7682 		return DRM_MODE_CONNECTOR_LVDS;
7683 	case SIGNAL_TYPE_RGB:
7684 		return DRM_MODE_CONNECTOR_VGA;
7685 	case SIGNAL_TYPE_DISPLAY_PORT:
7686 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7687 		return DRM_MODE_CONNECTOR_DisplayPort;
7688 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7689 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7690 		return DRM_MODE_CONNECTOR_DVID;
7691 	case SIGNAL_TYPE_VIRTUAL:
7692 		return DRM_MODE_CONNECTOR_VIRTUAL;
7693 
7694 	default:
7695 		return DRM_MODE_CONNECTOR_Unknown;
7696 	}
7697 }
7698 
7699 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7700 {
7701 	struct drm_encoder *encoder;
7702 
7703 	/* There is only one encoder per connector */
7704 	drm_connector_for_each_possible_encoder(connector, encoder)
7705 		return encoder;
7706 
7707 	return NULL;
7708 }
7709 
7710 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7711 {
7712 	struct drm_encoder *encoder;
7713 	struct amdgpu_encoder *amdgpu_encoder;
7714 
7715 	encoder = amdgpu_dm_connector_to_encoder(connector);
7716 
7717 	if (encoder == NULL)
7718 		return;
7719 
7720 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7721 
7722 	amdgpu_encoder->native_mode.clock = 0;
7723 
7724 	if (!list_empty(&connector->probed_modes)) {
7725 		struct drm_display_mode *preferred_mode = NULL;
7726 
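		/*
		 * The probed list was sorted by drm_mode_sort(), which places
		 * preferred modes first, so only the head of the list needs
		 * to be checked here.
		 */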
7727 		list_for_each_entry(preferred_mode,
7728 				    &connector->probed_modes,
7729 				    head) {
7730 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7731 				amdgpu_encoder->native_mode = *preferred_mode;
7732 
7733 			break;
7734 		}
7735 
7736 	}
7737 }
7738 
7739 static struct drm_display_mode *
7740 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7741 			     char *name,
7742 			     int hdisplay, int vdisplay)
7743 {
7744 	struct drm_device *dev = encoder->dev;
7745 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7746 	struct drm_display_mode *mode = NULL;
7747 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7748 
7749 	mode = drm_mode_duplicate(dev, native_mode);
7750 
7751 	if (mode == NULL)
7752 		return NULL;
7753 
7754 	mode->hdisplay = hdisplay;
7755 	mode->vdisplay = vdisplay;
7756 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7757 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7758 
7759 	return mode;
7760 
7761 }
7762 
7763 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7764 						 struct drm_connector *connector)
7765 {
7766 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7767 	struct drm_display_mode *mode = NULL;
7768 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7769 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7770 				to_amdgpu_dm_connector(connector);
7771 	int i;
7772 	int n;
7773 	struct mode_size {
7774 		char name[DRM_DISPLAY_MODE_LEN];
7775 		int w;
7776 		int h;
7777 	} common_modes[] = {
7778 		{  "640x480",  640,  480},
7779 		{  "800x600",  800,  600},
7780 		{ "1024x768", 1024,  768},
7781 		{ "1280x720", 1280,  720},
7782 		{ "1280x800", 1280,  800},
7783 		{"1280x1024", 1280, 1024},
7784 		{ "1440x900", 1440,  900},
7785 		{"1680x1050", 1680, 1050},
7786 		{"1600x1200", 1600, 1200},
7787 		{"1920x1080", 1920, 1080},
7788 		{"1920x1200", 1920, 1200}
7789 	};
7790 
7791 	n = ARRAY_SIZE(common_modes);
7792 
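	/*
	 * Offer a set of common resolutions below the panel's native mode,
	 * skipping anything that exceeds the native size or duplicates a
	 * mode already in the probed list.
	 */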
7793 	for (i = 0; i < n; i++) {
7794 		struct drm_display_mode *curmode = NULL;
7795 		bool mode_existed = false;
7796 
7797 		if (common_modes[i].w > native_mode->hdisplay ||
7798 		    common_modes[i].h > native_mode->vdisplay ||
7799 		   (common_modes[i].w == native_mode->hdisplay &&
7800 		    common_modes[i].h == native_mode->vdisplay))
7801 			continue;
7802 
7803 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7804 			if (common_modes[i].w == curmode->hdisplay &&
7805 			    common_modes[i].h == curmode->vdisplay) {
7806 				mode_existed = true;
7807 				break;
7808 			}
7809 		}
7810 
7811 		if (mode_existed)
7812 			continue;
7813 
		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
7819 	}
7820 }
7821 
7822 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7823 					      struct edid *edid)
7824 {
7825 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7826 			to_amdgpu_dm_connector(connector);
7827 
7828 	if (edid) {
7829 		/* empty probed_modes */
7830 		INIT_LIST_HEAD(&connector->probed_modes);
7831 		amdgpu_dm_connector->num_modes =
7832 				drm_add_edid_modes(connector, edid);
7833 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since the EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list could be of a higher, preferred
		 * resolution: for example, 3840x2160 as the preferred
		 * timing in the base EDID and 4096x2160 as the preferred
		 * resolution in a later DID extension block.
		 */
7842 		drm_mode_sort(&connector->probed_modes);
7843 		amdgpu_dm_get_native_mode(connector);
7844 
7845 		/* Freesync capabilities are reset by calling
7846 		 * drm_add_edid_modes() and need to be
7847 		 * restored here.
7848 		 */
7849 		amdgpu_dm_update_freesync_caps(connector, edid);
7850 	} else {
7851 		amdgpu_dm_connector->num_modes = 0;
7852 	}
7853 }
7854 
7855 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7856 			      struct drm_display_mode *mode)
7857 {
7858 	struct drm_display_mode *m;
7859 
7860 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
7861 		if (drm_mode_equal(m, mode))
7862 			return true;
7863 	}
7864 
7865 	return false;
7866 }
7867 
7868 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7869 {
7870 	const struct drm_display_mode *m;
7871 	struct drm_display_mode *new_mode;
7872 	uint i;
7873 	uint32_t new_modes_count = 0;
7874 
7875 	/* Standard FPS values
7876 	 *
7877 	 * 23.976   - TV/NTSC
7878 	 * 24 	    - Cinema
7879 	 * 25 	    - TV/PAL
7880 	 * 29.97    - TV/NTSC
7881 	 * 30 	    - TV/NTSC
7882 	 * 48 	    - Cinema HFR
7883 	 * 50 	    - TV/PAL
7884 	 * 60 	    - Commonly used
7885 	 * 48,72,96 - Multiples of 24
7886 	 */
7887 	static const uint32_t common_rates[] = {
7888 		23976, 24000, 25000, 29970, 30000,
7889 		48000, 50000, 60000, 72000, 96000
7890 	};
7891 
7892 	/*
7893 	 * Find mode with highest refresh rate with the same resolution
7894 	 * as the preferred mode. Some monitors report a preferred mode
7895 	 * with lower resolution than the highest refresh rate supported.
7896 	 */
7897 
7898 	m = get_highest_refresh_rate_mode(aconnector, true);
7899 	if (!m)
7900 		return 0;
7901 
7902 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7903 		uint64_t target_vtotal, target_vtotal_diff;
7904 		uint64_t num, den;
7905 
7906 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7907 			continue;
7908 
7909 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7910 		    common_rates[i] > aconnector->max_vfreq * 1000)
7911 			continue;
7912 
7913 		num = (unsigned long long)m->clock * 1000 * 1000;
7914 		den = common_rates[i] * (unsigned long long)m->htotal;
7915 		target_vtotal = div_u64(num, den);
7916 		target_vtotal_diff = target_vtotal - m->vtotal;
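		/*
		 * Example (standard CEA 1080p60 timing, for illustration):
		 * clock = 148500, htotal = 2200, vtotal = 1125 is ~60 Hz.
		 * For a 30 Hz variant the pixel clock and htotal are kept and
		 * the vertical blank is stretched:
		 * target_vtotal = 148500 * 10^6 / (30000 * 2200) = 2250,
		 * so vtotal/vsync_start/vsync_end each grow by 1125 lines.
		 */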
7917 
7918 		/* Check for illegal modes */
7919 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7920 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7921 		    m->vtotal + target_vtotal_diff < m->vsync_end)
7922 			continue;
7923 
7924 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7925 		if (!new_mode)
7926 			goto out;
7927 
7928 		new_mode->vtotal += (u16)target_vtotal_diff;
7929 		new_mode->vsync_start += (u16)target_vtotal_diff;
7930 		new_mode->vsync_end += (u16)target_vtotal_diff;
7931 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7932 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7933 
7934 		if (!is_duplicate_mode(aconnector, new_mode)) {
7935 			drm_mode_probed_add(&aconnector->base, new_mode);
7936 			new_modes_count += 1;
7937 		} else
7938 			drm_mode_destroy(aconnector->base.dev, new_mode);
7939 	}
7940  out:
7941 	return new_modes_count;
7942 }
7943 
7944 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7945 						   struct edid *edid)
7946 {
7947 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7948 		to_amdgpu_dm_connector(connector);
7949 
7950 	if (!(amdgpu_freesync_vid_mode && edid))
7951 		return;
7952 
7953 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7954 		amdgpu_dm_connector->num_modes +=
7955 			add_fs_modes(amdgpu_dm_connector);
7956 }
7957 
7958 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7959 {
7960 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7961 			to_amdgpu_dm_connector(connector);
7962 	struct drm_encoder *encoder;
7963 	struct edid *edid = amdgpu_dm_connector->edid;
7964 
7965 	encoder = amdgpu_dm_connector_to_encoder(connector);
7966 
7967 	if (!drm_edid_is_valid(edid)) {
7968 		amdgpu_dm_connector->num_modes =
7969 				drm_add_modes_noedid(connector, 640, 480);
7970 	} else {
7971 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7972 		amdgpu_dm_connector_add_common_modes(encoder, connector);
7973 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7974 	}
7975 	amdgpu_dm_fbc_init(connector);
7976 
7977 	return amdgpu_dm_connector->num_modes;
7978 }
7979 
7980 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7981 				     struct amdgpu_dm_connector *aconnector,
7982 				     int connector_type,
7983 				     struct dc_link *link,
7984 				     int link_index)
7985 {
7986 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7987 
7988 	/*
7989 	 * Some of the properties below require access to state, like bpc.
7990 	 * Allocate some default initial connector state with our reset helper.
7991 	 */
7992 	if (aconnector->base.funcs->reset)
7993 		aconnector->base.funcs->reset(&aconnector->base);
7994 
7995 	aconnector->connector_id = link_index;
7996 	aconnector->dc_link = link;
7997 	aconnector->base.interlace_allowed = false;
7998 	aconnector->base.doublescan_allowed = false;
7999 	aconnector->base.stereo_allowed = false;
8000 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8001 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8002 	aconnector->audio_inst = -1;
8003 	mutex_init(&aconnector->hpd_lock);
8004 
	/*
	 * Configure HPD hot plug support. The default value of
	 * connector->polled is 0, which means HPD hot plug is not supported.
	 */
8009 	switch (connector_type) {
8010 	case DRM_MODE_CONNECTOR_HDMIA:
8011 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8012 		aconnector->base.ycbcr_420_allowed =
8013 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8014 		break;
8015 	case DRM_MODE_CONNECTOR_DisplayPort:
8016 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8017 		aconnector->base.ycbcr_420_allowed =
8018 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8019 		break;
8020 	case DRM_MODE_CONNECTOR_DVID:
8021 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8022 		break;
8023 	default:
8024 		break;
8025 	}
8026 
8027 	drm_object_attach_property(&aconnector->base.base,
8028 				dm->ddev->mode_config.scaling_mode_property,
8029 				DRM_MODE_SCALE_NONE);
8030 
8031 	drm_object_attach_property(&aconnector->base.base,
8032 				adev->mode_info.underscan_property,
8033 				UNDERSCAN_OFF);
8034 	drm_object_attach_property(&aconnector->base.base,
8035 				adev->mode_info.underscan_hborder_property,
8036 				0);
8037 	drm_object_attach_property(&aconnector->base.base,
8038 				adev->mode_info.underscan_vborder_property,
8039 				0);
8040 
8041 	if (!aconnector->mst_port)
8042 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8043 
8044 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8045 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8046 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8047 
8048 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8049 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8050 		drm_object_attach_property(&aconnector->base.base,
8051 				adev->mode_info.abm_level_property, 0);
8052 	}
8053 
8054 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8055 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8056 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8057 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8058 
8059 		if (!aconnector->mst_port)
8060 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8061 
8062 #ifdef CONFIG_DRM_AMD_DC_HDCP
8063 		if (adev->dm.hdcp_workqueue)
8064 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8065 #endif
8066 	}
8067 }
8068 
8069 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8070 			      struct i2c_msg *msgs, int num)
8071 {
8072 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8073 	struct ddc_service *ddc_service = i2c->ddc_service;
8074 	struct i2c_command cmd;
8075 	int i;
8076 	int result = -EIO;
8077 
8078 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8079 
8080 	if (!cmd.payloads)
8081 		return result;
8082 
8083 	cmd.number_of_payloads = num;
8084 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8085 	cmd.speed = 100;
8086 
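	/*
	 * Translate each i2c_msg into a DC i2c_payload (same address, length
	 * and buffer; direction taken from I2C_M_RD) and submit the whole
	 * transaction through DC's DDC engine.
	 */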
8087 	for (i = 0; i < num; i++) {
8088 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8089 		cmd.payloads[i].address = msgs[i].addr;
8090 		cmd.payloads[i].length = msgs[i].len;
8091 		cmd.payloads[i].data = msgs[i].buf;
8092 	}
8093 
8094 	if (dc_submit_i2c(
8095 			ddc_service->ctx->dc,
8096 			ddc_service->ddc_pin->hw_info.ddc_channel,
8097 			&cmd))
8098 		result = num;
8099 
8100 	kfree(cmd.payloads);
8101 	return result;
8102 }
8103 
8104 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8105 {
8106 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8107 }
8108 
8109 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8110 	.master_xfer = amdgpu_dm_i2c_xfer,
8111 	.functionality = amdgpu_dm_i2c_func,
8112 };
8113 
8114 static struct amdgpu_i2c_adapter *
8115 create_i2c(struct ddc_service *ddc_service,
8116 	   int link_index,
8117 	   int *res)
8118 {
8119 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8120 	struct amdgpu_i2c_adapter *i2c;
8121 
8122 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8123 	if (!i2c)
8124 		return NULL;
8125 	i2c->base.owner = THIS_MODULE;
8126 	i2c->base.class = I2C_CLASS_DDC;
8127 	i2c->base.dev.parent = &adev->pdev->dev;
8128 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8129 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8130 	i2c_set_adapdata(&i2c->base, i2c);
8131 	i2c->ddc_service = ddc_service;
8132 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8133 
8134 	return i2c;
8135 }
8136 
8137 
8138 /*
8139  * Note: this function assumes that dc_link_detect() was called for the
8140  * dc_link which will be represented by this aconnector.
8141  */
8142 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8143 				    struct amdgpu_dm_connector *aconnector,
8144 				    uint32_t link_index,
8145 				    struct amdgpu_encoder *aencoder)
8146 {
8147 	int res = 0;
8148 	int connector_type;
8149 	struct dc *dc = dm->dc;
8150 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8151 	struct amdgpu_i2c_adapter *i2c;
8152 
8153 	link->priv = aconnector;
8154 
8155 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8156 
8157 	i2c = create_i2c(link->ddc, link->link_index, &res);
8158 	if (!i2c) {
8159 		DRM_ERROR("Failed to create i2c adapter data\n");
8160 		return -ENOMEM;
8161 	}
8162 
8163 	aconnector->i2c = i2c;
8164 	res = i2c_add_adapter(&i2c->base);
8165 
8166 	if (res) {
8167 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8168 		goto out_free;
8169 	}
8170 
8171 	connector_type = to_drm_connector_type(link->connector_signal);
8172 
8173 	res = drm_connector_init_with_ddc(
8174 			dm->ddev,
8175 			&aconnector->base,
8176 			&amdgpu_dm_connector_funcs,
8177 			connector_type,
8178 			&i2c->base);
8179 
8180 	if (res) {
8181 		DRM_ERROR("connector_init failed\n");
8182 		aconnector->connector_id = -1;
8183 		goto out_free;
8184 	}
8185 
8186 	drm_connector_helper_add(
8187 			&aconnector->base,
8188 			&amdgpu_dm_connector_helper_funcs);
8189 
8190 	amdgpu_dm_connector_init_helper(
8191 		dm,
8192 		aconnector,
8193 		connector_type,
8194 		link,
8195 		link_index);
8196 
8197 	drm_connector_attach_encoder(
8198 		&aconnector->base, &aencoder->base);
8199 
8200 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8201 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8202 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8203 
8204 out_free:
8205 	if (res) {
8206 		kfree(i2c);
8207 		aconnector->i2c = NULL;
8208 	}
8209 	return res;
8210 }
8211 
8212 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8213 {
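	/* Bitmask of all usable CRTCs: (1 << num_crtc) - 1, capped at 6. */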
8214 	switch (adev->mode_info.num_crtc) {
8215 	case 1:
8216 		return 0x1;
8217 	case 2:
8218 		return 0x3;
8219 	case 3:
8220 		return 0x7;
8221 	case 4:
8222 		return 0xf;
8223 	case 5:
8224 		return 0x1f;
8225 	case 6:
8226 	default:
8227 		return 0x3f;
8228 	}
8229 }
8230 
8231 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8232 				  struct amdgpu_encoder *aencoder,
8233 				  uint32_t link_index)
8234 {
8235 	struct amdgpu_device *adev = drm_to_adev(dev);
8236 
8237 	int res = drm_encoder_init(dev,
8238 				   &aencoder->base,
8239 				   &amdgpu_dm_encoder_funcs,
8240 				   DRM_MODE_ENCODER_TMDS,
8241 				   NULL);
8242 
8243 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8244 
8245 	if (!res)
8246 		aencoder->encoder_id = link_index;
8247 	else
8248 		aencoder->encoder_id = -1;
8249 
8250 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8251 
8252 	return res;
8253 }
8254 
8255 static void manage_dm_interrupts(struct amdgpu_device *adev,
8256 				 struct amdgpu_crtc *acrtc,
8257 				 bool enable)
8258 {
8259 	/*
8260 	 * We have no guarantee that the frontend index maps to the same
8261 	 * backend index - some even map to more than one.
8262 	 *
8263 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8264 	 */
8265 	int irq_type =
8266 		amdgpu_display_crtc_idx_to_irq_type(
8267 			adev,
8268 			acrtc->crtc_id);
8269 
8270 	if (enable) {
8271 		drm_crtc_vblank_on(&acrtc->base);
8272 		amdgpu_irq_get(
8273 			adev,
8274 			&adev->pageflip_irq,
8275 			irq_type);
8276 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8277 		amdgpu_irq_get(
8278 			adev,
8279 			&adev->vline0_irq,
8280 			irq_type);
8281 #endif
8282 	} else {
8283 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8284 		amdgpu_irq_put(
8285 			adev,
8286 			&adev->vline0_irq,
8287 			irq_type);
8288 #endif
8289 		amdgpu_irq_put(
8290 			adev,
8291 			&adev->pageflip_irq,
8292 			irq_type);
8293 		drm_crtc_vblank_off(&acrtc->base);
8294 	}
8295 }
8296 
8297 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8298 				      struct amdgpu_crtc *acrtc)
8299 {
8300 	int irq_type =
8301 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8302 
	/*
	 * This reads the current state for the IRQ and forces a reapply of
	 * the setting to hardware.
	 */
8307 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8308 }
8309 
8310 static bool
8311 is_scaling_state_different(const struct dm_connector_state *dm_state,
8312 			   const struct dm_connector_state *old_dm_state)
8313 {
8314 	if (dm_state->scaling != old_dm_state->scaling)
8315 		return true;
8316 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8317 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8318 			return true;
8319 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8320 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8321 			return true;
8322 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8323 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8324 		return true;
8325 	return false;
8326 }
8327 
8328 #ifdef CONFIG_DRM_AMD_DC_HDCP
8329 static bool is_content_protection_different(struct drm_connector_state *state,
8330 					    const struct drm_connector_state *old_state,
8331 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8332 {
8333 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8334 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8335 
8336 	/* Handle: Type0/1 change */
8337 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8338 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8339 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8340 		return true;
8341 	}
8342 
	/* CP is being re-enabled, ignore this.
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
8347 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8348 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8349 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8350 		return false;
8351 	}
8352 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the
	 * restored state will be ENABLED.
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
8357 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8358 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8359 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8360 
	/* Check if something is connected/enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
8366 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8367 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8368 		dm_con_state->update_hdcp = false;
8369 		return true;
8370 	}
8371 
8372 	/*
8373 	 * Handles:	UNDESIRED -> UNDESIRED
8374 	 *		DESIRED -> DESIRED
8375 	 *		ENABLED -> ENABLED
8376 	 */
8377 	if (old_state->content_protection == state->content_protection)
8378 		return false;
8379 
8380 	/*
8381 	 * Handles:	UNDESIRED -> DESIRED
8382 	 *		DESIRED -> UNDESIRED
8383 	 *		ENABLED -> UNDESIRED
8384 	 */
8385 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8386 		return true;
8387 
8388 	/*
8389 	 * Handles:	DESIRED -> ENABLED
8390 	 */
8391 	return false;
8392 }
8393 
8394 #endif
8395 static void remove_stream(struct amdgpu_device *adev,
8396 			  struct amdgpu_crtc *acrtc,
8397 			  struct dc_stream_state *stream)
8398 {
8399 	/* this is the update mode case */
8400 
8401 	acrtc->otg_inst = -1;
8402 	acrtc->enabled = false;
8403 }
8404 
8405 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8406 			       struct dc_cursor_position *position)
8407 {
8408 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8409 	int x, y;
8410 	int xorigin = 0, yorigin = 0;
8411 
8412 	if (!crtc || !plane->state->fb)
8413 		return 0;
8414 
8415 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8416 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8417 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8418 			  __func__,
8419 			  plane->state->crtc_w,
8420 			  plane->state->crtc_h);
8421 		return -EINVAL;
8422 	}
8423 
8424 	x = plane->state->crtc_x;
8425 	y = plane->state->crtc_y;
8426 
8427 	if (x <= -amdgpu_crtc->max_cursor_width ||
8428 	    y <= -amdgpu_crtc->max_cursor_height)
8429 		return 0;
8430 
8431 	if (x < 0) {
8432 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8433 		x = 0;
8434 	}
8435 	if (y < 0) {
8436 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8437 		y = 0;
8438 	}
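	/*
	 * If the cursor hangs off the top/left edge, clamp the position to 0
	 * and shift the hotspot instead, e.g. x = -10 becomes x = 0 with
	 * x_hotspot = 10 so the visible part stays aligned.
	 */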
8439 	position->enable = true;
8440 	position->translate_by_source = true;
8441 	position->x = x;
8442 	position->y = y;
8443 	position->x_hotspot = xorigin;
8444 	position->y_hotspot = yorigin;
8445 
8446 	return 0;
8447 }
8448 
8449 static void handle_cursor_update(struct drm_plane *plane,
8450 				 struct drm_plane_state *old_plane_state)
8451 {
8452 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8453 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8454 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8455 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8456 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8457 	uint64_t address = afb ? afb->address : 0;
8458 	struct dc_cursor_position position = {0};
8459 	struct dc_cursor_attributes attributes;
8460 	int ret;
8461 
8462 	if (!plane->state->fb && !old_plane_state->fb)
8463 		return;
8464 
8465 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8466 		      __func__,
8467 		      amdgpu_crtc->crtc_id,
8468 		      plane->state->crtc_w,
8469 		      plane->state->crtc_h);
8470 
8471 	ret = get_cursor_position(plane, crtc, &position);
8472 	if (ret)
8473 		return;
8474 
8475 	if (!position.enable) {
8476 		/* turn off cursor */
8477 		if (crtc_state && crtc_state->stream) {
8478 			mutex_lock(&adev->dm.dc_lock);
8479 			dc_stream_set_cursor_position(crtc_state->stream,
8480 						      &position);
8481 			mutex_unlock(&adev->dm.dc_lock);
8482 		}
8483 		return;
8484 	}
8485 
8486 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8487 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8488 
8489 	memset(&attributes, 0, sizeof(attributes));
8490 	attributes.address.high_part = upper_32_bits(address);
8491 	attributes.address.low_part  = lower_32_bits(address);
8492 	attributes.width             = plane->state->crtc_w;
8493 	attributes.height            = plane->state->crtc_h;
8494 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8495 	attributes.rotation_angle    = 0;
8496 	attributes.attribute_flags.value = 0;
8497 
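	/* Convert the framebuffer byte pitch into a pitch expressed in pixels. */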
8498 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8499 
8500 	if (crtc_state->stream) {
8501 		mutex_lock(&adev->dm.dc_lock);
8502 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8503 							 &attributes))
8504 			DRM_ERROR("DC failed to set cursor attributes\n");
8505 
8506 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8507 						   &position))
8508 			DRM_ERROR("DC failed to set cursor position\n");
8509 		mutex_unlock(&adev->dm.dc_lock);
8510 	}
8511 }
8512 
8513 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8514 {
8515 
8516 	assert_spin_locked(&acrtc->base.dev->event_lock);
8517 	WARN_ON(acrtc->event);
8518 
8519 	acrtc->event = acrtc->base.state->event;
8520 
8521 	/* Set the flip status */
8522 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8523 
8524 	/* Mark this event as consumed */
8525 	acrtc->base.state->event = NULL;
8526 
8527 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8528 		     acrtc->crtc_id);
8529 }
8530 
8531 static void update_freesync_state_on_stream(
8532 	struct amdgpu_display_manager *dm,
8533 	struct dm_crtc_state *new_crtc_state,
8534 	struct dc_stream_state *new_stream,
8535 	struct dc_plane_state *surface,
8536 	u32 flip_timestamp_in_us)
8537 {
8538 	struct mod_vrr_params vrr_params;
8539 	struct dc_info_packet vrr_infopacket = {0};
8540 	struct amdgpu_device *adev = dm->adev;
8541 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8542 	unsigned long flags;
8543 	bool pack_sdp_v1_3 = false;
8544 
8545 	if (!new_stream)
8546 		return;
8547 
8548 	/*
8549 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8550 	 * For now it's sufficient to just guard against these conditions.
8551 	 */
8552 
8553 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8554 		return;
8555 
8556 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8557 	vrr_params = acrtc->dm_irq_params.vrr_params;
8558 
8559 	if (surface) {
8560 		mod_freesync_handle_preflip(
8561 			dm->freesync_module,
8562 			surface,
8563 			new_stream,
8564 			flip_timestamp_in_us,
8565 			&vrr_params);
8566 
8567 		if (adev->family < AMDGPU_FAMILY_AI &&
8568 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8569 			mod_freesync_handle_v_update(dm->freesync_module,
8570 						     new_stream, &vrr_params);
8571 
8572 			/* Need to call this before the frame ends. */
8573 			dc_stream_adjust_vmin_vmax(dm->dc,
8574 						   new_crtc_state->stream,
8575 						   &vrr_params.adjust);
8576 		}
8577 	}
8578 
8579 	mod_freesync_build_vrr_infopacket(
8580 		dm->freesync_module,
8581 		new_stream,
8582 		&vrr_params,
8583 		PACKET_TYPE_VRR,
8584 		TRANSFER_FUNC_UNKNOWN,
8585 		&vrr_infopacket,
8586 		pack_sdp_v1_3);
8587 
8588 	new_crtc_state->freesync_timing_changed |=
8589 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8590 			&vrr_params.adjust,
8591 			sizeof(vrr_params.adjust)) != 0);
8592 
8593 	new_crtc_state->freesync_vrr_info_changed |=
8594 		(memcmp(&new_crtc_state->vrr_infopacket,
8595 			&vrr_infopacket,
8596 			sizeof(vrr_infopacket)) != 0);
8597 
8598 	acrtc->dm_irq_params.vrr_params = vrr_params;
8599 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8600 
8601 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8602 	new_stream->vrr_infopacket = vrr_infopacket;
8603 
8604 	if (new_crtc_state->freesync_vrr_info_changed)
8605 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8606 			      new_crtc_state->base.crtc->base.id,
8607 			      (int)new_crtc_state->base.vrr_enabled,
8608 			      (int)vrr_params.state);
8609 
8610 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8611 }
8612 
8613 static void update_stream_irq_parameters(
8614 	struct amdgpu_display_manager *dm,
8615 	struct dm_crtc_state *new_crtc_state)
8616 {
8617 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8618 	struct mod_vrr_params vrr_params;
8619 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8620 	struct amdgpu_device *adev = dm->adev;
8621 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8622 	unsigned long flags;
8623 
8624 	if (!new_stream)
8625 		return;
8626 
8627 	/*
8628 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8629 	 * For now it's sufficient to just guard against these conditions.
8630 	 */
8631 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8632 		return;
8633 
8634 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8635 	vrr_params = acrtc->dm_irq_params.vrr_params;
8636 
8637 	if (new_crtc_state->vrr_supported &&
8638 	    config.min_refresh_in_uhz &&
8639 	    config.max_refresh_in_uhz) {
8640 		/*
8641 		 * if freesync compatible mode was set, config.state will be set
8642 		 * in atomic check
8643 		 */
8644 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8645 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8646 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8647 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8648 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8649 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8650 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8651 		} else {
8652 			config.state = new_crtc_state->base.vrr_enabled ?
8653 						     VRR_STATE_ACTIVE_VARIABLE :
8654 						     VRR_STATE_INACTIVE;
8655 		}
8656 	} else {
8657 		config.state = VRR_STATE_UNSUPPORTED;
8658 	}
8659 
8660 	mod_freesync_build_vrr_params(dm->freesync_module,
8661 				      new_stream,
8662 				      &config, &vrr_params);
8663 
8664 	new_crtc_state->freesync_timing_changed |=
8665 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8666 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8667 
8668 	new_crtc_state->freesync_config = config;
8669 	/* Copy state for access from DM IRQ handler */
8670 	acrtc->dm_irq_params.freesync_config = config;
8671 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8672 	acrtc->dm_irq_params.vrr_params = vrr_params;
8673 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8674 }
8675 
8676 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8677 					    struct dm_crtc_state *new_state)
8678 {
8679 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8680 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8681 
8682 	if (!old_vrr_active && new_vrr_active) {
8683 		/* Transition VRR inactive -> active:
8684 		 * While VRR is active, we must not disable the vblank irq, as a
8685 		 * reenable after disable would compute bogus vblank/pflip
8686 		 * timestamps if the reenable happened inside the display front porch.
8687 		 *
8688 		 * We also need vupdate irq for the actual core vblank handling
8689 		 * at end of vblank.
8690 		 */
8691 		dm_set_vupdate_irq(new_state->base.crtc, true);
8692 		drm_crtc_vblank_get(new_state->base.crtc);
8693 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8694 				 __func__, new_state->base.crtc->base.id);
8695 	} else if (old_vrr_active && !new_vrr_active) {
8696 		/* Transition VRR active -> inactive:
8697 		 * Allow vblank irq disable again for fixed refresh rate.
8698 		 */
8699 		dm_set_vupdate_irq(new_state->base.crtc, false);
8700 		drm_crtc_vblank_put(new_state->base.crtc);
8701 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8702 				 __func__, new_state->base.crtc->base.id);
8703 	}
8704 }
8705 
8706 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8707 {
8708 	struct drm_plane *plane;
8709 	struct drm_plane_state *old_plane_state;
8710 	int i;
8711 
8712 	/*
8713 	 * TODO: Make this per-stream so we don't issue redundant updates for
8714 	 * commits with multiple streams.
8715 	 */
8716 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8717 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8718 			handle_cursor_update(plane, old_plane_state);
8719 }
8720 
8721 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8722 				    struct dc_state *dc_state,
8723 				    struct drm_device *dev,
8724 				    struct amdgpu_display_manager *dm,
8725 				    struct drm_crtc *pcrtc,
8726 				    bool wait_for_vblank)
8727 {
8728 	uint32_t i;
8729 	uint64_t timestamp_ns;
8730 	struct drm_plane *plane;
8731 	struct drm_plane_state *old_plane_state, *new_plane_state;
8732 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8733 	struct drm_crtc_state *new_pcrtc_state =
8734 			drm_atomic_get_new_crtc_state(state, pcrtc);
8735 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8736 	struct dm_crtc_state *dm_old_crtc_state =
8737 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8738 	int planes_count = 0, vpos, hpos;
8739 	long r;
8740 	unsigned long flags;
8741 	struct amdgpu_bo *abo;
8742 	uint32_t target_vblank, last_flip_vblank;
8743 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8744 	bool pflip_present = false;
8745 	struct {
8746 		struct dc_surface_update surface_updates[MAX_SURFACES];
8747 		struct dc_plane_info plane_infos[MAX_SURFACES];
8748 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8749 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8750 		struct dc_stream_update stream_update;
8751 	} *bundle;
8752 
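	/* The update bundle is too large to live on the stack, so allocate it on the heap. */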
8753 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8754 
8755 	if (!bundle) {
8756 		dm_error("Failed to allocate update bundle\n");
8757 		goto cleanup;
8758 	}
8759 
8760 	/*
8761 	 * Disable the cursor first if we're disabling all the planes.
8762 	 * It'll remain on the screen after the planes are re-enabled
8763 	 * if we don't.
8764 	 */
8765 	if (acrtc_state->active_planes == 0)
8766 		amdgpu_dm_commit_cursors(state);
8767 
8768 	/* update planes when needed */
8769 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8770 		struct drm_crtc *crtc = new_plane_state->crtc;
8771 		struct drm_crtc_state *new_crtc_state;
8772 		struct drm_framebuffer *fb = new_plane_state->fb;
8773 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8774 		bool plane_needs_flip;
8775 		struct dc_plane_state *dc_plane;
8776 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8777 
8778 		/* Cursor plane is handled after stream updates */
8779 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8780 			continue;
8781 
8782 		if (!fb || !crtc || pcrtc != crtc)
8783 			continue;
8784 
8785 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8786 		if (!new_crtc_state->active)
8787 			continue;
8788 
8789 		dc_plane = dm_new_plane_state->dc_state;
8790 
8791 		bundle->surface_updates[planes_count].surface = dc_plane;
8792 		if (new_pcrtc_state->color_mgmt_changed) {
8793 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8794 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8795 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8796 		}
8797 
8798 		fill_dc_scaling_info(new_plane_state,
8799 				     &bundle->scaling_infos[planes_count]);
8800 
8801 		bundle->surface_updates[planes_count].scaling_info =
8802 			&bundle->scaling_infos[planes_count];
8803 
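		/*
		 * A page flip is only needed when the plane already had a
		 * framebuffer and is receiving a new one; plane enables and
		 * disables go through as surface updates without a flip address.
		 */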
8804 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8805 
8806 		pflip_present = pflip_present || plane_needs_flip;
8807 
8808 		if (!plane_needs_flip) {
8809 			planes_count += 1;
8810 			continue;
8811 		}
8812 
8813 		abo = gem_to_amdgpu_bo(fb->obj[0]);
8814 
8815 		/*
8816 		 * Wait for all fences on this FB. Do limited wait to avoid
8817 		 * deadlock during GPU reset when this fence will not signal
8818 		 * but we hold reservation lock for the BO.
8819 		 */
8820 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
8821 					  msecs_to_jiffies(5000));
8822 		if (unlikely(r <= 0))
8823 			DRM_ERROR("Waiting for fences timed out!");
8824 
8825 		fill_dc_plane_info_and_addr(
8826 			dm->adev, new_plane_state,
8827 			afb->tiling_flags,
8828 			&bundle->plane_infos[planes_count],
8829 			&bundle->flip_addrs[planes_count].address,
8830 			afb->tmz_surface, false);
8831 
8832 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
8833 				 new_plane_state->plane->index,
8834 				 bundle->plane_infos[planes_count].dcc.enable);
8835 
8836 		bundle->surface_updates[planes_count].plane_info =
8837 			&bundle->plane_infos[planes_count];
8838 
8839 		/*
8840 		 * Only allow immediate flips for fast updates that don't
8841 		 * change FB pitch, DCC state, rotation or mirroring.
8842 		 */
8843 		bundle->flip_addrs[planes_count].flip_immediate =
8844 			crtc->state->async_flip &&
8845 			acrtc_state->update_type == UPDATE_TYPE_FAST;
8846 
8847 		timestamp_ns = ktime_get_ns();
8848 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8849 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8850 		bundle->surface_updates[planes_count].surface = dc_plane;
8851 
8852 		if (!bundle->surface_updates[planes_count].surface) {
8853 			DRM_ERROR("No surface for CRTC: id=%d\n",
8854 					acrtc_attach->crtc_id);
8855 			continue;
8856 		}
8857 
8858 		if (plane == pcrtc->primary)
8859 			update_freesync_state_on_stream(
8860 				dm,
8861 				acrtc_state,
8862 				acrtc_state->stream,
8863 				dc_plane,
8864 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8865 
8866 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
8867 				 __func__,
8868 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8869 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8870 
8871 		planes_count += 1;
8872 
8873 	}
8874 
8875 	if (pflip_present) {
8876 		if (!vrr_active) {
8877 			/* Use old throttling in non-vrr fixed refresh rate mode
8878 			 * to keep flip scheduling based on target vblank counts
8879 			 * working in a backwards compatible way, e.g., for
8880 			 * clients using the GLX_OML_sync_control extension or
8881 			 * DRI3/Present extension with defined target_msc.
8882 			 */
8883 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8884 		}
8885 		else {
8886 			/* For variable refresh rate mode only:
8887 			 * Get vblank of last completed flip to avoid > 1 vrr
8888 			 * flips per video frame by use of throttling, but allow
8889 			 * flip programming anywhere in the possibly large
8890 			 * variable vrr vblank interval for fine-grained flip
8891 			 * timing control and more opportunity to avoid stutter
8892 			 * on late submission of flips.
8893 			 */
8894 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8895 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8896 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8897 		}
8898 
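		/* wait_for_vblank is false for async flips, so the target is the vblank of the last completed flip itself. */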
8899 		target_vblank = last_flip_vblank + wait_for_vblank;
8900 
8901 		/*
8902 		 * Wait until we're out of the vertical blank period before the one
8903 		 * targeted by the flip
8904 		 */
8905 		while ((acrtc_attach->enabled &&
8906 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8907 							    0, &vpos, &hpos, NULL,
8908 							    NULL, &pcrtc->hwmode)
8909 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8910 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8911 			(int)(target_vblank -
8912 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8913 			usleep_range(1000, 1100);
8914 		}
8915 
8916 		/**
8917 		 * Prepare the flip event for the pageflip interrupt to handle.
8918 		 *
8919 		 * This only works in the case where we've already turned on the
8920 		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
8921 		 * from 0 -> n planes we have to skip a hardware generated event
8922 		 * and rely on sending it from software.
8923 		 */
8924 		if (acrtc_attach->base.state->event &&
8925 		    acrtc_state->active_planes > 0 &&
8926 		    !acrtc_state->force_dpms_off) {
8927 			drm_crtc_vblank_get(pcrtc);
8928 
8929 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8930 
8931 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8932 			prepare_flip_isr(acrtc_attach);
8933 
8934 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8935 		}
8936 
8937 		if (acrtc_state->stream) {
8938 			if (acrtc_state->freesync_vrr_info_changed)
8939 				bundle->stream_update.vrr_infopacket =
8940 					&acrtc_state->stream->vrr_infopacket;
8941 		}
8942 	}
8943 
8944 	/* Update the planes if changed or disable if we don't have any. */
8945 	if ((planes_count || acrtc_state->active_planes == 0) &&
8946 		acrtc_state->stream) {
8947 #if defined(CONFIG_DRM_AMD_DC_DCN)
8948 		/*
8949 		 * If PSR or idle optimizations are enabled then flush out
8950 		 * any pending work before hardware programming.
8951 		 */
8952 		if (dm->vblank_control_workqueue)
8953 			flush_workqueue(dm->vblank_control_workqueue);
8954 #endif
8955 
8956 		bundle->stream_update.stream = acrtc_state->stream;
8957 		if (new_pcrtc_state->mode_changed) {
8958 			bundle->stream_update.src = acrtc_state->stream->src;
8959 			bundle->stream_update.dst = acrtc_state->stream->dst;
8960 		}
8961 
8962 		if (new_pcrtc_state->color_mgmt_changed) {
8963 			/*
8964 			 * TODO: This isn't fully correct since we've actually
8965 			 * already modified the stream in place.
8966 			 */
8967 			bundle->stream_update.gamut_remap =
8968 				&acrtc_state->stream->gamut_remap_matrix;
8969 			bundle->stream_update.output_csc_transform =
8970 				&acrtc_state->stream->csc_color_matrix;
8971 			bundle->stream_update.out_transfer_func =
8972 				acrtc_state->stream->out_transfer_func;
8973 		}
8974 
8975 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8976 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8977 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8978 
8979 		/*
8980 		 * If FreeSync state on the stream has changed then we need to
8981 		 * re-adjust the min/max bounds now that DC doesn't handle this
8982 		 * as part of commit.
8983 		 */
8984 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8985 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8986 			dc_stream_adjust_vmin_vmax(
8987 				dm->dc, acrtc_state->stream,
8988 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8989 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8990 		}
8991 		mutex_lock(&dm->dc_lock);
8992 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8993 				acrtc_state->stream->link->psr_settings.psr_allow_active)
8994 			amdgpu_dm_psr_disable(acrtc_state->stream);
8995 
8996 		dc_commit_updates_for_stream(dm->dc,
8997 						     bundle->surface_updates,
8998 						     planes_count,
8999 						     acrtc_state->stream,
9000 						     &bundle->stream_update,
9001 						     dc_state);
9002 
9003 		/**
9004 		 * Enable or disable the interrupts on the backend.
9005 		 *
9006 		 * Most pipes are put into power gating when unused.
9007 		 *
9008 		 * When power gating is enabled on a pipe we lose the
9009 		 * interrupt enablement state when power gating is disabled.
9010 		 *
9011 		 * So we need to update the IRQ control state in hardware
9012 		 * whenever the pipe turns on (since it could be previously
9013 		 * power gated) or off (since some pipes can't be power gated
9014 		 * on some ASICs).
9015 		 */
9016 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9017 			dm_update_pflip_irq_state(drm_to_adev(dev),
9018 						  acrtc_attach);
9019 
9020 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9021 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9022 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9023 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9024 
9025 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9026 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9027 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9028 			struct amdgpu_dm_connector *aconn =
9029 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9030 
9031 			if (aconn->psr_skip_count > 0)
9032 				aconn->psr_skip_count--;
9033 
9034 			/* Allow PSR when skip count is 0. */
9035 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9036 		} else {
9037 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9038 		}
9039 
9040 		mutex_unlock(&dm->dc_lock);
9041 	}
9042 
9043 	/*
9044 	 * Update cursor state *after* programming all the planes.
9045 	 * This avoids redundant programming in the case where we're going
9046 	 * to be disabling a single plane - those pipes are being disabled.
9047 	 */
9048 	if (acrtc_state->active_planes)
9049 		amdgpu_dm_commit_cursors(state);
9050 
9051 cleanup:
9052 	kfree(bundle);
9053 }
9054 
9055 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9056 				   struct drm_atomic_state *state)
9057 {
9058 	struct amdgpu_device *adev = drm_to_adev(dev);
9059 	struct amdgpu_dm_connector *aconnector;
9060 	struct drm_connector *connector;
9061 	struct drm_connector_state *old_con_state, *new_con_state;
9062 	struct drm_crtc_state *new_crtc_state;
9063 	struct dm_crtc_state *new_dm_crtc_state;
9064 	const struct dc_stream_status *status;
9065 	int i, inst;
9066 
9067 	/* Notify device removals. */
9068 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9069 		if (old_con_state->crtc != new_con_state->crtc) {
9070 			/* CRTC changes require notification. */
9071 			goto notify;
9072 		}
9073 
9074 		if (!new_con_state->crtc)
9075 			continue;
9076 
9077 		new_crtc_state = drm_atomic_get_new_crtc_state(
9078 			state, new_con_state->crtc);
9079 
9080 		if (!new_crtc_state)
9081 			continue;
9082 
9083 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9084 			continue;
9085 
9086 	notify:
9087 		aconnector = to_amdgpu_dm_connector(connector);
9088 
9089 		mutex_lock(&adev->dm.audio_lock);
9090 		inst = aconnector->audio_inst;
9091 		aconnector->audio_inst = -1;
9092 		mutex_unlock(&adev->dm.audio_lock);
9093 
9094 		amdgpu_dm_audio_eld_notify(adev, inst);
9095 	}
9096 
9097 	/* Notify audio device additions. */
9098 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9099 		if (!new_con_state->crtc)
9100 			continue;
9101 
9102 		new_crtc_state = drm_atomic_get_new_crtc_state(
9103 			state, new_con_state->crtc);
9104 
9105 		if (!new_crtc_state)
9106 			continue;
9107 
9108 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9109 			continue;
9110 
9111 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9112 		if (!new_dm_crtc_state->stream)
9113 			continue;
9114 
9115 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9116 		if (!status)
9117 			continue;
9118 
9119 		aconnector = to_amdgpu_dm_connector(connector);
9120 
9121 		mutex_lock(&adev->dm.audio_lock);
9122 		inst = status->audio_inst;
9123 		aconnector->audio_inst = inst;
9124 		mutex_unlock(&adev->dm.audio_lock);
9125 
9126 		amdgpu_dm_audio_eld_notify(adev, inst);
9127 	}
9128 }
9129 
9130 /*
9131  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9132  * @crtc_state: the DRM CRTC state
9133  * @stream_state: the DC stream state.
9134  *
9135  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9136  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9137  */
9138 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9139 						struct dc_stream_state *stream_state)
9140 {
9141 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9142 }
9143 
9144 /**
9145  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9146  * @state: The atomic state to commit
9147  *
9148  * This will tell DC to commit the constructed DC state from atomic_check,
9149  * programming the hardware. Any failure here implies a hardware failure, since
9150  * atomic check should have filtered anything non-kosher.
9151  */
9152 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9153 {
9154 	struct drm_device *dev = state->dev;
9155 	struct amdgpu_device *adev = drm_to_adev(dev);
9156 	struct amdgpu_display_manager *dm = &adev->dm;
9157 	struct dm_atomic_state *dm_state;
9158 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9159 	uint32_t i, j;
9160 	struct drm_crtc *crtc;
9161 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9162 	unsigned long flags;
9163 	bool wait_for_vblank = true;
9164 	struct drm_connector *connector;
9165 	struct drm_connector_state *old_con_state, *new_con_state;
9166 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9167 	int crtc_disable_count = 0;
9168 	bool mode_set_reset_required = false;
9169 
9170 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9171 
9172 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9173 
9174 	dm_state = dm_atomic_get_new_state(state);
9175 	if (dm_state && dm_state->context) {
9176 		dc_state = dm_state->context;
9177 	} else {
9178 		/* No state changes, retain current state. */
9179 		dc_state_temp = dc_create_state(dm->dc);
9180 		ASSERT(dc_state_temp);
9181 		dc_state = dc_state_temp;
9182 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9183 	}
9184 
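	/*
	 * Disable DM interrupts and drop the stream reference for every CRTC
	 * that is being disabled or is undergoing a full modeset.
	 */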
9185 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9186 				       new_crtc_state, i) {
9187 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9188 
9189 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9190 
9191 		if (old_crtc_state->active &&
9192 		    (!new_crtc_state->active ||
9193 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9194 			manage_dm_interrupts(adev, acrtc, false);
9195 			dc_stream_release(dm_old_crtc_state->stream);
9196 		}
9197 	}
9198 
9199 	drm_atomic_helper_calc_timestamping_constants(state);
9200 
9201 	/* update changed items */
9202 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9203 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9204 
9205 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9206 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9207 
9208 		DRM_DEBUG_ATOMIC(
9209 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9210 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9211 			"connectors_changed:%d\n",
9212 			acrtc->crtc_id,
9213 			new_crtc_state->enable,
9214 			new_crtc_state->active,
9215 			new_crtc_state->planes_changed,
9216 			new_crtc_state->mode_changed,
9217 			new_crtc_state->active_changed,
9218 			new_crtc_state->connectors_changed);
9219 
9220 		/* Disable cursor if disabling crtc */
9221 		if (old_crtc_state->active && !new_crtc_state->active) {
9222 			struct dc_cursor_position position;
9223 
9224 			memset(&position, 0, sizeof(position));
9225 			mutex_lock(&dm->dc_lock);
9226 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9227 			mutex_unlock(&dm->dc_lock);
9228 		}
9229 
9230 		/* Copy all transient state flags into dc state */
9231 		if (dm_new_crtc_state->stream) {
9232 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9233 							    dm_new_crtc_state->stream);
9234 		}
9235 
9236 		/* handles headless hotplug case, updating new_state and
9237 		 * aconnector as needed
9238 		 */
9239 
9240 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9241 
9242 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9243 
9244 			if (!dm_new_crtc_state->stream) {
9245 				/*
9246 				 * This could happen because of issues with
9247 				 * userspace notification delivery.
9248 				 * In this case userspace tries to set a mode on
9249 				 * a display that is in fact disconnected.
9250 				 * dc_sink is NULL on the aconnector in this case.
9251 				 * We expect a mode reset to come soon.
9252 				 *
9253 				 * This can also happen when an unplug occurs
9254 				 * during the resume sequence.
9255 				 *
9256 				 * In this case, we want to pretend we still
9257 				 * have a sink to keep the pipe running so that
9258 				 * hw state is consistent with the sw state
9259 				 */
9260 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9261 						__func__, acrtc->base.base.id);
9262 				continue;
9263 			}
9264 
9265 			if (dm_old_crtc_state->stream)
9266 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9267 
9268 			pm_runtime_get_noresume(dev->dev);
9269 
9270 			acrtc->enabled = true;
9271 			acrtc->hw_mode = new_crtc_state->mode;
9272 			crtc->hwmode = new_crtc_state->mode;
9273 			mode_set_reset_required = true;
9274 		} else if (modereset_required(new_crtc_state)) {
9275 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9276 			/* i.e. reset mode */
9277 			if (dm_old_crtc_state->stream)
9278 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9279 
9280 			mode_set_reset_required = true;
9281 		}
9282 	} /* for_each_crtc_in_state() */
9283 
9284 	if (dc_state) {
9285 		/* If there was a mode set or reset, disable eDP PSR */
9286 		if (mode_set_reset_required) {
9287 #if defined(CONFIG_DRM_AMD_DC_DCN)
9288 			if (dm->vblank_control_workqueue)
9289 				flush_workqueue(dm->vblank_control_workqueue);
9290 #endif
9291 			amdgpu_dm_psr_disable_all(dm);
9292 		}
9293 
9294 		dm_enable_per_frame_crtc_master_sync(dc_state);
9295 		mutex_lock(&dm->dc_lock);
9296 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9297 #if defined(CONFIG_DRM_AMD_DC_DCN)
9298 		/* Allow idle optimization when vblank count is 0 for display off */
9299 		if (dm->active_vblank_irq_count == 0)
9300 			dc_allow_idle_optimizations(dm->dc, true);
9301 #endif
9302 		mutex_unlock(&dm->dc_lock);
9303 	}
9304 
9305 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9306 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9307 
9308 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9309 
9310 		if (dm_new_crtc_state->stream != NULL) {
9311 			const struct dc_stream_status *status =
9312 					dc_stream_get_status(dm_new_crtc_state->stream);
9313 
9314 			if (!status)
9315 				status = dc_stream_get_status_from_state(dc_state,
9316 									 dm_new_crtc_state->stream);
9317 			if (!status)
9318 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9319 			else
9320 				acrtc->otg_inst = status->primary_otg_inst;
9321 		}
9322 	}
9323 #ifdef CONFIG_DRM_AMD_DC_HDCP
9324 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9325 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9326 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9327 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9328 
9329 		new_crtc_state = NULL;
9330 
9331 		if (acrtc)
9332 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9333 
9334 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9335 
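		/*
		 * The stream is being torn down while content protection is
		 * still enabled: reset HDCP on the link and flag the connector
		 * so protection is requested again once a stream comes back.
		 */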
9336 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9337 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9338 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9339 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9340 			dm_new_con_state->update_hdcp = true;
9341 			continue;
9342 		}
9343 
9344 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9345 			hdcp_update_display(
9346 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9347 				new_con_state->hdcp_content_type,
9348 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9349 	}
9350 #endif
9351 
9352 	/* Handle connector state changes */
9353 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9354 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9355 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9356 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9357 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9358 		struct dc_stream_update stream_update;
9359 		struct dc_info_packet hdr_packet;
9360 		struct dc_stream_status *status = NULL;
9361 		bool abm_changed, hdr_changed, scaling_changed;
9362 
9363 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9364 		memset(&stream_update, 0, sizeof(stream_update));
9365 
9366 		if (acrtc) {
9367 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9368 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9369 		}
9370 
9371 		/* Skip any modesets/resets */
9372 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9373 			continue;
9374 
9375 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9376 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9377 
9378 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9379 							     dm_old_con_state);
9380 
9381 		abm_changed = dm_new_crtc_state->abm_level !=
9382 			      dm_old_crtc_state->abm_level;
9383 
9384 		hdr_changed =
9385 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9386 
9387 		if (!scaling_changed && !abm_changed && !hdr_changed)
9388 			continue;
9389 
9390 		stream_update.stream = dm_new_crtc_state->stream;
9391 		if (scaling_changed) {
9392 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9393 					dm_new_con_state, dm_new_crtc_state->stream);
9394 
9395 			stream_update.src = dm_new_crtc_state->stream->src;
9396 			stream_update.dst = dm_new_crtc_state->stream->dst;
9397 		}
9398 
9399 		if (abm_changed) {
9400 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9401 
9402 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9403 		}
9404 
9405 		if (hdr_changed) {
9406 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9407 			stream_update.hdr_static_metadata = &hdr_packet;
9408 		}
9409 
9410 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9411 
9412 		if (WARN_ON(!status))
9413 			continue;
9414 
9415 		WARN_ON(!status->plane_count);
9416 
9417 		/*
9418 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9419 		 * Here we create an empty update on each plane.
9420 		 * To fix this, DC should permit updating only stream properties.
9421 		 */
9422 		for (j = 0; j < status->plane_count; j++)
9423 			dummy_updates[j].surface = status->plane_states[0];
9424 
9425 
9426 		mutex_lock(&dm->dc_lock);
9427 		dc_commit_updates_for_stream(dm->dc,
9428 						     dummy_updates,
9429 						     status->plane_count,
9430 						     dm_new_crtc_state->stream,
9431 						     &stream_update,
9432 						     dc_state);
9433 		mutex_unlock(&dm->dc_lock);
9434 	}
9435 
9436 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9437 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9438 				      new_crtc_state, i) {
9439 		if (old_crtc_state->active && !new_crtc_state->active)
9440 			crtc_disable_count++;
9441 
9442 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9443 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9444 
9445 		/* For freesync config update on crtc state and params for irq */
9446 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9447 
9448 		/* Handle vrr on->off / off->on transitions */
9449 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9450 						dm_new_crtc_state);
9451 	}
9452 
9453 	/**
9454 	 * Enable interrupts for CRTCs that are newly enabled or went through
9455 	 * a modeset. It was intentionally deferred until after the front end
9456 	 * state was modified to wait until the OTG was on and so the IRQ
9457 	 * handlers didn't access stale or invalid state.
9458 	 */
9459 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9460 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9461 #ifdef CONFIG_DEBUG_FS
9462 		bool configure_crc = false;
9463 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9464 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9465 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9466 #endif
9467 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9468 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9469 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9470 #endif
9471 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9472 
9473 		if (new_crtc_state->active &&
9474 		    (!old_crtc_state->active ||
9475 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9476 			dc_stream_retain(dm_new_crtc_state->stream);
9477 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9478 			manage_dm_interrupts(adev, acrtc, true);
9479 
9480 #ifdef CONFIG_DEBUG_FS
9481 			/**
9482 			 * Frontend may have changed so reapply the CRC capture
9483 			 * settings for the stream.
9484 			 */
9485 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9486 
9487 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9488 				configure_crc = true;
9489 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9490 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9491 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9492 					acrtc->dm_irq_params.crc_window.update_win = true;
9493 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9494 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9495 					crc_rd_wrk->crtc = crtc;
9496 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9497 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9498 				}
9499 #endif
9500 			}
9501 
9502 			if (configure_crc)
9503 				if (amdgpu_dm_crtc_configure_crc_source(
9504 					crtc, dm_new_crtc_state, cur_crc_src))
9505 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9506 #endif
9507 		}
9508 	}
9509 
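	/* If any CRTC requested an async flip, don't throttle plane commits to the next vblank. */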
9510 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9511 		if (new_crtc_state->async_flip)
9512 			wait_for_vblank = false;
9513 
9514 	/* update planes when needed per crtc*/
9515 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9516 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9517 
9518 		if (dm_new_crtc_state->stream)
9519 			amdgpu_dm_commit_planes(state, dc_state, dev,
9520 						dm, crtc, wait_for_vblank);
9521 	}
9522 
9523 	/* Update audio instances for each connector. */
9524 	amdgpu_dm_commit_audio(dev, state);
9525 
9526 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9527 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9528 	/* restore the backlight level */
9529 	for (i = 0; i < dm->num_of_edps; i++) {
9530 		if (dm->backlight_dev[i] &&
9531 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9532 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9533 	}
9534 #endif
9535 	/*
9536 	 * Send a vblank event for every CRTC whose event was not handled in the
9537 	 * flip path and mark the event consumed for drm_atomic_helper_commit_hw_done
9538 	 */
9539 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9540 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9541 
9542 		if (new_crtc_state->event)
9543 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9544 
9545 		new_crtc_state->event = NULL;
9546 	}
9547 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9548 
9549 	/* Signal HW programming completion */
9550 	drm_atomic_helper_commit_hw_done(state);
9551 
9552 	if (wait_for_vblank)
9553 		drm_atomic_helper_wait_for_flip_done(dev, state);
9554 
9555 	drm_atomic_helper_cleanup_planes(dev, state);
9556 
9557 	/* return the stolen vga memory back to VRAM */
9558 	if (!adev->mman.keep_stolen_vga_memory)
9559 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9560 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9561 
9562 	/*
9563 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9564 	 * so we can put the GPU into runtime suspend if we're not driving any
9565 	 * displays anymore
9566 	 */
9567 	for (i = 0; i < crtc_disable_count; i++)
9568 		pm_runtime_put_autosuspend(dev->dev);
9569 	pm_runtime_mark_last_busy(dev->dev);
9570 
9571 	if (dc_state_temp)
9572 		dc_release_state(dc_state_temp);
9573 }
9574 
9575 
9576 static int dm_force_atomic_commit(struct drm_connector *connector)
9577 {
9578 	int ret = 0;
9579 	struct drm_device *ddev = connector->dev;
9580 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9581 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9582 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9583 	struct drm_connector_state *conn_state;
9584 	struct drm_crtc_state *crtc_state;
9585 	struct drm_plane_state *plane_state;
9586 
9587 	if (!state)
9588 		return -ENOMEM;
9589 
9590 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9591 
9592 	/* Construct an atomic state to restore previous display setting */
9593 
9594 	/*
9595 	 * Attach connectors to drm_atomic_state
9596 	 */
9597 	conn_state = drm_atomic_get_connector_state(state, connector);
9598 
9599 	ret = PTR_ERR_OR_ZERO(conn_state);
9600 	if (ret)
9601 		goto out;
9602 
9603 	/* Attach crtc to drm_atomic_state*/
9604 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9605 
9606 	ret = PTR_ERR_OR_ZERO(crtc_state);
9607 	if (ret)
9608 		goto out;
9609 
9610 	/* force a restore */
9611 	crtc_state->mode_changed = true;
9612 
9613 	/* Attach plane to drm_atomic_state */
9614 	plane_state = drm_atomic_get_plane_state(state, plane);
9615 
9616 	ret = PTR_ERR_OR_ZERO(plane_state);
9617 	if (ret)
9618 		goto out;
9619 
9620 	/* Call commit internally with the state we just constructed */
9621 	ret = drm_atomic_commit(state);
9622 
9623 out:
9624 	drm_atomic_state_put(state);
9625 	if (ret)
9626 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9627 
9628 	return ret;
9629 }
9630 
9631 /*
9632  * This function handles all cases where a set mode does not come upon hotplug.
9633  * This includes when a display is unplugged then plugged back into the
9634  * same port, and when running without usermode desktop manager support.
9635  */
9636 void dm_restore_drm_connector_state(struct drm_device *dev,
9637 				    struct drm_connector *connector)
9638 {
9639 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9640 	struct amdgpu_crtc *disconnected_acrtc;
9641 	struct dm_crtc_state *acrtc_state;
9642 
9643 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9644 		return;
9645 
9646 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9647 	if (!disconnected_acrtc)
9648 		return;
9649 
9650 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9651 	if (!acrtc_state->stream)
9652 		return;
9653 
9654 	/*
9655 	 * If the previous sink is not released and is different from the current
9656 	 * one, we deduce we are in a state where we cannot rely on a usermode call
9657 	 * to turn on the display, so we do it here
9658 	 */
9659 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9660 		dm_force_atomic_commit(&aconnector->base);
9661 }
9662 
9663 /*
9664  * Grabs all modesetting locks to serialize against any blocking commits,
9665  * and waits for completion of all non-blocking commits.
9666  */
9667 static int do_aquire_global_lock(struct drm_device *dev,
9668 				 struct drm_atomic_state *state)
9669 {
9670 	struct drm_crtc *crtc;
9671 	struct drm_crtc_commit *commit;
9672 	long ret;
9673 
9674 	/*
9675 	 * Adding all modeset locks to acquire_ctx will
9676 	 * ensure that when the framework releases it, the
9677 	 * extra locks we are locking here will get released too
9678 	 */
9679 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9680 	if (ret)
9681 		return ret;
9682 
9683 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9684 		spin_lock(&crtc->commit_lock);
9685 		commit = list_first_entry_or_null(&crtc->commit_list,
9686 				struct drm_crtc_commit, commit_entry);
9687 		if (commit)
9688 			drm_crtc_commit_get(commit);
9689 		spin_unlock(&crtc->commit_lock);
9690 
9691 		if (!commit)
9692 			continue;
9693 
9694 		/*
9695 		 * Make sure all pending HW programming completed and
9696 		 * page flips done
9697 		 */
9698 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9699 
9700 		if (ret > 0)
9701 			ret = wait_for_completion_interruptible_timeout(
9702 					&commit->flip_done, 10*HZ);
9703 
9704 		if (ret == 0)
9705 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9706 				  "timed out\n", crtc->base.id, crtc->name);
9707 
9708 		drm_crtc_commit_put(commit);
9709 	}
9710 
9711 	return ret < 0 ? ret : 0;
9712 }
9713 
9714 static void get_freesync_config_for_crtc(
9715 	struct dm_crtc_state *new_crtc_state,
9716 	struct dm_connector_state *new_con_state)
9717 {
9718 	struct mod_freesync_config config = {0};
9719 	struct amdgpu_dm_connector *aconnector =
9720 			to_amdgpu_dm_connector(new_con_state->base.connector);
9721 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9722 	int vrefresh = drm_mode_vrefresh(mode);
9723 	bool fs_vid_mode = false;
9724 
9725 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9726 					vrefresh >= aconnector->min_vfreq &&
9727 					vrefresh <= aconnector->max_vfreq;
9728 
9729 	if (new_crtc_state->vrr_supported) {
9730 		new_crtc_state->stream->ignore_msa_timing_param = true;
9731 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9732 
9733 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9734 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9735 		config.vsif_supported = true;
9736 		config.btr = true;
9737 
9738 		if (fs_vid_mode) {
9739 			config.state = VRR_STATE_ACTIVE_FIXED;
9740 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9741 			goto out;
9742 		} else if (new_crtc_state->base.vrr_enabled) {
9743 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9744 		} else {
9745 			config.state = VRR_STATE_INACTIVE;
9746 		}
9747 	}
9748 out:
9749 	new_crtc_state->freesync_config = config;
9750 }
9751 
9752 static void reset_freesync_config_for_crtc(
9753 	struct dm_crtc_state *new_crtc_state)
9754 {
9755 	new_crtc_state->vrr_supported = false;
9756 
9757 	memset(&new_crtc_state->vrr_infopacket, 0,
9758 	       sizeof(new_crtc_state->vrr_infopacket));
9759 }
9760 
9761 static bool
9762 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9763 				 struct drm_crtc_state *new_crtc_state)
9764 {
9765 	struct drm_display_mode old_mode, new_mode;
9766 
9767 	if (!old_crtc_state || !new_crtc_state)
9768 		return false;
9769 
9770 	old_mode = old_crtc_state->mode;
9771 	new_mode = new_crtc_state->mode;
9772 
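	/*
	 * The timings count as unchanged when only the vertical blanking
	 * differs: freesync video modes stretch the vertical front porch, so
	 * vtotal/vsync_start/vsync_end change while the vsync pulse width
	 * (vsync_end - vsync_start) stays the same.
	 */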
9773 	if (old_mode.clock       == new_mode.clock &&
9774 	    old_mode.hdisplay    == new_mode.hdisplay &&
9775 	    old_mode.vdisplay    == new_mode.vdisplay &&
9776 	    old_mode.htotal      == new_mode.htotal &&
9777 	    old_mode.vtotal      != new_mode.vtotal &&
9778 	    old_mode.hsync_start == new_mode.hsync_start &&
9779 	    old_mode.vsync_start != new_mode.vsync_start &&
9780 	    old_mode.hsync_end   == new_mode.hsync_end &&
9781 	    old_mode.vsync_end   != new_mode.vsync_end &&
9782 	    old_mode.hskew       == new_mode.hskew &&
9783 	    old_mode.vscan       == new_mode.vscan &&
9784 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9785 	    (new_mode.vsync_end - new_mode.vsync_start))
9786 		return true;
9787 
9788 	return false;
9789 }
9790 
9791 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9792 	uint64_t num, den, res;
9793 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9794 
9795 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9796 
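	/* Refresh rate in uHz = pixel clock (kHz) * 1000 * 1000000 / (htotal * vtotal). */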
9797 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9798 	den = (unsigned long long)new_crtc_state->mode.htotal *
9799 	      (unsigned long long)new_crtc_state->mode.vtotal;
9800 
9801 	res = div_u64(num, den);
9802 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9803 }
9804 
9805 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9806 				struct drm_atomic_state *state,
9807 				struct drm_crtc *crtc,
9808 				struct drm_crtc_state *old_crtc_state,
9809 				struct drm_crtc_state *new_crtc_state,
9810 				bool enable,
9811 				bool *lock_and_validation_needed)
9812 {
9813 	struct dm_atomic_state *dm_state = NULL;
9814 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9815 	struct dc_stream_state *new_stream;
9816 	int ret = 0;
9817 
9818 	/*
9819 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9820 	 * update changed items
9821 	 */
9822 	struct amdgpu_crtc *acrtc = NULL;
9823 	struct amdgpu_dm_connector *aconnector = NULL;
9824 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9825 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9826 
9827 	new_stream = NULL;
9828 
9829 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9830 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9831 	acrtc = to_amdgpu_crtc(crtc);
9832 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9833 
9834 	/* TODO This hack should go away */
9835 	if (aconnector && enable) {
9836 		/* Make sure fake sink is created in plug-in scenario */
9837 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9838 							    &aconnector->base);
9839 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9840 							    &aconnector->base);
9841 
9842 		if (IS_ERR(drm_new_conn_state)) {
9843 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9844 			goto fail;
9845 		}
9846 
9847 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9848 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9849 
9850 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9851 			goto skip_modeset;
9852 
9853 		new_stream = create_validate_stream_for_sink(aconnector,
9854 							     &new_crtc_state->mode,
9855 							     dm_new_conn_state,
9856 							     dm_old_crtc_state->stream);
9857 
9858 		/*
9859 		 * We can have no stream on ACTION_SET if a display
9860 		 * was disconnected during S3; in this case it is not an
9861 		 * error, the OS will be updated after detection and
9862 		 * will do the right thing on the next atomic commit
9863 		 */
9864 
9865 		if (!new_stream) {
9866 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9867 					__func__, acrtc->base.base.id);
9868 			ret = -ENOMEM;
9869 			goto fail;
9870 		}
9871 
9872 		/*
9873 		 * TODO: Check VSDB bits to decide whether this should
9874 		 * be enabled or not.
9875 		 */
9876 		new_stream->triggered_crtc_reset.enabled =
9877 			dm->force_timing_sync;
9878 
9879 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9880 
9881 		ret = fill_hdr_info_packet(drm_new_conn_state,
9882 					   &new_stream->hdr_static_metadata);
9883 		if (ret)
9884 			goto fail;
9885 
9886 		/*
9887 		 * If we already removed the old stream from the context
9888 		 * (and set the new stream to NULL) then we can't reuse
9889 		 * the old stream even if the stream and scaling are unchanged.
9890 		 * We'll hit the BUG_ON and black screen.
9891 		 *
9892 		 * TODO: Refactor this function to allow this check to work
9893 		 * in all conditions.
9894 		 */
9895 		if (amdgpu_freesync_vid_mode &&
9896 		    dm_new_crtc_state->stream &&
9897 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9898 			goto skip_modeset;
9899 
9900 		if (dm_new_crtc_state->stream &&
9901 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9902 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9903 			new_crtc_state->mode_changed = false;
9904 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9905 					 new_crtc_state->mode_changed);
9906 		}
9907 	}
9908 
9909 	/* mode_changed flag may get updated above, need to check again */
9910 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9911 		goto skip_modeset;
9912 
9913 	DRM_DEBUG_ATOMIC(
9914 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9915 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9916 		"connectors_changed:%d\n",
9917 		acrtc->crtc_id,
9918 		new_crtc_state->enable,
9919 		new_crtc_state->active,
9920 		new_crtc_state->planes_changed,
9921 		new_crtc_state->mode_changed,
9922 		new_crtc_state->active_changed,
9923 		new_crtc_state->connectors_changed);
9924 
9925 	/* Remove stream for any changed/disabled CRTC */
9926 	if (!enable) {
9927 
9928 		if (!dm_old_crtc_state->stream)
9929 			goto skip_modeset;
9930 
9931 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9932 		    is_timing_unchanged_for_freesync(new_crtc_state,
9933 						     old_crtc_state)) {
9934 			new_crtc_state->mode_changed = false;
9935 			DRM_DEBUG_DRIVER(
9936 				"Mode change not required for front porch change, "
9937 				"setting mode_changed to %d",
9938 				new_crtc_state->mode_changed);
9939 
9940 			set_freesync_fixed_config(dm_new_crtc_state);
9941 
9942 			goto skip_modeset;
9943 		} else if (amdgpu_freesync_vid_mode && aconnector &&
9944 			   is_freesync_video_mode(&new_crtc_state->mode,
9945 						  aconnector)) {
9946 			struct drm_display_mode *high_mode;
9947 
9948 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
9949 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
9950 				set_freesync_fixed_config(dm_new_crtc_state);
9951 			}
9952 		}
9953 
9954 		ret = dm_atomic_get_state(state, &dm_state);
9955 		if (ret)
9956 			goto fail;
9957 
9958 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9959 				crtc->base.id);
9960 
9961 		/* i.e. reset mode */
9962 		if (dc_remove_stream_from_ctx(
9963 				dm->dc,
9964 				dm_state->context,
9965 				dm_old_crtc_state->stream) != DC_OK) {
9966 			ret = -EINVAL;
9967 			goto fail;
9968 		}
9969 
9970 		dc_stream_release(dm_old_crtc_state->stream);
9971 		dm_new_crtc_state->stream = NULL;
9972 
9973 		reset_freesync_config_for_crtc(dm_new_crtc_state);
9974 
9975 		*lock_and_validation_needed = true;
9976 
9977 	} else {/* Add stream for any updated/enabled CRTC */
9978 		/*
9979 		 * Quick fix to prevent a NULL pointer dereference on new_stream when newly
9980 		 * added MST connectors are not found in the existing crtc_state in chained mode.
9981 		 * TODO: need to dig out the root cause of that
9982 		 */
9983 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9984 			goto skip_modeset;
9985 
9986 		if (modereset_required(new_crtc_state))
9987 			goto skip_modeset;
9988 
9989 		if (modeset_required(new_crtc_state, new_stream,
9990 				     dm_old_crtc_state->stream)) {
9991 
9992 			WARN_ON(dm_new_crtc_state->stream);
9993 
9994 			ret = dm_atomic_get_state(state, &dm_state);
9995 			if (ret)
9996 				goto fail;
9997 
9998 			dm_new_crtc_state->stream = new_stream;
9999 
10000 			dc_stream_retain(new_stream);
10001 
10002 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10003 					 crtc->base.id);
10004 
10005 			if (dc_add_stream_to_ctx(
10006 					dm->dc,
10007 					dm_state->context,
10008 					dm_new_crtc_state->stream) != DC_OK) {
10009 				ret = -EINVAL;
10010 				goto fail;
10011 			}
10012 
10013 			*lock_and_validation_needed = true;
10014 		}
10015 	}
10016 
10017 skip_modeset:
10018 	/* Release extra reference */
	if (new_stream)
		dc_stream_release(new_stream);
10021 
10022 	/*
10023 	 * We want to do dc stream updates that do not require a
10024 	 * full modeset below.
10025 	 */
10026 	if (!(enable && aconnector && new_crtc_state->active))
10027 		return 0;
10028 	/*
10029 	 * Given above conditions, the dc state cannot be NULL because:
10030 	 * 1. We're in the process of enabling CRTCs (just been added
10031 	 *    to the dc context, or already is on the context)
10032 	 * 2. Has a valid connector attached, and
10033 	 * 3. Is currently active and enabled.
10034 	 * => The dc stream state currently exists.
10035 	 */
10036 	BUG_ON(dm_new_crtc_state->stream == NULL);
10037 
10038 	/* Scaling or underscan settings */
10039 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10040 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10041 		update_stream_scaling_settings(
10042 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10043 
10044 	/* ABM settings */
10045 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10046 
10047 	/*
10048 	 * Color management settings. We also update color properties
10049 	 * when a modeset is needed, to ensure it gets reprogrammed.
10050 	 */
10051 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10052 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10053 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10054 		if (ret)
10055 			goto fail;
10056 	}
10057 
10058 	/* Update Freesync settings. */
10059 	get_freesync_config_for_crtc(dm_new_crtc_state,
10060 				     dm_new_conn_state);
10061 
10062 	return ret;
10063 
10064 fail:
10065 	if (new_stream)
10066 		dc_stream_release(new_stream);
10067 	return ret;
10068 }
10069 
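/*
 * Decide whether a plane update requires tearing down and recreating the DC
 * plane state (a "reset"). Any change that can affect z-order, pipe
 * acquisition or bandwidth (CRTC changes, scaling, rotation, blending, alpha,
 * colorspace, pixel format, tiling/DCC) forces a reset; otherwise the plane
 * can be updated in place.
 */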
10070 static bool should_reset_plane(struct drm_atomic_state *state,
10071 			       struct drm_plane *plane,
10072 			       struct drm_plane_state *old_plane_state,
10073 			       struct drm_plane_state *new_plane_state)
10074 {
10075 	struct drm_plane *other;
10076 	struct drm_plane_state *old_other_state, *new_other_state;
10077 	struct drm_crtc_state *new_crtc_state;
10078 	int i;
10079 
10080 	/*
10081 	 * TODO: Remove this hack once the checks below are sufficient
10082 	 * enough to determine when we need to reset all the planes on
10083 	 * the stream.
10084 	 */
10085 	if (state->allow_modeset)
10086 		return true;
10087 
10088 	/* Exit early if we know that we're adding or removing the plane. */
10089 	if (old_plane_state->crtc != new_plane_state->crtc)
10090 		return true;
10091 
10092 	/* old crtc == new_crtc == NULL, plane not in context. */
10093 	if (!new_plane_state->crtc)
10094 		return false;
10095 
10096 	new_crtc_state =
10097 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10098 
10099 	if (!new_crtc_state)
10100 		return true;
10101 
10102 	/* CRTC Degamma changes currently require us to recreate planes. */
10103 	if (new_crtc_state->color_mgmt_changed)
10104 		return true;
10105 
10106 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10107 		return true;
10108 
10109 	/*
10110 	 * If there are any new primary or overlay planes being added or
10111 	 * removed then the z-order can potentially change. To ensure
10112 	 * correct z-order and pipe acquisition the current DC architecture
10113 	 * requires us to remove and recreate all existing planes.
10114 	 *
10115 	 * TODO: Come up with a more elegant solution for this.
10116 	 */
10117 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10120 			continue;
10121 
10122 		if (old_other_state->crtc != new_plane_state->crtc &&
10123 		    new_other_state->crtc != new_plane_state->crtc)
10124 			continue;
10125 
10126 		if (old_other_state->crtc != new_other_state->crtc)
10127 			return true;
10128 
10129 		/* Src/dst size and scaling updates. */
10130 		if (old_other_state->src_w != new_other_state->src_w ||
10131 		    old_other_state->src_h != new_other_state->src_h ||
10132 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10133 		    old_other_state->crtc_h != new_other_state->crtc_h)
10134 			return true;
10135 
10136 		/* Rotation / mirroring updates. */
10137 		if (old_other_state->rotation != new_other_state->rotation)
10138 			return true;
10139 
10140 		/* Blending updates. */
10141 		if (old_other_state->pixel_blend_mode !=
10142 		    new_other_state->pixel_blend_mode)
10143 			return true;
10144 
10145 		/* Alpha updates. */
10146 		if (old_other_state->alpha != new_other_state->alpha)
10147 			return true;
10148 
10149 		/* Colorspace changes. */
10150 		if (old_other_state->color_range != new_other_state->color_range ||
10151 		    old_other_state->color_encoding != new_other_state->color_encoding)
10152 			return true;
10153 
10154 		/* Framebuffer checks fall at the end. */
10155 		if (!old_other_state->fb || !new_other_state->fb)
10156 			continue;
10157 
10158 		/* Pixel format changes can require bandwidth updates. */
10159 		if (old_other_state->fb->format != new_other_state->fb->format)
10160 			return true;
10161 
10162 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10163 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10164 
10165 		/* Tiling and DCC changes also require bandwidth updates. */
10166 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10167 		    old_afb->base.modifier != new_afb->base.modifier)
10168 			return true;
10169 	}
10170 
10171 	return false;
10172 }
10173 
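/*
 * Validate a framebuffer for use on the cursor plane: it must fit within the
 * CRTC's maximum cursor dimensions, must not be cropped or scaled, must have
 * a pitch of 64, 128 or 256 pixels that matches its width, and must be linear
 * when no format modifier is supplied.
 */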
10174 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10175 			      struct drm_plane_state *new_plane_state,
10176 			      struct drm_framebuffer *fb)
10177 {
10178 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10179 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10180 	unsigned int pitch;
10181 	bool linear;
10182 
10183 	if (fb->width > new_acrtc->max_cursor_width ||
10184 	    fb->height > new_acrtc->max_cursor_height) {
10185 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10186 				 new_plane_state->fb->width,
10187 				 new_plane_state->fb->height);
10188 		return -EINVAL;
10189 	}
10190 	if (new_plane_state->src_w != fb->width << 16 ||
10191 	    new_plane_state->src_h != fb->height << 16) {
10192 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10193 		return -EINVAL;
10194 	}
10195 
10196 	/* Pitch in pixels */
10197 	pitch = fb->pitches[0] / fb->format->cpp[0];
10198 
10199 	if (fb->width != pitch) {
10200 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10201 				 fb->width, pitch);
10202 		return -EINVAL;
10203 	}
10204 
10205 	switch (pitch) {
10206 	case 64:
10207 	case 128:
10208 	case 256:
10209 		/* FB pitch is supported by cursor plane */
10210 		break;
10211 	default:
10212 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10213 		return -EINVAL;
10214 	}
10215 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10218 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10219 		if (adev->family < AMDGPU_FAMILY_AI) {
10220 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10221 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10222 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10223 		} else {
10224 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10225 		}
10226 		if (!linear) {
10227 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10228 			return -EINVAL;
10229 		}
10230 	}
10231 
10232 	return 0;
10233 }
10234 
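/*
 * Add or remove a plane's DC state in the DM atomic state. Called twice from
 * atomic check: once with enable == false to remove changed/disabled planes
 * from the DC context, and once with enable == true to add the new ones.
 * Cursor planes are only validated here; this function never adds them to the
 * DC context.
 */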
10235 static int dm_update_plane_state(struct dc *dc,
10236 				 struct drm_atomic_state *state,
10237 				 struct drm_plane *plane,
10238 				 struct drm_plane_state *old_plane_state,
10239 				 struct drm_plane_state *new_plane_state,
10240 				 bool enable,
10241 				 bool *lock_and_validation_needed)
10242 {
10243 
10244 	struct dm_atomic_state *dm_state = NULL;
10245 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10246 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10247 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10248 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10249 	struct amdgpu_crtc *new_acrtc;
10250 	bool needs_reset;
10251 	int ret = 0;
10252 
10253 
10254 	new_plane_crtc = new_plane_state->crtc;
10255 	old_plane_crtc = old_plane_state->crtc;
10256 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10257 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10258 
10259 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10260 		if (!enable || !new_plane_crtc ||
10261 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10262 			return 0;
10263 
10264 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10265 
10266 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10267 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10268 			return -EINVAL;
10269 		}
10270 
10271 		if (new_plane_state->fb) {
10272 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10273 						 new_plane_state->fb);
10274 			if (ret)
10275 				return ret;
10276 		}
10277 
10278 		return 0;
10279 	}
10280 
10281 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10282 					 new_plane_state);
10283 
10284 	/* Remove any changed/removed planes */
10285 	if (!enable) {
10286 		if (!needs_reset)
10287 			return 0;
10288 
10289 		if (!old_plane_crtc)
10290 			return 0;
10291 
10292 		old_crtc_state = drm_atomic_get_old_crtc_state(
10293 				state, old_plane_crtc);
10294 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10295 
10296 		if (!dm_old_crtc_state->stream)
10297 			return 0;
10298 
10299 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10300 				plane->base.id, old_plane_crtc->base.id);
10301 
10302 		ret = dm_atomic_get_state(state, &dm_state);
10303 		if (ret)
10304 			return ret;
10305 
10306 		if (!dc_remove_plane_from_context(
10307 				dc,
10308 				dm_old_crtc_state->stream,
10309 				dm_old_plane_state->dc_state,
10310 				dm_state->context)) {
10311 
10312 			return -EINVAL;
10313 		}
10314 
10315 
10316 		dc_plane_state_release(dm_old_plane_state->dc_state);
10317 		dm_new_plane_state->dc_state = NULL;
10318 
10319 		*lock_and_validation_needed = true;
10320 
10321 	} else { /* Add new planes */
10322 		struct dc_plane_state *dc_new_plane_state;
10323 
10324 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10325 			return 0;
10326 
10327 		if (!new_plane_crtc)
10328 			return 0;
10329 
10330 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10331 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10332 
10333 		if (!dm_new_crtc_state->stream)
10334 			return 0;
10335 
10336 		if (!needs_reset)
10337 			return 0;
10338 
10339 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10340 		if (ret)
10341 			return ret;
10342 
10343 		WARN_ON(dm_new_plane_state->dc_state);
10344 
10345 		dc_new_plane_state = dc_create_plane_state(dc);
10346 		if (!dc_new_plane_state)
10347 			return -ENOMEM;
10348 
10349 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10350 				 plane->base.id, new_plane_crtc->base.id);
10351 
10352 		ret = fill_dc_plane_attributes(
10353 			drm_to_adev(new_plane_crtc->dev),
10354 			dc_new_plane_state,
10355 			new_plane_state,
10356 			new_crtc_state);
10357 		if (ret) {
10358 			dc_plane_state_release(dc_new_plane_state);
10359 			return ret;
10360 		}
10361 
10362 		ret = dm_atomic_get_state(state, &dm_state);
10363 		if (ret) {
10364 			dc_plane_state_release(dc_new_plane_state);
10365 			return ret;
10366 		}
10367 
10368 		/*
10369 		 * Any atomic check errors that occur after this will
10370 		 * not need a release. The plane state will be attached
10371 		 * to the stream, and therefore part of the atomic
10372 		 * state. It'll be released when the atomic state is
10373 		 * cleaned.
10374 		 */
10375 		if (!dc_add_plane_to_context(
10376 				dc,
10377 				dm_new_crtc_state->stream,
10378 				dc_new_plane_state,
10379 				dm_state->context)) {
10380 
10381 			dc_plane_state_release(dc_new_plane_state);
10382 			return -EINVAL;
10383 		}
10384 
10385 		dm_new_plane_state->dc_state = dc_new_plane_state;
10386 
10387 		/* Tell DC to do a full surface update every time there
10388 		 * is a plane change. Inefficient, but works for now.
10389 		 */
10390 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10391 
10392 		*lock_and_validation_needed = true;
10393 	}
10394 
10395 
10396 	return ret;
10397 }
10398 
10399 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10400 				struct drm_crtc *crtc,
10401 				struct drm_crtc_state *new_crtc_state)
10402 {
10403 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10404 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10405 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it inherits the scaling and positioning from the
	 * underlying pipe. Check that the cursor plane's scaling matches the
	 * primary plane's.
	 */
10410 
10411 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10412 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10413 	if (!new_cursor_state || !new_primary_state ||
10414 	    !new_cursor_state->fb || !new_primary_state->fb) {
10415 		return 0;
10416 	}
10417 
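	/*
	 * Scaling factors below are computed in units of 1/1000: destination
	 * size (crtc_*) over source size (src_*, in 16.16 fixed point, hence
	 * the >> 16).
	 */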
10418 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10419 			 (new_cursor_state->src_w >> 16);
10420 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10421 			 (new_cursor_state->src_h >> 16);
10422 
10423 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10424 			 (new_primary_state->src_w >> 16);
10425 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10426 			 (new_primary_state->src_h >> 16);
10427 
10428 	if (cursor_scale_w != primary_scale_w ||
10429 	    cursor_scale_h != primary_scale_h) {
10430 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10431 		return -EINVAL;
10432 	}
10433 
10434 	return 0;
10435 }
10436 
10437 #if defined(CONFIG_DRM_AMD_DC_DCN)
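/*
 * For a CRTC undergoing a modeset, look up its MST connector (if any) and
 * pull every other CRTC sharing the same MST topology into the atomic state,
 * so that DSC configuration can be recomputed across the whole topology.
 */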
10438 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10439 {
10440 	struct drm_connector *connector;
10441 	struct drm_connector_state *conn_state;
10442 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
10445 		if (conn_state->crtc != crtc)
10446 			continue;
10447 
10448 		aconnector = to_amdgpu_dm_connector(connector);
10449 		if (!aconnector->port || !aconnector->mst_port)
10450 			aconnector = NULL;
10451 		else
10452 			break;
10453 	}
10454 
10455 	if (!aconnector)
10456 		return 0;
10457 
10458 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10459 }
10460 #endif
10461 
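/*
 * Reject configurations where an enabled overlay plane does not fully cover
 * the primary plane on the same CRTC. The bounds check below compares the
 * primary plane's destination rectangle against the overlay's; anything
 * sticking out results in -EINVAL.
 */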
10462 static int validate_overlay(struct drm_atomic_state *state)
10463 {
10464 	int i;
10465 	struct drm_plane *plane;
10466 	struct drm_plane_state *new_plane_state;
10467 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10468 
10469 	/* Check if primary plane is contained inside overlay */
10470 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10471 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10472 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10473 				return 0;
10474 
10475 			overlay_state = new_plane_state;
10476 			continue;
10477 		}
10478 	}
10479 
10480 	/* check if we're making changes to the overlay plane */
10481 	if (!overlay_state)
10482 		return 0;
10483 
10484 	/* check if overlay plane is enabled */
10485 	if (!overlay_state->crtc)
10486 		return 0;
10487 
10488 	/* find the primary plane for the CRTC that the overlay is enabled on */
10489 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10490 	if (IS_ERR(primary_state))
10491 		return PTR_ERR(primary_state);
10492 
10493 	/* check if primary plane is enabled */
10494 	if (!primary_state->crtc)
10495 		return 0;
10496 
10497 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10498 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10499 	    primary_state->crtc_y < overlay_state->crtc_y ||
10500 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10501 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10502 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10503 		return -EINVAL;
10504 	}
10505 
10506 	return 0;
10507 }
10508 
10509 /**
10510  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10511  * @dev: The DRM device
10512  * @state: The atomic state to commit
10513  *
10514  * Validate that the given atomic state is programmable by DC into hardware.
10515  * This involves constructing a &struct dc_state reflecting the new hardware
10516  * state we wish to commit, then querying DC to see if it is programmable. It's
10517  * important not to modify the existing DC state. Otherwise, atomic_check
10518  * may unexpectedly commit hardware changes.
10519  *
10520  * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes/adds/updates streams on
 * one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
10525  *
 * Note that DM adds the affected connectors for all CRTCs in state, even when
 * that might not seem necessary. This is because DC stream creation requires the
10528  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10529  * be possible but non-trivial - a possible TODO item.
10530  *
 * Return: 0 on success, or a negative error code if validation failed.
10532  */
10533 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10534 				  struct drm_atomic_state *state)
10535 {
10536 	struct amdgpu_device *adev = drm_to_adev(dev);
10537 	struct dm_atomic_state *dm_state = NULL;
10538 	struct dc *dc = adev->dm.dc;
10539 	struct drm_connector *connector;
10540 	struct drm_connector_state *old_con_state, *new_con_state;
10541 	struct drm_crtc *crtc;
10542 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10543 	struct drm_plane *plane;
10544 	struct drm_plane_state *old_plane_state, *new_plane_state;
10545 	enum dc_status status;
10546 	int ret, i;
10547 	bool lock_and_validation_needed = false;
10548 	struct dm_crtc_state *dm_old_crtc_state;
10549 
10550 	trace_amdgpu_dm_atomic_check_begin(state);
10551 
10552 	ret = drm_atomic_helper_check_modeset(dev, state);
10553 	if (ret)
10554 		goto fail;
10555 
10556 	/* Check connector changes */
10557 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10558 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10559 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10560 
10561 		/* Skip connectors that are disabled or part of modeset already. */
10562 		if (!old_con_state->crtc && !new_con_state->crtc)
10563 			continue;
10564 
10565 		if (!new_con_state->crtc)
10566 			continue;
10567 
10568 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10569 		if (IS_ERR(new_crtc_state)) {
10570 			ret = PTR_ERR(new_crtc_state);
10571 			goto fail;
10572 		}
10573 
10574 		if (dm_old_con_state->abm_level !=
10575 		    dm_new_con_state->abm_level)
10576 			new_crtc_state->connectors_changed = true;
10577 	}
10578 
10579 #if defined(CONFIG_DRM_AMD_DC_DCN)
10580 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10581 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10582 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10583 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10584 				if (ret)
10585 					goto fail;
10586 			}
10587 		}
10588 	}
10589 #endif
10590 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10591 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10592 
10593 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10594 		    !new_crtc_state->color_mgmt_changed &&
10595 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
10597 			continue;
10598 
10599 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10600 		if (ret)
10601 			goto fail;
10602 
10603 		if (!new_crtc_state->enable)
10604 			continue;
10605 
10606 		ret = drm_atomic_add_affected_connectors(state, crtc);
10607 		if (ret)
			goto fail;
10609 
10610 		ret = drm_atomic_add_affected_planes(state, crtc);
10611 		if (ret)
10612 			goto fail;
10613 
10614 		if (dm_old_crtc_state->dsc_force_changed)
10615 			new_crtc_state->mode_changed = true;
10616 	}
10617 
10618 	/*
10619 	 * Add all primary and overlay planes on the CRTC to the state
10620 	 * whenever a plane is enabled to maintain correct z-ordering
10621 	 * and to enable fast surface updates.
10622 	 */
10623 	drm_for_each_crtc(crtc, dev) {
10624 		bool modified = false;
10625 
10626 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10627 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10628 				continue;
10629 
10630 			if (new_plane_state->crtc == crtc ||
10631 			    old_plane_state->crtc == crtc) {
10632 				modified = true;
10633 				break;
10634 			}
10635 		}
10636 
10637 		if (!modified)
10638 			continue;
10639 
10640 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10641 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10642 				continue;
10643 
10644 			new_plane_state =
10645 				drm_atomic_get_plane_state(state, plane);
10646 
10647 			if (IS_ERR(new_plane_state)) {
10648 				ret = PTR_ERR(new_plane_state);
10649 				goto fail;
10650 			}
10651 		}
10652 	}
10653 
	/* Remove existing planes if they are modified */
10655 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10656 		ret = dm_update_plane_state(dc, state, plane,
10657 					    old_plane_state,
10658 					    new_plane_state,
10659 					    false,
10660 					    &lock_and_validation_needed);
10661 		if (ret)
10662 			goto fail;
10663 	}
10664 
10665 	/* Disable all crtcs which require disable */
10666 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10667 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10668 					   old_crtc_state,
10669 					   new_crtc_state,
10670 					   false,
10671 					   &lock_and_validation_needed);
10672 		if (ret)
10673 			goto fail;
10674 	}
10675 
10676 	/* Enable all crtcs which require enable */
10677 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10678 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10679 					   old_crtc_state,
10680 					   new_crtc_state,
10681 					   true,
10682 					   &lock_and_validation_needed);
10683 		if (ret)
10684 			goto fail;
10685 	}
10686 
10687 	ret = validate_overlay(state);
10688 	if (ret)
10689 		goto fail;
10690 
10691 	/* Add new/modified planes */
10692 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10693 		ret = dm_update_plane_state(dc, state, plane,
10694 					    old_plane_state,
10695 					    new_plane_state,
10696 					    true,
10697 					    &lock_and_validation_needed);
10698 		if (ret)
10699 			goto fail;
10700 	}
10701 
10702 	/* Run this here since we want to validate the streams we created */
10703 	ret = drm_atomic_helper_check_planes(dev, state);
10704 	if (ret)
10705 		goto fail;
10706 
10707 	/* Check cursor planes scaling */
10708 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10709 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10710 		if (ret)
10711 			goto fail;
10712 	}
10713 
10714 	if (state->legacy_cursor_update) {
10715 		/*
10716 		 * This is a fast cursor update coming from the plane update
10717 		 * helper, check if it can be done asynchronously for better
10718 		 * performance.
10719 		 */
10720 		state->async_update =
10721 			!drm_atomic_helper_async_check(dev, state);
10722 
10723 		/*
10724 		 * Skip the remaining global validation if this is an async
10725 		 * update. Cursor updates can be done without affecting
10726 		 * state or bandwidth calcs and this avoids the performance
10727 		 * penalty of locking the private state object and
10728 		 * allocating a new dc_state.
10729 		 */
10730 		if (state->async_update)
10731 			return 0;
10732 	}
10733 
	/* Check scaling and underscan changes */
10735 	/* TODO Removed scaling changes validation due to inability to commit
10736 	 * new stream into context w\o causing full reset. Need to
10737 	 * decide how to handle.
10738 	 */
10739 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10740 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10741 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10742 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10743 
10744 		/* Skip any modesets/resets */
10745 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10746 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10747 			continue;
10748 
		/* Skip anything that is not a scaling or underscan change */
10750 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10751 			continue;
10752 
10753 		lock_and_validation_needed = true;
10754 	}
10755 
10756 	/**
10757 	 * Streams and planes are reset when there are changes that affect
10758 	 * bandwidth. Anything that affects bandwidth needs to go through
10759 	 * DC global validation to ensure that the configuration can be applied
10760 	 * to hardware.
10761 	 *
	 * Currently we have to stall out here in atomic_check for outstanding
10763 	 * commits to finish in this case because our IRQ handlers reference
10764 	 * DRM state directly - we can end up disabling interrupts too early
10765 	 * if we don't.
10766 	 *
10767 	 * TODO: Remove this stall and drop DM state private objects.
10768 	 */
10769 	if (lock_and_validation_needed) {
10770 		ret = dm_atomic_get_state(state, &dm_state);
10771 		if (ret)
10772 			goto fail;
10773 
10774 		ret = do_aquire_global_lock(dev, state);
10775 		if (ret)
10776 			goto fail;
10777 
10778 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) {
			ret = -EINVAL;
			goto fail;
		}
10781 
10782 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10783 		if (ret)
10784 			goto fail;
10785 #endif
10786 
10787 		/*
10788 		 * Perform validation of MST topology in the state:
10789 		 * We need to perform MST atomic check before calling
10790 		 * dc_validate_global_state(), or there is a chance
10791 		 * to get stuck in an infinite loop and hang eventually.
10792 		 */
10793 		ret = drm_dp_mst_atomic_check(state);
10794 		if (ret)
10795 			goto fail;
10796 		status = dc_validate_global_state(dc, dm_state->context, false);
10797 		if (status != DC_OK) {
10798 			drm_dbg_atomic(dev,
10799 				       "DC global validation failure: %s (%d)",
10800 				       dc_status_to_str(status), status);
10801 			ret = -EINVAL;
10802 			goto fail;
10803 		}
10804 	} else {
10805 		/*
10806 		 * The commit is a fast update. Fast updates shouldn't change
10807 		 * the DC context, affect global validation, and can have their
10808 		 * commit work done in parallel with other commits not touching
10809 		 * the same resource. If we have a new DC context as part of
10810 		 * the DM atomic state from validation we need to free it and
10811 		 * retain the existing one instead.
10812 		 *
10813 		 * Furthermore, since the DM atomic state only contains the DC
10814 		 * context and can safely be annulled, we can free the state
10815 		 * and clear the associated private object now to free
10816 		 * some memory and avoid a possible use-after-free later.
10817 		 */
10818 
10819 		for (i = 0; i < state->num_private_objs; i++) {
10820 			struct drm_private_obj *obj = state->private_objs[i].ptr;
10821 
10822 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;
10824 
10825 				dm_atomic_destroy_state(obj,
10826 						state->private_objs[i].state);
10827 
10828 				/* If i is not at the end of the array then the
10829 				 * last element needs to be moved to where i was
10830 				 * before the array can safely be truncated.
10831 				 */
10832 				if (i != j)
10833 					state->private_objs[i] =
10834 						state->private_objs[j];
10835 
10836 				state->private_objs[j].ptr = NULL;
10837 				state->private_objs[j].state = NULL;
10838 				state->private_objs[j].old_state = NULL;
10839 				state->private_objs[j].new_state = NULL;
10840 
10841 				state->num_private_objs = j;
10842 				break;
10843 			}
10844 		}
10845 	}
10846 
10847 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10849 		struct dm_crtc_state *dm_new_crtc_state =
10850 			to_dm_crtc_state(new_crtc_state);
10851 
10852 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10853 							 UPDATE_TYPE_FULL :
10854 							 UPDATE_TYPE_FAST;
10855 	}
10856 
10857 	/* Must be success */
10858 	WARN_ON(ret);
10859 
10860 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10861 
10862 	return ret;
10863 
10864 fail:
10865 	if (ret == -EDEADLK)
10866 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10867 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10868 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10869 	else
10870 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
10871 
10872 	trace_amdgpu_dm_atomic_check_finish(state, ret);
10873 
10874 	return ret;
10875 }
10876 
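/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report whether the
 * MSA timing parameters may be ignored (DP_MSA_TIMING_PAR_IGNORED). This is
 * used below as a precondition for advertising FreeSync on DP/eDP sinks.
 */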
10877 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10878 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10879 {
10880 	uint8_t dpcd_data;
10881 	bool capable = false;
10882 
10883 	if (amdgpu_dm_connector->dc_link &&
10884 		dm_helpers_dp_read_dpcd(
10885 				NULL,
10886 				amdgpu_dm_connector->dc_link,
10887 				DP_DOWN_STREAM_PORT_COUNT,
10888 				&dpcd_data,
10889 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10891 	}
10892 
10893 	return capable;
10894 }
10895 
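/*
 * Hand one chunk of a CEA extension block to the DMUB firmware for parsing.
 * The reply is either an ACK for the chunk or the parsed AMD VSDB containing
 * the FreeSync refresh-rate range, which is copied into *vsdb.
 */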
10896 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10897 		unsigned int offset,
10898 		unsigned int total_length,
10899 		uint8_t *data,
10900 		unsigned int length,
10901 		struct amdgpu_hdmi_vsdb_info *vsdb)
10902 {
10903 	bool res;
10904 	union dmub_rb_cmd cmd;
10905 	struct dmub_cmd_send_edid_cea *input;
10906 	struct dmub_cmd_edid_cea_output *output;
10907 
10908 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10909 		return false;
10910 
10911 	memset(&cmd, 0, sizeof(cmd));
10912 
10913 	input = &cmd.edid_cea.data.input;
10914 
10915 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10916 	cmd.edid_cea.header.sub_type = 0;
10917 	cmd.edid_cea.header.payload_bytes =
10918 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10919 	input->offset = offset;
10920 	input->length = length;
10921 	input->total_length = total_length;
10922 	memcpy(input->payload, data, length);
10923 
10924 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
10925 	if (!res) {
10926 		DRM_ERROR("EDID CEA parser failed\n");
10927 		return false;
10928 	}
10929 
10930 	output = &cmd.edid_cea.data.output;
10931 
10932 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10933 		if (!output->ack.success) {
10934 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
10935 					output->ack.offset);
10936 		}
10937 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10938 		if (!output->amd_vsdb.vsdb_found)
10939 			return false;
10940 
10941 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10942 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10943 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10944 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10945 	} else {
10946 		DRM_WARN("Unknown EDID CEA parser results\n");
10947 		return false;
10948 	}
10949 
10950 	return true;
10951 }
10952 
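/*
 * Legacy DMCU path: stream the CEA extension block to the DMCU firmware in
 * 8-byte chunks, then query it for the AMD VSDB refresh-rate range once the
 * final chunk has been sent.
 */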
10953 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10954 		uint8_t *edid_ext, int len,
10955 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10956 {
10957 	int i;
10958 
10959 	/* send extension block to DMCU for parsing */
10960 	for (i = 0; i < len; i += 8) {
10961 		bool res;
10962 		int offset;
10963 
		/* send 8 bytes at a time */
10965 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10966 			return false;
10967 
		if (i + 8 == len) {
			/* whole EDID block sent, expect a result */
10970 			int version, min_rate, max_rate;
10971 
10972 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10973 			if (res) {
10974 				/* amd vsdb found */
10975 				vsdb_info->freesync_supported = 1;
10976 				vsdb_info->amd_vsdb_version = version;
10977 				vsdb_info->min_refresh_rate_hz = min_rate;
10978 				vsdb_info->max_refresh_rate_hz = max_rate;
10979 				return true;
10980 			}
10981 			/* not amd vsdb */
10982 			return false;
10983 		}
10984 
		/* check for ack */
10986 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10987 		if (!res)
10988 			return false;
10989 	}
10990 
10991 	return false;
10992 }
10993 
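/*
 * DMUB path: same flow as the DMCU variant above, except each 8-byte chunk is
 * sent through dm_edid_parser_send_cea(), which also fills in vsdb_info from
 * the firmware reply.
 */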
10994 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10995 		uint8_t *edid_ext, int len,
10996 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10997 {
10998 	int i;
10999 
	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11003 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11004 			return false;
11005 	}
11006 
11007 	return vsdb_info->freesync_supported;
11008 }
11009 
11010 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11011 		uint8_t *edid_ext, int len,
11012 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11013 {
11014 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11015 
11016 	if (adev->dm.dmub_srv)
11017 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11018 	else
11019 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11020 }
11021 
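/*
 * Locate the CEA-861 extension block in the EDID and hand it to the firmware
 * parser. Returns the index of the CEA extension on success, or -ENODEV if no
 * CEA block exists or no AMD VSDB with FreeSync information was found.
 */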
11022 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11023 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11024 {
11025 	uint8_t *edid_ext = NULL;
11026 	int i;
11027 	bool valid_vsdb_found = false;
11028 
11029 	/*----- drm_find_cea_extension() -----*/
11030 	/* No EDID or EDID extensions */
11031 	if (edid == NULL || edid->extensions == 0)
11032 		return -ENODEV;
11033 
11034 	/* Find CEA extension */
11035 	for (i = 0; i < edid->extensions; i++) {
11036 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11037 		if (edid_ext[0] == CEA_EXT)
11038 			break;
11039 	}
11040 
11041 	if (i == edid->extensions)
11042 		return -ENODEV;
11043 
11044 	/*----- cea_db_offsets() -----*/
11045 	if (edid_ext[0] != CEA_EXT)
11046 		return -ENODEV;
11047 
11048 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11049 
11050 	return valid_vsdb_found ? i : -ENODEV;
11051 }
11052 
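/*
 * Update the connector's FreeSync/VRR capability from its EDID. For DP/eDP
 * sinks that can ignore MSA timing, the refresh-rate range comes from the
 * detailed monitor range descriptor; for HDMI sinks it comes from the AMD
 * vendor-specific data block. The result is mirrored into the DRM
 * "vrr_capable" connector property.
 */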
11053 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11054 					struct edid *edid)
11055 {
11056 	int i = 0;
11057 	struct detailed_timing *timing;
11058 	struct detailed_non_pixel *data;
11059 	struct detailed_data_monitor_range *range;
11060 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11061 			to_amdgpu_dm_connector(connector);
11062 	struct dm_connector_state *dm_con_state = NULL;
11063 	struct dc_sink *sink;
11064 
11065 	struct drm_device *dev = connector->dev;
11066 	struct amdgpu_device *adev = drm_to_adev(dev);
11067 	bool freesync_capable = false;
11068 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11069 
11070 	if (!connector->state) {
11071 		DRM_ERROR("%s - Connector has no state", __func__);
11072 		goto update;
11073 	}
11074 
11075 	sink = amdgpu_dm_connector->dc_sink ?
11076 		amdgpu_dm_connector->dc_sink :
11077 		amdgpu_dm_connector->dc_em_sink;
11078 
11079 	if (!edid || !sink) {
11080 		dm_con_state = to_dm_connector_state(connector->state);
11081 
11082 		amdgpu_dm_connector->min_vfreq = 0;
11083 		amdgpu_dm_connector->max_vfreq = 0;
11084 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11085 		connector->display_info.monitor_range.min_vfreq = 0;
11086 		connector->display_info.monitor_range.max_vfreq = 0;
11087 		freesync_capable = false;
11088 
11089 		goto update;
11090 	}
11091 
11092 	dm_con_state = to_dm_connector_state(connector->state);
11093 
11094 	if (!adev->dm.freesync_module)
11095 		goto update;
11096 
11097 
11098 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11099 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11100 		bool edid_check_required = false;
11101 
11102 		if (edid) {
11103 			edid_check_required = is_dp_capable_without_timing_msa(
11104 						adev->dm.dc,
11105 						amdgpu_dm_connector);
11106 		}
11107 
		if (edid_check_required && (edid->version > 1 ||
11109 		   (edid->version == 1 && edid->revision > 1))) {
11110 			for (i = 0; i < 4; i++) {
11111 
11112 				timing	= &edid->detailed_timings[i];
11113 				data	= &timing->data.other_data;
11114 				range	= &data->data.range;
11115 				/*
11116 				 * Check if monitor has continuous frequency mode
11117 				 */
11118 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11119 					continue;
11120 				/*
11121 				 * Check for flag range limits only. If flag == 1 then
11122 				 * no additional timing information provided.
11123 				 * Default GTF, GTF Secondary curve and CVT are not
11124 				 * supported
11125 				 */
11126 				if (range->flags != 1)
11127 					continue;
11128 
11129 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11130 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11131 				amdgpu_dm_connector->pixel_clock_mhz =
11132 					range->pixel_clock_mhz * 10;
11133 
11134 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11135 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11136 
11137 				break;
11138 			}
11139 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11145 		}
11146 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11147 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11148 		if (i >= 0 && vsdb_info.freesync_supported) {
11149 			timing  = &edid->detailed_timings[i];
11150 			data    = &timing->data.other_data;
11151 
11152 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11153 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11154 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11155 				freesync_capable = true;
11156 
11157 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11158 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11159 		}
11160 	}
11161 
11162 update:
11163 	if (dm_con_state)
11164 		dm_con_state->freesync_capable = freesync_capable;
11165 
11166 	if (connector->vrr_capable_property)
11167 		drm_connector_set_vrr_capable_property(connector,
11168 						       freesync_capable);
11169 }
11170 
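/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and ask DC to re-run per-frame CRTC master synchronization, all under
 * the DC lock.
 */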
11171 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11172 {
11173 	struct amdgpu_device *adev = drm_to_adev(dev);
11174 	struct dc *dc = adev->dm.dc;
11175 	int i;
11176 
11177 	mutex_lock(&adev->dm.dc_lock);
11178 	if (dc->current_state) {
11179 		for (i = 0; i < dc->current_state->stream_count; ++i)
11180 			dc->current_state->streams[i]
11181 				->triggered_crtc_reset.enabled =
11182 				adev->dm.force_timing_sync;
11183 
11184 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11185 		dc_trigger_sync(dc, dc->current_state);
11186 	}
11187 	mutex_unlock(&adev->dm.dc_lock);
11188 }
11189 
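/*
 * Register write helper used by DC: writes through CGS and emits the
 * amdgpu_dc_wreg tracepoint.
 */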
11190 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11191 		       uint32_t value, const char *func_name)
11192 {
11193 #ifdef DM_CHECK_ADDR_0
11194 	if (address == 0) {
11195 		DC_ERR("invalid register write. address = 0");
11196 		return;
11197 	}
11198 #endif
11199 	cgs_write_register(ctx->cgs_device, address, value);
11200 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11201 }
11202 
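/*
 * Register read helper used by DC: reads through CGS and emits the
 * amdgpu_dc_rreg tracepoint. Reads are rejected (ASSERT and return 0) while a
 * DMUB register-offload gather is in progress and burst writes are not
 * expected.
 */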
11203 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11204 			  const char *func_name)
11205 {
11206 	uint32_t value;
11207 #ifdef DM_CHECK_ADDR_0
11208 	if (address == 0) {
11209 		DC_ERR("invalid register read; address = 0\n");
11210 		return 0;
11211 	}
11212 #endif
11213 
11214 	if (ctx->dmub_srv &&
11215 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11216 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11217 		ASSERT(false);
11218 		return 0;
11219 	}
11220 
11221 	value = cgs_read_register(ctx->cgs_device, address);
11222 
11223 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11224 
11225 	return value;
11226 }
11227 
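/*
 * Kick off a DMUB-backed AUX transaction and wait (interruptibly, up to 10
 * seconds) for the dmub_aux_transfer_done completion. On timeout the
 * operation result is AUX_RET_ERROR_TIMEOUT and -1 is returned; otherwise the
 * reply command (and, for successful reads, the data) is copied back into the
 * payload and the reply length is returned.
 */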
11228 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11229 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
11230 {
11231 	struct amdgpu_device *adev = ctx->driver_context;
11232 	int ret = 0;
11233 
11234 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11235 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
11236 	if (ret == 0) {
11237 		*operation_result = AUX_RET_ERROR_TIMEOUT;
11238 		return -1;
11239 	}
11240 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11241 
11242 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11243 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11244 
		/* For the read case, copy the data to the payload */
11246 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11247 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
11248 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11249 			adev->dm.dmub_notify->aux_reply.length);
11250 	}
11251 
11252 	return adev->dm.dmub_notify->aux_reply.length;
11253 }
11254