1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/drm_hdcp.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/drm_atomic.h>
77 #include <drm/drm_atomic_uapi.h>
78 #include <drm/drm_atomic_helper.h>
79 #include <drm/drm_dp_mst_helper.h>
80 #include <drm/drm_fb_helper.h>
81 #include <drm/drm_fourcc.h>
82 #include <drm/drm_edid.h>
83 #include <drm/drm_vblank.h>
84 #include <drm/drm_audio_component.h>
85 
86 #if defined(CONFIG_DRM_AMD_DC_DCN)
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93 
94 #include "soc15_common.h"
95 #endif
96 
97 #include "modules/inc/mod_freesync.h"
98 #include "modules/power/power_helpers.h"
99 #include "modules/inc/mod_info_packet.h"
100 
101 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117 
118 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
119 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120 
121 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123 
124 /* Number of bytes in PSP header for firmware. */
125 #define PSP_HEADER_BYTES 0x100
126 
127 /* Number of bytes in PSP footer for firmware. */
128 #define PSP_FOOTER_BYTES 0x100
129 
130 /**
131  * DOC: overview
132  *
133  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135  * requests into DC requests, and DC responses into DRM responses.
136  *
137  * The root control structure is &struct amdgpu_display_manager.
138  */
139 
140 /* basic init/fini API */
141 static int amdgpu_dm_init(struct amdgpu_device *adev);
142 static void amdgpu_dm_fini(struct amdgpu_device *adev);
143 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144 
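/*
 * Map the dongle type reported in the link's DPCD caps to the corresponding
 * DRM subconnector type, used for the DisplayPort subconnector property.
 */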
145 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146 {
147 	switch (link->dpcd_caps.dongle_type) {
148 	case DISPLAY_DONGLE_NONE:
149 		return DRM_MODE_SUBCONNECTOR_Native;
150 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151 		return DRM_MODE_SUBCONNECTOR_VGA;
152 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
154 		return DRM_MODE_SUBCONNECTOR_DVID;
155 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157 		return DRM_MODE_SUBCONNECTOR_HDMIA;
158 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 	default:
160 		return DRM_MODE_SUBCONNECTOR_Unknown;
161 	}
162 }
163 
164 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165 {
166 	struct dc_link *link = aconnector->dc_link;
167 	struct drm_connector *connector = &aconnector->base;
168 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169 
170 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171 		return;
172 
173 	if (aconnector->dc_sink)
174 		subconnector = get_subconnector_type(link);
175 
176 	drm_object_property_set_value(&connector->base,
177 			connector->dev->mode_config.dp_subconnector_property,
178 			subconnector);
179 }
180 
181 /*
182  * initializes drm_device display related structures, based on the information
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
184  * drm_encoder, drm_mode_config
185  *
186  * Returns 0 on success
187  */
188 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189 /* removes and deallocates the drm structures, created by the above function */
190 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191 
192 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 				struct drm_plane *plane,
194 				unsigned long possible_crtcs,
195 				const struct dc_plane_cap *plane_cap);
196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 			       struct drm_plane *plane,
198 			       uint32_t link_index);
199 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
201 				    uint32_t link_index,
202 				    struct amdgpu_encoder *amdgpu_encoder);
203 static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 				  struct amdgpu_encoder *aencoder,
205 				  uint32_t link_index);
206 
207 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208 
209 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210 
211 static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 				  struct drm_atomic_state *state);
213 
214 static void handle_cursor_update(struct drm_plane *plane,
215 				 struct drm_plane_state *old_plane_state);
216 
217 static const struct drm_format_info *
218 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219 
220 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221 static void handle_hpd_rx_irq(void *param);
222 
223 static bool
224 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225 				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 *
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: counter for vertical blanks, or 0 on error
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
255 
256 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257 				  u32 *vbl, u32 *position)
258 {
259 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
260 
261 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 		return -EINVAL;
263 	else {
264 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
265 
		if (acrtc->dm_irq_params.stream == NULL) {
267 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268 				  crtc);
269 			return 0;
270 		}
271 
272 		/*
273 		 * TODO rework base driver to use values directly.
274 		 * for now parse it back into reg-format
275 		 */
276 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277 					 &v_blank_start,
278 					 &v_blank_end,
279 					 &h_position,
280 					 &v_position);
281 
282 		*position = v_position | (h_position << 16);
283 		*vbl = v_blank_start | (v_blank_end << 16);
284 	}
285 
286 	return 0;
287 }
288 
289 static bool dm_is_idle(void *handle)
290 {
291 	/* XXX todo */
292 	return true;
293 }
294 
295 static int dm_wait_for_idle(void *handle)
296 {
297 	/* XXX todo */
298 	return 0;
299 }
300 
301 static bool dm_check_soft_reset(void *handle)
302 {
303 	return false;
304 }
305 
306 static int dm_soft_reset(void *handle)
307 {
308 	/* XXX todo */
309 	return 0;
310 }
311 
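/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance. Falls back to the first CRTC, with a warning, when the instance
 * is -1.
 */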
312 static struct amdgpu_crtc *
313 get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 		     int otg_inst)
315 {
316 	struct drm_device *dev = adev_to_drm(adev);
317 	struct drm_crtc *crtc;
318 	struct amdgpu_crtc *amdgpu_crtc;
319 
320 	if (WARN_ON(otg_inst == -1))
321 		return adev->mode_info.crtcs[0];
322 
323 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324 		amdgpu_crtc = to_amdgpu_crtc(crtc);
325 
326 		if (amdgpu_crtc->otg_inst == otg_inst)
327 			return amdgpu_crtc;
328 	}
329 
330 	return NULL;
331 }
332 
333 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334 {
335 	return acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_VARIABLE ||
337 	       acrtc->dm_irq_params.freesync_config.state ==
338 		       VRR_STATE_ACTIVE_FIXED;
339 }
340 
341 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342 {
343 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345 }
346 
347 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348 					      struct dm_crtc_state *new_state)
349 {
350 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
351 		return true;
352 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353 		return true;
354 	else
355 		return false;
356 }
357 
358 /**
359  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC that completed the flip
361  *
362  * Handles the pageflip interrupt by notifying all interested parties
363  * that the pageflip has been completed.
364  */
365 static void dm_pflip_high_irq(void *interrupt_params)
366 {
367 	struct amdgpu_crtc *amdgpu_crtc;
368 	struct common_irq_params *irq_params = interrupt_params;
369 	struct amdgpu_device *adev = irq_params->adev;
370 	unsigned long flags;
371 	struct drm_pending_vblank_event *e;
372 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
373 	bool vrr_active;
374 
375 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376 
377 	/* IRQ could occur when in initial stage */
378 	/* TODO work and BO cleanup */
379 	if (amdgpu_crtc == NULL) {
380 		DC_LOG_PFLIP("CRTC is null, returning.\n");
381 		return;
382 	}
383 
384 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
388 						 amdgpu_crtc->pflip_status,
389 						 AMDGPU_FLIP_SUBMITTED,
390 						 amdgpu_crtc->crtc_id,
391 						 amdgpu_crtc);
392 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393 		return;
394 	}
395 
396 	/* page flip completed. */
397 	e = amdgpu_crtc->event;
398 	amdgpu_crtc->event = NULL;
399 
400 	WARN_ON(!e);
401 
402 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403 
404 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
405 	if (!vrr_active ||
406 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 				      &v_blank_end, &hpos, &vpos) ||
408 	    (vpos < v_blank_start)) {
409 		/* Update to correct count and vblank timestamp if racing with
410 		 * vblank irq. This also updates to the correct vblank timestamp
411 		 * even in VRR mode, as scanout is past the front-porch atm.
412 		 */
413 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414 
415 		/* Wake up userspace by sending the pageflip event with proper
416 		 * count and timestamp of vblank of flip completion.
417 		 */
418 		if (e) {
419 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420 
421 			/* Event sent, so done with vblank for this flip */
422 			drm_crtc_vblank_put(&amdgpu_crtc->base);
423 		}
424 	} else if (e) {
425 		/* VRR active and inside front-porch: vblank count and
426 		 * timestamp for pageflip event will only be up to date after
427 		 * drm_crtc_handle_vblank() has been executed from late vblank
428 		 * irq handler after start of back-porch (vline 0). We queue the
429 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 		 * updated timestamp and count, once it runs after us.
431 		 *
432 		 * We need to open-code this instead of using the helper
433 		 * drm_crtc_arm_vblank_event(), as that helper would
434 		 * call drm_crtc_accurate_vblank_count(), which we must
435 		 * not call in VRR mode while we are in front-porch!
436 		 */
437 
438 		/* sequence will be replaced by real count during send-out. */
439 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 		e->pipe = amdgpu_crtc->crtc_id;
441 
442 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443 		e = NULL;
444 	}
445 
446 	/* Keep track of vblank of this flip for flip throttling. We use the
447 	 * cooked hw counter, as that one incremented at start of this vblank
448 	 * of pageflip completion, so last_flip_vblank is the forbidden count
449 	 * for queueing new pageflips if vsync + VRR is enabled.
450 	 */
451 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453 
454 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456 
457 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
459 		     vrr_active, (int) !e);
460 }
461 
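/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate and, when variable refresh rate is
 * active, performs core vblank handling after the end of front-porch,
 * including BTR processing for pre-DCE12 ASICs.
 */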
462 static void dm_vupdate_high_irq(void *interrupt_params)
463 {
464 	struct common_irq_params *irq_params = interrupt_params;
465 	struct amdgpu_device *adev = irq_params->adev;
466 	struct amdgpu_crtc *acrtc;
467 	struct drm_device *drm_dev;
468 	struct drm_vblank_crtc *vblank;
469 	ktime_t frame_duration_ns, previous_timestamp;
470 	unsigned long flags;
471 	int vrr_active;
472 
473 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474 
475 	if (acrtc) {
476 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 		drm_dev = acrtc->base.dev;
478 		vblank = &drm_dev->vblank[acrtc->base.index];
479 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 		frame_duration_ns = vblank->time - previous_timestamp;
481 
482 		if (frame_duration_ns > 0) {
483 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
484 						frame_duration_ns,
485 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 		}
488 
489 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490 			      acrtc->crtc_id,
491 			      vrr_active);
492 
493 		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping only gives valid results
		 * once done after the end of front-porch. This will also deliver
496 		 * page-flip completion events that have been queued to us
497 		 * if a pageflip happened inside front-porch.
498 		 */
499 		if (vrr_active) {
500 			drm_crtc_handle_vblank(&acrtc->base);
501 
502 			/* BTR processing for pre-DCE12 ASICs */
503 			if (acrtc->dm_irq_params.stream &&
504 			    adev->family < AMDGPU_FAMILY_AI) {
505 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 				mod_freesync_handle_v_update(
507 				    adev->dm.freesync_module,
508 				    acrtc->dm_irq_params.stream,
509 				    &acrtc->dm_irq_params.vrr_params);
510 
511 				dc_stream_adjust_vmin_vmax(
512 				    adev->dm.dc,
513 				    acrtc->dm_irq_params.stream,
514 				    &acrtc->dm_irq_params.vrr_params.adjust);
515 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516 			}
517 		}
518 	}
519 }
520 
521 /**
522  * dm_crtc_high_irq() - Handles CRTC interrupt
523  * @interrupt_params: used for determining the CRTC instance
524  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526  * event handler.
527  */
528 static void dm_crtc_high_irq(void *interrupt_params)
529 {
530 	struct common_irq_params *irq_params = interrupt_params;
531 	struct amdgpu_device *adev = irq_params->adev;
532 	struct amdgpu_crtc *acrtc;
533 	unsigned long flags;
534 	int vrr_active;
535 
536 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537 	if (!acrtc)
538 		return;
539 
540 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541 
542 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 		      vrr_active, acrtc->dm_irq_params.active_planes);
544 
	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only then does vblank timestamping give
	 * valid results while done in front-porch. Otherwise defer it
549 	 * to dm_vupdate_high_irq after end of front-porch.
550 	 */
551 	if (!vrr_active)
552 		drm_crtc_handle_vblank(&acrtc->base);
553 
	/*
	 * The following must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
557 	 */
558 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559 
560 	/* BTR updates need to happen before VUPDATE on Vega and above. */
561 	if (adev->family < AMDGPU_FAMILY_AI)
562 		return;
563 
564 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565 
566 	if (acrtc->dm_irq_params.stream &&
567 	    acrtc->dm_irq_params.vrr_params.supported &&
568 	    acrtc->dm_irq_params.freesync_config.state ==
569 		    VRR_STATE_ACTIVE_VARIABLE) {
570 		mod_freesync_handle_v_update(adev->dm.freesync_module,
571 					     acrtc->dm_irq_params.stream,
572 					     &acrtc->dm_irq_params.vrr_params);
573 
574 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 					   &acrtc->dm_irq_params.vrr_params.adjust);
576 	}
577 
578 	/*
579 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
580 	 * In that case, pageflip completion interrupts won't fire and pageflip
581 	 * completion events won't get delivered. Prevent this by sending
582 	 * pending pageflip events from here if a flip is still pending.
583 	 *
584 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 	 * avoid race conditions between flip programming and completion,
586 	 * which could cause too early flip completion events.
587 	 */
588 	if (adev->family >= AMDGPU_FAMILY_RV &&
589 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 	    acrtc->dm_irq_params.active_planes == 0) {
591 		if (acrtc->event) {
592 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593 			acrtc->event = NULL;
594 			drm_crtc_vblank_put(&acrtc->base);
595 		}
596 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 	}
598 
599 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600 }
601 
602 #if defined(CONFIG_DRM_AMD_DC_DCN)
603 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
604 /**
605  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606  * DCN generation ASICs
607  * @interrupt_params: interrupt parameters
608  *
609  * Used to set crc window/read out crc value at vertical line 0 position
610  */
611 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612 {
613 	struct common_irq_params *irq_params = interrupt_params;
614 	struct amdgpu_device *adev = irq_params->adev;
615 	struct amdgpu_crtc *acrtc;
616 
617 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618 
619 	if (!acrtc)
620 		return;
621 
622 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623 }
624 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
625 
626 /**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * DMUB AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, to be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
634  */
635 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
636 					struct dmub_notification *notify)
637 {
638 	if (adev->dm.dmub_notify)
639 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
640 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
641 		complete(&adev->dm.dmub_aux_transfer_done);
642 }
643 
644 /**
645  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
646  * @adev: amdgpu_device pointer
647  * @notify: dmub notification structure
648  *
 * DMUB HPD interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
651  */
652 static void dmub_hpd_callback(struct amdgpu_device *adev,
653 			      struct dmub_notification *notify)
654 {
655 	struct amdgpu_dm_connector *aconnector;
656 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
657 	struct drm_connector *connector;
658 	struct drm_connector_list_iter iter;
659 	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	dev = adev->dm.ddev;
665 
666 	if (notify == NULL) {
667 		DRM_ERROR("DMUB HPD callback notification was NULL");
668 		return;
669 	}
670 
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
673 		return;
674 	}
675 
676 	link_index = notify->link_index;
677 	link = adev->dm.dc->links[link_index];
678 
679 	drm_connector_list_iter_begin(dev, &iter);
680 	drm_for_each_connector_iter(connector, &iter) {
681 		aconnector = to_amdgpu_dm_connector(connector);
682 		if (link && aconnector->dc_link == link) {
683 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
684 			hpd_aconnector = aconnector;
685 			break;
686 		}
687 	}
688 	drm_connector_list_iter_end(&iter);
689 
690 	if (hpd_aconnector) {
691 		if (notify->type == DMUB_NOTIFICATION_HPD)
692 			handle_hpd_irq_helper(hpd_aconnector);
693 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
694 			handle_hpd_rx_irq(hpd_aconnector);
695 	}
696 }
697 
698 /**
699  * register_dmub_notify_callback - Sets callback for DMUB notify
700  * @adev: amdgpu_device pointer
701  * @type: Type of dmub notification
702  * @callback: Dmub interrupt callback function
703  * @dmub_int_thread_offload: offload indicator
704  *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false otherwise
709  */
710 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
711 					  enum dmub_notification_type type,
712 					  dmub_notify_interrupt_callback_t callback,
713 					  bool dmub_int_thread_offload)
714 {
	if (callback == NULL || type >= ARRAY_SIZE(adev->dm.dmub_thread_offload))
		return false;

	adev->dm.dmub_callback[type] = callback;
	adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;

	return true;
722 }
723 
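/*
 * Deferred work handler for DMUB HPD notifications that were offloaded from
 * the outbox interrupt: dispatches the saved notification to the callback
 * registered for its type and then frees the work item.
 */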
724 static void dm_handle_hpd_work(struct work_struct *work)
725 {
726 	struct dmub_hpd_work *dmub_hpd_wrk;
727 
728 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
729 
730 	if (!dmub_hpd_wrk->dmub_notify) {
731 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
732 		return;
733 	}
734 
735 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
736 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
737 		dmub_hpd_wrk->dmub_notify);
738 	}
739 
740 	kfree(dmub_hpd_wrk->dmub_notify);
741 	kfree(dmub_hpd_wrk);
742 
743 }
744 
745 #define DMUB_TRACE_MAX_READ 64
746 /**
747  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
748  * @interrupt_params: used for determining the Outbox instance
749  *
 * Handles the Outbox interrupt by reading DMUB notifications and trace log
 * entries and dispatching them to the registered handlers.
752  */
753 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
754 {
755 	struct dmub_notification notify;
756 	struct common_irq_params *irq_params = interrupt_params;
757 	struct amdgpu_device *adev = irq_params->adev;
758 	struct amdgpu_display_manager *dm = &adev->dm;
759 	struct dmcub_trace_buf_entry entry = { 0 };
760 	uint32_t count = 0;
761 	struct dmub_hpd_work *dmub_hpd_wrk;
762 	struct dc_link *plink = NULL;
763 
764 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
765 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
766 
767 		do {
768 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
770 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
771 				continue;
772 			}
773 			if (!dm->dmub_callback[notify.type]) {
774 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
775 				continue;
776 			}
777 			if (dm->dmub_thread_offload[notify.type] == true) {
778 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
779 				if (!dmub_hpd_wrk) {
780 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
781 					return;
782 				}
783 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
784 				if (!dmub_hpd_wrk->dmub_notify) {
785 					kfree(dmub_hpd_wrk);
786 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
787 					return;
788 				}
789 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
790 				if (dmub_hpd_wrk->dmub_notify)
791 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
792 				dmub_hpd_wrk->adev = adev;
793 				if (notify.type == DMUB_NOTIFICATION_HPD) {
794 					plink = adev->dm.dc->links[notify.link_index];
795 					if (plink) {
796 						plink->hpd_status =
797 							notify.hpd_status == DP_HPD_PLUG;
798 					}
799 				}
800 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
801 			} else {
802 				dm->dmub_callback[notify.type](adev, &notify);
803 			}
804 		} while (notify.pending_notification);
805 	}
806 
807 
808 	do {
809 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
810 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
811 							entry.param0, entry.param1);
812 
813 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
814 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
815 		} else
816 			break;
817 
818 		count++;
819 
820 	} while (count <= DMUB_TRACE_MAX_READ);
821 
822 	if (count > DMUB_TRACE_MAX_READ)
823 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
824 }
825 #endif /* CONFIG_DRM_AMD_DC_DCN */
826 
827 static int dm_set_clockgating_state(void *handle,
828 		  enum amd_clockgating_state state)
829 {
830 	return 0;
831 }
832 
833 static int dm_set_powergating_state(void *handle,
834 		  enum amd_powergating_state state)
835 {
836 	return 0;
837 }
838 
839 /* Prototypes of private functions */
static int dm_early_init(void *handle);
841 
842 /* Allocate memory for FBC compressed data  */
843 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
844 {
845 	struct drm_device *dev = connector->dev;
846 	struct amdgpu_device *adev = drm_to_adev(dev);
847 	struct dm_compressor_info *compressor = &adev->dm.compressor;
848 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
849 	struct drm_display_mode *mode;
850 	unsigned long max_size = 0;
851 
852 	if (adev->dm.dc->fbc_compressor == NULL)
853 		return;
854 
855 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
856 		return;
857 
858 	if (compressor->bo_ptr)
859 		return;
860 
861 
862 	list_for_each_entry(mode, &connector->modes, head) {
863 		if (max_size < mode->htotal * mode->vtotal)
864 			max_size = mode->htotal * mode->vtotal;
865 	}
866 
867 	if (max_size) {
868 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
869 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
870 			    &compressor->gpu_addr, &compressor->cpu_addr);
871 
872 		if (r)
873 			DRM_ERROR("DM: Failed to initialize FBC\n");
874 		else {
875 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
876 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
877 		}
878 
879 	}
880 
881 }
882 
883 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
884 					  int pipe, bool *enabled,
885 					  unsigned char *buf, int max_bytes)
886 {
887 	struct drm_device *dev = dev_get_drvdata(kdev);
888 	struct amdgpu_device *adev = drm_to_adev(dev);
889 	struct drm_connector *connector;
890 	struct drm_connector_list_iter conn_iter;
891 	struct amdgpu_dm_connector *aconnector;
892 	int ret = 0;
893 
894 	*enabled = false;
895 
896 	mutex_lock(&adev->dm.audio_lock);
897 
898 	drm_connector_list_iter_begin(dev, &conn_iter);
899 	drm_for_each_connector_iter(connector, &conn_iter) {
900 		aconnector = to_amdgpu_dm_connector(connector);
901 		if (aconnector->audio_inst != port)
902 			continue;
903 
904 		*enabled = true;
905 		ret = drm_eld_size(connector->eld);
906 		memcpy(buf, connector->eld, min(max_bytes, ret));
907 
908 		break;
909 	}
910 	drm_connector_list_iter_end(&conn_iter);
911 
912 	mutex_unlock(&adev->dm.audio_lock);
913 
914 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
915 
916 	return ret;
917 }
918 
919 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
920 	.get_eld = amdgpu_dm_audio_component_get_eld,
921 };
922 
923 static int amdgpu_dm_audio_component_bind(struct device *kdev,
924 				       struct device *hda_kdev, void *data)
925 {
926 	struct drm_device *dev = dev_get_drvdata(kdev);
927 	struct amdgpu_device *adev = drm_to_adev(dev);
928 	struct drm_audio_component *acomp = data;
929 
930 	acomp->ops = &amdgpu_dm_audio_component_ops;
931 	acomp->dev = kdev;
932 	adev->dm.audio_component = acomp;
933 
934 	return 0;
935 }
936 
937 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
938 					  struct device *hda_kdev, void *data)
939 {
940 	struct drm_device *dev = dev_get_drvdata(kdev);
941 	struct amdgpu_device *adev = drm_to_adev(dev);
942 	struct drm_audio_component *acomp = data;
943 
944 	acomp->ops = NULL;
945 	acomp->dev = NULL;
946 	adev->dm.audio_component = NULL;
947 }
948 
949 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
950 	.bind	= amdgpu_dm_audio_component_bind,
951 	.unbind	= amdgpu_dm_audio_component_unbind,
952 };
953 
954 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
955 {
956 	int i, ret;
957 
958 	if (!amdgpu_audio)
959 		return 0;
960 
961 	adev->mode_info.audio.enabled = true;
962 
963 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
964 
965 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
966 		adev->mode_info.audio.pin[i].channels = -1;
967 		adev->mode_info.audio.pin[i].rate = -1;
968 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
969 		adev->mode_info.audio.pin[i].status_bits = 0;
970 		adev->mode_info.audio.pin[i].category_code = 0;
971 		adev->mode_info.audio.pin[i].connected = false;
972 		adev->mode_info.audio.pin[i].id =
973 			adev->dm.dc->res_pool->audios[i]->inst;
974 		adev->mode_info.audio.pin[i].offset = 0;
975 	}
976 
977 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
978 	if (ret < 0)
979 		return ret;
980 
981 	adev->dm.audio_registered = true;
982 
983 	return 0;
984 }
985 
986 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
987 {
988 	if (!amdgpu_audio)
989 		return;
990 
991 	if (!adev->mode_info.audio.enabled)
992 		return;
993 
994 	if (adev->dm.audio_registered) {
995 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
996 		adev->dm.audio_registered = false;
997 	}
998 
999 	/* TODO: Disable audio? */
1000 
1001 	adev->mode_info.audio.enabled = false;
1002 }
1003 
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1005 {
1006 	struct drm_audio_component *acomp = adev->dm.audio_component;
1007 
1008 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1009 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1010 
1011 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1012 						 pin, -1);
1013 	}
1014 }
1015 
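/**
 * dm_dmub_hw_init() - Initialize DMUB firmware and hardware state
 * @adev: amdgpu_device pointer
 *
 * Copies the DMUB firmware and VBIOS into their framebuffer windows, clears
 * the mailbox, trace buffer and firmware state regions, programs the
 * hardware parameters and waits for the firmware auto-load to finish.
 *
 * Return: 0 on success (or when DMUB is not supported), negative errno otherwise.
 */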
1016 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1017 {
1018 	const struct dmcub_firmware_header_v1_0 *hdr;
1019 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1020 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1021 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1022 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1023 	struct abm *abm = adev->dm.dc->res_pool->abm;
1024 	struct dmub_srv_hw_params hw_params;
1025 	enum dmub_status status;
1026 	const unsigned char *fw_inst_const, *fw_bss_data;
1027 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1028 	bool has_hw_support;
1029 	struct dc *dc = adev->dm.dc;
1030 
1031 	if (!dmub_srv)
1032 		/* DMUB isn't supported on the ASIC. */
1033 		return 0;
1034 
1035 	if (!fb_info) {
1036 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1037 		return -EINVAL;
1038 	}
1039 
1040 	if (!dmub_fw) {
1041 		/* Firmware required for DMUB support. */
1042 		DRM_ERROR("No firmware provided for DMUB.\n");
1043 		return -EINVAL;
1044 	}
1045 
1046 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1047 	if (status != DMUB_STATUS_OK) {
1048 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1049 		return -EINVAL;
1050 	}
1051 
1052 	if (!has_hw_support) {
1053 		DRM_INFO("DMUB unsupported on ASIC\n");
1054 		return 0;
1055 	}
1056 
1057 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1058 
1059 	fw_inst_const = dmub_fw->data +
1060 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1061 			PSP_HEADER_BYTES;
1062 
1063 	fw_bss_data = dmub_fw->data +
1064 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1065 		      le32_to_cpu(hdr->inst_const_bytes);
1066 
1067 	/* Copy firmware and bios info into FB memory. */
1068 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1069 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1070 
1071 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1072 
1073 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1074 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1075 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1076 	 * will be done by dm_dmub_hw_init
1077 	 */
1078 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1079 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1080 				fw_inst_const_size);
1081 	}
1082 
1083 	if (fw_bss_data_size)
1084 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1085 		       fw_bss_data, fw_bss_data_size);
1086 
1087 	/* Copy firmware bios info into FB memory. */
1088 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1089 	       adev->bios_size);
1090 
1091 	/* Reset regions that need to be reset. */
1092 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1094 
1095 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1096 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1097 
1098 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1099 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1100 
1101 	/* Initialize hardware. */
1102 	memset(&hw_params, 0, sizeof(hw_params));
1103 	hw_params.fb_base = adev->gmc.fb_start;
1104 	hw_params.fb_offset = adev->gmc.aper_base;
1105 
1106 	/* backdoor load firmware and trigger dmub running */
1107 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1108 		hw_params.load_inst_const = true;
1109 
1110 	if (dmcu)
1111 		hw_params.psp_version = dmcu->psp_version;
1112 
1113 	for (i = 0; i < fb_info->num_fb; ++i)
1114 		hw_params.fb[i] = &fb_info->fb[i];
1115 
1116 	switch (adev->asic_type) {
1117 	case CHIP_YELLOW_CARP:
1118 		if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1119 			hw_params.dpia_supported = true;
1120 #if defined(CONFIG_DRM_AMD_DC_DCN)
1121 			hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1122 #endif
1123 		}
1124 		break;
1125 	default:
1126 		break;
1127 	}
1128 
1129 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1130 	if (status != DMUB_STATUS_OK) {
1131 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1132 		return -EINVAL;
1133 	}
1134 
1135 	/* Wait for firmware load to finish. */
1136 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1137 	if (status != DMUB_STATUS_OK)
1138 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1139 
1140 	/* Init DMCU and ABM if available. */
1141 	if (dmcu && abm) {
1142 		dmcu->funcs->dmcu_init(dmcu);
1143 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1144 	}
1145 
1146 	if (!adev->dm.dc->ctx->dmub_srv)
1147 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1148 	if (!adev->dm.dc->ctx->dmub_srv) {
1149 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1150 		return -ENOMEM;
1151 	}
1152 
1153 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1154 		 adev->dm.dmcub_fw_version);
1155 
1156 	return 0;
1157 }
1158 
1159 #if defined(CONFIG_DRM_AMD_DC_DCN)
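/*
 * Build the DC physical address space configuration (system aperture, AGP
 * aperture and GART page table addresses) from the GMC state, for use with
 * dc_setup_system_context().
 */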
1160 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1161 {
1162 	uint64_t pt_base;
1163 	uint32_t logical_addr_low;
1164 	uint32_t logical_addr_high;
1165 	uint32_t agp_base, agp_bot, agp_top;
1166 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1167 
1168 	memset(pa_config, 0, sizeof(*pa_config));
1169 
1170 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1171 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1172 
1173 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1174 		/*
1175 		 * Raven2 has a HW issue that it is unable to use the vram which
1176 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increases the system aperture high address (by 1)
1178 		 * to get rid of the VM fault and hardware hang.
1179 		 */
1180 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1181 	else
1182 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1183 
1184 	agp_base = 0;
1185 	agp_bot = adev->gmc.agp_start >> 24;
1186 	agp_top = adev->gmc.agp_end >> 24;
1187 
1189 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1190 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1191 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1192 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1193 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1194 	page_table_base.low_part = lower_32_bits(pt_base);
1195 
1196 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1197 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1198 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1200 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1201 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1202 
1203 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1204 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1205 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1206 
1207 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1208 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1209 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1210 
1211 	pa_config->is_hvm_enabled = 0;
1212 
1213 }
1214 #endif
1215 #if defined(CONFIG_DRM_AMD_DC_DCN)
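/*
 * Deferred vblank work: tracks how many CRTCs have vblank interrupts
 * enabled, allows DC idle optimizations (MALL) only when that count is
 * zero, and enables or disables PSR based on the vblank requirements
 * from the OS.
 */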
1216 static void vblank_control_worker(struct work_struct *work)
1217 {
1218 	struct vblank_control_work *vblank_work =
1219 		container_of(work, struct vblank_control_work, work);
1220 	struct amdgpu_display_manager *dm = vblank_work->dm;
1221 
1222 	mutex_lock(&dm->dc_lock);
1223 
1224 	if (vblank_work->enable)
1225 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1227 		dm->active_vblank_irq_count--;
1228 
1229 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1230 
1231 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1232 
1233 	/* Control PSR based on vblank requirements from OS */
1234 	if (vblank_work->stream && vblank_work->stream->link) {
1235 		if (vblank_work->enable) {
1236 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1237 				amdgpu_dm_psr_disable(vblank_work->stream);
1238 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1239 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1240 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1241 			amdgpu_dm_psr_enable(vblank_work->stream);
1242 		}
1243 	}
1244 
1245 	mutex_unlock(&dm->dc_lock);
1246 
1247 	dc_stream_release(vblank_work->stream);
1248 
1249 	kfree(vblank_work);
1250 }
1251 
1252 #endif
1253 
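/*
 * Deferred HPD RX IRQ handler: re-detects the sink and, when not in GPU
 * reset, handles DP automated test requests or link-loss recovery for the
 * link that raised the short-pulse interrupt.
 */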
1254 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1255 {
1256 	struct hpd_rx_irq_offload_work *offload_work;
1257 	struct amdgpu_dm_connector *aconnector;
1258 	struct dc_link *dc_link;
1259 	struct amdgpu_device *adev;
1260 	enum dc_connection_type new_connection_type = dc_connection_none;
1261 	unsigned long flags;
1262 
1263 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1264 	aconnector = offload_work->offload_wq->aconnector;
1265 
1266 	if (!aconnector) {
1267 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1268 		goto skip;
1269 	}
1270 
1271 	adev = drm_to_adev(aconnector->base.dev);
1272 	dc_link = aconnector->dc_link;
1273 
1274 	mutex_lock(&aconnector->hpd_lock);
1275 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1276 		DRM_ERROR("KMS: Failed to detect connector\n");
1277 	mutex_unlock(&aconnector->hpd_lock);
1278 
1279 	if (new_connection_type == dc_connection_none)
1280 		goto skip;
1281 
1282 	if (amdgpu_in_reset(adev))
1283 		goto skip;
1284 
1285 	mutex_lock(&adev->dm.dc_lock);
1286 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1287 		dc_link_dp_handle_automated_test(dc_link);
1288 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1289 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1290 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1291 		dc_link_dp_handle_link_loss(dc_link);
1292 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1293 		offload_work->offload_wq->is_handling_link_loss = false;
1294 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1295 	}
1296 	mutex_unlock(&adev->dm.dc_lock);
1297 
1298 skip:
1299 	kfree(offload_work);
1300 
1301 }
1302 
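/*
 * Allocate one single-threaded HPD RX IRQ offload workqueue per link, so
 * that handling of HPD RX (short-pulse) interrupts can be deferred to
 * process context.
 */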
1303 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1304 {
1305 	int max_caps = dc->caps.max_links;
1306 	int i = 0;
1307 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1308 
1309 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1310 
1311 	if (!hpd_rx_offload_wq)
1312 		return NULL;
1313 
1314 
	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	/* Clean up the workqueues that were already created. */
	while (--i >= 0)
		destroy_workqueue(hpd_rx_offload_wq[i].wq);
	kfree(hpd_rx_offload_wq);

	return NULL;
}
1329 
1330 struct amdgpu_stutter_quirk {
1331 	u16 chip_vendor;
1332 	u16 chip_device;
1333 	u16 subsys_vendor;
1334 	u16 subsys_device;
1335 	u8 revision;
1336 };
1337 
1338 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1339 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1340 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1341 	{ 0, 0, 0, 0, 0 },
1342 };
1343 
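/*
 * Return true if the PCI device matches an entry in the stutter quirk
 * list, in which case stutter mode is disabled.
 */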
1344 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1345 {
1346 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1347 
1348 	while (p && p->chip_device != 0) {
1349 		if (pdev->vendor == p->chip_vendor &&
1350 		    pdev->device == p->chip_device &&
1351 		    pdev->subsystem_vendor == p->subsys_vendor &&
1352 		    pdev->subsystem_device == p->subsys_device &&
1353 		    pdev->revision == p->revision) {
1354 			return true;
1355 		}
1356 		++p;
1357 	}
1358 	return false;
1359 }
1360 
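/**
 * amdgpu_dm_init() - Initialize the display manager
 * @adev: amdgpu_device pointer
 *
 * Creates the DC instance, sets up DM IRQ handling, DMUB, freesync, HDCP
 * and DMUB notification support, and initializes the DRM display structures
 * (CRTCs, connectors, encoders) for the device.
 *
 * Return: 0 on success, -EINVAL on failure.
 */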
1361 static int amdgpu_dm_init(struct amdgpu_device *adev)
1362 {
1363 	struct dc_init_data init_data;
1364 #ifdef CONFIG_DRM_AMD_DC_HDCP
1365 	struct dc_callback_init init_params;
1366 #endif
1367 	int r;
1368 
1369 	adev->dm.ddev = adev_to_drm(adev);
1370 	adev->dm.adev = adev;
1371 
1372 	/* Zero all the fields */
1373 	memset(&init_data, 0, sizeof(init_data));
1374 #ifdef CONFIG_DRM_AMD_DC_HDCP
1375 	memset(&init_params, 0, sizeof(init_params));
1376 #endif
1377 
1378 	mutex_init(&adev->dm.dc_lock);
1379 	mutex_init(&adev->dm.audio_lock);
1380 #if defined(CONFIG_DRM_AMD_DC_DCN)
1381 	spin_lock_init(&adev->dm.vblank_lock);
1382 #endif
1383 
	if (amdgpu_dm_irq_init(adev)) {
1385 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1386 		goto error;
1387 	}
1388 
1389 	init_data.asic_id.chip_family = adev->family;
1390 
1391 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1392 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1393 	init_data.asic_id.chip_id = adev->pdev->device;
1394 
1395 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1396 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1397 	init_data.asic_id.atombios_base_address =
1398 		adev->mode_info.atom_context->bios;
1399 
1400 	init_data.driver = adev;
1401 
1402 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1403 
1404 	if (!adev->dm.cgs_device) {
1405 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1406 		goto error;
1407 	}
1408 
1409 	init_data.cgs_device = adev->dm.cgs_device;
1410 
1411 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1412 
1413 	switch (adev->asic_type) {
1414 	case CHIP_CARRIZO:
1415 	case CHIP_STONEY:
1416 		init_data.flags.gpu_vm_support = true;
1417 		break;
1418 	default:
1419 		switch (adev->ip_versions[DCE_HWIP][0]) {
1420 		case IP_VERSION(2, 1, 0):
1421 			init_data.flags.gpu_vm_support = true;
1422 			switch (adev->dm.dmcub_fw_version) {
1423 			case 0: /* development */
1424 			case 0x1: /* linux-firmware.git hash 6d9f399 */
1425 			case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1426 				init_data.flags.disable_dmcu = false;
1427 				break;
1428 			default:
1429 				init_data.flags.disable_dmcu = true;
1430 			}
1431 			break;
1432 		case IP_VERSION(1, 0, 0):
1433 		case IP_VERSION(1, 0, 1):
1434 		case IP_VERSION(3, 0, 1):
1435 		case IP_VERSION(3, 1, 2):
1436 		case IP_VERSION(3, 1, 3):
1437 			init_data.flags.gpu_vm_support = true;
1438 			break;
1439 		case IP_VERSION(2, 0, 3):
1440 			init_data.flags.disable_dmcu = true;
1441 			break;
1442 		default:
1443 			break;
1444 		}
1445 		break;
1446 	}
1447 
1448 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1449 		init_data.flags.fbc_support = true;
1450 
1451 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1452 		init_data.flags.multi_mon_pp_mclk_switch = true;
1453 
1454 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1455 		init_data.flags.disable_fractional_pwm = true;
1456 
1457 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1458 		init_data.flags.edp_no_power_sequencing = true;
1459 
1460 #ifdef CONFIG_DRM_AMD_DC_DCN
1461 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1462 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1463 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1464 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1465 #endif
1466 
1467 	init_data.flags.power_down_display_on_boot = true;
1468 
1469 	if (check_seamless_boot_capability(adev)) {
1470 		init_data.flags.power_down_display_on_boot = false;
1471 		init_data.flags.allow_seamless_boot_optimization = true;
1472 		DRM_INFO("Seamless boot condition check passed\n");
1473 	}
1474 
1475 	INIT_LIST_HEAD(&adev->dm.da_list);
1476 	/* Display Core create. */
1477 	adev->dm.dc = dc_create(&init_data);
1478 
1479 	if (adev->dm.dc) {
1480 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1481 	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1483 		goto error;
1484 	}
1485 
1486 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1487 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1488 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1489 	}
1490 
1491 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1493 	if (dm_should_disable_stutter(adev->pdev))
1494 		adev->dm.dc->debug.disable_stutter = true;
1495 
1496 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1497 		adev->dm.dc->debug.disable_stutter = true;
1498 
1499 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1500 		adev->dm.dc->debug.disable_dsc = true;
1501 		adev->dm.dc->debug.disable_dsc_edp = true;
1502 	}
1503 
1504 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1505 		adev->dm.dc->debug.disable_clock_gate = true;
1506 
1507 	r = dm_dmub_hw_init(adev);
1508 	if (r) {
1509 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1510 		goto error;
1511 	}
1512 
1513 	dc_hardware_init(adev->dm.dc);
1514 
1515 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1516 	if (!adev->dm.hpd_rx_offload_wq) {
1517 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1518 		goto error;
1519 	}
1520 
1521 #if defined(CONFIG_DRM_AMD_DC_DCN)
1522 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1523 		struct dc_phy_addr_space_config pa_config;
1524 
1525 		mmhub_read_system_context(adev, &pa_config);
1526 
1527 		// Call the DC init_memory func
1528 		dc_setup_system_context(adev->dm.dc, &pa_config);
1529 	}
1530 #endif
1531 
1532 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else {
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);
	}
1539 
1540 	amdgpu_dm_init_color_mod();
1541 
1542 #if defined(CONFIG_DRM_AMD_DC_DCN)
1543 	if (adev->dm.dc->caps.max_links > 0) {
1544 		adev->dm.vblank_control_workqueue =
1545 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1546 		if (!adev->dm.vblank_control_workqueue)
1547 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1548 	}
1549 #endif
1550 
1551 #ifdef CONFIG_DRM_AMD_DC_HDCP
1552 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1553 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1554 
1555 		if (!adev->dm.hdcp_workqueue)
1556 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1557 		else
1558 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1559 
1560 		dc_init_callbacks(adev->dm.dc, &init_params);
1561 	}
1562 #endif
1563 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1564 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1565 #endif
1566 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1567 		init_completion(&adev->dm.dmub_aux_transfer_done);
1568 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1569 		if (!adev->dm.dmub_notify) {
			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1571 			goto error;
1572 		}
1573 
1574 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1575 		if (!adev->dm.delayed_hpd_wq) {
1576 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1577 			goto error;
1578 		}
1579 
1580 		amdgpu_dm_outbox_init(adev);
1581 #if defined(CONFIG_DRM_AMD_DC_DCN)
1582 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1583 			dmub_aux_setconfig_callback, false)) {
			DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1585 			goto error;
1586 		}
1587 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1589 			goto error;
1590 		}
1591 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1593 			goto error;
1594 		}
1595 #endif /* CONFIG_DRM_AMD_DC_DCN */
1596 	}
1597 
1598 	if (amdgpu_dm_initialize_drm_device(adev)) {
1599 		DRM_ERROR(
1600 		"amdgpu: failed to initialize sw for display support.\n");
1601 		goto error;
1602 	}
1603 
1604 	/* create fake encoders for MST */
1605 	dm_dp_create_fake_mst_encoders(adev);
1606 
1607 	/* TODO: Add_display_info? */
1608 
1609 	/* TODO use dynamic cursor width */
1610 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1611 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1612 
1613 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1616 		goto error;
1617 	}
1618 
1619 
1620 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1621 
1622 	return 0;
1623 error:
1624 	amdgpu_dm_fini(adev);
1625 
1626 	return -EINVAL;
1627 }
1628 
1629 static int amdgpu_dm_early_fini(void *handle)
1630 {
1631 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1632 
1633 	amdgpu_dm_audio_fini(adev);
1634 
1635 	return 0;
1636 }
1637 
1638 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1639 {
1640 	int i;
1641 
1642 #if defined(CONFIG_DRM_AMD_DC_DCN)
1643 	if (adev->dm.vblank_control_workqueue) {
1644 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1645 		adev->dm.vblank_control_workqueue = NULL;
1646 	}
1647 #endif
1648 
	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1652 
1653 	amdgpu_dm_destroy_drm_device(&adev->dm);
1654 
1655 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1656 	if (adev->dm.crc_rd_wrk) {
1657 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1658 		kfree(adev->dm.crc_rd_wrk);
1659 		adev->dm.crc_rd_wrk = NULL;
1660 	}
1661 #endif
1662 #ifdef CONFIG_DRM_AMD_DC_HDCP
1663 	if (adev->dm.hdcp_workqueue) {
1664 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1665 		adev->dm.hdcp_workqueue = NULL;
1666 	}
1667 
1668 	if (adev->dm.dc)
1669 		dc_deinit_callbacks(adev->dm.dc);
1670 #endif
1671 
1672 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1673 
1674 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1675 		kfree(adev->dm.dmub_notify);
1676 		adev->dm.dmub_notify = NULL;
1677 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1678 		adev->dm.delayed_hpd_wq = NULL;
1679 	}
1680 
1681 	if (adev->dm.dmub_bo)
1682 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1683 				      &adev->dm.dmub_bo_gpu_addr,
1684 				      &adev->dm.dmub_bo_cpu_addr);
1685 
1686 	if (adev->dm.hpd_rx_offload_wq) {
1687 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1688 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1689 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1690 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1691 			}
1692 		}
1693 
1694 		kfree(adev->dm.hpd_rx_offload_wq);
1695 		adev->dm.hpd_rx_offload_wq = NULL;
1696 	}
1697 
1698 	/* DC Destroy TODO: Replace destroy DAL */
1699 	if (adev->dm.dc)
1700 		dc_destroy(&adev->dm.dc);
1701 	/*
1702 	 * TODO: pageflip, vlank interrupt
1703 	 *
1704 	 * amdgpu_dm_irq_fini(adev);
1705 	 */
1706 
1707 	if (adev->dm.cgs_device) {
1708 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1709 		adev->dm.cgs_device = NULL;
1710 	}
1711 	if (adev->dm.freesync_module) {
1712 		mod_freesync_destroy(adev->dm.freesync_module);
1713 		adev->dm.freesync_module = NULL;
1714 	}
1715 
1716 	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
1720 }
1721 
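/*
 * Request and validate the optional DMCU firmware and register it for PSP
 * loading on ASICs that still use a DMCU.
 */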
1722 static int load_dmcu_fw(struct amdgpu_device *adev)
1723 {
1724 	const char *fw_name_dmcu = NULL;
1725 	int r;
1726 	const struct dmcu_firmware_header_v1_0 *hdr;
1727 
	switch (adev->asic_type) {
1729 #if defined(CONFIG_DRM_AMD_DC_SI)
1730 	case CHIP_TAHITI:
1731 	case CHIP_PITCAIRN:
1732 	case CHIP_VERDE:
1733 	case CHIP_OLAND:
1734 #endif
1735 	case CHIP_BONAIRE:
1736 	case CHIP_HAWAII:
1737 	case CHIP_KAVERI:
1738 	case CHIP_KABINI:
1739 	case CHIP_MULLINS:
1740 	case CHIP_TONGA:
1741 	case CHIP_FIJI:
1742 	case CHIP_CARRIZO:
1743 	case CHIP_STONEY:
1744 	case CHIP_POLARIS11:
1745 	case CHIP_POLARIS10:
1746 	case CHIP_POLARIS12:
1747 	case CHIP_VEGAM:
1748 	case CHIP_VEGA10:
1749 	case CHIP_VEGA12:
1750 	case CHIP_VEGA20:
1751 		return 0;
1752 	case CHIP_NAVI12:
1753 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1754 		break;
1755 	case CHIP_RAVEN:
1756 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1757 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1758 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1759 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1760 		else
1761 			return 0;
1762 		break;
1763 	default:
1764 		switch (adev->ip_versions[DCE_HWIP][0]) {
1765 		case IP_VERSION(2, 0, 2):
1766 		case IP_VERSION(2, 0, 3):
1767 		case IP_VERSION(2, 0, 0):
1768 		case IP_VERSION(2, 1, 0):
1769 		case IP_VERSION(3, 0, 0):
1770 		case IP_VERSION(3, 0, 2):
1771 		case IP_VERSION(3, 0, 3):
1772 		case IP_VERSION(3, 0, 1):
1773 		case IP_VERSION(3, 1, 2):
1774 		case IP_VERSION(3, 1, 3):
1775 			return 0;
1776 		default:
1777 			break;
1778 		}
1779 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1780 		return -EINVAL;
1781 	}
1782 
1783 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1784 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1785 		return 0;
1786 	}
1787 
1788 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1789 	if (r == -ENOENT) {
1790 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1791 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1792 		adev->dm.fw_dmcu = NULL;
1793 		return 0;
1794 	}
1795 	if (r) {
1796 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1797 			fw_name_dmcu);
1798 		return r;
1799 	}
1800 
1801 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1802 	if (r) {
1803 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1804 			fw_name_dmcu);
1805 		release_firmware(adev->dm.fw_dmcu);
1806 		adev->dm.fw_dmcu = NULL;
1807 		return r;
1808 	}
1809 
1810 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1811 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1812 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1813 	adev->firmware.fw_size +=
1814 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1815 
1816 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1817 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1818 	adev->firmware.fw_size +=
1819 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1820 
1821 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1822 
1823 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1824 
1825 	return 0;
1826 }
1827 
1828 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1829 {
1830 	struct amdgpu_device *adev = ctx;
1831 
1832 	return dm_read_reg(adev->dm.dc->ctx, address);
1833 }
1834 
1835 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1836 				     uint32_t value)
1837 {
1838 	struct amdgpu_device *adev = ctx;
1839 
1840 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1841 }
1842 
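/*
 * Software-side DMUB setup: pick the DMUB firmware for this DCN IP version,
 * create the DMUB service, compute its region layout and back it with a
 * kernel BO in VRAM.
 */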
1843 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1844 {
1845 	struct dmub_srv_create_params create_params;
1846 	struct dmub_srv_region_params region_params;
1847 	struct dmub_srv_region_info region_info;
1848 	struct dmub_srv_fb_params fb_params;
1849 	struct dmub_srv_fb_info *fb_info;
1850 	struct dmub_srv *dmub_srv;
1851 	const struct dmcub_firmware_header_v1_0 *hdr;
1852 	const char *fw_name_dmub;
1853 	enum dmub_asic dmub_asic;
1854 	enum dmub_status status;
1855 	int r;
1856 
1857 	switch (adev->ip_versions[DCE_HWIP][0]) {
1858 	case IP_VERSION(2, 1, 0):
1859 		dmub_asic = DMUB_ASIC_DCN21;
1860 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1861 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1862 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1863 		break;
1864 	case IP_VERSION(3, 0, 0):
1865 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1866 			dmub_asic = DMUB_ASIC_DCN30;
1867 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1868 		} else {
1869 			dmub_asic = DMUB_ASIC_DCN30;
1870 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1871 		}
1872 		break;
1873 	case IP_VERSION(3, 0, 1):
1874 		dmub_asic = DMUB_ASIC_DCN301;
1875 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1876 		break;
1877 	case IP_VERSION(3, 0, 2):
1878 		dmub_asic = DMUB_ASIC_DCN302;
1879 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1880 		break;
1881 	case IP_VERSION(3, 0, 3):
1882 		dmub_asic = DMUB_ASIC_DCN303;
1883 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1884 		break;
1885 	case IP_VERSION(3, 1, 2):
1886 	case IP_VERSION(3, 1, 3):
1887 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1888 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1889 		break;
1890 
1891 	default:
1892 		/* ASIC doesn't support DMUB. */
1893 		return 0;
1894 	}
1895 
1896 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1897 	if (r) {
1898 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1899 		return 0;
1900 	}
1901 
1902 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1903 	if (r) {
1904 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1905 		return 0;
1906 	}
1907 
1908 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1909 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1910 
1911 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1912 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1913 			AMDGPU_UCODE_ID_DMCUB;
1914 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1915 			adev->dm.dmub_fw;
1916 		adev->firmware.fw_size +=
1917 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1918 
1919 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1920 			 adev->dm.dmcub_fw_version);
1921 	}
1922 
1923 
1924 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1925 	dmub_srv = adev->dm.dmub_srv;
1926 
1927 	if (!dmub_srv) {
1928 		DRM_ERROR("Failed to allocate DMUB service!\n");
1929 		return -ENOMEM;
1930 	}
1931 
1932 	memset(&create_params, 0, sizeof(create_params));
1933 	create_params.user_ctx = adev;
1934 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1935 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1936 	create_params.asic = dmub_asic;
1937 
1938 	/* Create the DMUB service. */
1939 	status = dmub_srv_create(dmub_srv, &create_params);
1940 	if (status != DMUB_STATUS_OK) {
1941 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1942 		return -EINVAL;
1943 	}
1944 
1945 	/* Calculate the size of all the regions for the DMUB service. */
1946 	memset(&region_params, 0, sizeof(region_params));
1947 
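	/*
	 * The instruction/constant region excludes the PSP header and footer
	 * wrapped around the firmware image; the BSS/data region, if present,
	 * starts right after the instruction/constant section.
	 */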
1948 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1949 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1950 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1951 	region_params.vbios_size = adev->bios_size;
1952 	region_params.fw_bss_data = region_params.bss_data_size ?
1953 		adev->dm.dmub_fw->data +
1954 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1955 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1956 	region_params.fw_inst_const =
1957 		adev->dm.dmub_fw->data +
1958 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1959 		PSP_HEADER_BYTES;
1960 
1961 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1962 					   &region_info);
1963 
1964 	if (status != DMUB_STATUS_OK) {
1965 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1966 		return -EINVAL;
1967 	}
1968 
1969 	/*
1970 	 * Allocate a framebuffer based on the total size of all the regions.
1971 	 * TODO: Move this into GART.
1972 	 */
1973 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1974 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1975 				    &adev->dm.dmub_bo_gpu_addr,
1976 				    &adev->dm.dmub_bo_cpu_addr);
1977 	if (r)
1978 		return r;
1979 
1980 	/* Rebase the regions on the framebuffer address. */
1981 	memset(&fb_params, 0, sizeof(fb_params));
1982 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1983 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1984 	fb_params.region_info = &region_info;
1985 
1986 	adev->dm.dmub_fb_info =
1987 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1988 	fb_info = adev->dm.dmub_fb_info;
1989 
1990 	if (!fb_info) {
1991 		DRM_ERROR(
1992 			"Failed to allocate framebuffer info for DMUB service!\n");
1993 		return -ENOMEM;
1994 	}
1995 
1996 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1997 	if (status != DMUB_STATUS_OK) {
1998 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1999 		return -EINVAL;
2000 	}
2001 
2002 	return 0;
2003 }
2004 
2005 static int dm_sw_init(void *handle)
2006 {
2007 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2008 	int r;
2009 
2010 	r = dm_dmub_sw_init(adev);
2011 	if (r)
2012 		return r;
2013 
2014 	return load_dmcu_fw(adev);
2015 }
2016 
2017 static int dm_sw_fini(void *handle)
2018 {
2019 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2020 
2021 	kfree(adev->dm.dmub_fb_info);
2022 	adev->dm.dmub_fb_info = NULL;
2023 
2024 	if (adev->dm.dmub_srv) {
2025 		dmub_srv_destroy(adev->dm.dmub_srv);
2026 		adev->dm.dmub_srv = NULL;
2027 	}
2028 
2029 	release_firmware(adev->dm.dmub_fw);
2030 	adev->dm.dmub_fw = NULL;
2031 
2032 	release_firmware(adev->dm.fw_dmcu);
2033 	adev->dm.fw_dmcu = NULL;
2034 
2035 	return 0;
2036 }
2037 
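/* Start MST topology management on every connector backed by an MST branch link. */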
2038 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2039 {
2040 	struct amdgpu_dm_connector *aconnector;
2041 	struct drm_connector *connector;
2042 	struct drm_connector_list_iter iter;
2043 	int ret = 0;
2044 
2045 	drm_connector_list_iter_begin(dev, &iter);
2046 	drm_for_each_connector_iter(connector, &iter) {
2047 		aconnector = to_amdgpu_dm_connector(connector);
2048 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2049 		    aconnector->mst_mgr.aux) {
2050 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2051 					 aconnector,
2052 					 aconnector->base.base.id);
2053 
2054 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2055 			if (ret < 0) {
2056 				DRM_ERROR("DM_MST: Failed to start MST\n");
2057 				aconnector->dc_link->type =
2058 					dc_connection_single;
2059 				break;
2060 			}
2061 		}
2062 	}
2063 	drm_connector_list_iter_end(&iter);
2064 
2065 	return ret;
2066 }
2067 
2068 static int dm_late_init(void *handle)
2069 {
2070 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2071 
2072 	struct dmcu_iram_parameters params;
2073 	unsigned int linear_lut[16];
2074 	int i;
2075 	struct dmcu *dmcu = NULL;
2076 
2077 	dmcu = adev->dm.dc->res_pool->dmcu;
2078 
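	/* Build a 16-entry linear backlight LUT spanning 0 to 0xFFFF. */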
2079 	for (i = 0; i < 16; i++)
2080 		linear_lut[i] = 0xFFFF * i / 15;
2081 
2082 	params.set = 0;
2083 	params.backlight_ramping_override = false;
2084 	params.backlight_ramping_start = 0xCCCC;
2085 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2086 	params.backlight_lut_array_size = 16;
2087 	params.backlight_lut_array = linear_lut;
2088 
	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
2092 	params.min_abm_backlight = 0x28F;
	/* In the case where ABM is implemented on DMCUB,
	 * the DMCU object will be NULL.
	 * ABM 2.4 and up are implemented on DMCUB.
	 */
2097 	if (dmcu) {
2098 		if (!dmcu_load_iram(dmcu, params))
2099 			return -EINVAL;
2100 	} else if (adev->dm.dc->ctx->dmub_srv) {
2101 		struct dc_link *edp_links[MAX_NUM_EDP];
2102 		int edp_num;
2103 
2104 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2105 		for (i = 0; i < edp_num; i++) {
2106 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2107 				return -EINVAL;
2108 		}
2109 	}
2110 
2111 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2112 }
2113 
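/*
 * Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, MST is torn down on that link and a hotplug
 * event is sent so userspace can re-probe.
 */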
2114 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2115 {
2116 	struct amdgpu_dm_connector *aconnector;
2117 	struct drm_connector *connector;
2118 	struct drm_connector_list_iter iter;
2119 	struct drm_dp_mst_topology_mgr *mgr;
2120 	int ret;
2121 	bool need_hotplug = false;
2122 
2123 	drm_connector_list_iter_begin(dev, &iter);
2124 	drm_for_each_connector_iter(connector, &iter) {
2125 		aconnector = to_amdgpu_dm_connector(connector);
2126 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2127 		    aconnector->mst_port)
2128 			continue;
2129 
2130 		mgr = &aconnector->mst_mgr;
2131 
2132 		if (suspend) {
2133 			drm_dp_mst_topology_mgr_suspend(mgr);
2134 		} else {
2135 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2136 			if (ret < 0) {
2137 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2138 				need_hotplug = true;
2139 			}
2140 		}
2141 	}
2142 	drm_connector_list_iter_end(&iter);
2143 
2144 	if (need_hotplug)
2145 		drm_kms_helper_hotplug_event(dev);
2146 }
2147 
2148 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2149 {
2150 	struct smu_context *smu = &adev->smu;
2151 	int ret = 0;
2152 
2153 	if (!is_support_sw_smu(adev))
2154 		return 0;
2155 
	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
	 * should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within dc_create,
	 * dcn20_resource_construct,
	 * then calls pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
2186 	switch (adev->ip_versions[DCE_HWIP][0]) {
2187 	case IP_VERSION(2, 0, 2):
2188 	case IP_VERSION(2, 0, 0):
2189 		break;
2190 	default:
2191 		return 0;
2192 	}
2193 
2194 	ret = smu_write_watermarks_table(smu);
2195 	if (ret) {
2196 		DRM_ERROR("Failed to update WMTABLE!\n");
2197 		return ret;
2198 	}
2199 
2200 	return 0;
2201 }
2202 
2203 /**
2204  * dm_hw_init() - Initialize DC device
2205  * @handle: The base driver device containing the amdgpu_dm device.
2206  *
2207  * Initialize the &struct amdgpu_display_manager device. This involves calling
2208  * the initializers of each DM component, then populating the struct with them.
2209  *
2210  * Although the function implies hardware initialization, both hardware and
2211  * software are initialized here. Splitting them out to their relevant init
2212  * hooks is a future TODO item.
2213  *
2214  * Some notable things that are initialized here:
2215  *
2216  * - Display Core, both software and hardware
2217  * - DC modules that we need (freesync and color management)
2218  * - DRM software states
2219  * - Interrupt sources and handlers
2220  * - Vblank support
2221  * - Debug FS entries, if enabled
2222  */
2223 static int dm_hw_init(void *handle)
2224 {
2225 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2226 	/* Create DAL display manager */
2227 	amdgpu_dm_init(adev);
2228 	amdgpu_dm_hpd_init(adev);
2229 
2230 	return 0;
2231 }
2232 
2233 /**
2234  * dm_hw_fini() - Teardown DC device
2235  * @handle: The base driver device containing the amdgpu_dm device.
2236  *
2237  * Teardown components within &struct amdgpu_display_manager that require
2238  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2239  * were loaded. Also flush IRQ workqueues and disable them.
2240  */
2241 static int dm_hw_fini(void *handle)
2242 {
2243 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2244 
2245 	amdgpu_dm_hpd_fini(adev);
2246 
2247 	amdgpu_dm_irq_fini(adev);
2248 	amdgpu_dm_fini(adev);
2249 	return 0;
2250 }
2251 
2252 
2253 static int dm_enable_vblank(struct drm_crtc *crtc);
2254 static void dm_disable_vblank(struct drm_crtc *crtc);
2255 
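/*
 * Enable or disable pageflip and vblank interrupts for every stream in the
 * given DC state that still has active planes. Used around GPU reset.
 */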
2256 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2257 				 struct dc_state *state, bool enable)
2258 {
2259 	enum dc_irq_source irq_source;
2260 	struct amdgpu_crtc *acrtc;
2261 	int rc = -EBUSY;
2262 	int i = 0;
2263 
2264 	for (i = 0; i < state->stream_count; i++) {
2265 		acrtc = get_crtc_by_otg_inst(
2266 				adev, state->stream_status[i].primary_otg_inst);
2267 
2268 		if (acrtc && state->stream_status[i].plane_count != 0) {
2269 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2270 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2273 			if (rc)
2274 				DRM_WARN("Failed to %s pflip interrupts\n",
2275 					 enable ? "enable" : "disable");
2276 
2277 			if (enable) {
2278 				rc = dm_enable_vblank(&acrtc->base);
2279 				if (rc)
2280 					DRM_WARN("Failed to enable vblank interrupts\n");
2281 			} else {
2282 				dm_disable_vblank(&acrtc->base);
2283 			}
2284 
2285 		}
2286 	}
2287 
2288 }
2289 
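/*
 * Commit a copy of the current DC state with every stream (and its planes)
 * removed, effectively blanking all pipes.
 */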
2290 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2291 {
2292 	struct dc_state *context = NULL;
2293 	enum dc_status res = DC_ERROR_UNEXPECTED;
2294 	int i;
2295 	struct dc_stream_state *del_streams[MAX_PIPES];
2296 	int del_streams_count = 0;
2297 
2298 	memset(del_streams, 0, sizeof(del_streams));
2299 
2300 	context = dc_create_state(dc);
2301 	if (context == NULL)
2302 		goto context_alloc_fail;
2303 
2304 	dc_resource_state_copy_construct_current(dc, context);
2305 
2306 	/* First remove from context all streams */
2307 	for (i = 0; i < context->stream_count; i++) {
2308 		struct dc_stream_state *stream = context->streams[i];
2309 
2310 		del_streams[del_streams_count++] = stream;
2311 	}
2312 
2313 	/* Remove all planes for removed streams and then remove the streams */
2314 	for (i = 0; i < del_streams_count; i++) {
2315 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2316 			res = DC_FAIL_DETACH_SURFACES;
2317 			goto fail;
2318 		}
2319 
2320 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2321 		if (res != DC_OK)
2322 			goto fail;
2323 	}
2324 
2325 	res = dc_commit_state(dc, context);
2326 
2327 fail:
2328 	dc_release_state(context);
2329 
2330 context_alloc_fail:
2331 	return res;
2332 }
2333 
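/* Flush any pending HPD RX offload work on every link before suspending. */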
2334 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2335 {
2336 	int i;
2337 
2338 	if (dm->hpd_rx_offload_wq) {
2339 		for (i = 0; i < dm->dc->caps.max_links; i++)
2340 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2341 	}
2342 }
2343 
2344 static int dm_suspend(void *handle)
2345 {
2346 	struct amdgpu_device *adev = handle;
2347 	struct amdgpu_display_manager *dm = &adev->dm;
2348 	int ret = 0;
2349 
2350 	if (amdgpu_in_reset(adev)) {
2351 		mutex_lock(&dm->dc_lock);
2352 
2353 #if defined(CONFIG_DRM_AMD_DC_DCN)
2354 		dc_allow_idle_optimizations(adev->dm.dc, false);
2355 #endif
2356 
2357 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2358 
2359 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2360 
2361 		amdgpu_dm_commit_zero_streams(dm->dc);
2362 
2363 		amdgpu_dm_irq_suspend(adev);
2364 
2365 		hpd_rx_irq_work_suspend(dm);
2366 
2367 		return ret;
2368 	}
2369 
2370 	WARN_ON(adev->dm.cached_state);
2371 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2372 
2373 	s3_handle_mst(adev_to_drm(adev), true);
2374 
2375 	amdgpu_dm_irq_suspend(adev);
2376 
2377 	hpd_rx_irq_work_suspend(dm);
2378 
2379 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2380 
2381 	return 0;
2382 }
2383 
2384 static struct amdgpu_dm_connector *
2385 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2386 					     struct drm_crtc *crtc)
2387 {
2388 	uint32_t i;
2389 	struct drm_connector_state *new_con_state;
2390 	struct drm_connector *connector;
2391 	struct drm_crtc *crtc_from_state;
2392 
2393 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2394 		crtc_from_state = new_con_state->crtc;
2395 
2396 		if (crtc_from_state == crtc)
2397 			return to_amdgpu_dm_connector(connector);
2398 	}
2399 
2400 	return NULL;
2401 }
2402 
2403 static void emulated_link_detect(struct dc_link *link)
2404 {
2405 	struct dc_sink_init_data sink_init_data = { 0 };
2406 	struct display_sink_capability sink_caps = { 0 };
2407 	enum dc_edid_status edid_status;
2408 	struct dc_context *dc_ctx = link->ctx;
2409 	struct dc_sink *sink = NULL;
2410 	struct dc_sink *prev_sink = NULL;
2411 
2412 	link->type = dc_connection_none;
2413 	prev_sink = link->local_sink;
2414 
2415 	if (prev_sink)
2416 		dc_sink_release(prev_sink);
2417 
2418 	switch (link->connector_signal) {
2419 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2420 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2421 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2422 		break;
2423 	}
2424 
2425 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2426 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2427 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2428 		break;
2429 	}
2430 
2431 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2432 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2433 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2434 		break;
2435 	}
2436 
2437 	case SIGNAL_TYPE_LVDS: {
2438 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2439 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2440 		break;
2441 	}
2442 
2443 	case SIGNAL_TYPE_EDP: {
2444 		sink_caps.transaction_type =
2445 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2446 		sink_caps.signal = SIGNAL_TYPE_EDP;
2447 		break;
2448 	}
2449 
2450 	case SIGNAL_TYPE_DISPLAY_PORT: {
2451 		sink_caps.transaction_type =
2452 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2453 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2454 		break;
2455 	}
2456 
2457 	default:
2458 		DC_ERROR("Invalid connector type! signal:%d\n",
2459 			link->connector_signal);
2460 		return;
2461 	}
2462 
2463 	sink_init_data.link = link;
2464 	sink_init_data.sink_signal = sink_caps.signal;
2465 
2466 	sink = dc_sink_create(&sink_init_data);
2467 	if (!sink) {
2468 		DC_ERROR("Failed to create sink!\n");
2469 		return;
2470 	}
2471 
2472 	/* dc_sink_create returns a new reference */
2473 	link->local_sink = sink;
2474 
2475 	edid_status = dm_helpers_read_local_edid(
2476 			link->ctx,
2477 			link,
2478 			sink);
2479 
2480 	if (edid_status != EDID_OK)
		DC_ERROR("Failed to read EDID\n");
2483 }
2484 
2485 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2486 				     struct amdgpu_display_manager *dm)
2487 {
2488 	struct {
2489 		struct dc_surface_update surface_updates[MAX_SURFACES];
2490 		struct dc_plane_info plane_infos[MAX_SURFACES];
2491 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2492 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2493 		struct dc_stream_update stream_update;
	} *bundle;
2495 	int k, m;
2496 
2497 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2498 
2499 	if (!bundle) {
2500 		dm_error("Failed to allocate update bundle\n");
2501 		goto cleanup;
2502 	}
2503 
2504 	for (k = 0; k < dc_state->stream_count; k++) {
2505 		bundle->stream_update.stream = dc_state->streams[k];
2506 
		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
			bundle->surface_updates[m].surface =
				dc_state->stream_status[k].plane_states[m];
			bundle->surface_updates[m].surface->force_full_update =
				true;
		}
		dc_commit_updates_for_stream(
			dm->dc, bundle->surface_updates,
			dc_state->stream_status[k].plane_count,
			dc_state->streams[k], &bundle->stream_update, dc_state);
2517 	}
2518 
2519 cleanup:
	kfree(bundle);
2523 }
2524 
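/*
 * Force dpms_off on the stream currently driven by the given link by issuing
 * a direct DC stream update.
 */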
2525 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2526 {
2527 	struct dc_stream_state *stream_state;
2528 	struct amdgpu_dm_connector *aconnector = link->priv;
2529 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2530 	struct dc_stream_update stream_update;
2531 	bool dpms_off = true;
2532 
2533 	memset(&stream_update, 0, sizeof(stream_update));
2534 	stream_update.dpms_off = &dpms_off;
2535 
2536 	mutex_lock(&adev->dm.dc_lock);
2537 	stream_state = dc_stream_find_from_link(link);
2538 
2539 	if (stream_state == NULL) {
2540 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2541 		mutex_unlock(&adev->dm.dc_lock);
2542 		return;
2543 	}
2544 
2545 	stream_update.stream = stream_state;
2546 	acrtc_state->force_dpms_off = true;
2547 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2548 				     stream_state, &stream_update,
2549 				     stream_state->ctx->dc->current_state);
2550 	mutex_unlock(&adev->dm.dc_lock);
2551 }
2552 
2553 static int dm_resume(void *handle)
2554 {
2555 	struct amdgpu_device *adev = handle;
2556 	struct drm_device *ddev = adev_to_drm(adev);
2557 	struct amdgpu_display_manager *dm = &adev->dm;
2558 	struct amdgpu_dm_connector *aconnector;
2559 	struct drm_connector *connector;
2560 	struct drm_connector_list_iter iter;
2561 	struct drm_crtc *crtc;
2562 	struct drm_crtc_state *new_crtc_state;
2563 	struct dm_crtc_state *dm_new_crtc_state;
2564 	struct drm_plane *plane;
2565 	struct drm_plane_state *new_plane_state;
2566 	struct dm_plane_state *dm_new_plane_state;
2567 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2568 	enum dc_connection_type new_connection_type = dc_connection_none;
2569 	struct dc_state *dc_state;
2570 	int i, r, j;
2571 
2572 	if (amdgpu_in_reset(adev)) {
2573 		dc_state = dm->cached_dc_state;
2574 
2575 		/*
2576 		 * The dc->current_state is backed up into dm->cached_dc_state
2577 		 * before we commit 0 streams.
2578 		 *
2579 		 * DC will clear link encoder assignments on the real state
2580 		 * but the changes won't propagate over to the copy we made
2581 		 * before the 0 streams commit.
2582 		 *
2583 		 * DC expects that link encoder assignments are *not* valid
2584 		 * when committing a state, so as a workaround it needs to be
2585 		 * cleared here.
2586 		 */
2587 		link_enc_cfg_init(dm->dc, dc_state);
2588 
2589 		if (dc_enable_dmub_notifications(adev->dm.dc))
2590 			amdgpu_dm_outbox_init(adev);
2591 
2592 		r = dm_dmub_hw_init(adev);
2593 		if (r)
2594 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2595 
2596 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2597 		dc_resume(dm->dc);
2598 
2599 		amdgpu_dm_irq_resume_early(adev);
2600 
2601 		for (i = 0; i < dc_state->stream_count; i++) {
2602 			dc_state->streams[i]->mode_changed = true;
2603 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2604 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2605 					= 0xffffffff;
2606 			}
2607 		}
2608 
2609 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2610 
2611 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2612 
2613 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2614 
2615 		dc_release_state(dm->cached_dc_state);
2616 		dm->cached_dc_state = NULL;
2617 
2618 		amdgpu_dm_irq_resume_late(adev);
2619 
2620 		mutex_unlock(&dm->dc_lock);
2621 
2622 		return 0;
2623 	}
2624 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2625 	dc_release_state(dm_state->context);
2626 	dm_state->context = dc_create_state(dm->dc);
2627 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2628 	dc_resource_state_construct(dm->dc, dm_state->context);
2629 
2630 	/* Re-enable outbox interrupts for DPIA. */
2631 	if (dc_enable_dmub_notifications(adev->dm.dc))
2632 		amdgpu_dm_outbox_init(adev);
2633 
2634 	/* Before powering on DC we need to re-initialize DMUB. */
2635 	r = dm_dmub_hw_init(adev);
2636 	if (r)
2637 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2638 
2639 	/* power on hardware */
2640 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2641 
2642 	/* program HPD filter */
2643 	dc_resume(dm->dc);
2644 
2645 	/*
2646 	 * early enable HPD Rx IRQ, should be done before set mode as short
2647 	 * pulse interrupts are used for MST
2648 	 */
2649 	amdgpu_dm_irq_resume_early(adev);
2650 
	/* On resume we need to rewrite the MSTM control bits to enable MST */
2652 	s3_handle_mst(ddev, false);
2653 
	/* Do detection */
2655 	drm_connector_list_iter_begin(ddev, &iter);
2656 	drm_for_each_connector_iter(connector, &iter) {
2657 		aconnector = to_amdgpu_dm_connector(connector);
2658 
2659 		/*
2660 		 * this is the case when traversing through already created
2661 		 * MST connectors, should be skipped
2662 		 */
2663 		if (aconnector->mst_port)
2664 			continue;
2665 
2666 		mutex_lock(&aconnector->hpd_lock);
2667 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2668 			DRM_ERROR("KMS: Failed to detect connector\n");
2669 
2670 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2671 			emulated_link_detect(aconnector->dc_link);
2672 		else
2673 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2674 
2675 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2676 			aconnector->fake_enable = false;
2677 
2678 		if (aconnector->dc_sink)
2679 			dc_sink_release(aconnector->dc_sink);
2680 		aconnector->dc_sink = NULL;
2681 		amdgpu_dm_update_connector_after_detect(aconnector);
2682 		mutex_unlock(&aconnector->hpd_lock);
2683 	}
2684 	drm_connector_list_iter_end(&iter);
2685 
2686 	/* Force mode set in atomic commit */
2687 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2688 		new_crtc_state->active_changed = true;
2689 
2690 	/*
2691 	 * atomic_check is expected to create the dc states. We need to release
2692 	 * them here, since they were duplicated as part of the suspend
2693 	 * procedure.
2694 	 */
2695 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2696 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2697 		if (dm_new_crtc_state->stream) {
2698 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2699 			dc_stream_release(dm_new_crtc_state->stream);
2700 			dm_new_crtc_state->stream = NULL;
2701 		}
2702 	}
2703 
2704 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2705 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2706 		if (dm_new_plane_state->dc_state) {
2707 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2708 			dc_plane_state_release(dm_new_plane_state->dc_state);
2709 			dm_new_plane_state->dc_state = NULL;
2710 		}
2711 	}
2712 
2713 	drm_atomic_helper_resume(ddev, dm->cached_state);
2714 
2715 	dm->cached_state = NULL;
2716 
2717 	amdgpu_dm_irq_resume_late(adev);
2718 
2719 	amdgpu_dm_smu_write_watermarks_table(adev);
2720 
2721 	return 0;
2722 }
2723 
2724 /**
2725  * DOC: DM Lifecycle
2726  *
2727  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2728  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2729  * the base driver's device list to be initialized and torn down accordingly.
2730  *
2731  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2732  */
2733 
2734 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2735 	.name = "dm",
2736 	.early_init = dm_early_init,
2737 	.late_init = dm_late_init,
2738 	.sw_init = dm_sw_init,
2739 	.sw_fini = dm_sw_fini,
2740 	.early_fini = amdgpu_dm_early_fini,
2741 	.hw_init = dm_hw_init,
2742 	.hw_fini = dm_hw_fini,
2743 	.suspend = dm_suspend,
2744 	.resume = dm_resume,
2745 	.is_idle = dm_is_idle,
2746 	.wait_for_idle = dm_wait_for_idle,
2747 	.check_soft_reset = dm_check_soft_reset,
2748 	.soft_reset = dm_soft_reset,
2749 	.set_clockgating_state = dm_set_clockgating_state,
2750 	.set_powergating_state = dm_set_powergating_state,
2751 };
2752 
2753 const struct amdgpu_ip_block_version dm_ip_block =
2754 {
2755 	.type = AMD_IP_BLOCK_TYPE_DCE,
2756 	.major = 1,
2757 	.minor = 0,
2758 	.rev = 0,
2759 	.funcs = &amdgpu_dm_funcs,
2760 };
2761 
2762 
2763 /**
2764  * DOC: atomic
2765  *
2766  * *WIP*
2767  */
2768 
2769 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2770 	.fb_create = amdgpu_display_user_framebuffer_create,
2771 	.get_format_info = amd_get_format_info,
2772 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2773 	.atomic_check = amdgpu_dm_atomic_check,
2774 	.atomic_commit = drm_atomic_helper_commit,
2775 };
2776 
2777 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2778 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2779 };
2780 
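/*
 * Derive eDP backlight capabilities (AUX backlight support plus the min/max
 * input signal range) from the sink's extended DPCD caps and the connector's
 * HDR metadata.
 */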
2781 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2782 {
2783 	u32 max_cll, min_cll, max, min, q, r;
2784 	struct amdgpu_dm_backlight_caps *caps;
2785 	struct amdgpu_display_manager *dm;
2786 	struct drm_connector *conn_base;
2787 	struct amdgpu_device *adev;
2788 	struct dc_link *link = NULL;
2789 	static const u8 pre_computed_values[] = {
2790 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2791 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2792 	int i;
2793 
2794 	if (!aconnector || !aconnector->dc_link)
2795 		return;
2796 
2797 	link = aconnector->dc_link;
2798 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2799 		return;
2800 
2801 	conn_base = &aconnector->base;
2802 	adev = drm_to_adev(conn_base->dev);
2803 	dm = &adev->dm;
2804 	for (i = 0; i < dm->num_of_edps; i++) {
2805 		if (link == dm->backlight_link[i])
2806 			break;
2807 	}
2808 	if (i >= dm->num_of_edps)
2809 		return;
2810 	caps = &dm->backlight_caps[i];
2811 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2812 	caps->aux_support = false;
2813 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2814 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2815 
2816 	if (caps->ext_caps->bits.oled == 1 /*||
2817 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2818 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2819 		caps->aux_support = true;
2820 
2821 	if (amdgpu_backlight == 0)
2822 		caps->aux_support = false;
2823 	else if (amdgpu_backlight == 1)
2824 		caps->aux_support = true;
2825 
	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * where CV is a one-byte value.
	 * Evaluating this expression directly would require floating-point
	 * precision; to avoid that complexity we take advantage of the fact
	 * that CV is divided by a constant. From Euclid's division algorithm,
	 * we know that CV can be written as: CV = 32*q + r. Substituting this
	 * into the luminance expression gives 50*(2**q)*(2**(r/32)), so we
	 * only need to pre-compute the values of 50*2**(r/32) for r = 0..31.
	 * The pre-computation was done with the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * and the results are stored in pre_computed_values.
	 */
2841 	q = max_cll >> 5;
2842 	r = max_cll % 32;
2843 	max = (1 << q) * pre_computed_values[r];
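	/* e.g. max_cll = 70: q = 2, r = 6, max = 4 * 57 = 228, matching 50*2**(70/32) ~= 228 */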
2844 
	/* min luminance: maxLum * (CV/255)^2 / 100 */
2846 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2847 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2848 
2849 	caps->aux_max_input_signal = max;
2850 	caps->aux_min_input_signal = min;
2851 }
2852 
2853 void amdgpu_dm_update_connector_after_detect(
2854 		struct amdgpu_dm_connector *aconnector)
2855 {
2856 	struct drm_connector *connector = &aconnector->base;
2857 	struct drm_device *dev = connector->dev;
2858 	struct dc_sink *sink;
2859 
2860 	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state)
2862 		return;
2863 
2864 	sink = aconnector->dc_link->local_sink;
2865 	if (sink)
2866 		dc_sink_retain(sink);
2867 
2868 	/*
2869 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2870 	 * the connector sink is set to either fake or physical sink depends on link status.
2871 	 * Skip if already done during boot.
2872 	 */
2873 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2874 			&& aconnector->dc_em_sink) {
2875 
2876 		/*
2877 		 * For S3 resume with headless use eml_sink to fake stream
2878 		 * because on resume connector->sink is set to NULL
2879 		 */
2880 		mutex_lock(&dev->mode_config.mutex);
2881 
2882 		if (sink) {
2883 			if (aconnector->dc_sink) {
2884 				amdgpu_dm_update_freesync_caps(connector, NULL);
2885 				/*
2886 				 * retain and release below are used to
2887 				 * bump up refcount for sink because the link doesn't point
2888 				 * to it anymore after disconnect, so on next crtc to connector
2889 				 * reshuffle by UMD we will get into unwanted dc_sink release
2890 				 */
2891 				dc_sink_release(aconnector->dc_sink);
2892 			}
2893 			aconnector->dc_sink = sink;
2894 			dc_sink_retain(aconnector->dc_sink);
2895 			amdgpu_dm_update_freesync_caps(connector,
2896 					aconnector->edid);
2897 		} else {
2898 			amdgpu_dm_update_freesync_caps(connector, NULL);
2899 			if (!aconnector->dc_sink) {
2900 				aconnector->dc_sink = aconnector->dc_em_sink;
2901 				dc_sink_retain(aconnector->dc_sink);
2902 			}
2903 		}
2904 
2905 		mutex_unlock(&dev->mode_config.mutex);
2906 
2907 		if (sink)
2908 			dc_sink_release(sink);
2909 		return;
2910 	}
2911 
2912 	/*
2913 	 * TODO: temporary guard to look for proper fix
2914 	 * if this sink is MST sink, we should not do anything
2915 	 */
2916 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2917 		dc_sink_release(sink);
2918 		return;
2919 	}
2920 
2921 	if (aconnector->dc_sink == sink) {
2922 		/*
2923 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2924 		 * Do nothing!!
2925 		 */
2926 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2927 				aconnector->connector_id);
2928 		if (sink)
2929 			dc_sink_release(sink);
2930 		return;
2931 	}
2932 
2933 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2934 		aconnector->connector_id, aconnector->dc_sink, sink);
2935 
2936 	mutex_lock(&dev->mode_config.mutex);
2937 
2938 	/*
2939 	 * 1. Update status of the drm connector
2940 	 * 2. Send an event and let userspace tell us what to do
2941 	 */
2942 	if (sink) {
2943 		/*
2944 		 * TODO: check if we still need the S3 mode update workaround.
2945 		 * If yes, put it here.
2946 		 */
2947 		if (aconnector->dc_sink) {
2948 			amdgpu_dm_update_freesync_caps(connector, NULL);
2949 			dc_sink_release(aconnector->dc_sink);
2950 		}
2951 
2952 		aconnector->dc_sink = sink;
2953 		dc_sink_retain(aconnector->dc_sink);
2954 		if (sink->dc_edid.length == 0) {
2955 			aconnector->edid = NULL;
2956 			if (aconnector->dc_link->aux_mode) {
2957 				drm_dp_cec_unset_edid(
2958 					&aconnector->dm_dp_aux.aux);
2959 			}
2960 		} else {
2961 			aconnector->edid =
2962 				(struct edid *)sink->dc_edid.raw_edid;
2963 
2964 			if (aconnector->dc_link->aux_mode)
2965 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2966 						    aconnector->edid);
2967 		}
2968 
2969 		drm_connector_update_edid_property(connector, aconnector->edid);
2970 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2971 		update_connector_ext_caps(aconnector);
2972 	} else {
2973 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2974 		amdgpu_dm_update_freesync_caps(connector, NULL);
2975 		drm_connector_update_edid_property(connector, NULL);
2976 		aconnector->num_modes = 0;
2977 		dc_sink_release(aconnector->dc_sink);
2978 		aconnector->dc_sink = NULL;
2979 		aconnector->edid = NULL;
2980 #ifdef CONFIG_DRM_AMD_DC_HDCP
2981 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2982 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2983 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2984 #endif
2985 	}
2986 
2987 	mutex_unlock(&dev->mode_config.mutex);
2988 
2989 	update_subconnector_property(aconnector);
2990 
2991 	if (sink)
2992 		dc_sink_release(sink);
2993 }
2994 
2995 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2996 {
2997 	struct drm_connector *connector = &aconnector->base;
2998 	struct drm_device *dev = connector->dev;
2999 	enum dc_connection_type new_connection_type = dc_connection_none;
3000 	struct amdgpu_device *adev = drm_to_adev(dev);
3001 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3002 	struct dm_crtc_state *dm_crtc_state = NULL;
3003 
3004 	if (adev->dm.disable_hpd_irq)
3005 		return;
3006 
3007 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3008 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3009 					dm_con_state->base.state,
3010 					dm_con_state->base.crtc));
3011 	/*
3012 	 * In case of failure or MST no need to update connector status or notify the OS
3013 	 * since (for MST case) MST does this in its own context.
3014 	 */
3015 	mutex_lock(&aconnector->hpd_lock);
3016 
3017 #ifdef CONFIG_DRM_AMD_DC_HDCP
3018 	if (adev->dm.hdcp_workqueue) {
3019 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3020 		dm_con_state->update_hdcp = true;
3021 	}
3022 #endif
3023 	if (aconnector->fake_enable)
3024 		aconnector->fake_enable = false;
3025 
3026 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3027 		DRM_ERROR("KMS: Failed to detect connector\n");
3028 
3029 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3030 		emulated_link_detect(aconnector->dc_link);
3031 
3032 		drm_modeset_lock_all(dev);
3033 		dm_restore_drm_connector_state(dev, connector);
3034 		drm_modeset_unlock_all(dev);
3035 
3036 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3037 			drm_kms_helper_hotplug_event(dev);
3038 
3039 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3040 		if (new_connection_type == dc_connection_none &&
3041 		    aconnector->dc_link->type == dc_connection_none &&
3042 		    dm_crtc_state)
3043 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3044 
3045 		amdgpu_dm_update_connector_after_detect(aconnector);
3046 
3047 		drm_modeset_lock_all(dev);
3048 		dm_restore_drm_connector_state(dev, connector);
3049 		drm_modeset_unlock_all(dev);
3050 
3051 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3052 			drm_kms_helper_hotplug_event(dev);
3053 	}
3054 	mutex_unlock(&aconnector->hpd_lock);
3055 
3056 }
3057 
3058 static void handle_hpd_irq(void *param)
3059 {
3060 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3061 
3062 	handle_hpd_irq_helper(aconnector);
3063 
3064 }
3065 
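/*
 * Service MST sideband interrupts: read the IRQ bytes from DPCD (ESI, or the
 * legacy sink-count region for DPCD < 1.2), hand them to the MST manager,
 * ACK what was handled and repeat until no new IRQ is reported or the
 * iteration cap is reached.
 */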
3066 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3067 {
3068 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3069 	uint8_t dret;
3070 	bool new_irq_handled = false;
3071 	int dpcd_addr;
3072 	int dpcd_bytes_to_read;
3073 
3074 	const int max_process_count = 30;
3075 	int process_count = 0;
3076 
3077 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3078 
3079 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3080 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3081 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3082 		dpcd_addr = DP_SINK_COUNT;
3083 	} else {
3084 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3085 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3086 		dpcd_addr = DP_SINK_COUNT_ESI;
3087 	}
3088 
3089 	dret = drm_dp_dpcd_read(
3090 		&aconnector->dm_dp_aux.aux,
3091 		dpcd_addr,
3092 		esi,
3093 		dpcd_bytes_to_read);
3094 
3095 	while (dret == dpcd_bytes_to_read &&
3096 		process_count < max_process_count) {
3097 		uint8_t retry;
3098 		dret = 0;
3099 
3100 		process_count++;
3101 
3102 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3103 		/* handle HPD short pulse irq */
3104 		if (aconnector->mst_mgr.mst_state)
3105 			drm_dp_mst_hpd_irq(
3106 				&aconnector->mst_mgr,
3107 				esi,
3108 				&new_irq_handled);
3109 
3110 		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
3112 			const int ack_dpcd_bytes_to_write =
3113 				dpcd_bytes_to_read - 1;
3114 
3115 			for (retry = 0; retry < 3; retry++) {
3116 				uint8_t wret;
3117 
3118 				wret = drm_dp_dpcd_write(
3119 					&aconnector->dm_dp_aux.aux,
3120 					dpcd_addr + 1,
3121 					&esi[1],
3122 					ack_dpcd_bytes_to_write);
3123 				if (wret == ack_dpcd_bytes_to_write)
3124 					break;
3125 			}
3126 
			/* check if there is a new irq to be handled */
3128 			dret = drm_dp_dpcd_read(
3129 				&aconnector->dm_dp_aux.aux,
3130 				dpcd_addr,
3131 				esi,
3132 				dpcd_bytes_to_read);
3133 
3134 			new_irq_handled = false;
3135 		} else {
3136 			break;
3137 		}
3138 	}
3139 
3140 	if (process_count == max_process_count)
3141 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3142 }
3143 
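/*
 * Package the HPD RX IRQ data into a work item and queue it on the per-link
 * offload workqueue for deferred handling.
 */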
3144 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3145 							union hpd_irq_data hpd_irq_data)
3146 {
3147 	struct hpd_rx_irq_offload_work *offload_work =
3148 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3149 
3150 	if (!offload_work) {
3151 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3152 		return;
3153 	}
3154 
3155 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3156 	offload_work->data = hpd_irq_data;
3157 	offload_work->offload_wq = offload_wq;
3158 
3159 	queue_work(offload_wq->wq, &offload_work->work);
	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3161 }
3162 
3163 static void handle_hpd_rx_irq(void *param)
3164 {
3165 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3166 	struct drm_connector *connector = &aconnector->base;
3167 	struct drm_device *dev = connector->dev;
3168 	struct dc_link *dc_link = aconnector->dc_link;
3169 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3170 	bool result = false;
3171 	enum dc_connection_type new_connection_type = dc_connection_none;
3172 	struct amdgpu_device *adev = drm_to_adev(dev);
3173 	union hpd_irq_data hpd_irq_data;
3174 	bool link_loss = false;
3175 	bool has_left_work = false;
3176 	int idx = aconnector->base.index;
3177 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3178 
3179 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3180 
3181 	if (adev->dm.disable_hpd_irq)
3182 		return;
3183 
3184 	/*
3185 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3186 	 * conflict, after implement i2c helper, this mutex should be
3187 	 * retired.
3188 	 */
3189 	mutex_lock(&aconnector->hpd_lock);
3190 
3191 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3192 						&link_loss, true, &has_left_work);
3193 
3194 	if (!has_left_work)
3195 		goto out;
3196 
3197 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3198 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3199 		goto out;
3200 	}
3201 
3202 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3203 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3204 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3205 			dm_handle_mst_sideband_msg(aconnector);
3206 			goto out;
3207 		}
3208 
3209 		if (link_loss) {
3210 			bool skip = false;
3211 
3212 			spin_lock(&offload_wq->offload_lock);
3213 			skip = offload_wq->is_handling_link_loss;
3214 
3215 			if (!skip)
3216 				offload_wq->is_handling_link_loss = true;
3217 
3218 			spin_unlock(&offload_wq->offload_lock);
3219 
3220 			if (!skip)
3221 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3222 
3223 			goto out;
3224 		}
3225 	}
3226 
3227 out:
3228 	if (result && !is_mst_root_connector) {
3229 		/* Downstream Port status changed. */
3230 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3231 			DRM_ERROR("KMS: Failed to detect connector\n");
3232 
3233 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3234 			emulated_link_detect(dc_link);
3235 
3236 			if (aconnector->fake_enable)
3237 				aconnector->fake_enable = false;
3238 
3239 			amdgpu_dm_update_connector_after_detect(aconnector);
3240 
3241 
3242 			drm_modeset_lock_all(dev);
3243 			dm_restore_drm_connector_state(dev, connector);
3244 			drm_modeset_unlock_all(dev);
3245 
3246 			drm_kms_helper_hotplug_event(dev);
3247 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3248 
3249 			if (aconnector->fake_enable)
3250 				aconnector->fake_enable = false;
3251 
3252 			amdgpu_dm_update_connector_after_detect(aconnector);
3253 
3254 
3255 			drm_modeset_lock_all(dev);
3256 			dm_restore_drm_connector_state(dev, connector);
3257 			drm_modeset_unlock_all(dev);
3258 
3259 			drm_kms_helper_hotplug_event(dev);
3260 		}
3261 	}
3262 #ifdef CONFIG_DRM_AMD_DC_HDCP
3263 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3264 		if (adev->dm.hdcp_workqueue)
3265 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3266 	}
3267 #endif
3268 
3269 	if (dc_link->type != dc_connection_mst_branch)
3270 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3271 
3272 	mutex_unlock(&aconnector->hpd_lock);
3273 }
3274 
3275 static void register_hpd_handlers(struct amdgpu_device *adev)
3276 {
3277 	struct drm_device *dev = adev_to_drm(adev);
3278 	struct drm_connector *connector;
3279 	struct amdgpu_dm_connector *aconnector;
3280 	const struct dc_link *dc_link;
3281 	struct dc_interrupt_params int_params = {0};
3282 
3283 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3284 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3285 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
3288 
3289 		aconnector = to_amdgpu_dm_connector(connector);
3290 		dc_link = aconnector->dc_link;
3291 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3293 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3294 			int_params.irq_source = dc_link->irq_source_hpd;
3295 
3296 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3297 					handle_hpd_irq,
3298 					(void *) aconnector);
3299 		}
3300 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3302 
3303 			/* Also register for DP short pulse (hpd_rx). */
3304 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3306 
3307 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3308 					handle_hpd_rx_irq,
3309 					(void *) aconnector);
3310 
3311 			if (adev->dm.hpd_rx_offload_wq)
3312 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3313 					aconnector;
3314 		}
3315 	}
3316 }
3317 
3318 #if defined(CONFIG_DRM_AMD_DC_SI)
3319 /* Register IRQ sources and initialize IRQ callbacks */
3320 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3321 {
3322 	struct dc *dc = adev->dm.dc;
3323 	struct common_irq_params *c_irq_params;
3324 	struct dc_interrupt_params int_params = {0};
3325 	int r;
3326 	int i;
3327 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3328 
3329 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3330 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3331 
3332 	/*
3333 	 * Actions of amdgpu_irq_add_id():
3334 	 * 1. Register a set() function with base driver.
3335 	 *    Base driver will call set() function to enable/disable an
3336 	 *    interrupt in DC hardware.
3337 	 * 2. Register amdgpu_dm_irq_handler().
3338 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3339 	 *    coming from DC hardware.
3340 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3341 	 *    for acknowledging and handling. */
3342 
3343 	/* Use VBLANK interrupt */
3344 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3346 		if (r) {
3347 			DRM_ERROR("Failed to add crtc irq id!\n");
3348 			return r;
3349 		}
3350 
3351 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3352 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3354 
3355 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3356 
3357 		c_irq_params->adev = adev;
3358 		c_irq_params->irq_src = int_params.irq_source;
3359 
3360 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3361 				dm_crtc_high_irq, c_irq_params);
3362 	}
3363 
3364 	/* Use GRPH_PFLIP interrupt */
3365 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3366 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3367 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3368 		if (r) {
3369 			DRM_ERROR("Failed to add page flip irq id!\n");
3370 			return r;
3371 		}
3372 
3373 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3374 		int_params.irq_source =
3375 			dc_interrupt_to_irq_source(dc, i, 0);
3376 
3377 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3378 
3379 		c_irq_params->adev = adev;
3380 		c_irq_params->irq_src = int_params.irq_source;
3381 
3382 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3383 				dm_pflip_high_irq, c_irq_params);
3384 
3385 	}
3386 
3387 	/* HPD */
3388 	r = amdgpu_irq_add_id(adev, client_id,
3389 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3390 	if (r) {
3391 		DRM_ERROR("Failed to add hpd irq id!\n");
3392 		return r;
3393 	}
3394 
3395 	register_hpd_handlers(adev);
3396 
3397 	return 0;
3398 }
3399 #endif
3400 
3401 /* Register IRQ sources and initialize IRQ callbacks */
3402 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3403 {
3404 	struct dc *dc = adev->dm.dc;
3405 	struct common_irq_params *c_irq_params;
3406 	struct dc_interrupt_params int_params = {0};
3407 	int r;
3408 	int i;
3409 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3410 
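	/* Vega (FAMILY_AI) and newer route display interrupts through the SOC15 DCE IH client */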
3411 	if (adev->family >= AMDGPU_FAMILY_AI)
3412 		client_id = SOC15_IH_CLIENTID_DCE;
3413 
3414 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3415 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3416 
3417 	/*
3418 	 * Actions of amdgpu_irq_add_id():
3419 	 * 1. Register a set() function with base driver.
3420 	 *    Base driver will call set() function to enable/disable an
3421 	 *    interrupt in DC hardware.
3422 	 * 2. Register amdgpu_dm_irq_handler().
3423 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3424 	 *    coming from DC hardware.
3425 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3427 
3428 	/* Use VBLANK interrupt */
3429 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3430 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3431 		if (r) {
3432 			DRM_ERROR("Failed to add crtc irq id!\n");
3433 			return r;
3434 		}
3435 
3436 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3437 		int_params.irq_source =
3438 			dc_interrupt_to_irq_source(dc, i, 0);
3439 
3440 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3441 
3442 		c_irq_params->adev = adev;
3443 		c_irq_params->irq_src = int_params.irq_source;
3444 
3445 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3446 				dm_crtc_high_irq, c_irq_params);
3447 	}
3448 
3449 	/* Use VUPDATE interrupt */
3450 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3451 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3452 		if (r) {
3453 			DRM_ERROR("Failed to add vupdate irq id!\n");
3454 			return r;
3455 		}
3456 
3457 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3458 		int_params.irq_source =
3459 			dc_interrupt_to_irq_source(dc, i, 0);
3460 
3461 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3462 
3463 		c_irq_params->adev = adev;
3464 		c_irq_params->irq_src = int_params.irq_source;
3465 
3466 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3467 				dm_vupdate_high_irq, c_irq_params);
3468 	}
3469 
3470 	/* Use GRPH_PFLIP interrupt */
3471 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3472 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3473 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3474 		if (r) {
3475 			DRM_ERROR("Failed to add page flip irq id!\n");
3476 			return r;
3477 		}
3478 
3479 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3480 		int_params.irq_source =
3481 			dc_interrupt_to_irq_source(dc, i, 0);
3482 
3483 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3484 
3485 		c_irq_params->adev = adev;
3486 		c_irq_params->irq_src = int_params.irq_source;
3487 
3488 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3489 				dm_pflip_high_irq, c_irq_params);
3491 	}
3492 
3493 	/* HPD */
3494 	r = amdgpu_irq_add_id(adev, client_id,
3495 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3496 	if (r) {
3497 		DRM_ERROR("Failed to add hpd irq id!\n");
3498 		return r;
3499 	}
3500 
3501 	register_hpd_handlers(adev);
3502 
3503 	return 0;
3504 }
3505 
3506 #if defined(CONFIG_DRM_AMD_DC_DCN)
3507 /* Register IRQ sources and initialize IRQ callbacks */
3508 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3509 {
3510 	struct dc *dc = adev->dm.dc;
3511 	struct common_irq_params *c_irq_params;
3512 	struct dc_interrupt_params int_params = {0};
3513 	int r;
3514 	int i;
3515 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3516 	static const unsigned int vrtl_int_srcid[] = {
3517 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3518 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3519 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3520 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3521 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3522 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3523 	};
3524 #endif
3525 
3526 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3527 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3528 
3529 	/*
3530 	 * Actions of amdgpu_irq_add_id():
3531 	 * 1. Register a set() function with base driver.
3532 	 *    Base driver will call set() function to enable/disable an
3533 	 *    interrupt in DC hardware.
3534 	 * 2. Register amdgpu_dm_irq_handler().
3535 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3536 	 *    coming from DC hardware.
3537 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3538 	 *    for acknowledging and handling.
3539 	 */
3540 
3541 	/* Use VSTARTUP interrupt */
3542 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3543 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3544 			i++) {
3545 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3546 
3547 		if (r) {
3548 			DRM_ERROR("Failed to add crtc irq id!\n");
3549 			return r;
3550 		}
3551 
3552 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3553 		int_params.irq_source =
3554 			dc_interrupt_to_irq_source(dc, i, 0);
3555 
3556 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3557 
3558 		c_irq_params->adev = adev;
3559 		c_irq_params->irq_src = int_params.irq_source;
3560 
3561 		amdgpu_dm_irq_register_interrupt(
3562 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3563 	}
3564 
3565 	/* Use otg vertical line interrupt */
3566 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3567 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3568 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3569 				vrtl_int_srcid[i], &adev->vline0_irq);
3570 
3571 		if (r) {
3572 			DRM_ERROR("Failed to add vline0 irq id!\n");
3573 			return r;
3574 		}
3575 
3576 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3577 		int_params.irq_source =
3578 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3579 
3580 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3581 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3582 			break;
3583 		}
3584 
3585 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3586 					- DC_IRQ_SOURCE_DC1_VLINE0];
3587 
3588 		c_irq_params->adev = adev;
3589 		c_irq_params->irq_src = int_params.irq_source;
3590 
3591 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3592 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3593 	}
3594 #endif
3595 
3596 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3597 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3598 	 * to trigger at end of each vblank, regardless of state of the lock,
3599 	 * matching DCE behaviour.
3600 	 */
3601 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3602 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3603 	     i++) {
3604 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3605 
3606 		if (r) {
3607 			DRM_ERROR("Failed to add vupdate irq id!\n");
3608 			return r;
3609 		}
3610 
3611 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3612 		int_params.irq_source =
3613 			dc_interrupt_to_irq_source(dc, i, 0);
3614 
3615 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3616 
3617 		c_irq_params->adev = adev;
3618 		c_irq_params->irq_src = int_params.irq_source;
3619 
3620 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3621 				dm_vupdate_high_irq, c_irq_params);
3622 	}
3623 
3624 	/* Use GRPH_PFLIP interrupt */
3625 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3626 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3627 			i++) {
3628 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3629 		if (r) {
3630 			DRM_ERROR("Failed to add page flip irq id!\n");
3631 			return r;
3632 		}
3633 
3634 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3635 		int_params.irq_source =
3636 			dc_interrupt_to_irq_source(dc, i, 0);
3637 
3638 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3639 
3640 		c_irq_params->adev = adev;
3641 		c_irq_params->irq_src = int_params.irq_source;
3642 
3643 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3644 				dm_pflip_high_irq, c_irq_params);
3646 	}
3647 
3648 	/* HPD */
3649 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3650 			&adev->hpd_irq);
3651 	if (r) {
3652 		DRM_ERROR("Failed to add hpd irq id!\n");
3653 		return r;
3654 	}
3655 
3656 	register_hpd_handlers(adev);
3657 
3658 	return 0;
3659 }
3660 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3661 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3662 {
3663 	struct dc *dc = adev->dm.dc;
3664 	struct common_irq_params *c_irq_params;
3665 	struct dc_interrupt_params int_params = {0};
3666 	int r, i;
3667 
3668 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3669 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3670 
3671 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3672 			&adev->dmub_outbox_irq);
3673 	if (r) {
3674 		DRM_ERROR("Failed to add outbox irq id!\n");
3675 		return r;
3676 	}
3677 
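	/* Only hook up the outbox handler when a DMUB service is actually present */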
3678 	if (dc->ctx->dmub_srv) {
3679 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3680 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3681 		int_params.irq_source =
3682 		dc_interrupt_to_irq_source(dc, i, 0);
3683 
3684 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3685 
3686 		c_irq_params->adev = adev;
3687 		c_irq_params->irq_src = int_params.irq_source;
3688 
3689 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3690 				dm_dmub_outbox1_low_irq, c_irq_params);
3691 	}
3692 
3693 	return 0;
3694 }
3695 #endif
3696 
3697 /*
3698  * Acquires the lock for the atomic state object and returns
3699  * the new atomic state.
3700  *
3701  * This should only be called during atomic check.
3702  */
3703 static int dm_atomic_get_state(struct drm_atomic_state *state,
3704 			       struct dm_atomic_state **dm_state)
3705 {
3706 	struct drm_device *dev = state->dev;
3707 	struct amdgpu_device *adev = drm_to_adev(dev);
3708 	struct amdgpu_display_manager *dm = &adev->dm;
3709 	struct drm_private_state *priv_state;
3710 
3711 	if (*dm_state)
3712 		return 0;
3713 
3714 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3715 	if (IS_ERR(priv_state))
3716 		return PTR_ERR(priv_state);
3717 
3718 	*dm_state = to_dm_atomic_state(priv_state);
3719 
3720 	return 0;
3721 }
3722 
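/*
 * Return the DM private object state already added to this atomic state,
 * or NULL if it is not part of the state.
 */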
3723 static struct dm_atomic_state *
3724 dm_atomic_get_new_state(struct drm_atomic_state *state)
3725 {
3726 	struct drm_device *dev = state->dev;
3727 	struct amdgpu_device *adev = drm_to_adev(dev);
3728 	struct amdgpu_display_manager *dm = &adev->dm;
3729 	struct drm_private_obj *obj;
3730 	struct drm_private_state *new_obj_state;
3731 	int i;
3732 
3733 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3734 		if (obj->funcs == dm->atomic_obj.funcs)
3735 			return to_dm_atomic_state(new_obj_state);
3736 	}
3737 
3738 	return NULL;
3739 }
3740 
3741 static struct drm_private_state *
3742 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3743 {
3744 	struct dm_atomic_state *old_state, *new_state;
3745 
3746 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3747 	if (!new_state)
3748 		return NULL;
3749 
3750 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3751 
3752 	old_state = to_dm_atomic_state(obj->state);
3753 
3754 	if (old_state && old_state->context)
3755 		new_state->context = dc_copy_state(old_state->context);
3756 
3757 	if (!new_state->context) {
3758 		kfree(new_state);
3759 		return NULL;
3760 	}
3761 
3762 	return &new_state->base;
3763 }
3764 
3765 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3766 				    struct drm_private_state *state)
3767 {
3768 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3769 
3770 	if (dm_state && dm_state->context)
3771 		dc_release_state(dm_state->context);
3772 
3773 	kfree(dm_state);
3774 }
3775 
3776 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3777 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3778 	.atomic_destroy_state = dm_atomic_destroy_state,
3779 };
3780 
3781 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3782 {
3783 	struct dm_atomic_state *state;
3784 	int r;
3785 
3786 	adev->mode_info.mode_config_initialized = true;
3787 
3788 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3789 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3790 
3791 	adev_to_drm(adev)->mode_config.max_width = 16384;
3792 	adev_to_drm(adev)->mode_config.max_height = 16384;
3793 
3794 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3795 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3796 	/* indicates support for immediate flip */
3797 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3798 
3799 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3800 
3801 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3802 	if (!state)
3803 		return -ENOMEM;
3804 
3805 	state->context = dc_create_state(adev->dm.dc);
3806 	if (!state->context) {
3807 		kfree(state);
3808 		return -ENOMEM;
3809 	}
3810 
3811 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3812 
3813 	drm_atomic_private_obj_init(adev_to_drm(adev),
3814 				    &adev->dm.atomic_obj,
3815 				    &state->base,
3816 				    &dm_atomic_state_funcs);
3817 
3818 	r = amdgpu_display_modeset_create_props(adev);
3819 	if (r) {
3820 		dc_release_state(state->context);
3821 		kfree(state);
3822 		return r;
3823 	}
3824 
3825 	r = amdgpu_dm_audio_init(adev);
3826 	if (r) {
3827 		dc_release_state(state->context);
3828 		kfree(state);
3829 		return r;
3830 	}
3831 
3832 	return 0;
3833 }
3834 
3835 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3836 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3837 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3838 
3839 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3840 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3841 
3842 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3843 					    int bl_idx)
3844 {
3845 #if defined(CONFIG_ACPI)
3846 	struct amdgpu_dm_backlight_caps caps;
3847 
3848 	memset(&caps, 0, sizeof(caps));
3849 
3850 	if (dm->backlight_caps[bl_idx].caps_valid)
3851 		return;
3852 
3853 	amdgpu_acpi_get_backlight_caps(&caps);
3854 	if (caps.caps_valid) {
3855 		dm->backlight_caps[bl_idx].caps_valid = true;
3856 		if (caps.aux_support)
3857 			return;
3858 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3859 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3860 	} else {
3861 		dm->backlight_caps[bl_idx].min_input_signal =
3862 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3863 		dm->backlight_caps[bl_idx].max_input_signal =
3864 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3865 	}
3866 #else
3867 	if (dm->backlight_caps[bl_idx].aux_support)
3868 		return;
3869 
3870 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3871 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3872 #endif
3873 }
3874 
3875 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3876 				unsigned *min, unsigned *max)
3877 {
3878 	if (!caps)
3879 		return 0;
3880 
3881 	if (caps->aux_support) {
3882 		// Firmware limits are in nits, DC API wants millinits.
3883 		*max = 1000 * caps->aux_max_input_signal;
3884 		*min = 1000 * caps->aux_min_input_signal;
3885 	} else {
3886 		// Firmware limits are 8-bit, PWM control is 16-bit.
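		// 0x101 expands 0..0xFF to 0..0xFFFF exactly (0xFF * 0x101 == 0xFFFF).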
3887 		*max = 0x101 * caps->max_input_signal;
3888 		*min = 0x101 * caps->min_input_signal;
3889 	}
3890 	return 1;
3891 }
3892 
3893 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3894 					uint32_t brightness)
3895 {
3896 	unsigned min, max;
3897 
3898 	if (!get_brightness_range(caps, &min, &max))
3899 		return brightness;
3900 
3901 	// Rescale 0..255 to min..max
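	// 0 maps to min and AMDGPU_MAX_BL_LEVEL maps to max.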
3902 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3903 				       AMDGPU_MAX_BL_LEVEL);
3904 }
3905 
3906 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3907 				      uint32_t brightness)
3908 {
3909 	unsigned min, max;
3910 
3911 	if (!get_brightness_range(caps, &min, &max))
3912 		return brightness;
3913 
3914 	if (brightness < min)
3915 		return 0;
3916 	// Rescale min..max to 0..255
3917 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3918 				 max - min);
3919 }
3920 
3921 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3922 					 int bl_idx,
3923 					 u32 user_brightness)
3924 {
3925 	struct amdgpu_dm_backlight_caps caps;
3926 	struct dc_link *link;
3927 	u32 brightness;
3928 	bool rc;
3929 
3930 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3931 	caps = dm->backlight_caps[bl_idx];
3932 
3933 	dm->brightness[bl_idx] = user_brightness;
3934 	/* update scratch register */
3935 	if (bl_idx == 0)
3936 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3937 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3938 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3939 
3940 	/* Change brightness based on AUX property */
3941 	if (caps.aux_support) {
3942 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3943 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3944 		if (!rc)
3945 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3946 	} else {
3947 		rc = dc_link_set_backlight_level(link, brightness, 0);
3948 		if (!rc)
3949 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3950 	}
3951 
3952 	return rc ? 0 : 1;
3953 }
3954 
3955 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3956 {
3957 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3958 	int i;
3959 
3960 	for (i = 0; i < dm->num_of_edps; i++) {
3961 		if (bd == dm->backlight_dev[i])
3962 			break;
3963 	}
3964 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3965 		i = 0;
3966 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3967 
3968 	return 0;
3969 }
3970 
3971 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3972 					 int bl_idx)
3973 {
3974 	struct amdgpu_dm_backlight_caps caps;
3975 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3976 
3977 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3978 	caps = dm->backlight_caps[bl_idx];
3979 
3980 	if (caps.aux_support) {
3981 		u32 avg, peak;
3982 		bool rc;
3983 
3984 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3985 		if (!rc)
3986 			return dm->brightness[bl_idx];
3987 		return convert_brightness_to_user(&caps, avg);
3988 	} else {
3989 		int ret = dc_link_get_backlight_level(link);
3990 
3991 		if (ret == DC_ERROR_UNEXPECTED)
3992 			return dm->brightness[bl_idx];
3993 		return convert_brightness_to_user(&caps, ret);
3994 	}
3995 }
3996 
3997 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3998 {
3999 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4000 	int i;
4001 
4002 	for (i = 0; i < dm->num_of_edps; i++) {
4003 		if (bd == dm->backlight_dev[i])
4004 			break;
4005 	}
4006 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4007 		i = 0;
4008 	return amdgpu_dm_backlight_get_level(dm, i);
4009 }
4010 
4011 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4012 	.options = BL_CORE_SUSPENDRESUME,
4013 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4014 	.update_status	= amdgpu_dm_backlight_update_status,
4015 };
4016 
4017 static void
4018 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4019 {
4020 	char bl_name[16];
4021 	struct backlight_properties props = { 0 };
4022 
4023 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4024 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4025 
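	/* Expose a raw 0..AMDGPU_MAX_BL_LEVEL interface, starting at full brightness */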
4026 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4027 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4028 	props.type = BACKLIGHT_RAW;
4029 
4030 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4031 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4032 
4033 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4034 								       adev_to_drm(dm->adev)->dev,
4035 								       dm,
4036 								       &amdgpu_dm_backlight_ops,
4037 								       &props);
4038 
4039 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4040 		DRM_ERROR("DM: Backlight registration failed!\n");
4041 	else
4042 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4043 }
4044 #endif
4045 
4046 static int initialize_plane(struct amdgpu_display_manager *dm,
4047 			    struct amdgpu_mode_info *mode_info, int plane_id,
4048 			    enum drm_plane_type plane_type,
4049 			    const struct dc_plane_cap *plane_cap)
4050 {
4051 	struct drm_plane *plane;
4052 	unsigned long possible_crtcs;
4053 	int ret = 0;
4054 
4055 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4056 	if (!plane) {
4057 		DRM_ERROR("KMS: Failed to allocate plane\n");
4058 		return -ENOMEM;
4059 	}
4060 	plane->type = plane_type;
4061 
4062 	/*
4063 	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if the plane isn't going to be used as a primary
	 * plane for a CRTC - like overlay or underlay planes.
4067 	 */
4068 	possible_crtcs = 1 << plane_id;
4069 	if (plane_id >= dm->dc->caps.max_streams)
4070 		possible_crtcs = 0xff;
4071 
4072 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4073 
4074 	if (ret) {
4075 		DRM_ERROR("KMS: Failed to initialize plane\n");
4076 		kfree(plane);
4077 		return ret;
4078 	}
4079 
4080 	if (mode_info)
4081 		mode_info->planes[plane_id] = plane;
4082 
4083 	return ret;
4084 }
4085 
4087 static void register_backlight_device(struct amdgpu_display_manager *dm,
4088 				      struct dc_link *link)
4089 {
4090 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4091 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4092 
4093 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4094 	    link->type != dc_connection_none) {
4095 		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having backlight control
		 * is better than a black screen.
4099 		 */
4100 		if (!dm->backlight_dev[dm->num_of_edps])
4101 			amdgpu_dm_register_backlight_device(dm);
4102 
4103 		if (dm->backlight_dev[dm->num_of_edps]) {
4104 			dm->backlight_link[dm->num_of_edps] = link;
4105 			dm->num_of_edps++;
4106 		}
4107 	}
4108 #endif
4109 }
4110 
4112 /*
4113  * In this architecture, the association
4114  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
4117  *
4118  * Returns 0 on success
4119  */
4120 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4121 {
4122 	struct amdgpu_display_manager *dm = &adev->dm;
4123 	int32_t i;
4124 	struct amdgpu_dm_connector *aconnector = NULL;
4125 	struct amdgpu_encoder *aencoder = NULL;
4126 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4127 	uint32_t link_cnt;
4128 	int32_t primary_planes;
4129 	enum dc_connection_type new_connection_type = dc_connection_none;
4130 	const struct dc_plane_cap *plane;
4131 	bool psr_feature_enabled = false;
4132 
4133 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
4135 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4136 
4137 	link_cnt = dm->dc->caps.max_links;
4138 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4139 		DRM_ERROR("DM: Failed to initialize mode config\n");
4140 		return -EINVAL;
4141 	}
4142 
4143 	/* There is one primary plane per CRTC */
4144 	primary_planes = dm->dc->caps.max_streams;
4145 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4146 
4147 	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4149 	 * Order is reversed to match iteration order in atomic check.
4150 	 */
4151 	for (i = (primary_planes - 1); i >= 0; i--) {
4152 		plane = &dm->dc->caps.planes[i];
4153 
4154 		if (initialize_plane(dm, mode_info, i,
4155 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4156 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4157 			goto fail;
4158 		}
4159 	}
4160 
4161 	/*
4162 	 * Initialize overlay planes, index starting after primary planes.
4163 	 * These planes have a higher DRM index than the primary planes since
4164 	 * they should be considered as having a higher z-order.
4165 	 * Order is reversed to match iteration order in atomic check.
4166 	 *
4167 	 * Only support DCN for now, and only expose one so we don't encourage
4168 	 * userspace to use up all the pipes.
4169 	 */
4170 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4171 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4172 
4173 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4174 			continue;
4175 
4176 		if (!plane->blends_with_above || !plane->blends_with_below)
4177 			continue;
4178 
4179 		if (!plane->pixel_format_support.argb8888)
4180 			continue;
4181 
4182 		if (initialize_plane(dm, NULL, primary_planes + i,
4183 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4184 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4185 			goto fail;
4186 		}
4187 
4188 		/* Only create one overlay plane. */
4189 		break;
4190 	}
4191 
4192 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4193 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4194 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4195 			goto fail;
4196 		}
4197 
4198 #if defined(CONFIG_DRM_AMD_DC_DCN)
4199 	/* Use Outbox interrupt */
4200 	switch (adev->ip_versions[DCE_HWIP][0]) {
4201 	case IP_VERSION(3, 0, 0):
4202 	case IP_VERSION(3, 1, 2):
4203 	case IP_VERSION(3, 1, 3):
4204 	case IP_VERSION(2, 1, 0):
4205 		if (register_outbox_irq_handlers(dm->adev)) {
4206 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4207 			goto fail;
4208 		}
4209 		break;
4210 	default:
4211 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4212 			      adev->ip_versions[DCE_HWIP][0]);
4213 	}
4214 
4215 	/* Determine whether to enable PSR support by default. */
4216 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4217 		switch (adev->ip_versions[DCE_HWIP][0]) {
4218 		case IP_VERSION(3, 1, 2):
4219 		case IP_VERSION(3, 1, 3):
4220 			psr_feature_enabled = true;
4221 			break;
4222 		default:
4223 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4224 			break;
4225 		}
4226 	}
4227 #endif
4228 
4229 	/* loops over all connectors on the board */
4230 	for (i = 0; i < link_cnt; i++) {
4231 		struct dc_link *link = NULL;
4232 
4233 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4234 			DRM_ERROR(
4235 				"KMS: Cannot support more than %d display indexes\n",
4236 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4237 			continue;
4238 		}
4239 
4240 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4241 		if (!aconnector)
4242 			goto fail;
4243 
4244 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4245 		if (!aencoder)
4246 			goto fail;
4247 
4248 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4249 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4250 			goto fail;
4251 		}
4252 
4253 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4254 			DRM_ERROR("KMS: Failed to initialize connector\n");
4255 			goto fail;
4256 		}
4257 
4258 		link = dc_get_link_at_index(dm->dc, i);
4259 
4260 		if (!dc_link_detect_sink(link, &new_connection_type))
4261 			DRM_ERROR("KMS: Failed to detect connector\n");
4262 
4263 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4264 			emulated_link_detect(link);
4265 			amdgpu_dm_update_connector_after_detect(aconnector);
4266 
4267 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4268 			amdgpu_dm_update_connector_after_detect(aconnector);
4269 			register_backlight_device(dm, link);
4270 			if (dm->num_of_edps)
4271 				update_connector_ext_caps(aconnector);
4272 			if (psr_feature_enabled)
4273 				amdgpu_dm_set_psr_caps(link);
4274 		}
4277 	}
4278 
4279 	/*
4280 	 * Disable vblank IRQs aggressively for power-saving.
4281 	 *
4282 	 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4283 	 * is also supported.
4284 	 */
4285 	adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4286 
4287 	/* Software is initialized. Now we can register interrupt handlers. */
4288 	switch (adev->asic_type) {
4289 #if defined(CONFIG_DRM_AMD_DC_SI)
4290 	case CHIP_TAHITI:
4291 	case CHIP_PITCAIRN:
4292 	case CHIP_VERDE:
4293 	case CHIP_OLAND:
4294 		if (dce60_register_irq_handlers(dm->adev)) {
4295 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4296 			goto fail;
4297 		}
4298 		break;
4299 #endif
4300 	case CHIP_BONAIRE:
4301 	case CHIP_HAWAII:
4302 	case CHIP_KAVERI:
4303 	case CHIP_KABINI:
4304 	case CHIP_MULLINS:
4305 	case CHIP_TONGA:
4306 	case CHIP_FIJI:
4307 	case CHIP_CARRIZO:
4308 	case CHIP_STONEY:
4309 	case CHIP_POLARIS11:
4310 	case CHIP_POLARIS10:
4311 	case CHIP_POLARIS12:
4312 	case CHIP_VEGAM:
4313 	case CHIP_VEGA10:
4314 	case CHIP_VEGA12:
4315 	case CHIP_VEGA20:
4316 		if (dce110_register_irq_handlers(dm->adev)) {
4317 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4318 			goto fail;
4319 		}
4320 		break;
4321 	default:
4322 #if defined(CONFIG_DRM_AMD_DC_DCN)
4323 		switch (adev->ip_versions[DCE_HWIP][0]) {
4324 		case IP_VERSION(1, 0, 0):
4325 		case IP_VERSION(1, 0, 1):
4326 		case IP_VERSION(2, 0, 2):
4327 		case IP_VERSION(2, 0, 3):
4328 		case IP_VERSION(2, 0, 0):
4329 		case IP_VERSION(2, 1, 0):
4330 		case IP_VERSION(3, 0, 0):
4331 		case IP_VERSION(3, 0, 2):
4332 		case IP_VERSION(3, 0, 3):
4333 		case IP_VERSION(3, 0, 1):
4334 		case IP_VERSION(3, 1, 2):
4335 		case IP_VERSION(3, 1, 3):
4336 			if (dcn10_register_irq_handlers(dm->adev)) {
4337 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4338 				goto fail;
4339 			}
4340 			break;
4341 		default:
4342 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4343 					adev->ip_versions[DCE_HWIP][0]);
4344 			goto fail;
4345 		}
4346 #endif
4347 		break;
4348 	}
4349 
4350 	return 0;
4351 fail:
4352 	kfree(aencoder);
4353 	kfree(aconnector);
4354 
4355 	return -EINVAL;
4356 }
4357 
4358 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4359 {
4360 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4362 }
4363 
4364 /******************************************************************************
4365  * amdgpu_display_funcs functions
4366  *****************************************************************************/
4367 
4368 /*
4369  * dm_bandwidth_update - program display watermarks
4370  *
4371  * @adev: amdgpu_device pointer
4372  *
4373  * Calculate and program the display watermarks and line buffer allocation.
4374  */
4375 static void dm_bandwidth_update(struct amdgpu_device *adev)
4376 {
4377 	/* TODO: implement later */
4378 }
4379 
4380 static const struct amdgpu_display_funcs dm_display_funcs = {
4381 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4383 	.backlight_set_level = NULL, /* never called for DC */
4384 	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL, /* called unconditionally */
4386 	.hpd_set_polarity = NULL, /* called unconditionally */
4387 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4388 	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos, /* called unconditionally */
4390 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4391 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4392 };
4393 
4394 #if defined(CONFIG_DEBUG_KERNEL_DC)
4395 
4396 static ssize_t s3_debug_store(struct device *device,
4397 			      struct device_attribute *attr,
4398 			      const char *buf,
4399 			      size_t count)
4400 {
4401 	int ret;
4402 	int s3_state;
4403 	struct drm_device *drm_dev = dev_get_drvdata(device);
4404 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4405 
4406 	ret = kstrtoint(buf, 0, &s3_state);
4407 
4408 	if (ret == 0) {
4409 		if (s3_state) {
4410 			dm_resume(adev);
4411 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else {
			dm_suspend(adev);
		}
4414 	}
4415 
4416 	return ret == 0 ? count : 0;
4417 }
4418 
4419 DEVICE_ATTR_WO(s3_debug);
4420 
4421 #endif
4422 
4423 static int dm_early_init(void *handle)
4424 {
4425 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4426 
4427 	switch (adev->asic_type) {
4428 #if defined(CONFIG_DRM_AMD_DC_SI)
4429 	case CHIP_TAHITI:
4430 	case CHIP_PITCAIRN:
4431 	case CHIP_VERDE:
4432 		adev->mode_info.num_crtc = 6;
4433 		adev->mode_info.num_hpd = 6;
4434 		adev->mode_info.num_dig = 6;
4435 		break;
4436 	case CHIP_OLAND:
4437 		adev->mode_info.num_crtc = 2;
4438 		adev->mode_info.num_hpd = 2;
4439 		adev->mode_info.num_dig = 2;
4440 		break;
4441 #endif
4442 	case CHIP_BONAIRE:
4443 	case CHIP_HAWAII:
4444 		adev->mode_info.num_crtc = 6;
4445 		adev->mode_info.num_hpd = 6;
4446 		adev->mode_info.num_dig = 6;
4447 		break;
4448 	case CHIP_KAVERI:
4449 		adev->mode_info.num_crtc = 4;
4450 		adev->mode_info.num_hpd = 6;
4451 		adev->mode_info.num_dig = 7;
4452 		break;
4453 	case CHIP_KABINI:
4454 	case CHIP_MULLINS:
4455 		adev->mode_info.num_crtc = 2;
4456 		adev->mode_info.num_hpd = 6;
4457 		adev->mode_info.num_dig = 6;
4458 		break;
4459 	case CHIP_FIJI:
4460 	case CHIP_TONGA:
4461 		adev->mode_info.num_crtc = 6;
4462 		adev->mode_info.num_hpd = 6;
4463 		adev->mode_info.num_dig = 7;
4464 		break;
4465 	case CHIP_CARRIZO:
4466 		adev->mode_info.num_crtc = 3;
4467 		adev->mode_info.num_hpd = 6;
4468 		adev->mode_info.num_dig = 9;
4469 		break;
4470 	case CHIP_STONEY:
4471 		adev->mode_info.num_crtc = 2;
4472 		adev->mode_info.num_hpd = 6;
4473 		adev->mode_info.num_dig = 9;
4474 		break;
4475 	case CHIP_POLARIS11:
4476 	case CHIP_POLARIS12:
4477 		adev->mode_info.num_crtc = 5;
4478 		adev->mode_info.num_hpd = 5;
4479 		adev->mode_info.num_dig = 5;
4480 		break;
4481 	case CHIP_POLARIS10:
4482 	case CHIP_VEGAM:
4483 		adev->mode_info.num_crtc = 6;
4484 		adev->mode_info.num_hpd = 6;
4485 		adev->mode_info.num_dig = 6;
4486 		break;
4487 	case CHIP_VEGA10:
4488 	case CHIP_VEGA12:
4489 	case CHIP_VEGA20:
4490 		adev->mode_info.num_crtc = 6;
4491 		adev->mode_info.num_hpd = 6;
4492 		adev->mode_info.num_dig = 6;
4493 		break;
4494 	default:
4495 #if defined(CONFIG_DRM_AMD_DC_DCN)
4496 		switch (adev->ip_versions[DCE_HWIP][0]) {
4497 		case IP_VERSION(2, 0, 2):
4498 		case IP_VERSION(3, 0, 0):
4499 			adev->mode_info.num_crtc = 6;
4500 			adev->mode_info.num_hpd = 6;
4501 			adev->mode_info.num_dig = 6;
4502 			break;
4503 		case IP_VERSION(2, 0, 0):
4504 		case IP_VERSION(3, 0, 2):
4505 			adev->mode_info.num_crtc = 5;
4506 			adev->mode_info.num_hpd = 5;
4507 			adev->mode_info.num_dig = 5;
4508 			break;
4509 		case IP_VERSION(2, 0, 3):
4510 		case IP_VERSION(3, 0, 3):
4511 			adev->mode_info.num_crtc = 2;
4512 			adev->mode_info.num_hpd = 2;
4513 			adev->mode_info.num_dig = 2;
4514 			break;
4515 		case IP_VERSION(1, 0, 0):
4516 		case IP_VERSION(1, 0, 1):
4517 		case IP_VERSION(3, 0, 1):
4518 		case IP_VERSION(2, 1, 0):
4519 		case IP_VERSION(3, 1, 2):
4520 		case IP_VERSION(3, 1, 3):
4521 			adev->mode_info.num_crtc = 4;
4522 			adev->mode_info.num_hpd = 4;
4523 			adev->mode_info.num_dig = 4;
4524 			break;
4525 		default:
4526 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4527 					adev->ip_versions[DCE_HWIP][0]);
4528 			return -EINVAL;
4529 		}
4530 #endif
4531 		break;
4532 	}
4533 
4534 	amdgpu_dm_set_irq_funcs(adev);
4535 
4536 	if (adev->mode_info.funcs == NULL)
4537 		adev->mode_info.funcs = &dm_display_funcs;
4538 
4539 	/*
4540 	 * Note: Do NOT change adev->audio_endpt_rreg and
4541 	 * adev->audio_endpt_wreg because they are initialised in
4542 	 * amdgpu_device_init()
4543 	 */
4544 #if defined(CONFIG_DEBUG_KERNEL_DC)
4545 	device_create_file(
4546 		adev_to_drm(adev)->dev,
4547 		&dev_attr_s3_debug);
4548 #endif
4549 
4550 	return 0;
4551 }
4552 
4553 static bool modeset_required(struct drm_crtc_state *crtc_state,
4554 			     struct dc_stream_state *new_stream,
4555 			     struct dc_stream_state *old_stream)
4556 {
4557 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4558 }
4559 
4560 static bool modereset_required(struct drm_crtc_state *crtc_state)
4561 {
4562 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4563 }
4564 
4565 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4566 {
4567 	drm_encoder_cleanup(encoder);
4568 	kfree(encoder);
4569 }
4570 
4571 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4572 	.destroy = amdgpu_dm_encoder_destroy,
4573 };
4574 
4576 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4577 					 struct drm_framebuffer *fb,
4578 					 int *min_downscale, int *max_upscale)
4579 {
4580 	struct amdgpu_device *adev = drm_to_adev(dev);
4581 	struct dc *dc = adev->dm.dc;
4582 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4583 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4584 
4585 	switch (fb->format->format) {
4586 	case DRM_FORMAT_P010:
4587 	case DRM_FORMAT_NV12:
4588 	case DRM_FORMAT_NV21:
4589 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4590 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4591 		break;
4592 
4593 	case DRM_FORMAT_XRGB16161616F:
4594 	case DRM_FORMAT_ARGB16161616F:
4595 	case DRM_FORMAT_XBGR16161616F:
4596 	case DRM_FORMAT_ABGR16161616F:
4597 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4598 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4599 		break;
4600 
4601 	default:
4602 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4603 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4604 		break;
4605 	}
4606 
4607 	/*
	 * A factor of 1 in the plane_cap means scaling is not allowed,
	 * i.e. use a scaling factor of 1.0 == 1000 units.
4610 	 */
4611 	if (*max_upscale == 1)
4612 		*max_upscale = 1000;
4613 
4614 	if (*min_downscale == 1)
4615 		*min_downscale = 1000;
4616 }
4617 
4619 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4620 				const struct drm_plane_state *state,
4621 				struct dc_scaling_info *scaling_info)
4622 {
4623 	int scale_w, scale_h, min_downscale, max_upscale;
4624 
4625 	memset(scaling_info, 0, sizeof(*scaling_info));
4626 
4627 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4628 	scaling_info->src_rect.x = state->src_x >> 16;
4629 	scaling_info->src_rect.y = state->src_y >> 16;
4630 
4631 	/*
	 * For reasons we don't (yet) fully understand, a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
4634 	 * system hang on DCN1x.
4635 	 * To avoid hangs (and maybe be overly cautious)
4636 	 * let's reject both non-zero src_x and src_y.
4637 	 *
4638 	 * We currently know of only one use-case to reproduce a
4639 	 * scenario with non-zero src_x and src_y for NV12, which
4640 	 * is to gesture the YouTube Android app into full screen
4641 	 * on ChromeOS.
4642 	 */
4643 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4644 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4645 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4646 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4647 		return -EINVAL;
4648 
4649 	scaling_info->src_rect.width = state->src_w >> 16;
4650 	if (scaling_info->src_rect.width == 0)
4651 		return -EINVAL;
4652 
4653 	scaling_info->src_rect.height = state->src_h >> 16;
4654 	if (scaling_info->src_rect.height == 0)
4655 		return -EINVAL;
4656 
4657 	scaling_info->dst_rect.x = state->crtc_x;
4658 	scaling_info->dst_rect.y = state->crtc_y;
4659 
4660 	if (state->crtc_w == 0)
4661 		return -EINVAL;
4662 
4663 	scaling_info->dst_rect.width = state->crtc_w;
4664 
4665 	if (state->crtc_h == 0)
4666 		return -EINVAL;
4667 
4668 	scaling_info->dst_rect.height = state->crtc_h;
4669 
4670 	/* DRM doesn't specify clipping on destination output. */
4671 	scaling_info->clip_rect = scaling_info->dst_rect;
4672 
4673 	/* Validate scaling per-format with DC plane caps */
4674 	if (state->plane && state->plane->dev && state->fb) {
4675 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4676 					     &min_downscale, &max_upscale);
4677 	} else {
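		/* No plane caps available; assume up to 4x downscale and 16x upscale */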
4678 		min_downscale = 250;
4679 		max_upscale = 16000;
4680 	}
4681 
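	/* Scaling ratios are in thousandths of the source size: 1000 == 1:1 */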
4682 	scale_w = scaling_info->dst_rect.width * 1000 /
4683 		  scaling_info->src_rect.width;
4684 
4685 	if (scale_w < min_downscale || scale_w > max_upscale)
4686 		return -EINVAL;
4687 
4688 	scale_h = scaling_info->dst_rect.height * 1000 /
4689 		  scaling_info->src_rect.height;
4690 
4691 	if (scale_h < min_downscale || scale_h > max_upscale)
4692 		return -EINVAL;
4693 
4694 	/*
4695 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4696 	 * assume reasonable defaults based on the format.
4697 	 */
4698 
4699 	return 0;
4700 }
4701 
4702 static void
4703 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4704 				 uint64_t tiling_flags)
4705 {
4706 	/* Fill GFX8 params */
4707 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4708 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4709 
4710 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4711 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4712 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4713 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4714 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4715 
4716 		/* XXX fix me for VI */
4717 		tiling_info->gfx8.num_banks = num_banks;
4718 		tiling_info->gfx8.array_mode =
4719 				DC_ARRAY_2D_TILED_THIN1;
4720 		tiling_info->gfx8.tile_split = tile_split;
4721 		tiling_info->gfx8.bank_width = bankw;
4722 		tiling_info->gfx8.bank_height = bankh;
4723 		tiling_info->gfx8.tile_aspect = mtaspect;
4724 		tiling_info->gfx8.tile_mode =
4725 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4726 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4727 			== DC_ARRAY_1D_TILED_THIN1) {
4728 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4729 	}
4730 
4731 	tiling_info->gfx8.pipe_config =
4732 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4733 }
4734 
4735 static void
4736 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4737 				  union dc_tiling_info *tiling_info)
4738 {
4739 	tiling_info->gfx9.num_pipes =
4740 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4741 	tiling_info->gfx9.num_banks =
4742 		adev->gfx.config.gb_addr_config_fields.num_banks;
4743 	tiling_info->gfx9.pipe_interleave =
4744 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4745 	tiling_info->gfx9.num_shader_engines =
4746 		adev->gfx.config.gb_addr_config_fields.num_se;
4747 	tiling_info->gfx9.max_compressed_frags =
4748 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4749 	tiling_info->gfx9.num_rb_per_se =
4750 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4751 	tiling_info->gfx9.shaderEnable = 1;
4752 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4753 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4754 }
4755 
4756 static int
4757 validate_dcc(struct amdgpu_device *adev,
4758 	     const enum surface_pixel_format format,
4759 	     const enum dc_rotation_angle rotation,
4760 	     const union dc_tiling_info *tiling_info,
4761 	     const struct dc_plane_dcc_param *dcc,
4762 	     const struct dc_plane_address *address,
4763 	     const struct plane_size *plane_size)
4764 {
4765 	struct dc *dc = adev->dm.dc;
4766 	struct dc_dcc_surface_param input;
4767 	struct dc_surface_dcc_cap output;
4768 
4769 	memset(&input, 0, sizeof(input));
4770 	memset(&output, 0, sizeof(output));
4771 
4772 	if (!dcc->enable)
4773 		return 0;
4774 
4775 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4776 	    !dc->cap_funcs.get_dcc_compression_cap)
4777 		return -EINVAL;
4778 
4779 	input.format = format;
4780 	input.surface_size.width = plane_size->surface_size.width;
4781 	input.surface_size.height = plane_size->surface_size.height;
4782 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4783 
4784 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4785 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4786 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4787 		input.scan = SCAN_DIRECTION_VERTICAL;
4788 
4789 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4790 		return -EINVAL;
4791 
4792 	if (!output.capable)
4793 		return -EINVAL;
4794 
4795 	if (dcc->independent_64b_blks == 0 &&
4796 	    output.grph.rgb.independent_64b_blks != 0)
4797 		return -EINVAL;
4798 
4799 	return 0;
4800 }
4801 
4802 static bool
4803 modifier_has_dcc(uint64_t modifier)
4804 {
4805 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4806 }
4807 
4808 static unsigned
4809 modifier_gfx9_swizzle_mode(uint64_t modifier)
4810 {
4811 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4812 		return 0;
4813 
4814 	return AMD_FMT_MOD_GET(TILE, modifier);
4815 }
4816 
4817 static const struct drm_format_info *
4818 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4819 {
4820 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4821 }
4822 
4823 static void
4824 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4825 				    union dc_tiling_info *tiling_info,
4826 				    uint64_t modifier)
4827 {
4828 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4829 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4830 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4831 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4832 
4833 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4834 
4835 	if (!IS_AMD_FMT_MOD(modifier))
4836 		return;
4837 
4838 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4839 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4840 
4841 	if (adev->family >= AMDGPU_FAMILY_NV) {
4842 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4843 	} else {
4844 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4845 
4846 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4847 	}
4848 }
4849 
4850 enum dm_micro_swizzle {
4851 	MICRO_SWIZZLE_Z = 0,
4852 	MICRO_SWIZZLE_S = 1,
4853 	MICRO_SWIZZLE_D = 2,
4854 	MICRO_SWIZZLE_R = 3
4855 };
4856 
4857 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4858 					  uint32_t format,
4859 					  uint64_t modifier)
4860 {
4861 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4862 	const struct drm_format_info *info = drm_format_info(format);
4863 	int i;
4864 
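	/* The low two bits of the GFX9+ swizzle mode select the micro-tile ordering (Z/S/D/R) */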
4865 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4866 
4867 	if (!info)
4868 		return false;
4869 
4870 	/*
4871 	 * We always have to allow these modifiers:
4872 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4873 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4874 	 */
4875 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4876 	    modifier == DRM_FORMAT_MOD_INVALID) {
4877 		return true;
4878 	}
4879 
4880 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4881 	for (i = 0; i < plane->modifier_count; i++) {
4882 		if (modifier == plane->modifiers[i])
4883 			break;
4884 	}
4885 	if (i == plane->modifier_count)
4886 		return false;
4887 
4888 	/*
4889 	 * For D swizzle the canonical modifier depends on the bpp, so check
4890 	 * it here.
4891 	 */
4892 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4893 	    adev->family >= AMDGPU_FAMILY_NV) {
4894 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4895 			return false;
4896 	}
4897 
4898 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4899 	    info->cpp[0] < 8)
4900 		return false;
4901 
4902 	if (modifier_has_dcc(modifier)) {
		/* Per radeonsi comments, 16/64 bpp are more complicated. */
4904 		if (info->cpp[0] != 4)
4905 			return false;
		/*
		 * We support multi-planar formats, but not when combined
		 * with additional DCC metadata planes.
		 */
4908 		if (info->num_planes > 1)
4909 			return false;
4910 	}
4911 
4912 	return true;
4913 }
4914 
4915 static void
4916 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4917 {
4918 	if (!*mods)
4919 		return;
4920 
4921 	if (*cap - *size < 1) {
4922 		uint64_t new_cap = *cap * 2;
4923 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4924 
4925 		if (!new_mods) {
4926 			kfree(*mods);
4927 			*mods = NULL;
4928 			return;
4929 		}
4930 
4931 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4932 		kfree(*mods);
4933 		*mods = new_mods;
4934 		*cap = new_cap;
4935 	}
4936 
4937 	(*mods)[*size] = mod;
4938 	*size += 1;
4939 }
4940 
4941 static void
4942 add_gfx9_modifiers(const struct amdgpu_device *adev,
4943 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4944 {
4945 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4946 	int pipe_xor_bits = min(8, pipes +
4947 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4948 	int bank_xor_bits = min(8 - pipe_xor_bits,
4949 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
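	/* rb: log2 of the total render backend count (num_se * num_rb_per_se) */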
4950 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4951 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4952 
4954 	if (adev->family == AMDGPU_FAMILY_RV) {
4955 		/* Raven2 and later */
4956 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4957 
4958 		/*
4959 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4960 		 * doesn't support _D on DCN
4961 		 */
4962 
4963 		if (has_constant_encode) {
4964 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4965 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4966 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4967 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4968 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4969 				    AMD_FMT_MOD_SET(DCC, 1) |
4970 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4971 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4972 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4973 		}
4974 
4975 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4976 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4977 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4978 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4979 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4980 			    AMD_FMT_MOD_SET(DCC, 1) |
4981 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4982 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4983 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4984 
4985 		if (has_constant_encode) {
4986 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4987 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4988 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4989 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4990 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4991 				    AMD_FMT_MOD_SET(DCC, 1) |
4992 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4993 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4994 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4996 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4997 				    AMD_FMT_MOD_SET(RB, rb) |
4998 				    AMD_FMT_MOD_SET(PIPE, pipes));
4999 		}
5000 
5001 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5002 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5003 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5004 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5005 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5006 			    AMD_FMT_MOD_SET(DCC, 1) |
5007 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5008 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5009 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5010 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5011 			    AMD_FMT_MOD_SET(RB, rb) |
5012 			    AMD_FMT_MOD_SET(PIPE, pipes));
5013 	}
5014 
5015 	/*
5016 	 * Only supported for 64bpp on Raven, will be filtered on format in
5017 	 * dm_plane_format_mod_supported.
5018 	 */
5019 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5020 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5021 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5022 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5023 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5024 
5025 	if (adev->family == AMDGPU_FAMILY_RV) {
5026 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5027 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5028 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5029 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5030 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5031 	}
5032 
5033 	/*
5034 	 * Only supported for 64bpp on Raven, will be filtered on format in
5035 	 * dm_plane_format_mod_supported.
5036 	 */
5037 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5038 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5039 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5040 
5041 	if (adev->family == AMDGPU_FAMILY_RV) {
5042 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5043 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5044 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5045 	}
5046 }
5047 
5048 static void
5049 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5050 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5051 {
5052 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5053 
5054 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5055 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5056 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5057 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5058 		    AMD_FMT_MOD_SET(DCC, 1) |
5059 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5060 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5061 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5062 
5063 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5064 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5065 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5066 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5067 		    AMD_FMT_MOD_SET(DCC, 1) |
5068 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5069 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5070 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5071 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5072 
5073 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5074 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5075 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5076 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5077 
5078 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5079 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5080 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5085 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5086 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5087 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5088 
5089 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5090 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5091 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5092 }
5093 
5094 static void
5095 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5096 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5097 {
5098 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5099 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5100 
5101 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5102 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5103 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5104 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5105 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5106 		    AMD_FMT_MOD_SET(DCC, 1) |
5107 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5108 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5109 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5110 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5111 
5112 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5113 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5114 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5115 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5116 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5117 		    AMD_FMT_MOD_SET(DCC, 1) |
5118 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5119 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5120 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5121 
5122 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5123 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5124 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5125 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5126 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5127 		    AMD_FMT_MOD_SET(DCC, 1) |
5128 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5129 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5130 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5131 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5132 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5133 
5134 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5135 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5136 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5137 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5138 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5139 		    AMD_FMT_MOD_SET(DCC, 1) |
5140 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5141 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5142 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5143 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5144 
5145 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5146 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5147 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5148 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5149 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5150 
5151 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5152 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5153 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5154 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5155 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5156 
5157 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5158 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5159 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5160 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5161 
5162 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5163 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5164 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5165 }
5166 
5167 static int
5168 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5169 {
5170 	uint64_t size = 0, capacity = 128;
5171 	*mods = NULL;
5172 
5173 	/* We have not hooked up any pre-GFX9 modifiers. */
5174 	if (adev->family < AMDGPU_FAMILY_AI)
5175 		return 0;
5176 
5177 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5178 
5179 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5180 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5181 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5182 		return *mods ? 0 : -ENOMEM;
5183 	}
5184 
5185 	switch (adev->family) {
5186 	case AMDGPU_FAMILY_AI:
5187 	case AMDGPU_FAMILY_RV:
5188 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5189 		break;
5190 	case AMDGPU_FAMILY_NV:
5191 	case AMDGPU_FAMILY_VGH:
5192 	case AMDGPU_FAMILY_YC:
5193 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5194 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5195 		else
5196 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5197 		break;
5198 	}
5199 
5200 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5201 
5202 	/* INVALID marks the end of the list. */
5203 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5204 
5205 	if (!*mods)
5206 		return -ENOMEM;
5207 
5208 	return 0;
5209 }
5210 
5211 static int
5212 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5213 					  const struct amdgpu_framebuffer *afb,
5214 					  const enum surface_pixel_format format,
5215 					  const enum dc_rotation_angle rotation,
5216 					  const struct plane_size *plane_size,
5217 					  union dc_tiling_info *tiling_info,
5218 					  struct dc_plane_dcc_param *dcc,
5219 					  struct dc_plane_address *address,
5220 					  const bool force_disable_dcc)
5221 {
5222 	const uint64_t modifier = afb->base.modifier;
5223 	int ret = 0;
5224 
5225 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5226 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5227 
5228 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
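		/* With DCC modifiers, fb plane 1 describes the DCC metadata surface. */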
5229 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5230 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5231 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5232 
5233 		dcc->enable = 1;
5234 		dcc->meta_pitch = afb->base.pitches[1];
5235 		dcc->independent_64b_blks = independent_64b_blks;
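		/*
		 * Map the modifier's independent 64B/128B block flags onto the
		 * HUBP constraint enum below; the GFX10_RBPLUS tile version
		 * distinguishes the 64B/128B combinations, older ASICs only
		 * care about the 64B flag.
		 */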
5236 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5237 			if (independent_64b_blks && independent_128b_blks)
5238 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5239 			else if (independent_128b_blks)
5240 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5241 			else if (independent_64b_blks && !independent_128b_blks)
5242 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5243 			else
5244 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5245 		} else {
5246 			if (independent_64b_blks)
5247 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5248 			else
5249 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5250 		}
5251 
5252 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5253 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5254 	}
5255 
5256 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5257 	if (ret)
5258 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5259 
5260 	return ret;
5261 }
5262 
5263 static int
5264 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5265 			     const struct amdgpu_framebuffer *afb,
5266 			     const enum surface_pixel_format format,
5267 			     const enum dc_rotation_angle rotation,
5268 			     const uint64_t tiling_flags,
5269 			     union dc_tiling_info *tiling_info,
5270 			     struct plane_size *plane_size,
5271 			     struct dc_plane_dcc_param *dcc,
5272 			     struct dc_plane_address *address,
5273 			     bool tmz_surface,
5274 			     bool force_disable_dcc)
5275 {
5276 	const struct drm_framebuffer *fb = &afb->base;
5277 	int ret;
5278 
5279 	memset(tiling_info, 0, sizeof(*tiling_info));
5280 	memset(plane_size, 0, sizeof(*plane_size));
5281 	memset(dcc, 0, sizeof(*dcc));
5282 	memset(address, 0, sizeof(*address));
5283 
5284 	address->tmz_surface = tmz_surface;
5285 
5286 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5287 		uint64_t addr = afb->address + fb->offsets[0];
5288 
5289 		plane_size->surface_size.x = 0;
5290 		plane_size->surface_size.y = 0;
5291 		plane_size->surface_size.width = fb->width;
5292 		plane_size->surface_size.height = fb->height;
5293 		plane_size->surface_pitch =
5294 			fb->pitches[0] / fb->format->cpp[0];
5295 
5296 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5297 		address->grph.addr.low_part = lower_32_bits(addr);
5298 		address->grph.addr.high_part = upper_32_bits(addr);
5299 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5300 		uint64_t luma_addr = afb->address + fb->offsets[0];
5301 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5302 
5303 		plane_size->surface_size.x = 0;
5304 		plane_size->surface_size.y = 0;
5305 		plane_size->surface_size.width = fb->width;
5306 		plane_size->surface_size.height = fb->height;
5307 		plane_size->surface_pitch =
5308 			fb->pitches[0] / fb->format->cpp[0];
5309 
5310 		plane_size->chroma_size.x = 0;
5311 		plane_size->chroma_size.y = 0;
5312 		/* TODO: set these based on surface format */
5313 		plane_size->chroma_size.width = fb->width / 2;
5314 		plane_size->chroma_size.height = fb->height / 2;
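		/* Halving assumes 4:2:0 subsampling (NV12/NV21/P010). */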
5315 
5316 		plane_size->chroma_pitch =
5317 			fb->pitches[1] / fb->format->cpp[1];
5318 
5319 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5320 		address->video_progressive.luma_addr.low_part =
5321 			lower_32_bits(luma_addr);
5322 		address->video_progressive.luma_addr.high_part =
5323 			upper_32_bits(luma_addr);
5324 		address->video_progressive.chroma_addr.low_part =
5325 			lower_32_bits(chroma_addr);
5326 		address->video_progressive.chroma_addr.high_part =
5327 			upper_32_bits(chroma_addr);
5328 	}
5329 
5330 	if (adev->family >= AMDGPU_FAMILY_AI) {
5331 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5332 								rotation, plane_size,
5333 								tiling_info, dcc,
5334 								address,
5335 								force_disable_dcc);
5336 		if (ret)
5337 			return ret;
5338 	} else {
5339 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5340 	}
5341 
5342 	return 0;
5343 }
5344 
5345 static void
5346 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5347 			       bool *per_pixel_alpha, bool *global_alpha,
5348 			       int *global_alpha_value)
5349 {
5350 	*per_pixel_alpha = false;
5351 	*global_alpha = false;
5352 	*global_alpha_value = 0xff;
5353 
5354 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5355 		return;
5356 
5357 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5358 		static const uint32_t alpha_formats[] = {
5359 			DRM_FORMAT_ARGB8888,
5360 			DRM_FORMAT_RGBA8888,
5361 			DRM_FORMAT_ABGR8888,
5362 		};
5363 		uint32_t format = plane_state->fb->format->format;
5364 		unsigned int i;
5365 
5366 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5367 			if (format == alpha_formats[i]) {
5368 				*per_pixel_alpha = true;
5369 				break;
5370 			}
5371 		}
5372 	}
5373 
5374 	if (plane_state->alpha < 0xffff) {
5375 		*global_alpha = true;
5376 		*global_alpha_value = plane_state->alpha >> 8;
5377 	}
5378 }
5379 
5380 static int
5381 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5382 			    const enum surface_pixel_format format,
5383 			    enum dc_color_space *color_space)
5384 {
5385 	bool full_range;
5386 
5387 	*color_space = COLOR_SPACE_SRGB;
5388 
5389 	/* DRM color properties only affect non-RGB formats. */
5390 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5391 		return 0;
5392 
5393 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5394 
5395 	switch (plane_state->color_encoding) {
5396 	case DRM_COLOR_YCBCR_BT601:
5397 		if (full_range)
5398 			*color_space = COLOR_SPACE_YCBCR601;
5399 		else
5400 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5401 		break;
5402 
5403 	case DRM_COLOR_YCBCR_BT709:
5404 		if (full_range)
5405 			*color_space = COLOR_SPACE_YCBCR709;
5406 		else
5407 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5408 		break;
5409 
5410 	case DRM_COLOR_YCBCR_BT2020:
5411 		if (full_range)
5412 			*color_space = COLOR_SPACE_2020_YCBCR;
5413 		else
5414 			return -EINVAL;
5415 		break;
5416 
5417 	default:
5418 		return -EINVAL;
5419 	}
5420 
5421 	return 0;
5422 }
5423 
5424 static int
5425 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5426 			    const struct drm_plane_state *plane_state,
5427 			    const uint64_t tiling_flags,
5428 			    struct dc_plane_info *plane_info,
5429 			    struct dc_plane_address *address,
5430 			    bool tmz_surface,
5431 			    bool force_disable_dcc)
5432 {
5433 	const struct drm_framebuffer *fb = plane_state->fb;
5434 	const struct amdgpu_framebuffer *afb =
5435 		to_amdgpu_framebuffer(plane_state->fb);
5436 	int ret;
5437 
5438 	memset(plane_info, 0, sizeof(*plane_info));
5439 
5440 	switch (fb->format->format) {
5441 	case DRM_FORMAT_C8:
5442 		plane_info->format =
5443 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5444 		break;
5445 	case DRM_FORMAT_RGB565:
5446 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5447 		break;
5448 	case DRM_FORMAT_XRGB8888:
5449 	case DRM_FORMAT_ARGB8888:
5450 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5451 		break;
5452 	case DRM_FORMAT_XRGB2101010:
5453 	case DRM_FORMAT_ARGB2101010:
5454 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5455 		break;
5456 	case DRM_FORMAT_XBGR2101010:
5457 	case DRM_FORMAT_ABGR2101010:
5458 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5459 		break;
5460 	case DRM_FORMAT_XBGR8888:
5461 	case DRM_FORMAT_ABGR8888:
5462 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5463 		break;
5464 	case DRM_FORMAT_NV21:
5465 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5466 		break;
5467 	case DRM_FORMAT_NV12:
5468 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5469 		break;
5470 	case DRM_FORMAT_P010:
5471 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5472 		break;
5473 	case DRM_FORMAT_XRGB16161616F:
5474 	case DRM_FORMAT_ARGB16161616F:
5475 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5476 		break;
5477 	case DRM_FORMAT_XBGR16161616F:
5478 	case DRM_FORMAT_ABGR16161616F:
5479 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5480 		break;
5481 	case DRM_FORMAT_XRGB16161616:
5482 	case DRM_FORMAT_ARGB16161616:
5483 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5484 		break;
5485 	case DRM_FORMAT_XBGR16161616:
5486 	case DRM_FORMAT_ABGR16161616:
5487 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5488 		break;
5489 	default:
5490 		DRM_ERROR(
5491 			"Unsupported screen format %p4cc\n",
5492 			&fb->format->format);
5493 		return -EINVAL;
5494 	}
5495 
5496 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5497 	case DRM_MODE_ROTATE_0:
5498 		plane_info->rotation = ROTATION_ANGLE_0;
5499 		break;
5500 	case DRM_MODE_ROTATE_90:
5501 		plane_info->rotation = ROTATION_ANGLE_90;
5502 		break;
5503 	case DRM_MODE_ROTATE_180:
5504 		plane_info->rotation = ROTATION_ANGLE_180;
5505 		break;
5506 	case DRM_MODE_ROTATE_270:
5507 		plane_info->rotation = ROTATION_ANGLE_270;
5508 		break;
5509 	default:
5510 		plane_info->rotation = ROTATION_ANGLE_0;
5511 		break;
5512 	}
5513 
5514 	plane_info->visible = true;
5515 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5516 
5517 	plane_info->layer_index = 0;
5518 
5519 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5520 					  &plane_info->color_space);
5521 	if (ret)
5522 		return ret;
5523 
5524 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5525 					   plane_info->rotation, tiling_flags,
5526 					   &plane_info->tiling_info,
5527 					   &plane_info->plane_size,
5528 					   &plane_info->dcc, address, tmz_surface,
5529 					   force_disable_dcc);
5530 	if (ret)
5531 		return ret;
5532 
5533 	fill_blending_from_plane_state(
5534 		plane_state, &plane_info->per_pixel_alpha,
5535 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5536 
5537 	return 0;
5538 }
5539 
5540 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5541 				    struct dc_plane_state *dc_plane_state,
5542 				    struct drm_plane_state *plane_state,
5543 				    struct drm_crtc_state *crtc_state)
5544 {
5545 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5546 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5547 	struct dc_scaling_info scaling_info;
5548 	struct dc_plane_info plane_info;
5549 	int ret;
5550 	bool force_disable_dcc = false;
5551 
5552 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5553 	if (ret)
5554 		return ret;
5555 
5556 	dc_plane_state->src_rect = scaling_info.src_rect;
5557 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5558 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5559 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5560 
5561 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5562 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5563 					  afb->tiling_flags,
5564 					  &plane_info,
5565 					  &dc_plane_state->address,
5566 					  afb->tmz_surface,
5567 					  force_disable_dcc);
5568 	if (ret)
5569 		return ret;
5570 
5571 	dc_plane_state->format = plane_info.format;
5572 	dc_plane_state->color_space = plane_info.color_space;
5574 	dc_plane_state->plane_size = plane_info.plane_size;
5575 	dc_plane_state->rotation = plane_info.rotation;
5576 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5577 	dc_plane_state->stereo_format = plane_info.stereo_format;
5578 	dc_plane_state->tiling_info = plane_info.tiling_info;
5579 	dc_plane_state->visible = plane_info.visible;
5580 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5581 	dc_plane_state->global_alpha = plane_info.global_alpha;
5582 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5583 	dc_plane_state->dcc = plane_info.dcc;
	/* layer_index is currently always 0, see fill_dc_plane_info_and_addr() */
	dc_plane_state->layer_index = plane_info.layer_index;
5585 	dc_plane_state->flip_int_enabled = true;
5586 
5587 	/*
5588 	 * Always set input transfer function, since plane state is refreshed
5589 	 * every time.
5590 	 */
5591 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5592 	if (ret)
5593 		return ret;
5594 
5595 	return 0;
5596 }
5597 
5598 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5599 					   const struct dm_connector_state *dm_state,
5600 					   struct dc_stream_state *stream)
5601 {
5602 	enum amdgpu_rmx_type rmx_type;
5603 
5604 	struct rect src = { 0 }; /* viewport in composition space*/
5605 	struct rect dst = { 0 }; /* stream addressable area */
5606 
5607 	/* no mode. nothing to be done */
5608 	if (!mode)
5609 		return;
5610 
5611 	/* Full screen scaling by default */
5612 	src.width = mode->hdisplay;
5613 	src.height = mode->vdisplay;
5614 	dst.width = stream->timing.h_addressable;
5615 	dst.height = stream->timing.v_addressable;
5616 
5617 	if (dm_state) {
5618 		rmx_type = dm_state->scaling;
5619 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5620 			if (src.width * dst.height <
5621 					src.height * dst.width) {
5622 				/* height needs less upscaling/more downscaling */
5623 				dst.width = src.width *
5624 						dst.height / src.height;
5625 			} else {
5626 				/* width needs less upscaling/more downscaling */
5627 				dst.height = src.height *
5628 						dst.width / src.width;
5629 			}
5630 		} else if (rmx_type == RMX_CENTER) {
5631 			dst = src;
5632 		}
5633 
5634 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5635 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5636 
5637 		if (dm_state->underscan_enable) {
5638 			dst.x += dm_state->underscan_hborder / 2;
5639 			dst.y += dm_state->underscan_vborder / 2;
5640 			dst.width -= dm_state->underscan_hborder;
5641 			dst.height -= dm_state->underscan_vborder;
5642 		}
5643 	}
5644 
5645 	stream->src = src;
5646 	stream->dst = dst;
5647 
5648 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5649 		      dst.x, dst.y, dst.width, dst.height);
5650 
5651 }
5652 
5653 static enum dc_color_depth
5654 convert_color_depth_from_display_info(const struct drm_connector *connector,
5655 				      bool is_y420, int requested_bpc)
5656 {
5657 	uint8_t bpc;
5658 
5659 	if (is_y420) {
5660 		bpc = 8;
5661 
5662 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5663 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5664 			bpc = 16;
5665 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5666 			bpc = 12;
5667 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5668 			bpc = 10;
5669 	} else {
5670 		bpc = (uint8_t)connector->display_info.bpc;
5671 		/* Assume 8 bpc by default if no bpc is specified. */
5672 		bpc = bpc ? bpc : 8;
5673 	}
5674 
5675 	if (requested_bpc > 0) {
5676 		/*
5677 		 * Cap display bpc based on the user requested value.
5678 		 *
		 * The value for state->max_bpc may not be correctly updated
5680 		 * depending on when the connector gets added to the state
5681 		 * or if this was called outside of atomic check, so it
5682 		 * can't be used directly.
5683 		 */
5684 		bpc = min_t(u8, bpc, requested_bpc);
5685 
5686 		/* Round down to the nearest even number. */
5687 		bpc = bpc - (bpc & 1);
5688 	}
5689 
5690 	switch (bpc) {
5691 	case 0:
5692 		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
5695 		 * TODO: Fix edid parsing
5696 		 */
5697 		return COLOR_DEPTH_888;
5698 	case 6:
5699 		return COLOR_DEPTH_666;
5700 	case 8:
5701 		return COLOR_DEPTH_888;
5702 	case 10:
5703 		return COLOR_DEPTH_101010;
5704 	case 12:
5705 		return COLOR_DEPTH_121212;
5706 	case 14:
5707 		return COLOR_DEPTH_141414;
5708 	case 16:
5709 		return COLOR_DEPTH_161616;
5710 	default:
5711 		return COLOR_DEPTH_UNDEFINED;
5712 	}
5713 }
5714 
5715 static enum dc_aspect_ratio
5716 get_aspect_ratio(const struct drm_display_mode *mode_in)
5717 {
5718 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5719 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5720 }
5721 
5722 static enum dc_color_space
5723 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5724 {
5725 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5726 
5727 	switch (dc_crtc_timing->pixel_encoding)	{
5728 	case PIXEL_ENCODING_YCBCR422:
5729 	case PIXEL_ENCODING_YCBCR444:
5730 	case PIXEL_ENCODING_YCBCR420:
5731 	{
5732 		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; use YCbCr709 above it and
		 * YCbCr601 below it.
5736 		 */
5737 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5738 			if (dc_crtc_timing->flags.Y_ONLY)
5739 				color_space =
5740 					COLOR_SPACE_YCBCR709_LIMITED;
5741 			else
5742 				color_space = COLOR_SPACE_YCBCR709;
5743 		} else {
5744 			if (dc_crtc_timing->flags.Y_ONLY)
5745 				color_space =
5746 					COLOR_SPACE_YCBCR601_LIMITED;
5747 			else
5748 				color_space = COLOR_SPACE_YCBCR601;
5749 		}
5750 
5751 	}
5752 	break;
5753 	case PIXEL_ENCODING_RGB:
5754 		color_space = COLOR_SPACE_SRGB;
5755 		break;
5756 
5757 	default:
5758 		WARN_ON(1);
5759 		break;
5760 	}
5761 
5762 	return color_space;
5763 }
5764 
5765 static bool adjust_colour_depth_from_display_info(
5766 	struct dc_crtc_timing *timing_out,
5767 	const struct drm_display_info *info)
5768 {
5769 	enum dc_color_depth depth = timing_out->display_color_depth;
5770 	int normalized_clk;
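	/*
	 * Illustrative example (values assumed): a 4k60 RGB timing has
	 * pix_clk_100hz = 5940000, i.e. normalized_clk = 594000 kHz. At
	 * 10 bpc that becomes 594000 * 30 / 24 = 742500 kHz, which exceeds a
	 * 600000 kHz max TMDS clock, so the loop falls back to
	 * COLOR_DEPTH_888 (594000 <= 600000).
	 */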
5771 	do {
5772 		normalized_clk = timing_out->pix_clk_100hz / 10;
5773 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5774 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5775 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on colour depth */
5777 		switch (depth) {
5778 		case COLOR_DEPTH_888:
5779 			break;
5780 		case COLOR_DEPTH_101010:
5781 			normalized_clk = (normalized_clk * 30) / 24;
5782 			break;
5783 		case COLOR_DEPTH_121212:
5784 			normalized_clk = (normalized_clk * 36) / 24;
5785 			break;
5786 		case COLOR_DEPTH_161616:
5787 			normalized_clk = (normalized_clk * 48) / 24;
5788 			break;
5789 		default:
5790 			/* The above depths are the only ones valid for HDMI. */
5791 			return false;
5792 		}
5793 		if (normalized_clk <= info->max_tmds_clock) {
5794 			timing_out->display_color_depth = depth;
5795 			return true;
5796 		}
5797 	} while (--depth > COLOR_DEPTH_666);
5798 	return false;
5799 }
5800 
5801 static void fill_stream_properties_from_drm_display_mode(
5802 	struct dc_stream_state *stream,
5803 	const struct drm_display_mode *mode_in,
5804 	const struct drm_connector *connector,
5805 	const struct drm_connector_state *connector_state,
5806 	const struct dc_stream_state *old_stream,
5807 	int requested_bpc)
5808 {
5809 	struct dc_crtc_timing *timing_out = &stream->timing;
5810 	const struct drm_display_info *info = &connector->display_info;
5811 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5812 	struct hdmi_vendor_infoframe hv_frame;
5813 	struct hdmi_avi_infoframe avi_frame;
5814 
5815 	memset(&hv_frame, 0, sizeof(hv_frame));
5816 	memset(&avi_frame, 0, sizeof(avi_frame));
5817 
5818 	timing_out->h_border_left = 0;
5819 	timing_out->h_border_right = 0;
5820 	timing_out->v_border_top = 0;
5821 	timing_out->v_border_bottom = 0;
5822 	/* TODO: un-hardcode */
5823 	if (drm_mode_is_420_only(info, mode_in)
5824 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5825 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5826 	else if (drm_mode_is_420_also(info, mode_in)
5827 			&& aconnector->force_yuv420_output)
5828 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5829 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5830 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5831 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5832 	else
5833 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5834 
5835 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5836 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5837 		connector,
5838 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5839 		requested_bpc);
5840 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5841 	timing_out->hdmi_vic = 0;
5842 
	if (old_stream) {
5844 		timing_out->vic = old_stream->timing.vic;
5845 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5846 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5847 	} else {
5848 		timing_out->vic = drm_match_cea_mode(mode_in);
5849 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5850 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5851 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5852 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5853 	}
5854 
5855 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5856 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5857 		timing_out->vic = avi_frame.video_code;
5858 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5859 		timing_out->hdmi_vic = hv_frame.vic;
5860 	}
5861 
5862 	if (is_freesync_video_mode(mode_in, aconnector)) {
5863 		timing_out->h_addressable = mode_in->hdisplay;
5864 		timing_out->h_total = mode_in->htotal;
5865 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5866 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5867 		timing_out->v_total = mode_in->vtotal;
5868 		timing_out->v_addressable = mode_in->vdisplay;
5869 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5870 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5871 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5872 	} else {
5873 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5874 		timing_out->h_total = mode_in->crtc_htotal;
5875 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5876 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5877 		timing_out->v_total = mode_in->crtc_vtotal;
5878 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5879 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5880 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5881 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5882 	}
5883 
5884 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5885 
5886 	stream->output_color_space = get_output_color_space(timing_out);
5887 
5888 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5889 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5890 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5891 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5892 		    drm_mode_is_420_also(info, mode_in) &&
5893 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5894 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5895 			adjust_colour_depth_from_display_info(timing_out, info);
5896 		}
5897 	}
5898 }
5899 
5900 static void fill_audio_info(struct audio_info *audio_info,
5901 			    const struct drm_connector *drm_connector,
5902 			    const struct dc_sink *dc_sink)
5903 {
5904 	int i = 0;
5905 	int cea_revision = 0;
5906 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5907 
5908 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5909 	audio_info->product_id = edid_caps->product_id;
5910 
5911 	cea_revision = drm_connector->display_info.cea_rev;
5912 
5913 	strscpy(audio_info->display_name,
5914 		edid_caps->display_name,
5915 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5916 
5917 	if (cea_revision >= 3) {
5918 		audio_info->mode_count = edid_caps->audio_mode_count;
5919 
5920 		for (i = 0; i < audio_info->mode_count; ++i) {
5921 			audio_info->modes[i].format_code =
5922 					(enum audio_format_code)
5923 					(edid_caps->audio_modes[i].format_code);
5924 			audio_info->modes[i].channel_count =
5925 					edid_caps->audio_modes[i].channel_count;
5926 			audio_info->modes[i].sample_rates.all =
5927 					edid_caps->audio_modes[i].sample_rate;
5928 			audio_info->modes[i].sample_size =
5929 					edid_caps->audio_modes[i].sample_size;
5930 		}
5931 	}
5932 
5933 	audio_info->flags.all = edid_caps->speaker_flags;
5934 
	/* TODO: We only check for progressive mode; check for interlaced mode too */
5936 	if (drm_connector->latency_present[0]) {
5937 		audio_info->video_latency = drm_connector->video_latency[0];
5938 		audio_info->audio_latency = drm_connector->audio_latency[0];
5939 	}
5940 
5941 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5942 
5943 }
5944 
5945 static void
5946 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5947 				      struct drm_display_mode *dst_mode)
5948 {
5949 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5950 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5951 	dst_mode->crtc_clock = src_mode->crtc_clock;
5952 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5953 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5954 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5955 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5956 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5957 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5958 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5959 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5960 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5961 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5962 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5963 }
5964 
5965 static void
5966 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5967 					const struct drm_display_mode *native_mode,
5968 					bool scale_enabled)
5969 {
5970 	if (scale_enabled) {
5971 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5972 	} else if (native_mode->clock == drm_mode->clock &&
5973 			native_mode->htotal == drm_mode->htotal &&
5974 			native_mode->vtotal == drm_mode->vtotal) {
5975 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5976 	} else {
		/* no scaling and no amdgpu-inserted mode, no need to patch */
5978 	}
5979 }
5980 
5981 static struct dc_sink *
5982 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5983 {
5984 	struct dc_sink_init_data sink_init_data = { 0 };
5985 	struct dc_sink *sink = NULL;
5986 	sink_init_data.link = aconnector->dc_link;
5987 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5988 
5989 	sink = dc_sink_create(&sink_init_data);
5990 	if (!sink) {
5991 		DRM_ERROR("Failed to create sink!\n");
5992 		return NULL;
5993 	}
5994 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5995 
5996 	return sink;
5997 }
5998 
5999 static void set_multisync_trigger_params(
6000 		struct dc_stream_state *stream)
6001 {
6002 	struct dc_stream_state *master = NULL;
6003 
6004 	if (stream->triggered_crtc_reset.enabled) {
6005 		master = stream->triggered_crtc_reset.event_source;
6006 		stream->triggered_crtc_reset.event =
6007 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6008 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6009 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6010 	}
6011 }
6012 
6013 static void set_master_stream(struct dc_stream_state *stream_set[],
6014 			      int stream_count)
6015 {
6016 	int j, highest_rfr = 0, master_stream = 0;
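	/*
	 * Illustrative: a 1080p60 CEA timing (pix_clk_100hz = 1485000,
	 * h_total = 2200, v_total = 1125) yields
	 * (1485000 * 100) / (2200 * 1125) = 60, so it would be picked as
	 * master over any slower stream.
	 */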
6017 
6018 	for (j = 0;  j < stream_count; j++) {
6019 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6020 			int refresh_rate = 0;
6021 
6022 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6023 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6024 			if (refresh_rate > highest_rfr) {
6025 				highest_rfr = refresh_rate;
6026 				master_stream = j;
6027 			}
6028 		}
6029 	}
6030 	for (j = 0;  j < stream_count; j++) {
6031 		if (stream_set[j])
6032 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6033 	}
6034 }
6035 
6036 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6037 {
6038 	int i = 0;
6039 	struct dc_stream_state *stream;
6040 
6041 	if (context->stream_count < 2)
6042 		return;
6043 	for (i = 0; i < context->stream_count ; i++) {
6044 		if (!context->streams[i])
6045 			continue;
6046 		/*
6047 		 * TODO: add a function to read AMD VSDB bits and set
6048 		 * crtc_sync_master.multi_sync_enabled flag
6049 		 * For now it's set to false
6050 		 */
6051 	}
6052 
6053 	set_master_stream(context->streams, context->stream_count);
6054 
6055 	for (i = 0; i < context->stream_count ; i++) {
6056 		stream = context->streams[i];
6057 
6058 		if (!stream)
6059 			continue;
6060 
6061 		set_multisync_trigger_params(stream);
6062 	}
6063 }
6064 
6065 #if defined(CONFIG_DRM_AMD_DC_DCN)
6066 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6067 							struct dc_sink *sink, struct dc_stream_state *stream,
6068 							struct dsc_dec_dpcd_caps *dsc_caps)
6069 {
6070 	stream->timing.flags.DSC = 0;
6071 
6072 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6073 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6074 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6075 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6076 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6077 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6078 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6079 				dsc_caps);
6080 	}
6081 }
6082 
6083 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6084 				    struct dc_sink *sink, struct dc_stream_state *stream,
6085 				    struct dsc_dec_dpcd_caps *dsc_caps,
6086 				    uint32_t max_dsc_target_bpp_limit_override)
6087 {
6088 	const struct dc_link_settings *verified_link_cap = NULL;
6089 	uint32_t link_bw_in_kbps;
6090 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6091 	struct dc *dc = sink->ctx->dc;
6092 	struct dc_dsc_bw_range bw_range = {0};
6093 	struct dc_dsc_config dsc_cfg = {0};
6094 
6095 	verified_link_cap = dc_link_get_link_cap(stream->link);
6096 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
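	/* bpp limits below are in 1/16 bpp units, so 8 * 16 means 8.0 bpp. */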
6097 	edp_min_bpp_x16 = 8 * 16;
6098 	edp_max_bpp_x16 = 8 * 16;
6099 
6100 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6101 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6102 
6103 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6104 		edp_min_bpp_x16 = edp_max_bpp_x16;
6105 
6106 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6107 				dc->debug.dsc_min_slice_height_override,
6108 				edp_min_bpp_x16, edp_max_bpp_x16,
6109 				dsc_caps,
6110 				&stream->timing,
6111 				&bw_range)) {
6112 
6113 		if (bw_range.max_kbps < link_bw_in_kbps) {
6114 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6115 					dsc_caps,
6116 					dc->debug.dsc_min_slice_height_override,
6117 					max_dsc_target_bpp_limit_override,
6118 					0,
6119 					&stream->timing,
6120 					&dsc_cfg)) {
6121 				stream->timing.dsc_cfg = dsc_cfg;
6122 				stream->timing.flags.DSC = 1;
6123 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6124 			}
6125 			return;
6126 		}
6127 	}
6128 
6129 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6130 				dsc_caps,
6131 				dc->debug.dsc_min_slice_height_override,
6132 				max_dsc_target_bpp_limit_override,
6133 				link_bw_in_kbps,
6134 				&stream->timing,
6135 				&dsc_cfg)) {
6136 		stream->timing.dsc_cfg = dsc_cfg;
6137 		stream->timing.flags.DSC = 1;
6138 	}
6139 }
6140 
6141 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6142 										struct dc_sink *sink, struct dc_stream_state *stream,
6143 										struct dsc_dec_dpcd_caps *dsc_caps)
6144 {
6145 	struct drm_connector *drm_connector = &aconnector->base;
6146 	uint32_t link_bandwidth_kbps;
6147 	uint32_t max_dsc_target_bpp_limit_override = 0;
6148 	struct dc *dc = sink->ctx->dc;
6149 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6150 	uint32_t dsc_max_supported_bw_in_kbps;
6151 
6152 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6153 							dc_link_get_link_cap(aconnector->dc_link));
6154 
6155 	if (stream->link && stream->link->local_sink)
6156 		max_dsc_target_bpp_limit_override =
6157 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6158 
6159 	/* Set DSC policy according to dsc_clock_en */
6160 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6161 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6162 
6163 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6164 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6165 
6166 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6167 
6168 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6169 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6170 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6171 						dsc_caps,
6172 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6173 						max_dsc_target_bpp_limit_override,
6174 						link_bandwidth_kbps,
6175 						&stream->timing,
6176 						&stream->timing.dsc_cfg)) {
6177 				stream->timing.flags.DSC = 1;
6178 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6179 								 __func__, drm_connector->name);
6180 			}
6181 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6182 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6183 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6184 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6185 
6186 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6187 					max_supported_bw_in_kbps > 0 &&
6188 					dsc_max_supported_bw_in_kbps > 0)
6189 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6190 						dsc_caps,
6191 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6192 						max_dsc_target_bpp_limit_override,
6193 						dsc_max_supported_bw_in_kbps,
6194 						&stream->timing,
6195 						&stream->timing.dsc_cfg)) {
6196 					stream->timing.flags.DSC = 1;
6197 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6198 									 __func__, drm_connector->name);
6199 				}
6200 		}
6201 	}
6202 
6203 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6204 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6205 		stream->timing.flags.DSC = 1;
6206 
6207 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6208 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6209 
6210 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6211 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6212 
6213 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6214 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6215 }
6216 #endif /* CONFIG_DRM_AMD_DC_DCN */
6217 
6218 /**
6219  * DOC: FreeSync Video
6220  *
6221  * When a userspace application wants to play a video, the content follows a
6222  * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS,
 * respectively:
6225  *
6226  * - TV/NTSC (23.976 FPS)
6227  * - Cinema (24 FPS)
6228  * - TV/PAL (25 FPS)
6229  * - TV/NTSC (29.97 FPS)
6230  * - TV/NTSC (30 FPS)
6231  * - Cinema HFR (48 FPS)
6232  * - TV/PAL (50 FPS)
6233  * - Commonly used (60 FPS)
6234  * - Multiples of 24 (48,72,96,120 FPS)
6235  *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid any blanking during
 * the transition. For example, a video player can change the modesetting
 * from 60Hz to 30Hz for playing TV/NTSC content when it goes full screen
 * without causing any display blink. The same concept applies to any other
 * modesetting change that only differs in refresh rate.
6247  */
6248 static struct drm_display_mode *
6249 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6250 			  bool use_probed_modes)
6251 {
6252 	struct drm_display_mode *m, *m_pref = NULL;
6253 	u16 current_refresh, highest_refresh;
6254 	struct list_head *list_head = use_probed_modes ?
6255 						    &aconnector->base.probed_modes :
6256 						    &aconnector->base.modes;
6257 
6258 	if (aconnector->freesync_vid_base.clock != 0)
6259 		return &aconnector->freesync_vid_base;
6260 
6261 	/* Find the preferred mode */
6262 	list_for_each_entry (m, list_head, head) {
6263 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6264 			m_pref = m;
6265 			break;
6266 		}
6267 	}
6268 
6269 	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6271 		m_pref = list_first_entry_or_null(
6272 			&aconnector->base.modes, struct drm_display_mode, head);
6273 		if (!m_pref) {
6274 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6275 			return NULL;
6276 		}
6277 	}
6278 
6279 	highest_refresh = drm_mode_vrefresh(m_pref);
6280 
6281 	/*
6282 	 * Find the mode with highest refresh rate with same resolution.
6283 	 * For some monitors, preferred mode is not the mode with highest
6284 	 * supported refresh rate.
6285 	 */
6286 	list_for_each_entry (m, list_head, head) {
6287 		current_refresh  = drm_mode_vrefresh(m);
6288 
6289 		if (m->hdisplay == m_pref->hdisplay &&
6290 		    m->vdisplay == m_pref->vdisplay &&
6291 		    highest_refresh < current_refresh) {
6292 			highest_refresh = current_refresh;
6293 			m_pref = m;
6294 		}
6295 	}
6296 
6297 	aconnector->freesync_vid_base = *m_pref;
6298 	return m_pref;
6299 }
6300 
6301 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6302 				   struct amdgpu_dm_connector *aconnector)
6303 {
6304 	struct drm_display_mode *high_mode;
6305 	int timing_diff;
6306 
6307 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6308 	if (!high_mode || !mode)
6309 		return false;
6310 
6311 	timing_diff = high_mode->vtotal - mode->vtotal;
6312 
6313 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6314 	    high_mode->hdisplay != mode->hdisplay ||
6315 	    high_mode->vdisplay != mode->vdisplay ||
6316 	    high_mode->hsync_start != mode->hsync_start ||
6317 	    high_mode->hsync_end != mode->hsync_end ||
6318 	    high_mode->htotal != mode->htotal ||
6319 	    high_mode->hskew != mode->hskew ||
6320 	    high_mode->vscan != mode->vscan ||
6321 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6322 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6323 		return false;
6324 	else
6325 		return true;
6326 }
6327 
6328 static struct dc_stream_state *
6329 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6330 		       const struct drm_display_mode *drm_mode,
6331 		       const struct dm_connector_state *dm_state,
6332 		       const struct dc_stream_state *old_stream,
6333 		       int requested_bpc)
6334 {
6335 	struct drm_display_mode *preferred_mode = NULL;
6336 	struct drm_connector *drm_connector;
6337 	const struct drm_connector_state *con_state =
6338 		dm_state ? &dm_state->base : NULL;
6339 	struct dc_stream_state *stream = NULL;
6340 	struct drm_display_mode mode = *drm_mode;
6341 	struct drm_display_mode saved_mode;
6342 	struct drm_display_mode *freesync_mode = NULL;
6343 	bool native_mode_found = false;
6344 	bool recalculate_timing = false;
6345 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6346 	int mode_refresh;
6347 	int preferred_refresh = 0;
6348 #if defined(CONFIG_DRM_AMD_DC_DCN)
6349 	struct dsc_dec_dpcd_caps dsc_caps;
6350 #endif
6351 	struct dc_sink *sink = NULL;
6352 
6353 	memset(&saved_mode, 0, sizeof(saved_mode));
6354 
6355 	if (aconnector == NULL) {
6356 		DRM_ERROR("aconnector is NULL!\n");
6357 		return stream;
6358 	}
6359 
6360 	drm_connector = &aconnector->base;
6361 
6362 	if (!aconnector->dc_sink) {
6363 		sink = create_fake_sink(aconnector);
6364 		if (!sink)
6365 			return stream;
6366 	} else {
6367 		sink = aconnector->dc_sink;
6368 		dc_sink_retain(sink);
6369 	}
6370 
6371 	stream = dc_create_stream_for_sink(sink);
6372 
6373 	if (stream == NULL) {
6374 		DRM_ERROR("Failed to create stream for sink!\n");
6375 		goto finish;
6376 	}
6377 
6378 	stream->dm_stream_context = aconnector;
6379 
6380 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6381 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6382 
6383 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6384 		/* Search for preferred mode */
6385 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6386 			native_mode_found = true;
6387 			break;
6388 		}
6389 	}
6390 	if (!native_mode_found)
6391 		preferred_mode = list_first_entry_or_null(
6392 				&aconnector->base.modes,
6393 				struct drm_display_mode,
6394 				head);
6395 
6396 	mode_refresh = drm_mode_vrefresh(&mode);
6397 
6398 	if (preferred_mode == NULL) {
6399 		/*
		 * This may not be an error: the use case is when there are no
		 * usermode calls to reset and set the mode upon hotplug. In
		 * that case we set the mode ourselves to restore the previous
		 * mode, and the mode list may not be filled in yet.
6404 		 */
6405 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6406 	} else {
6407 		recalculate_timing = amdgpu_freesync_vid_mode &&
6408 				 is_freesync_video_mode(&mode, aconnector);
6409 		if (recalculate_timing) {
6410 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6411 			saved_mode = mode;
6412 			mode = *freesync_mode;
6413 		} else {
6414 			decide_crtc_timing_for_drm_display_mode(
6415 				&mode, preferred_mode, scale);
6416 
6417 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6418 		}
6419 	}
6420 
6421 	if (recalculate_timing)
6422 		drm_mode_set_crtcinfo(&saved_mode, 0);
6423 	else if (!dm_state)
6424 		drm_mode_set_crtcinfo(&mode, 0);
6425 
6426        /*
6427 	* If scaling is enabled and refresh rate didn't change
6428 	* we copy the vic and polarities of the old timings
6429 	*/
6430 	if (!scale || mode_refresh != preferred_refresh)
6431 		fill_stream_properties_from_drm_display_mode(
6432 			stream, &mode, &aconnector->base, con_state, NULL,
6433 			requested_bpc);
6434 	else
6435 		fill_stream_properties_from_drm_display_mode(
6436 			stream, &mode, &aconnector->base, con_state, old_stream,
6437 			requested_bpc);
6438 
6439 #if defined(CONFIG_DRM_AMD_DC_DCN)
6440 	/* SST DSC determination policy */
6441 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6442 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6443 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6444 #endif
6445 
6446 	update_stream_scaling_settings(&mode, dm_state, stream);
6447 
6448 	fill_audio_info(
6449 		&stream->audio_info,
6450 		drm_connector,
6451 		sink);
6452 
6453 	update_stream_signal(stream, sink);
6454 
6455 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6456 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6457 
6458 	if (stream->link->psr_settings.psr_feature_enabled) {
6459 		//
6460 		// should decide stream support vsc sdp colorimetry capability
6461 		// before building vsc info packet
6462 		//
6463 		stream->use_vsc_sdp_for_colorimetry = false;
6464 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6465 			stream->use_vsc_sdp_for_colorimetry =
6466 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6467 		} else {
6468 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6469 				stream->use_vsc_sdp_for_colorimetry = true;
6470 		}
6471 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6472 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6473 
6474 	}
6475 finish:
6476 	dc_sink_release(sink);
6477 
6478 	return stream;
6479 }
6480 
6481 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6482 {
6483 	drm_crtc_cleanup(crtc);
6484 	kfree(crtc);
6485 }
6486 
6487 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6488 				  struct drm_crtc_state *state)
6489 {
6490 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6491 
	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6493 	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
6501 }
6502 
6503 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6504 {
6505 	struct dm_crtc_state *state;
6506 
6507 	if (crtc->state)
6508 		dm_crtc_destroy_state(crtc, crtc->state);
6509 
6510 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6511 	if (WARN_ON(!state))
6512 		return;
6513 
6514 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6515 }
6516 
6517 static struct drm_crtc_state *
6518 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6519 {
6520 	struct dm_crtc_state *state, *cur;
6521 
	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);
6526 
6527 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6528 	if (!state)
6529 		return NULL;
6530 
6531 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6532 
6533 	if (cur->stream) {
6534 		state->stream = cur->stream;
6535 		dc_stream_retain(state->stream);
6536 	}
6537 
6538 	state->active_planes = cur->active_planes;
6539 	state->vrr_infopacket = cur->vrr_infopacket;
6540 	state->abm_level = cur->abm_level;
6541 	state->vrr_supported = cur->vrr_supported;
6542 	state->freesync_config = cur->freesync_config;
6543 	state->cm_has_degamma = cur->cm_has_degamma;
6544 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6545 	state->force_dpms_off = cur->force_dpms_off;
	/* TODO: Duplicate dc_stream once the stream object is flattened */
6547 
6548 	return &state->base;
6549 }
6550 
6551 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6552 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6553 {
6554 	crtc_debugfs_init(crtc);
6555 
6556 	return 0;
6557 }
6558 #endif
6559 
6560 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6561 {
6562 	enum dc_irq_source irq_source;
6563 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6564 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6565 	int rc;
6566 
6567 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6568 
6569 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6570 
6571 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6572 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6573 	return rc;
6574 }
6575 
6576 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6577 {
6578 	enum dc_irq_source irq_source;
6579 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6580 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6581 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6582 #if defined(CONFIG_DRM_AMD_DC_DCN)
6583 	struct amdgpu_display_manager *dm = &adev->dm;
6584 	struct vblank_control_work *work;
6585 #endif
6586 	int rc = 0;
6587 
6588 	if (enable) {
6589 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6590 		if (amdgpu_dm_vrr_active(acrtc_state))
6591 			rc = dm_set_vupdate_irq(crtc, true);
6592 	} else {
6593 		/* vblank irq off -> vupdate irq off */
6594 		rc = dm_set_vupdate_irq(crtc, false);
6595 	}
6596 
6597 	if (rc)
6598 		return rc;
6599 
6600 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6601 
6602 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6603 		return -EBUSY;
6604 
6605 	if (amdgpu_in_reset(adev))
6606 		return 0;
6607 
6608 #if defined(CONFIG_DRM_AMD_DC_DCN)
6609 	if (dm->vblank_control_workqueue) {
6610 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6611 		if (!work)
6612 			return -ENOMEM;
6613 
6614 		INIT_WORK(&work->work, vblank_control_worker);
6615 		work->dm = dm;
6616 		work->acrtc = acrtc;
6617 		work->enable = enable;
6618 
6619 		if (acrtc_state->stream) {
6620 			dc_stream_retain(acrtc_state->stream);
6621 			work->stream = acrtc_state->stream;
6622 		}
6623 
6624 		queue_work(dm->vblank_control_workqueue, &work->work);
6625 	}
6626 #endif
6627 
6628 	return 0;
6629 }
6630 
6631 static int dm_enable_vblank(struct drm_crtc *crtc)
6632 {
6633 	return dm_set_vblank(crtc, true);
6634 }
6635 
6636 static void dm_disable_vblank(struct drm_crtc *crtc)
6637 {
6638 	dm_set_vblank(crtc, false);
6639 }
6640 
/* Implemented only the options currently available for the driver */
6642 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6643 	.reset = dm_crtc_reset_state,
6644 	.destroy = amdgpu_dm_crtc_destroy,
6645 	.set_config = drm_atomic_helper_set_config,
6646 	.page_flip = drm_atomic_helper_page_flip,
6647 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6648 	.atomic_destroy_state = dm_crtc_destroy_state,
6649 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6650 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6651 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6652 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6653 	.enable_vblank = dm_enable_vblank,
6654 	.disable_vblank = dm_disable_vblank,
6655 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6656 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6657 	.late_register = amdgpu_dm_crtc_late_register,
6658 #endif
6659 };
6660 
6661 static enum drm_connector_status
6662 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6663 {
6664 	bool connected;
6665 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6666 
6667 	/*
6668 	 * Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
6672 	 */
6673 
6674 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6675 	    !aconnector->fake_enable)
6676 		connected = (aconnector->dc_sink != NULL);
6677 	else
6678 		connected = (aconnector->base.force == DRM_FORCE_ON);
6679 
6680 	update_subconnector_property(aconnector);
6681 
6682 	return (connected ? connector_status_connected :
6683 			connector_status_disconnected);
6684 }
6685 
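/*
 * Store the driver-specific connector properties (scaling mode, underscan
 * borders and enable, ABM level) in the DM connector state.
 */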
6686 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6687 					    struct drm_connector_state *connector_state,
6688 					    struct drm_property *property,
6689 					    uint64_t val)
6690 {
6691 	struct drm_device *dev = connector->dev;
6692 	struct amdgpu_device *adev = drm_to_adev(dev);
6693 	struct dm_connector_state *dm_old_state =
6694 		to_dm_connector_state(connector->state);
6695 	struct dm_connector_state *dm_new_state =
6696 		to_dm_connector_state(connector_state);
6697 
6698 	int ret = -EINVAL;
6699 
6700 	if (property == dev->mode_config.scaling_mode_property) {
6701 		enum amdgpu_rmx_type rmx_type;
6702 
6703 		switch (val) {
6704 		case DRM_MODE_SCALE_CENTER:
6705 			rmx_type = RMX_CENTER;
6706 			break;
6707 		case DRM_MODE_SCALE_ASPECT:
6708 			rmx_type = RMX_ASPECT;
6709 			break;
6710 		case DRM_MODE_SCALE_FULLSCREEN:
6711 			rmx_type = RMX_FULL;
6712 			break;
6713 		case DRM_MODE_SCALE_NONE:
6714 		default:
6715 			rmx_type = RMX_OFF;
6716 			break;
6717 		}
6718 
6719 		if (dm_old_state->scaling == rmx_type)
6720 			return 0;
6721 
6722 		dm_new_state->scaling = rmx_type;
6723 		ret = 0;
6724 	} else if (property == adev->mode_info.underscan_hborder_property) {
6725 		dm_new_state->underscan_hborder = val;
6726 		ret = 0;
6727 	} else if (property == adev->mode_info.underscan_vborder_property) {
6728 		dm_new_state->underscan_vborder = val;
6729 		ret = 0;
6730 	} else if (property == adev->mode_info.underscan_property) {
6731 		dm_new_state->underscan_enable = val;
6732 		ret = 0;
6733 	} else if (property == adev->mode_info.abm_level_property) {
6734 		dm_new_state->abm_level = val;
6735 		ret = 0;
6736 	}
6737 
6738 	return ret;
6739 }
6740 
6741 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6742 					    const struct drm_connector_state *state,
6743 					    struct drm_property *property,
6744 					    uint64_t *val)
6745 {
6746 	struct drm_device *dev = connector->dev;
6747 	struct amdgpu_device *adev = drm_to_adev(dev);
6748 	struct dm_connector_state *dm_state =
6749 		to_dm_connector_state(state);
6750 	int ret = -EINVAL;
6751 
6752 	if (property == dev->mode_config.scaling_mode_property) {
6753 		switch (dm_state->scaling) {
6754 		case RMX_CENTER:
6755 			*val = DRM_MODE_SCALE_CENTER;
6756 			break;
6757 		case RMX_ASPECT:
6758 			*val = DRM_MODE_SCALE_ASPECT;
6759 			break;
6760 		case RMX_FULL:
6761 			*val = DRM_MODE_SCALE_FULLSCREEN;
6762 			break;
6763 		case RMX_OFF:
6764 		default:
6765 			*val = DRM_MODE_SCALE_NONE;
6766 			break;
6767 		}
6768 		ret = 0;
6769 	} else if (property == adev->mode_info.underscan_hborder_property) {
6770 		*val = dm_state->underscan_hborder;
6771 		ret = 0;
6772 	} else if (property == adev->mode_info.underscan_vborder_property) {
6773 		*val = dm_state->underscan_vborder;
6774 		ret = 0;
6775 	} else if (property == adev->mode_info.underscan_property) {
6776 		*val = dm_state->underscan_enable;
6777 		ret = 0;
6778 	} else if (property == adev->mode_info.abm_level_property) {
6779 		*val = dm_state->abm_level;
6780 		ret = 0;
6781 	}
6782 
6783 	return ret;
6784 }
6785 
6786 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6787 {
6788 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6789 
6790 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6791 }
6792 
6793 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6794 {
6795 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6796 	const struct dc_link *link = aconnector->dc_link;
6797 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6798 	struct amdgpu_display_manager *dm = &adev->dm;
6799 	int i;
6800 
6801 	/*
	 * Call only if mst_mgr was initialized before, since it's not done
6803 	 * for all connector types.
6804 	 */
6805 	if (aconnector->mst_mgr.dev)
6806 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6807 
6808 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6809 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6810 	for (i = 0; i < dm->num_of_edps; i++) {
6811 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6812 			backlight_device_unregister(dm->backlight_dev[i]);
6813 			dm->backlight_dev[i] = NULL;
6814 		}
6815 	}
6816 #endif
6817 
6818 	if (aconnector->dc_em_sink)
6819 		dc_sink_release(aconnector->dc_em_sink);
6820 	aconnector->dc_em_sink = NULL;
6821 	if (aconnector->dc_sink)
6822 		dc_sink_release(aconnector->dc_sink);
6823 	aconnector->dc_sink = NULL;
6824 
6825 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6826 	drm_connector_unregister(connector);
6827 	drm_connector_cleanup(connector);
6828 	if (aconnector->i2c) {
6829 		i2c_del_adapter(&aconnector->i2c->base);
6830 		kfree(aconnector->i2c);
6831 	}
6832 	kfree(aconnector->dm_dp_aux.aux.name);
6833 
6834 	kfree(connector);
6835 }
6836 
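/*
 * Free the current connector state and replace it with freshly allocated
 * defaults: scaling off, underscan disabled, 8 bpc max and, for eDP, the
 * configured ABM level.
 */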
6837 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6838 {
6839 	struct dm_connector_state *state =
6840 		to_dm_connector_state(connector->state);
6841 
6842 	if (connector->state)
6843 		__drm_atomic_helper_connector_destroy_state(connector->state);
6844 
6845 	kfree(state);
6846 
6847 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6848 
6849 	if (state) {
6850 		state->scaling = RMX_OFF;
6851 		state->underscan_enable = false;
6852 		state->underscan_hborder = 0;
6853 		state->underscan_vborder = 0;
6854 		state->base.max_requested_bpc = 8;
6855 		state->vcpi_slots = 0;
6856 		state->pbn = 0;
6857 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6858 			state->abm_level = amdgpu_dm_abm_level;
6859 
6860 		__drm_atomic_helper_connector_reset(connector, &state->base);
6861 	}
6862 }
6863 
6864 struct drm_connector_state *
6865 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6866 {
6867 	struct dm_connector_state *state =
6868 		to_dm_connector_state(connector->state);
6869 
6870 	struct dm_connector_state *new_state =
6871 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6872 
6873 	if (!new_state)
6874 		return NULL;
6875 
6876 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6877 
6878 	new_state->freesync_capable = state->freesync_capable;
6879 	new_state->abm_level = state->abm_level;
6880 	new_state->scaling = state->scaling;
6881 	new_state->underscan_enable = state->underscan_enable;
6882 	new_state->underscan_hborder = state->underscan_hborder;
6883 	new_state->underscan_vborder = state->underscan_vborder;
6884 	new_state->vcpi_slots = state->vcpi_slots;
6885 	new_state->pbn = state->pbn;
6886 	return &new_state->base;
6887 }
6888 
6889 static int
6890 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6891 {
6892 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6893 		to_amdgpu_dm_connector(connector);
6894 	int r;
6895 
6896 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6897 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6898 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6899 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6900 		if (r)
6901 			return r;
6902 	}
6903 
6904 #if defined(CONFIG_DEBUG_FS)
6905 	connector_debugfs_init(amdgpu_dm_connector);
6906 #endif
6907 
6908 	return 0;
6909 }
6910 
6911 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6912 	.reset = amdgpu_dm_connector_funcs_reset,
6913 	.detect = amdgpu_dm_connector_detect,
6914 	.fill_modes = drm_helper_probe_single_connector_modes,
6915 	.destroy = amdgpu_dm_connector_destroy,
6916 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6917 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6918 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6919 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6920 	.late_register = amdgpu_dm_connector_late_register,
6921 	.early_unregister = amdgpu_dm_connector_unregister
6922 };
6923 
6924 static int get_modes(struct drm_connector *connector)
6925 {
6926 	return amdgpu_dm_connector_get_modes(connector);
6927 }
6928 
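/*
 * Create an emulated (virtual) DC sink from the EDID blob attached to the
 * connector, so a remote sink exists even without a physically detected
 * display. If no EDID blob is present, the connector is forced OFF instead.
 */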
6929 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6930 {
6931 	struct dc_sink_init_data init_params = {
6932 			.link = aconnector->dc_link,
6933 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6934 	};
6935 	struct edid *edid;
6936 
6937 	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6939 				aconnector->base.name);
6940 
6941 		aconnector->base.force = DRM_FORCE_OFF;
6942 		aconnector->base.override_edid = false;
6943 		return;
6944 	}
6945 
6946 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6947 
6948 	aconnector->edid = edid;
6949 
6950 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6951 		aconnector->dc_link,
6952 		(uint8_t *)edid,
6953 		(edid->extensions + 1) * EDID_LENGTH,
6954 		&init_params);
6955 
6956 	if (aconnector->base.force == DRM_FORCE_ON) {
6957 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6958 		aconnector->dc_link->local_sink :
6959 		aconnector->dc_em_sink;
6960 		dc_sink_retain(aconnector->dc_sink);
6961 	}
6962 }
6963 
6964 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6965 {
6966 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6967 
6968 	/*
	 * In case of a headless boot with force-on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
6971 	 */
6972 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6973 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6974 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6975 	}
6976 
6977 
6978 	aconnector->base.override_edid = true;
6979 	create_eml_sink(aconnector);
6980 }
6981 
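/*
 * Create a DC stream for the given mode and validate it against DC. If
 * validation fails, retry with progressively lower bpc (down to 6), and as
 * a last resort retry once with YCbCr420 encoding forced.
 */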
6982 static struct dc_stream_state *
6983 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6984 				const struct drm_display_mode *drm_mode,
6985 				const struct dm_connector_state *dm_state,
6986 				const struct dc_stream_state *old_stream)
6987 {
6988 	struct drm_connector *connector = &aconnector->base;
6989 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6990 	struct dc_stream_state *stream;
6991 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6992 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6993 	enum dc_status dc_result = DC_OK;
6994 
6995 	do {
6996 		stream = create_stream_for_sink(aconnector, drm_mode,
6997 						dm_state, old_stream,
6998 						requested_bpc);
6999 		if (stream == NULL) {
7000 			DRM_ERROR("Failed to create stream for sink!\n");
7001 			break;
7002 		}
7003 
7004 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7005 
7006 		if (dc_result != DC_OK) {
7007 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7008 				      drm_mode->hdisplay,
7009 				      drm_mode->vdisplay,
7010 				      drm_mode->clock,
7011 				      dc_result,
7012 				      dc_status_to_str(dc_result));
7013 
7014 			dc_stream_release(stream);
7015 			stream = NULL;
7016 			requested_bpc -= 2; /* lower bpc to retry validation */
7017 		}
7018 
7019 	} while (stream == NULL && requested_bpc >= 6);
7020 
7021 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7022 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7023 
7024 		aconnector->force_yuv420_output = true;
7025 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7026 						dm_state, old_stream);
7027 		aconnector->force_yuv420_output = false;
7028 	}
7029 
7030 	return stream;
7031 }
7032 
7033 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7034 				   struct drm_display_mode *mode)
7035 {
7036 	int result = MODE_ERROR;
7037 	struct dc_sink *dc_sink;
7038 	/* TODO: Unhardcode stream count */
7039 	struct dc_stream_state *stream;
7040 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7041 
7042 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7043 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7044 		return result;
7045 
7046 	/*
	 * Only run this the first time mode_valid is called to initialize
7048 	 * EDID mgmt
7049 	 */
7050 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7051 		!aconnector->dc_em_sink)
7052 		handle_edid_mgmt(aconnector);
7053 
7054 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7055 
7056 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7057 				aconnector->base.force != DRM_FORCE_ON) {
7058 		DRM_ERROR("dc_sink is NULL!\n");
7059 		goto fail;
7060 	}
7061 
7062 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7063 	if (stream) {
7064 		dc_stream_release(stream);
7065 		result = MODE_OK;
7066 	}
7067 
7068 fail:
	/* TODO: error handling */
7070 	return result;
7071 }
7072 
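/*
 * Pack the connector's HDR static metadata into a DC info packet: an HDMI
 * Dynamic Range and Mastering infoframe for HDMI, or the equivalent SDP for
 * DP/eDP. Returns 0 with out->valid left false when no metadata is set.
 */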
7073 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7074 				struct dc_info_packet *out)
7075 {
7076 	struct hdmi_drm_infoframe frame;
7077 	unsigned char buf[30]; /* 26 + 4 */
7078 	ssize_t len;
7079 	int ret, i;
7080 
7081 	memset(out, 0, sizeof(*out));
7082 
7083 	if (!state->hdr_output_metadata)
7084 		return 0;
7085 
7086 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7087 	if (ret)
7088 		return ret;
7089 
7090 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7091 	if (len < 0)
7092 		return (int)len;
7093 
7094 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7095 	if (len != 30)
7096 		return -EINVAL;
7097 
7098 	/* Prepare the infopacket for DC. */
7099 	switch (state->connector->connector_type) {
7100 	case DRM_MODE_CONNECTOR_HDMIA:
7101 		out->hb0 = 0x87; /* type */
7102 		out->hb1 = 0x01; /* version */
7103 		out->hb2 = 0x1A; /* length */
7104 		out->sb[0] = buf[3]; /* checksum */
7105 		i = 1;
7106 		break;
7107 
7108 	case DRM_MODE_CONNECTOR_DisplayPort:
7109 	case DRM_MODE_CONNECTOR_eDP:
7110 		out->hb0 = 0x00; /* sdp id, zero */
7111 		out->hb1 = 0x87; /* type */
7112 		out->hb2 = 0x1D; /* payload len - 1 */
7113 		out->hb3 = (0x13 << 2); /* sdp version */
7114 		out->sb[0] = 0x01; /* version */
7115 		out->sb[1] = 0x1A; /* length */
7116 		i = 2;
7117 		break;
7118 
7119 	default:
7120 		return -EINVAL;
7121 	}
7122 
7123 	memcpy(&out->sb[i], &buf[4], 26);
7124 	out->valid = true;
7125 
7126 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7127 		       sizeof(out->sb), false);
7128 
7129 	return 0;
7130 }
7131 
7132 static int
7133 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7134 				 struct drm_atomic_state *state)
7135 {
7136 	struct drm_connector_state *new_con_state =
7137 		drm_atomic_get_new_connector_state(state, conn);
7138 	struct drm_connector_state *old_con_state =
7139 		drm_atomic_get_old_connector_state(state, conn);
7140 	struct drm_crtc *crtc = new_con_state->crtc;
7141 	struct drm_crtc_state *new_crtc_state;
7142 	int ret;
7143 
7144 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7145 
7146 	if (!crtc)
7147 		return 0;
7148 
7149 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7150 		struct dc_info_packet hdr_infopacket;
7151 
7152 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7153 		if (ret)
7154 			return ret;
7155 
7156 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7157 		if (IS_ERR(new_crtc_state))
7158 			return PTR_ERR(new_crtc_state);
7159 
7160 		/*
7161 		 * DC considers the stream backends changed if the
7162 		 * static metadata changes. Forcing the modeset also
7163 		 * gives a simple way for userspace to switch from
7164 		 * 8bpc to 10bpc when setting the metadata to enter
7165 		 * or exit HDR.
7166 		 *
7167 		 * Changing the static metadata after it's been
7168 		 * set is permissible, however. So only force a
7169 		 * modeset if we're entering or exiting HDR.
7170 		 */
7171 		new_crtc_state->mode_changed =
7172 			!old_con_state->hdr_output_metadata ||
7173 			!new_con_state->hdr_output_metadata;
7174 	}
7175 
7176 	return 0;
7177 }
7178 
7179 static const struct drm_connector_helper_funcs
7180 amdgpu_dm_connector_helper_funcs = {
7181 	/*
	 * If hotplugging a second, bigger display in FB console mode, the bigger
	 * resolution modes will be filtered by drm_mode_validate_size(), and those
	 * modes are missing after the user starts lightdm. So we need to renew the
	 * modes list in the get_modes callback, not just return the modes count.
7186 	 */
7187 	.get_modes = get_modes,
7188 	.mode_valid = amdgpu_dm_connector_mode_valid,
7189 	.atomic_check = amdgpu_dm_connector_atomic_check,
7190 };
7191 
7192 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7193 {
7194 }
7195 
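/*
 * Count the non-cursor planes that will be enabled on the CRTC once this
 * atomic state is applied. Planes not part of the state are assumed to keep
 * their previously validated (enabled) configuration.
 */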
7196 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7197 {
7198 	struct drm_atomic_state *state = new_crtc_state->state;
7199 	struct drm_plane *plane;
7200 	int num_active = 0;
7201 
7202 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7203 		struct drm_plane_state *new_plane_state;
7204 
7205 		/* Cursor planes are "fake". */
7206 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7207 			continue;
7208 
7209 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7210 
7211 		if (!new_plane_state) {
7212 			/*
			 * The plane is enabled on the CRTC and hasn't changed
7214 			 * state. This means that it previously passed
7215 			 * validation and is therefore enabled.
7216 			 */
7217 			num_active += 1;
7218 			continue;
7219 		}
7220 
7221 		/* We need a framebuffer to be considered enabled. */
7222 		num_active += (new_plane_state->fb != NULL);
7223 	}
7224 
7225 	return num_active;
7226 }
7227 
7228 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7229 					 struct drm_crtc_state *new_crtc_state)
7230 {
7231 	struct dm_crtc_state *dm_new_crtc_state =
7232 		to_dm_crtc_state(new_crtc_state);
7233 
7234 	dm_new_crtc_state->active_planes = 0;
7235 
7236 	if (!dm_new_crtc_state->stream)
7237 		return;
7238 
7239 	dm_new_crtc_state->active_planes =
7240 		count_crtc_active_planes(new_crtc_state);
7241 }
7242 
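/*
 * Validate the CRTC state: require the primary plane whenever the CRTC is
 * enabled and run DC stream validation on the attached stream, if any.
 */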
7243 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7244 				       struct drm_atomic_state *state)
7245 {
7246 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7247 									  crtc);
7248 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7249 	struct dc *dc = adev->dm.dc;
7250 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7251 	int ret = -EINVAL;
7252 
7253 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7254 
7255 	dm_update_crtc_active_planes(crtc, crtc_state);
7256 
7257 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7258 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7259 		return ret;
7260 	}
7261 
7262 	/*
7263 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7264 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7265 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7266 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7267 	 */
7268 	if (crtc_state->enable &&
7269 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7270 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7271 		return -EINVAL;
7272 	}
7273 
7274 	/* In some use cases, like reset, no stream is attached */
7275 	if (!dm_crtc_state->stream)
7276 		return 0;
7277 
7278 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7279 		return 0;
7280 
7281 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7282 	return ret;
7283 }
7284 
7285 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7286 				      const struct drm_display_mode *mode,
7287 				      struct drm_display_mode *adjusted_mode)
7288 {
7289 	return true;
7290 }
7291 
7292 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7293 	.disable = dm_crtc_helper_disable,
7294 	.atomic_check = dm_crtc_helper_atomic_check,
7295 	.mode_fixup = dm_crtc_helper_mode_fixup,
7296 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7297 };
7298 
7299 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}
7303 
static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}
7324 
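/*
 * For MST connectors, compute the PBN required by the adjusted mode and
 * reserve the corresponding number of VCPI slots in the atomic state.
 */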
7325 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7326 					  struct drm_crtc_state *crtc_state,
7327 					  struct drm_connector_state *conn_state)
7328 {
7329 	struct drm_atomic_state *state = crtc_state->state;
7330 	struct drm_connector *connector = conn_state->connector;
7331 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7332 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7333 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7334 	struct drm_dp_mst_topology_mgr *mst_mgr;
7335 	struct drm_dp_mst_port *mst_port;
7336 	enum dc_color_depth color_depth;
7337 	int clock, bpp = 0;
7338 	bool is_y420 = false;
7339 
7340 	if (!aconnector->port || !aconnector->dc_sink)
7341 		return 0;
7342 
7343 	mst_port = aconnector->port;
7344 	mst_mgr = &aconnector->mst_port->mst_mgr;
7345 
7346 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7347 		return 0;
7348 
7349 	if (!state->duplicated) {
7350 		int max_bpc = conn_state->max_requested_bpc;
7351 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7352 				aconnector->force_yuv420_output;
7353 		color_depth = convert_color_depth_from_display_info(connector,
7354 								    is_y420,
7355 								    max_bpc);
7356 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7357 		clock = adjusted_mode->clock;
7358 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7359 	}
7360 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7361 									   mst_mgr,
7362 									   mst_port,
7363 									   dm_new_connector_state->pbn,
7364 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7365 	if (dm_new_connector_state->vcpi_slots < 0) {
7366 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7367 		return dm_new_connector_state->vcpi_slots;
7368 	}
7369 	return 0;
7370 }
7371 
7372 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7373 	.disable = dm_encoder_helper_disable,
7374 	.atomic_check = dm_encoder_helper_atomic_check
7375 };
7376 
7377 #if defined(CONFIG_DRM_AMD_DC_DCN)
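/*
 * Propagate the PBN/VCPI values computed by the MST DSC fairness algorithm
 * into each connector's DM state, enabling or disabling DSC on the
 * corresponding MST ports accordingly.
 */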
7378 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7379 					    struct dc_state *dc_state,
7380 					    struct dsc_mst_fairness_vars *vars)
7381 {
7382 	struct dc_stream_state *stream = NULL;
7383 	struct drm_connector *connector;
7384 	struct drm_connector_state *new_con_state;
7385 	struct amdgpu_dm_connector *aconnector;
7386 	struct dm_connector_state *dm_conn_state;
7387 	int i, j;
7388 	int vcpi, pbn_div, pbn, slot_num = 0;
7389 
7390 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7391 
7392 		aconnector = to_amdgpu_dm_connector(connector);
7393 
7394 		if (!aconnector->port)
7395 			continue;
7396 
7397 		if (!new_con_state || !new_con_state->crtc)
7398 			continue;
7399 
7400 		dm_conn_state = to_dm_connector_state(new_con_state);
7401 
7402 		for (j = 0; j < dc_state->stream_count; j++) {
7403 			stream = dc_state->streams[j];
7404 			if (!stream)
7405 				continue;
7406 
7407 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7408 				break;
7409 
7410 			stream = NULL;
7411 		}
7412 
7413 		if (!stream)
7414 			continue;
7415 
7416 		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7418 		for (j = 0; j < dc_state->stream_count; j++) {
7419 			if (vars[j].aconnector == aconnector) {
7420 				pbn = vars[j].pbn;
7421 				break;
7422 			}
7423 		}
7424 
7425 		if (j == dc_state->stream_count)
7426 			continue;
7427 
7428 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7429 
7430 		if (stream->timing.flags.DSC != 1) {
7431 			dm_conn_state->pbn = pbn;
7432 			dm_conn_state->vcpi_slots = slot_num;
7433 
7434 			drm_dp_mst_atomic_enable_dsc(state,
7435 						     aconnector->port,
7436 						     dm_conn_state->pbn,
7437 						     0,
7438 						     false);
7439 			continue;
7440 		}
7441 
7442 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7443 						    aconnector->port,
7444 						    pbn, pbn_div,
7445 						    true);
7446 		if (vcpi < 0)
7447 			return vcpi;
7448 
7449 		dm_conn_state->pbn = pbn;
7450 		dm_conn_state->vcpi_slots = vcpi;
7451 	}
7452 	return 0;
7453 }
7454 #endif
7455 
7456 static void dm_drm_plane_reset(struct drm_plane *plane)
7457 {
7458 	struct dm_plane_state *amdgpu_state = NULL;
7459 
7460 	if (plane->state)
7461 		plane->funcs->atomic_destroy_state(plane, plane->state);
7462 
7463 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7464 	WARN_ON(amdgpu_state == NULL);
7465 
7466 	if (amdgpu_state)
7467 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7468 }
7469 
7470 static struct drm_plane_state *
7471 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7472 {
7473 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7474 
7475 	old_dm_plane_state = to_dm_plane_state(plane->state);
7476 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7477 	if (!dm_plane_state)
7478 		return NULL;
7479 
7480 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7481 
7482 	if (old_dm_plane_state->dc_state) {
7483 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7484 		dc_plane_state_retain(dm_plane_state->dc_state);
7485 	}
7486 
7487 	return &dm_plane_state->base;
7488 }
7489 
7490 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7491 				struct drm_plane_state *state)
7492 {
7493 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7494 
7495 	if (dm_plane_state->dc_state)
7496 		dc_plane_state_release(dm_plane_state->dc_state);
7497 
7498 	drm_atomic_helper_plane_destroy_state(plane, state);
7499 }
7500 
7501 static const struct drm_plane_funcs dm_plane_funcs = {
7502 	.update_plane	= drm_atomic_helper_update_plane,
7503 	.disable_plane	= drm_atomic_helper_disable_plane,
7504 	.destroy	= drm_primary_helper_destroy,
7505 	.reset = dm_drm_plane_reset,
7506 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7507 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7508 	.format_mod_supported = dm_plane_format_mod_supported,
7509 };
7510 
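/*
 * Pin the framebuffer's buffer object into a domain suitable for scanout,
 * bind it in GART and record its GPU address, then fill the DC plane buffer
 * attributes, but only for newly created planes not yet used by DC.
 */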
7511 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7512 				      struct drm_plane_state *new_state)
7513 {
7514 	struct amdgpu_framebuffer *afb;
7515 	struct drm_gem_object *obj;
7516 	struct amdgpu_device *adev;
7517 	struct amdgpu_bo *rbo;
7518 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7519 	struct list_head list;
7520 	struct ttm_validate_buffer tv;
7521 	struct ww_acquire_ctx ticket;
7522 	uint32_t domain;
7523 	int r;
7524 
7525 	if (!new_state->fb) {
7526 		DRM_DEBUG_KMS("No FB bound\n");
7527 		return 0;
7528 	}
7529 
7530 	afb = to_amdgpu_framebuffer(new_state->fb);
7531 	obj = new_state->fb->obj[0];
7532 	rbo = gem_to_amdgpu_bo(obj);
7533 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7534 	INIT_LIST_HEAD(&list);
7535 
7536 	tv.bo = &rbo->tbo;
7537 	tv.num_shared = 1;
7538 	list_add(&tv.head, &list);
7539 
7540 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7541 	if (r) {
7542 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7543 		return r;
7544 	}
7545 
7546 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7547 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7548 	else
7549 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7550 
7551 	r = amdgpu_bo_pin(rbo, domain);
7552 	if (unlikely(r != 0)) {
7553 		if (r != -ERESTARTSYS)
7554 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7555 		ttm_eu_backoff_reservation(&ticket, &list);
7556 		return r;
7557 	}
7558 
7559 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7560 	if (unlikely(r != 0)) {
7561 		amdgpu_bo_unpin(rbo);
7562 		ttm_eu_backoff_reservation(&ticket, &list);
7563 		DRM_ERROR("%p bind failed\n", rbo);
7564 		return r;
7565 	}
7566 
7567 	ttm_eu_backoff_reservation(&ticket, &list);
7568 
7569 	afb->address = amdgpu_bo_gpu_offset(rbo);
7570 
7571 	amdgpu_bo_ref(rbo);
7572 
7573 	/**
7574 	 * We don't do surface updates on planes that have been newly created,
7575 	 * but we also don't have the afb->address during atomic check.
7576 	 *
7577 	 * Fill in buffer attributes depending on the address here, but only on
7578 	 * newly created planes since they're not being used by DC yet and this
7579 	 * won't modify global state.
7580 	 */
7581 	dm_plane_state_old = to_dm_plane_state(plane->state);
7582 	dm_plane_state_new = to_dm_plane_state(new_state);
7583 
7584 	if (dm_plane_state_new->dc_state &&
7585 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7586 		struct dc_plane_state *plane_state =
7587 			dm_plane_state_new->dc_state;
7588 		bool force_disable_dcc = !plane_state->dcc.enable;
7589 
7590 		fill_plane_buffer_attributes(
7591 			adev, afb, plane_state->format, plane_state->rotation,
7592 			afb->tiling_flags,
7593 			&plane_state->tiling_info, &plane_state->plane_size,
7594 			&plane_state->dcc, &plane_state->address,
7595 			afb->tmz_surface, force_disable_dcc);
7596 	}
7597 
7598 	return 0;
7599 }
7600 
7601 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7602 				       struct drm_plane_state *old_state)
7603 {
7604 	struct amdgpu_bo *rbo;
7605 	int r;
7606 
7607 	if (!old_state->fb)
7608 		return;
7609 
7610 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7611 	r = amdgpu_bo_reserve(rbo, false);
7612 	if (unlikely(r)) {
7613 		DRM_ERROR("failed to reserve rbo before unpin\n");
7614 		return;
7615 	}
7616 
7617 	amdgpu_bo_unpin(rbo);
7618 	amdgpu_bo_unreserve(rbo);
7619 	amdgpu_bo_unref(&rbo);
7620 }
7621 
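/*
 * Validate the plane's viewport against the CRTC mode and convert the DC
 * scaling caps into DRM 16.16 fixed-point scale limits before handing the
 * state to drm_atomic_helper_check_plane_state().
 */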
7622 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7623 				       struct drm_crtc_state *new_crtc_state)
7624 {
7625 	struct drm_framebuffer *fb = state->fb;
7626 	int min_downscale, max_upscale;
7627 	int min_scale = 0;
7628 	int max_scale = INT_MAX;
7629 
7630 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7631 	if (fb && state->crtc) {
7632 		/* Validate viewport to cover the case when only the position changes */
7633 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7634 			int viewport_width = state->crtc_w;
7635 			int viewport_height = state->crtc_h;
7636 
7637 			if (state->crtc_x < 0)
7638 				viewport_width += state->crtc_x;
7639 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7640 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7641 
7642 			if (state->crtc_y < 0)
7643 				viewport_height += state->crtc_y;
7644 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7645 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7646 
7647 			if (viewport_width < 0 || viewport_height < 0) {
7648 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7649 				return -EINVAL;
7650 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7651 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7652 				return -EINVAL;
7653 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7654 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7655 				return -EINVAL;
7656 			}
7657 
7658 		}
7659 
7660 		/* Get min/max allowed scaling factors from plane caps. */
7661 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7662 					     &min_downscale, &max_upscale);
7663 		/*
7664 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7665 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7666 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7667 		 */
7668 		min_scale = (1000 << 16) / max_upscale;
7669 		max_scale = (1000 << 16) / min_downscale;
7670 	}
7671 
7672 	return drm_atomic_helper_check_plane_state(
7673 		state, new_crtc_state, min_scale, max_scale, true, true);
7674 }
7675 
7676 static int dm_plane_atomic_check(struct drm_plane *plane,
7677 				 struct drm_atomic_state *state)
7678 {
7679 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7680 										 plane);
7681 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7682 	struct dc *dc = adev->dm.dc;
7683 	struct dm_plane_state *dm_plane_state;
7684 	struct dc_scaling_info scaling_info;
7685 	struct drm_crtc_state *new_crtc_state;
7686 	int ret;
7687 
7688 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7689 
7690 	dm_plane_state = to_dm_plane_state(new_plane_state);
7691 
7692 	if (!dm_plane_state->dc_state)
7693 		return 0;
7694 
7695 	new_crtc_state =
7696 		drm_atomic_get_new_crtc_state(state,
7697 					      new_plane_state->crtc);
7698 	if (!new_crtc_state)
7699 		return -EINVAL;
7700 
7701 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7702 	if (ret)
7703 		return ret;
7704 
7705 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7706 	if (ret)
7707 		return ret;
7708 
7709 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7710 		return 0;
7711 
7712 	return -EINVAL;
7713 }
7714 
7715 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7716 				       struct drm_atomic_state *state)
7717 {
7718 	/* Only support async updates on cursor planes. */
7719 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7720 		return -EINVAL;
7721 
7722 	return 0;
7723 }
7724 
7725 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7726 					 struct drm_atomic_state *state)
7727 {
7728 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7729 									   plane);
7730 	struct drm_plane_state *old_state =
7731 		drm_atomic_get_old_plane_state(state, plane);
7732 
7733 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7734 
7735 	swap(plane->state->fb, new_state->fb);
7736 
7737 	plane->state->src_x = new_state->src_x;
7738 	plane->state->src_y = new_state->src_y;
7739 	plane->state->src_w = new_state->src_w;
7740 	plane->state->src_h = new_state->src_h;
7741 	plane->state->crtc_x = new_state->crtc_x;
7742 	plane->state->crtc_y = new_state->crtc_y;
7743 	plane->state->crtc_w = new_state->crtc_w;
7744 	plane->state->crtc_h = new_state->crtc_h;
7745 
7746 	handle_cursor_update(plane, old_state);
7747 }
7748 
7749 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7750 	.prepare_fb = dm_plane_helper_prepare_fb,
7751 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7752 	.atomic_check = dm_plane_atomic_check,
7753 	.atomic_async_check = dm_plane_atomic_async_check,
7754 	.atomic_async_update = dm_plane_atomic_async_update
7755 };
7756 
7757 /*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * drm check will succeed, and let DC implement the proper checks.
7762  */
7763 static const uint32_t rgb_formats[] = {
7764 	DRM_FORMAT_XRGB8888,
7765 	DRM_FORMAT_ARGB8888,
7766 	DRM_FORMAT_RGBA8888,
7767 	DRM_FORMAT_XRGB2101010,
7768 	DRM_FORMAT_XBGR2101010,
7769 	DRM_FORMAT_ARGB2101010,
7770 	DRM_FORMAT_ABGR2101010,
7771 	DRM_FORMAT_XRGB16161616,
7772 	DRM_FORMAT_XBGR16161616,
7773 	DRM_FORMAT_ARGB16161616,
7774 	DRM_FORMAT_ABGR16161616,
7775 	DRM_FORMAT_XBGR8888,
7776 	DRM_FORMAT_ABGR8888,
7777 	DRM_FORMAT_RGB565,
7778 };
7779 
7780 static const uint32_t overlay_formats[] = {
7781 	DRM_FORMAT_XRGB8888,
7782 	DRM_FORMAT_ARGB8888,
7783 	DRM_FORMAT_RGBA8888,
7784 	DRM_FORMAT_XBGR8888,
7785 	DRM_FORMAT_ABGR8888,
7786 	DRM_FORMAT_RGB565
7787 };
7788 
7789 static const u32 cursor_formats[] = {
7790 	DRM_FORMAT_ARGB8888
7791 };
7792 
7793 static int get_plane_formats(const struct drm_plane *plane,
7794 			     const struct dc_plane_cap *plane_cap,
7795 			     uint32_t *formats, int max_formats)
7796 {
7797 	int i, num_formats = 0;
7798 
7799 	/*
7800 	 * TODO: Query support for each group of formats directly from
7801 	 * DC plane caps. This will require adding more formats to the
7802 	 * caps list.
7803 	 */
7804 
7805 	switch (plane->type) {
7806 	case DRM_PLANE_TYPE_PRIMARY:
7807 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7808 			if (num_formats >= max_formats)
7809 				break;
7810 
7811 			formats[num_formats++] = rgb_formats[i];
7812 		}
7813 
7814 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7815 			formats[num_formats++] = DRM_FORMAT_NV12;
7816 		if (plane_cap && plane_cap->pixel_format_support.p010)
7817 			formats[num_formats++] = DRM_FORMAT_P010;
7818 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7819 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7820 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7821 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7822 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7823 		}
7824 		break;
7825 
7826 	case DRM_PLANE_TYPE_OVERLAY:
7827 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7828 			if (num_formats >= max_formats)
7829 				break;
7830 
7831 			formats[num_formats++] = overlay_formats[i];
7832 		}
7833 		break;
7834 
7835 	case DRM_PLANE_TYPE_CURSOR:
7836 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7837 			if (num_formats >= max_formats)
7838 				break;
7839 
7840 			formats[num_formats++] = cursor_formats[i];
7841 		}
7842 		break;
7843 	}
7844 
7845 	return num_formats;
7846 }
7847 
7848 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7849 				struct drm_plane *plane,
7850 				unsigned long possible_crtcs,
7851 				const struct dc_plane_cap *plane_cap)
7852 {
7853 	uint32_t formats[32];
7854 	int num_formats;
7855 	int res = -EPERM;
7856 	unsigned int supported_rotations;
7857 	uint64_t *modifiers = NULL;
7858 
7859 	num_formats = get_plane_formats(plane, plane_cap, formats,
7860 					ARRAY_SIZE(formats));
7861 
7862 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7863 	if (res)
7864 		return res;
7865 
7866 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7867 				       &dm_plane_funcs, formats, num_formats,
7868 				       modifiers, plane->type, NULL);
7869 	kfree(modifiers);
7870 	if (res)
7871 		return res;
7872 
7873 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7874 	    plane_cap && plane_cap->per_pixel_alpha) {
7875 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7876 					  BIT(DRM_MODE_BLEND_PREMULTI);
7877 
7878 		drm_plane_create_alpha_property(plane);
7879 		drm_plane_create_blend_mode_property(plane, blend_caps);
7880 	}
7881 
7882 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7883 	    plane_cap &&
7884 	    (plane_cap->pixel_format_support.nv12 ||
7885 	     plane_cap->pixel_format_support.p010)) {
7886 		/* This only affects YUV formats. */
7887 		drm_plane_create_color_properties(
7888 			plane,
7889 			BIT(DRM_COLOR_YCBCR_BT601) |
7890 			BIT(DRM_COLOR_YCBCR_BT709) |
7891 			BIT(DRM_COLOR_YCBCR_BT2020),
7892 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7893 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7894 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7895 	}
7896 
7897 	supported_rotations =
7898 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7899 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7900 
7901 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7902 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7903 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7904 						   supported_rotations);
7905 
7906 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7907 
7908 	/* Create (reset) the plane state */
7909 	if (plane->funcs->reset)
7910 		plane->funcs->reset(plane);
7911 
7912 	return 0;
7913 }
7914 
7915 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7916 			       struct drm_plane *plane,
7917 			       uint32_t crtc_index)
7918 {
7919 	struct amdgpu_crtc *acrtc = NULL;
7920 	struct drm_plane *cursor_plane;
7921 
7922 	int res = -ENOMEM;
7923 
7924 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7925 	if (!cursor_plane)
7926 		goto fail;
7927 
7928 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7929 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7930 
7931 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7932 	if (!acrtc)
7933 		goto fail;
7934 
7935 	res = drm_crtc_init_with_planes(
7936 			dm->ddev,
7937 			&acrtc->base,
7938 			plane,
7939 			cursor_plane,
7940 			&amdgpu_dm_crtc_funcs, NULL);
7941 
7942 	if (res)
7943 		goto fail;
7944 
7945 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7946 
7947 	/* Create (reset) the plane state */
7948 	if (acrtc->base.funcs->reset)
7949 		acrtc->base.funcs->reset(&acrtc->base);
7950 
7951 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7952 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7953 
7954 	acrtc->crtc_id = crtc_index;
7955 	acrtc->base.enabled = false;
7956 	acrtc->otg_inst = -1;
7957 
7958 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7959 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7960 				   true, MAX_COLOR_LUT_ENTRIES);
7961 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7962 
7963 	return 0;
7964 
7965 fail:
7966 	kfree(acrtc);
7967 	kfree(cursor_plane);
7968 	return res;
7969 }
7970 
7971 
7972 static int to_drm_connector_type(enum signal_type st)
7973 {
7974 	switch (st) {
7975 	case SIGNAL_TYPE_HDMI_TYPE_A:
7976 		return DRM_MODE_CONNECTOR_HDMIA;
7977 	case SIGNAL_TYPE_EDP:
7978 		return DRM_MODE_CONNECTOR_eDP;
7979 	case SIGNAL_TYPE_LVDS:
7980 		return DRM_MODE_CONNECTOR_LVDS;
7981 	case SIGNAL_TYPE_RGB:
7982 		return DRM_MODE_CONNECTOR_VGA;
7983 	case SIGNAL_TYPE_DISPLAY_PORT:
7984 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7985 		return DRM_MODE_CONNECTOR_DisplayPort;
7986 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7987 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7988 		return DRM_MODE_CONNECTOR_DVID;
7989 	case SIGNAL_TYPE_VIRTUAL:
7990 		return DRM_MODE_CONNECTOR_VIRTUAL;
7991 
7992 	default:
7993 		return DRM_MODE_CONNECTOR_Unknown;
7994 	}
7995 }
7996 
7997 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7998 {
7999 	struct drm_encoder *encoder;
8000 
8001 	/* There is only one encoder per connector */
8002 	drm_connector_for_each_possible_encoder(connector, encoder)
8003 		return encoder;
8004 
8005 	return NULL;
8006 }
8007 
8008 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8009 {
8010 	struct drm_encoder *encoder;
8011 	struct amdgpu_encoder *amdgpu_encoder;
8012 
8013 	encoder = amdgpu_dm_connector_to_encoder(connector);
8014 
8015 	if (encoder == NULL)
8016 		return;
8017 
8018 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8019 
8020 	amdgpu_encoder->native_mode.clock = 0;
8021 
8022 	if (!list_empty(&connector->probed_modes)) {
8023 		struct drm_display_mode *preferred_mode = NULL;
8024 
8025 		list_for_each_entry(preferred_mode,
8026 				    &connector->probed_modes,
8027 				    head) {
8028 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8029 				amdgpu_encoder->native_mode = *preferred_mode;
8030 
8031 			break;
8032 		}
8033 
8034 	}
8035 }
8036 
8037 static struct drm_display_mode *
8038 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8039 			     char *name,
8040 			     int hdisplay, int vdisplay)
8041 {
8042 	struct drm_device *dev = encoder->dev;
8043 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8044 	struct drm_display_mode *mode = NULL;
8045 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8046 
8047 	mode = drm_mode_duplicate(dev, native_mode);
8048 
8049 	if (mode == NULL)
8050 		return NULL;
8051 
8052 	mode->hdisplay = hdisplay;
8053 	mode->vdisplay = vdisplay;
8054 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8055 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8056 
8057 	return mode;
8058 
8059 }
8060 
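/*
 * Add a set of common modes smaller than the native mode as driver modes,
 * skipping any resolution the EDID already provided.
 */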
8061 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8062 						 struct drm_connector *connector)
8063 {
8064 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8065 	struct drm_display_mode *mode = NULL;
8066 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8067 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8068 				to_amdgpu_dm_connector(connector);
8069 	int i;
8070 	int n;
8071 	struct mode_size {
8072 		char name[DRM_DISPLAY_MODE_LEN];
8073 		int w;
8074 		int h;
8075 	} common_modes[] = {
8076 		{  "640x480",  640,  480},
8077 		{  "800x600",  800,  600},
8078 		{ "1024x768", 1024,  768},
8079 		{ "1280x720", 1280,  720},
8080 		{ "1280x800", 1280,  800},
8081 		{"1280x1024", 1280, 1024},
8082 		{ "1440x900", 1440,  900},
8083 		{"1680x1050", 1680, 1050},
8084 		{"1600x1200", 1600, 1200},
8085 		{"1920x1080", 1920, 1080},
8086 		{"1920x1200", 1920, 1200}
8087 	};
8088 
8089 	n = ARRAY_SIZE(common_modes);
8090 
8091 	for (i = 0; i < n; i++) {
8092 		struct drm_display_mode *curmode = NULL;
8093 		bool mode_existed = false;
8094 
8095 		if (common_modes[i].w > native_mode->hdisplay ||
8096 		    common_modes[i].h > native_mode->vdisplay ||
8097 		   (common_modes[i].w == native_mode->hdisplay &&
8098 		    common_modes[i].h == native_mode->vdisplay))
8099 			continue;
8100 
8101 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8102 			if (common_modes[i].w == curmode->hdisplay &&
8103 			    common_modes[i].h == curmode->vdisplay) {
8104 				mode_existed = true;
8105 				break;
8106 			}
8107 		}
8108 
8109 		if (mode_existed)
8110 			continue;
8111 
8112 		mode = amdgpu_dm_create_common_mode(encoder,
8113 				common_modes[i].name, common_modes[i].w,
8114 				common_modes[i].h);
8115 		drm_mode_probed_add(connector, mode);
8116 		amdgpu_dm_connector->num_modes++;
8117 	}
8118 }
8119 
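/*
 * Apply the panel-orientation quirk, if any, for internal eDP/LVDS panels
 * based on the native mode dimensions.
 */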
8120 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8121 {
8122 	struct drm_encoder *encoder;
8123 	struct amdgpu_encoder *amdgpu_encoder;
8124 	const struct drm_display_mode *native_mode;
8125 
8126 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8127 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8128 		return;
8129 
8130 	encoder = amdgpu_dm_connector_to_encoder(connector);
8131 	if (!encoder)
8132 		return;
8133 
8134 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8135 
8136 	native_mode = &amdgpu_encoder->native_mode;
8137 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8138 		return;
8139 
8140 	drm_connector_set_panel_orientation_with_quirk(connector,
8141 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8142 						       native_mode->hdisplay,
8143 						       native_mode->vdisplay);
8144 }
8145 
8146 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8147 					      struct edid *edid)
8148 {
8149 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8150 			to_amdgpu_dm_connector(connector);
8151 
8152 	if (edid) {
8153 		/* empty probed_modes */
8154 		INIT_LIST_HEAD(&connector->probed_modes);
8155 		amdgpu_dm_connector->num_modes =
8156 				drm_add_edid_modes(connector, edid);
8157 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since the EDID can have
		 * more than one preferred mode. Modes that appear later
		 * in the probed mode list could be of a higher, preferred
		 * resolution. For example, a 3840x2160 resolution in the
		 * base EDID preferred timing and a 4096x2160 preferred
		 * resolution in a later DID extension block.
		 */
8166 		drm_mode_sort(&connector->probed_modes);
8167 		amdgpu_dm_get_native_mode(connector);
8168 
8169 		/* Freesync capabilities are reset by calling
8170 		 * drm_add_edid_modes() and need to be
8171 		 * restored here.
8172 		 */
8173 		amdgpu_dm_update_freesync_caps(connector, edid);
8174 
8175 		amdgpu_set_panel_orientation(connector);
8176 	} else {
8177 		amdgpu_dm_connector->num_modes = 0;
8178 	}
8179 }
8180 
8181 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8182 			      struct drm_display_mode *mode)
8183 {
8184 	struct drm_display_mode *m;
8185 
8186 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8187 		if (drm_mode_equal(m, mode))
8188 			return true;
8189 	}
8190 
8191 	return false;
8192 }
8193 
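/*
 * Synthesize additional FreeSync video modes by stretching the vertical
 * blanking of the highest-refresh-rate mode down to a set of common frame
 * rates within the panel's supported VRR range.
 */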
8194 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8195 {
8196 	const struct drm_display_mode *m;
8197 	struct drm_display_mode *new_mode;
8198 	uint i;
8199 	uint32_t new_modes_count = 0;
8200 
8201 	/* Standard FPS values
8202 	 *
8203 	 * 23.976       - TV/NTSC
8204 	 * 24 	        - Cinema
8205 	 * 25 	        - TV/PAL
8206 	 * 29.97        - TV/NTSC
8207 	 * 30 	        - TV/NTSC
8208 	 * 48 	        - Cinema HFR
8209 	 * 50 	        - TV/PAL
8210 	 * 60 	        - Commonly used
8211 	 * 48,72,96,120 - Multiples of 24
8212 	 */
8213 	static const uint32_t common_rates[] = {
8214 		23976, 24000, 25000, 29970, 30000,
8215 		48000, 50000, 60000, 72000, 96000, 120000
8216 	};
8217 
8218 	/*
	 * Find the mode with the highest refresh rate at the same resolution
	 * as the preferred mode. Some monitors report a preferred mode with a
	 * lower resolution than the mode with the highest supported refresh rate.
8222 	 */
8223 
8224 	m = get_highest_refresh_rate_mode(aconnector, true);
8225 	if (!m)
8226 		return 0;
8227 
8228 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8229 		uint64_t target_vtotal, target_vtotal_diff;
8230 		uint64_t num, den;
8231 
8232 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8233 			continue;
8234 
8235 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8236 		    common_rates[i] > aconnector->max_vfreq * 1000)
8237 			continue;
8238 
8239 		num = (unsigned long long)m->clock * 1000 * 1000;
8240 		den = common_rates[i] * (unsigned long long)m->htotal;
8241 		target_vtotal = div_u64(num, den);
8242 		target_vtotal_diff = target_vtotal - m->vtotal;
8243 
8244 		/* Check for illegal modes */
8245 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8246 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8247 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8248 			continue;
8249 
8250 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8251 		if (!new_mode)
8252 			goto out;
8253 
8254 		new_mode->vtotal += (u16)target_vtotal_diff;
8255 		new_mode->vsync_start += (u16)target_vtotal_diff;
8256 		new_mode->vsync_end += (u16)target_vtotal_diff;
8257 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8258 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8259 
8260 		if (!is_duplicate_mode(aconnector, new_mode)) {
8261 			drm_mode_probed_add(&aconnector->base, new_mode);
8262 			new_modes_count += 1;
8263 		} else
8264 			drm_mode_destroy(aconnector->base.dev, new_mode);
8265 	}
8266  out:
8267 	return new_modes_count;
8268 }
8269 
8270 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8271 						   struct edid *edid)
8272 {
8273 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8274 		to_amdgpu_dm_connector(connector);
8275 
8276 	if (!(amdgpu_freesync_vid_mode && edid))
8277 		return;
8278 
8279 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8280 		amdgpu_dm_connector->num_modes +=
8281 			add_fs_modes(amdgpu_dm_connector);
8282 }
8283 
8284 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8285 {
8286 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8287 			to_amdgpu_dm_connector(connector);
8288 	struct drm_encoder *encoder;
8289 	struct edid *edid = amdgpu_dm_connector->edid;
8290 
8291 	encoder = amdgpu_dm_connector_to_encoder(connector);
8292 
8293 	if (!drm_edid_is_valid(edid)) {
8294 		amdgpu_dm_connector->num_modes =
8295 				drm_add_modes_noedid(connector, 640, 480);
8296 	} else {
8297 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8298 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8299 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8300 	}
8301 	amdgpu_dm_fbc_init(connector);
8302 
8303 	return amdgpu_dm_connector->num_modes;
8304 }
8305 
8306 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8307 				     struct amdgpu_dm_connector *aconnector,
8308 				     int connector_type,
8309 				     struct dc_link *link,
8310 				     int link_index)
8311 {
8312 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8313 
8314 	/*
8315 	 * Some of the properties below require access to state, like bpc.
8316 	 * Allocate some default initial connector state with our reset helper.
8317 	 */
8318 	if (aconnector->base.funcs->reset)
8319 		aconnector->base.funcs->reset(&aconnector->base);
8320 
8321 	aconnector->connector_id = link_index;
8322 	aconnector->dc_link = link;
8323 	aconnector->base.interlace_allowed = false;
8324 	aconnector->base.doublescan_allowed = false;
8325 	aconnector->base.stereo_allowed = false;
8326 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8327 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8328 	aconnector->audio_inst = -1;
8329 	mutex_init(&aconnector->hpd_lock);
8330 
8331 	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
8334 	 */
8335 	switch (connector_type) {
8336 	case DRM_MODE_CONNECTOR_HDMIA:
8337 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8338 		aconnector->base.ycbcr_420_allowed =
8339 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8340 		break;
8341 	case DRM_MODE_CONNECTOR_DisplayPort:
8342 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8343 		link->link_enc = dp_get_link_enc(link);
8344 		ASSERT(link->link_enc);
8345 		if (link->link_enc)
8346 			aconnector->base.ycbcr_420_allowed =
8347 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8348 		break;
8349 	case DRM_MODE_CONNECTOR_DVID:
8350 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8351 		break;
8352 	default:
8353 		break;
8354 	}
8355 
8356 	drm_object_attach_property(&aconnector->base.base,
8357 				dm->ddev->mode_config.scaling_mode_property,
8358 				DRM_MODE_SCALE_NONE);
8359 
8360 	drm_object_attach_property(&aconnector->base.base,
8361 				adev->mode_info.underscan_property,
8362 				UNDERSCAN_OFF);
8363 	drm_object_attach_property(&aconnector->base.base,
8364 				adev->mode_info.underscan_hborder_property,
8365 				0);
8366 	drm_object_attach_property(&aconnector->base.base,
8367 				adev->mode_info.underscan_vborder_property,
8368 				0);
8369 
8370 	if (!aconnector->mst_port)
8371 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8372 
8373 	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8374 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8375 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8376 
8377 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8378 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8379 		drm_object_attach_property(&aconnector->base.base,
8380 				adev->mode_info.abm_level_property, 0);
8381 	}
8382 
8383 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8384 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8385 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8386 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8387 
8388 		if (!aconnector->mst_port)
8389 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8390 
8391 #ifdef CONFIG_DRM_AMD_DC_HDCP
8392 		if (adev->dm.hdcp_workqueue)
8393 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8394 #endif
8395 	}
8396 }
8397 
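/*
 * I2C master transfer callback: translate the array of i2c_msg into a DC
 * i2c_command (one payload per message) and submit it through dc_submit_i2c()
 * on the link's DDC channel. Returns the number of messages on success,
 * -EIO otherwise.
 */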
8398 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8399 			      struct i2c_msg *msgs, int num)
8400 {
8401 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8402 	struct ddc_service *ddc_service = i2c->ddc_service;
8403 	struct i2c_command cmd;
8404 	int i;
8405 	int result = -EIO;
8406 
8407 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8408 
8409 	if (!cmd.payloads)
8410 		return result;
8411 
8412 	cmd.number_of_payloads = num;
8413 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8414 	cmd.speed = 100;
8415 
8416 	for (i = 0; i < num; i++) {
8417 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8418 		cmd.payloads[i].address = msgs[i].addr;
8419 		cmd.payloads[i].length = msgs[i].len;
8420 		cmd.payloads[i].data = msgs[i].buf;
8421 	}
8422 
8423 	if (dc_submit_i2c(
8424 			ddc_service->ctx->dc,
8425 			ddc_service->ddc_pin->hw_info.ddc_channel,
8426 			&cmd))
8427 		result = num;
8428 
8429 	kfree(cmd.payloads);
8430 	return result;
8431 }
8432 
8433 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8434 {
8435 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8436 }
8437 
8438 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8439 	.master_xfer = amdgpu_dm_i2c_xfer,
8440 	.functionality = amdgpu_dm_i2c_func,
8441 };
8442 
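/*
 * Allocate an amdgpu_i2c_adapter bound to the given DDC service and wire it
 * up to the DM I2C algorithm above; the caller is responsible for registering
 * the adapter with i2c_add_adapter() and for freeing it on failure.
 */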
8443 static struct amdgpu_i2c_adapter *
8444 create_i2c(struct ddc_service *ddc_service,
8445 	   int link_index,
8446 	   int *res)
8447 {
8448 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8449 	struct amdgpu_i2c_adapter *i2c;
8450 
8451 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8452 	if (!i2c)
8453 		return NULL;
8454 	i2c->base.owner = THIS_MODULE;
8455 	i2c->base.class = I2C_CLASS_DDC;
8456 	i2c->base.dev.parent = &adev->pdev->dev;
8457 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8458 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8459 	i2c_set_adapdata(&i2c->base, i2c);
8460 	i2c->ddc_service = ddc_service;
8461 	if (i2c->ddc_service->ddc_pin)
8462 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8463 
8464 	return i2c;
8465 }
8466 
8467 
8468 /*
8469  * Note: this function assumes that dc_link_detect() was called for the
8470  * dc_link which will be represented by this aconnector.
8471  */
8472 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8473 				    struct amdgpu_dm_connector *aconnector,
8474 				    uint32_t link_index,
8475 				    struct amdgpu_encoder *aencoder)
8476 {
8477 	int res = 0;
8478 	int connector_type;
8479 	struct dc *dc = dm->dc;
8480 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8481 	struct amdgpu_i2c_adapter *i2c;
8482 
8483 	link->priv = aconnector;
8484 
8485 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8486 
8487 	i2c = create_i2c(link->ddc, link->link_index, &res);
8488 	if (!i2c) {
8489 		DRM_ERROR("Failed to create i2c adapter data\n");
8490 		return -ENOMEM;
8491 	}
8492 
8493 	aconnector->i2c = i2c;
8494 	res = i2c_add_adapter(&i2c->base);
8495 
8496 	if (res) {
8497 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8498 		goto out_free;
8499 	}
8500 
8501 	connector_type = to_drm_connector_type(link->connector_signal);
8502 
8503 	res = drm_connector_init_with_ddc(
8504 			dm->ddev,
8505 			&aconnector->base,
8506 			&amdgpu_dm_connector_funcs,
8507 			connector_type,
8508 			&i2c->base);
8509 
8510 	if (res) {
8511 		DRM_ERROR("connector_init failed\n");
8512 		aconnector->connector_id = -1;
8513 		goto out_free;
8514 	}
8515 
8516 	drm_connector_helper_add(
8517 			&aconnector->base,
8518 			&amdgpu_dm_connector_helper_funcs);
8519 
8520 	amdgpu_dm_connector_init_helper(
8521 		dm,
8522 		aconnector,
8523 		connector_type,
8524 		link,
8525 		link_index);
8526 
8527 	drm_connector_attach_encoder(
8528 		&aconnector->base, &aencoder->base);
8529 
8530 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8531 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8532 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8533 
8534 out_free:
8535 	if (res) {
8536 		kfree(i2c);
8537 		aconnector->i2c = NULL;
8538 	}
8539 	return res;
8540 }
8541 
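/*
 * Build the encoder possible_crtcs mask: one bit per CRTC, i.e.
 * (1 << num_crtc) - 1, capped at six CRTCs (0x3f).
 */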
8542 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8543 {
8544 	switch (adev->mode_info.num_crtc) {
8545 	case 1:
8546 		return 0x1;
8547 	case 2:
8548 		return 0x3;
8549 	case 3:
8550 		return 0x7;
8551 	case 4:
8552 		return 0xf;
8553 	case 5:
8554 		return 0x1f;
8555 	case 6:
8556 	default:
8557 		return 0x3f;
8558 	}
8559 }
8560 
8561 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8562 				  struct amdgpu_encoder *aencoder,
8563 				  uint32_t link_index)
8564 {
8565 	struct amdgpu_device *adev = drm_to_adev(dev);
8566 
8567 	int res = drm_encoder_init(dev,
8568 				   &aencoder->base,
8569 				   &amdgpu_dm_encoder_funcs,
8570 				   DRM_MODE_ENCODER_TMDS,
8571 				   NULL);
8572 
8573 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8574 
8575 	if (!res)
8576 		aencoder->encoder_id = link_index;
8577 	else
8578 		aencoder->encoder_id = -1;
8579 
8580 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8581 
8582 	return res;
8583 }
8584 
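/*
 * Enable or disable the interrupt sources tied to a CRTC: DRM vblank handling
 * plus the pageflip (and, with secure display, vline0) IRQs for that pipe.
 */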
8585 static void manage_dm_interrupts(struct amdgpu_device *adev,
8586 				 struct amdgpu_crtc *acrtc,
8587 				 bool enable)
8588 {
8589 	/*
8590 	 * We have no guarantee that the frontend index maps to the same
8591 	 * backend index - some even map to more than one.
8592 	 *
8593 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8594 	 */
8595 	int irq_type =
8596 		amdgpu_display_crtc_idx_to_irq_type(
8597 			adev,
8598 			acrtc->crtc_id);
8599 
8600 	if (enable) {
8601 		drm_crtc_vblank_on(&acrtc->base);
8602 		amdgpu_irq_get(
8603 			adev,
8604 			&adev->pageflip_irq,
8605 			irq_type);
8606 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8607 		amdgpu_irq_get(
8608 			adev,
8609 			&adev->vline0_irq,
8610 			irq_type);
8611 #endif
8612 	} else {
8613 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8614 		amdgpu_irq_put(
8615 			adev,
8616 			&adev->vline0_irq,
8617 			irq_type);
8618 #endif
8619 		amdgpu_irq_put(
8620 			adev,
8621 			&adev->pageflip_irq,
8622 			irq_type);
8623 		drm_crtc_vblank_off(&acrtc->base);
8624 	}
8625 }
8626 
8627 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8628 				      struct amdgpu_crtc *acrtc)
8629 {
8630 	int irq_type =
8631 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8632 
8633 	/**
8634 	 * This reads the current state for the IRQ and forcibly reapplies
8635 	 * the setting to hardware.
8636 	 */
8637 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8638 }
8639 
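/*
 * Return true when the scaling mode changed, when underscan was toggled with
 * non-zero borders, or when the border sizes themselves changed.
 */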
8640 static bool
8641 is_scaling_state_different(const struct dm_connector_state *dm_state,
8642 			   const struct dm_connector_state *old_dm_state)
8643 {
8644 	if (dm_state->scaling != old_dm_state->scaling)
8645 		return true;
8646 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8647 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8648 			return true;
8649 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8650 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8651 			return true;
8652 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8653 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8654 		return true;
8655 	return false;
8656 }
8657 
8658 #ifdef CONFIG_DRM_AMD_DC_HDCP
8659 static bool is_content_protection_different(struct drm_connector_state *state,
8660 					    const struct drm_connector_state *old_state,
8661 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8662 {
8663 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8664 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8665 
8666 	/* Handle: Type0/1 change */
8667 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8668 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8669 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8670 		return true;
8671 	}
8672 
8673 	/* CP is being re-enabled, ignore this
8674 	 *
8675 	 * Handles:	ENABLED -> DESIRED
8676 	 */
8677 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8678 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8679 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8680 		return false;
8681 	}
8682 
8683 	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8684 	 *
8685 	 * Handles:	UNDESIRED -> ENABLED
8686 	 */
8687 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8688 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8689 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8690 
8691 	/* Stream removed and re-enabled
8692 	 *
8693 	 * Can sometimes overlap with the HPD case,
8694 	 * thus set update_hdcp to false to avoid
8695 	 * setting HDCP multiple times.
8696 	 *
8697 	 * Handles:	DESIRED -> DESIRED (Special case)
8698 	 */
8699 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8700 		state->crtc && state->crtc->enabled &&
8701 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8702 		dm_con_state->update_hdcp = false;
8703 		return true;
8704 	}
8705 
8706 	/* Hot-plug, headless s3, dpms
8707 	 *
8708 	 * Only start HDCP if the display is connected/enabled.
8709 	 * update_hdcp flag will be set to false until the next
8710 	 * HPD comes in.
8711 	 *
8712 	 * Handles:	DESIRED -> DESIRED (Special case)
8713 	 */
8714 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8715 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8716 		dm_con_state->update_hdcp = false;
8717 		return true;
8718 	}
8719 
8720 	/*
8721 	 * Handles:	UNDESIRED -> UNDESIRED
8722 	 *		DESIRED -> DESIRED
8723 	 *		ENABLED -> ENABLED
8724 	 */
8725 	if (old_state->content_protection == state->content_protection)
8726 		return false;
8727 
8728 	/*
8729 	 * Handles:	UNDESIRED -> DESIRED
8730 	 *		DESIRED -> UNDESIRED
8731 	 *		ENABLED -> UNDESIRED
8732 	 */
8733 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8734 		return true;
8735 
8736 	/*
8737 	 * Handles:	DESIRED -> ENABLED
8738 	 */
8739 	return false;
8740 }
8741 
8742 #endif
8743 static void remove_stream(struct amdgpu_device *adev,
8744 			  struct amdgpu_crtc *acrtc,
8745 			  struct dc_stream_state *stream)
8746 {
8747 	/* this is the update mode case */
8748 
8749 	acrtc->otg_inst = -1;
8750 	acrtc->enabled = false;
8751 }
8752 
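/*
 * Translate the cursor plane state into a DC cursor position. When the cursor
 * hangs off the top or left edge, the position is clamped to zero and the
 * overhang is carried in the hotspot so DC still shows the visible portion.
 */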
8753 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8754 			       struct dc_cursor_position *position)
8755 {
8756 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8757 	int x, y;
8758 	int xorigin = 0, yorigin = 0;
8759 
8760 	if (!crtc || !plane->state->fb)
8761 		return 0;
8762 
8763 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8764 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8765 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8766 			  __func__,
8767 			  plane->state->crtc_w,
8768 			  plane->state->crtc_h);
8769 		return -EINVAL;
8770 	}
8771 
8772 	x = plane->state->crtc_x;
8773 	y = plane->state->crtc_y;
8774 
8775 	if (x <= -amdgpu_crtc->max_cursor_width ||
8776 	    y <= -amdgpu_crtc->max_cursor_height)
8777 		return 0;
8778 
8779 	if (x < 0) {
8780 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8781 		x = 0;
8782 	}
8783 	if (y < 0) {
8784 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8785 		y = 0;
8786 	}
8787 	position->enable = true;
8788 	position->translate_by_source = true;
8789 	position->x = x;
8790 	position->y = y;
8791 	position->x_hotspot = xorigin;
8792 	position->y_hotspot = yorigin;
8793 
8794 	return 0;
8795 }
8796 
8797 static void handle_cursor_update(struct drm_plane *plane,
8798 				 struct drm_plane_state *old_plane_state)
8799 {
8800 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8801 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8802 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8803 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8804 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8805 	uint64_t address = afb ? afb->address : 0;
8806 	struct dc_cursor_position position = {0};
8807 	struct dc_cursor_attributes attributes;
8808 	int ret;
8809 
8810 	if (!plane->state->fb && !old_plane_state->fb)
8811 		return;
8812 
8813 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8814 		      __func__,
8815 		      amdgpu_crtc->crtc_id,
8816 		      plane->state->crtc_w,
8817 		      plane->state->crtc_h);
8818 
8819 	ret = get_cursor_position(plane, crtc, &position);
8820 	if (ret)
8821 		return;
8822 
8823 	if (!position.enable) {
8824 		/* turn off cursor */
8825 		if (crtc_state && crtc_state->stream) {
8826 			mutex_lock(&adev->dm.dc_lock);
8827 			dc_stream_set_cursor_position(crtc_state->stream,
8828 						      &position);
8829 			mutex_unlock(&adev->dm.dc_lock);
8830 		}
8831 		return;
8832 	}
8833 
8834 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8835 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8836 
8837 	memset(&attributes, 0, sizeof(attributes));
8838 	attributes.address.high_part = upper_32_bits(address);
8839 	attributes.address.low_part  = lower_32_bits(address);
8840 	attributes.width             = plane->state->crtc_w;
8841 	attributes.height            = plane->state->crtc_h;
8842 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8843 	attributes.rotation_angle    = 0;
8844 	attributes.attribute_flags.value = 0;
8845 
8846 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8847 
8848 	if (crtc_state->stream) {
8849 		mutex_lock(&adev->dm.dc_lock);
8850 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8851 							 &attributes))
8852 			DRM_ERROR("DC failed to set cursor attributes\n");
8853 
8854 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8855 						   &position))
8856 			DRM_ERROR("DC failed to set cursor position\n");
8857 		mutex_unlock(&adev->dm.dc_lock);
8858 	}
8859 }
8860 
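/*
 * Hand the pending pageflip event over to the interrupt path: must be called
 * with the DRM event_lock held. The event is moved from the CRTC state onto
 * the amdgpu_crtc and the flip is marked AMDGPU_FLIP_SUBMITTED so the pageflip
 * interrupt handler can complete it.
 */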
8861 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8862 {
8863 
8864 	assert_spin_locked(&acrtc->base.dev->event_lock);
8865 	WARN_ON(acrtc->event);
8866 
8867 	acrtc->event = acrtc->base.state->event;
8868 
8869 	/* Set the flip status */
8870 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8871 
8872 	/* Mark this event as consumed */
8873 	acrtc->base.state->event = NULL;
8874 
8875 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8876 		     acrtc->crtc_id);
8877 }
8878 
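/*
 * Recompute the VRR parameters and infopacket for a stream on flip, under the
 * DRM event_lock, and mirror the results into dm_irq_params so the vblank and
 * vupdate handlers see a consistent snapshot. Also flags whether the timing
 * adjustment or the VRR infopacket actually changed.
 */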
8879 static void update_freesync_state_on_stream(
8880 	struct amdgpu_display_manager *dm,
8881 	struct dm_crtc_state *new_crtc_state,
8882 	struct dc_stream_state *new_stream,
8883 	struct dc_plane_state *surface,
8884 	u32 flip_timestamp_in_us)
8885 {
8886 	struct mod_vrr_params vrr_params;
8887 	struct dc_info_packet vrr_infopacket = {0};
8888 	struct amdgpu_device *adev = dm->adev;
8889 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8890 	unsigned long flags;
8891 	bool pack_sdp_v1_3 = false;
8892 
8893 	if (!new_stream)
8894 		return;
8895 
8896 	/*
8897 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8898 	 * For now it's sufficient to just guard against these conditions.
8899 	 */
8900 
8901 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8902 		return;
8903 
8904 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8905 	vrr_params = acrtc->dm_irq_params.vrr_params;
8906 
8907 	if (surface) {
8908 		mod_freesync_handle_preflip(
8909 			dm->freesync_module,
8910 			surface,
8911 			new_stream,
8912 			flip_timestamp_in_us,
8913 			&vrr_params);
8914 
8915 		if (adev->family < AMDGPU_FAMILY_AI &&
8916 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8917 			mod_freesync_handle_v_update(dm->freesync_module,
8918 						     new_stream, &vrr_params);
8919 
8920 			/* Need to call this before the frame ends. */
8921 			dc_stream_adjust_vmin_vmax(dm->dc,
8922 						   new_crtc_state->stream,
8923 						   &vrr_params.adjust);
8924 		}
8925 	}
8926 
8927 	mod_freesync_build_vrr_infopacket(
8928 		dm->freesync_module,
8929 		new_stream,
8930 		&vrr_params,
8931 		PACKET_TYPE_VRR,
8932 		TRANSFER_FUNC_UNKNOWN,
8933 		&vrr_infopacket,
8934 		pack_sdp_v1_3);
8935 
8936 	new_crtc_state->freesync_timing_changed |=
8937 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8938 			&vrr_params.adjust,
8939 			sizeof(vrr_params.adjust)) != 0);
8940 
8941 	new_crtc_state->freesync_vrr_info_changed |=
8942 		(memcmp(&new_crtc_state->vrr_infopacket,
8943 			&vrr_infopacket,
8944 			sizeof(vrr_infopacket)) != 0);
8945 
8946 	acrtc->dm_irq_params.vrr_params = vrr_params;
8947 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8948 
8949 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8950 	new_stream->vrr_infopacket = vrr_infopacket;
8951 
8952 	if (new_crtc_state->freesync_vrr_info_changed)
8953 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8954 			      new_crtc_state->base.crtc->base.id,
8955 			      (int)new_crtc_state->base.vrr_enabled,
8956 			      (int)vrr_params.state);
8957 
8958 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8959 }
8960 
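/*
 * Derive the effective VRR state (fixed, variable, inactive or unsupported)
 * from the new CRTC state's freesync_config and copy the resulting config,
 * VRR params and active plane count into dm_irq_params for use from
 * interrupt context.
 */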
8961 static void update_stream_irq_parameters(
8962 	struct amdgpu_display_manager *dm,
8963 	struct dm_crtc_state *new_crtc_state)
8964 {
8965 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8966 	struct mod_vrr_params vrr_params;
8967 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8968 	struct amdgpu_device *adev = dm->adev;
8969 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8970 	unsigned long flags;
8971 
8972 	if (!new_stream)
8973 		return;
8974 
8975 	/*
8976 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8977 	 * For now it's sufficient to just guard against these conditions.
8978 	 */
8979 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8980 		return;
8981 
8982 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8983 	vrr_params = acrtc->dm_irq_params.vrr_params;
8984 
8985 	if (new_crtc_state->vrr_supported &&
8986 	    config.min_refresh_in_uhz &&
8987 	    config.max_refresh_in_uhz) {
8988 		/*
8989 		 * if freesync compatible mode was set, config.state will be set
8990 		 * in atomic check
8991 		 */
8992 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8993 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8994 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8995 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8996 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8997 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8998 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8999 		} else {
9000 			config.state = new_crtc_state->base.vrr_enabled ?
9001 						     VRR_STATE_ACTIVE_VARIABLE :
9002 						     VRR_STATE_INACTIVE;
9003 		}
9004 	} else {
9005 		config.state = VRR_STATE_UNSUPPORTED;
9006 	}
9007 
9008 	mod_freesync_build_vrr_params(dm->freesync_module,
9009 				      new_stream,
9010 				      &config, &vrr_params);
9011 
9012 	new_crtc_state->freesync_timing_changed |=
9013 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9014 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9015 
9016 	new_crtc_state->freesync_config = config;
9017 	/* Copy state for access from DM IRQ handler */
9018 	acrtc->dm_irq_params.freesync_config = config;
9019 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9020 	acrtc->dm_irq_params.vrr_params = vrr_params;
9021 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9022 }
9023 
9024 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9025 					    struct dm_crtc_state *new_state)
9026 {
9027 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9028 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9029 
9030 	if (!old_vrr_active && new_vrr_active) {
9031 		/* Transition VRR inactive -> active:
9032 		 * While VRR is active, we must not disable vblank irq, as a
9033 		 * re-enable after disable would compute bogus vblank/pflip
9034 		 * timestamps if the re-enable happened inside the display front porch.
9035 		 *
9036 		 * We also need vupdate irq for the actual core vblank handling
9037 		 * at end of vblank.
9038 		 */
9039 		dm_set_vupdate_irq(new_state->base.crtc, true);
9040 		drm_crtc_vblank_get(new_state->base.crtc);
9041 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9042 				 __func__, new_state->base.crtc->base.id);
9043 	} else if (old_vrr_active && !new_vrr_active) {
9044 		/* Transition VRR active -> inactive:
9045 		 * Allow vblank irq disable again for fixed refresh rate.
9046 		 */
9047 		dm_set_vupdate_irq(new_state->base.crtc, false);
9048 		drm_crtc_vblank_put(new_state->base.crtc);
9049 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9050 				 __func__, new_state->base.crtc->base.id);
9051 	}
9052 }
9053 
9054 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9055 {
9056 	struct drm_plane *plane;
9057 	struct drm_plane_state *old_plane_state;
9058 	int i;
9059 
9060 	/*
9061 	 * TODO: Make this per-stream so we don't issue redundant updates for
9062 	 * commits with multiple streams.
9063 	 */
9064 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9065 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9066 			handle_cursor_update(plane, old_plane_state);
9067 }
9068 
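/*
 * Program all planes for one CRTC: build a bundle of surface and stream
 * updates, wait (with a timeout) for the framebuffer fences, throttle page
 * flips to the target vblank, and finally commit everything to DC under
 * dc_lock, handling PSR disable/setup around the update.
 */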
9069 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9070 				    struct dc_state *dc_state,
9071 				    struct drm_device *dev,
9072 				    struct amdgpu_display_manager *dm,
9073 				    struct drm_crtc *pcrtc,
9074 				    bool wait_for_vblank)
9075 {
9076 	uint32_t i;
9077 	uint64_t timestamp_ns;
9078 	struct drm_plane *plane;
9079 	struct drm_plane_state *old_plane_state, *new_plane_state;
9080 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9081 	struct drm_crtc_state *new_pcrtc_state =
9082 			drm_atomic_get_new_crtc_state(state, pcrtc);
9083 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9084 	struct dm_crtc_state *dm_old_crtc_state =
9085 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9086 	int planes_count = 0, vpos, hpos;
9087 	long r;
9088 	unsigned long flags;
9089 	struct amdgpu_bo *abo;
9090 	uint32_t target_vblank, last_flip_vblank;
9091 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9092 	bool pflip_present = false;
9093 	struct {
9094 		struct dc_surface_update surface_updates[MAX_SURFACES];
9095 		struct dc_plane_info plane_infos[MAX_SURFACES];
9096 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9097 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9098 		struct dc_stream_update stream_update;
9099 	} *bundle;
9100 
9101 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9102 
9103 	if (!bundle) {
9104 		dm_error("Failed to allocate update bundle\n");
9105 		goto cleanup;
9106 	}
9107 
9108 	/*
9109 	 * Disable the cursor first if we're disabling all the planes.
9110 	 * It'll remain on the screen after the planes are re-enabled
9111 	 * if we don't.
9112 	 */
9113 	if (acrtc_state->active_planes == 0)
9114 		amdgpu_dm_commit_cursors(state);
9115 
9116 	/* update planes when needed */
9117 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9118 		struct drm_crtc *crtc = new_plane_state->crtc;
9119 		struct drm_crtc_state *new_crtc_state;
9120 		struct drm_framebuffer *fb = new_plane_state->fb;
9121 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9122 		bool plane_needs_flip;
9123 		struct dc_plane_state *dc_plane;
9124 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9125 
9126 		/* Cursor plane is handled after stream updates */
9127 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9128 			continue;
9129 
9130 		if (!fb || !crtc || pcrtc != crtc)
9131 			continue;
9132 
9133 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9134 		if (!new_crtc_state->active)
9135 			continue;
9136 
9137 		dc_plane = dm_new_plane_state->dc_state;
9138 
9139 		bundle->surface_updates[planes_count].surface = dc_plane;
9140 		if (new_pcrtc_state->color_mgmt_changed) {
9141 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9142 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9143 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9144 		}
9145 
9146 		fill_dc_scaling_info(dm->adev, new_plane_state,
9147 				     &bundle->scaling_infos[planes_count]);
9148 
9149 		bundle->surface_updates[planes_count].scaling_info =
9150 			&bundle->scaling_infos[planes_count];
9151 
9152 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9153 
9154 		pflip_present = pflip_present || plane_needs_flip;
9155 
9156 		if (!plane_needs_flip) {
9157 			planes_count += 1;
9158 			continue;
9159 		}
9160 
9161 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9162 
9163 		/*
9164 		 * Wait for all fences on this FB. Do limited wait to avoid
9165 		 * deadlock during GPU reset when this fence will not signal
9166 		 * but we hold reservation lock for the BO.
9167 		 */
9168 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9169 					  msecs_to_jiffies(5000));
9170 		if (unlikely(r <= 0))
9171 			DRM_ERROR("Waiting for fences timed out!");
9172 
9173 		fill_dc_plane_info_and_addr(
9174 			dm->adev, new_plane_state,
9175 			afb->tiling_flags,
9176 			&bundle->plane_infos[planes_count],
9177 			&bundle->flip_addrs[planes_count].address,
9178 			afb->tmz_surface, false);
9179 
9180 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9181 				 new_plane_state->plane->index,
9182 				 bundle->plane_infos[planes_count].dcc.enable);
9183 
9184 		bundle->surface_updates[planes_count].plane_info =
9185 			&bundle->plane_infos[planes_count];
9186 
9187 		/*
9188 		 * Only allow immediate flips for fast updates that don't
9189 		 * change FB pitch, DCC state, rotation or mirroring.
9190 		 */
9191 		bundle->flip_addrs[planes_count].flip_immediate =
9192 			crtc->state->async_flip &&
9193 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9194 
9195 		timestamp_ns = ktime_get_ns();
9196 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9197 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9198 		bundle->surface_updates[planes_count].surface = dc_plane;
9199 
9200 		if (!bundle->surface_updates[planes_count].surface) {
9201 			DRM_ERROR("No surface for CRTC: id=%d\n",
9202 					acrtc_attach->crtc_id);
9203 			continue;
9204 		}
9205 
9206 		if (plane == pcrtc->primary)
9207 			update_freesync_state_on_stream(
9208 				dm,
9209 				acrtc_state,
9210 				acrtc_state->stream,
9211 				dc_plane,
9212 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9213 
9214 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9215 				 __func__,
9216 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9217 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9218 
9219 		planes_count += 1;
9220 
9221 	}
9222 
9223 	if (pflip_present) {
9224 		if (!vrr_active) {
9225 			/* Use old throttling in non-vrr fixed refresh rate mode
9226 			 * to keep flip scheduling based on target vblank counts
9227 			 * working in a backwards compatible way, e.g., for
9228 			 * clients using the GLX_OML_sync_control extension or
9229 			 * DRI3/Present extension with defined target_msc.
9230 			 */
9231 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9232 		} else {
9234 			/* For variable refresh rate mode only:
9235 			 * Get vblank of last completed flip to avoid > 1 vrr
9236 			 * flips per video frame by use of throttling, but allow
9237 			 * flip programming anywhere in the possibly large
9238 			 * variable vrr vblank interval for fine-grained flip
9239 			 * timing control and more opportunity to avoid stutter
9240 			 * on late submission of flips.
9241 			 */
9242 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9243 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9244 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9245 		}
9246 
9247 		target_vblank = last_flip_vblank + wait_for_vblank;
9248 
9249 		/*
9250 		 * Wait until we're out of the vertical blank period before the one
9251 		 * targeted by the flip
9252 		 */
9253 		while ((acrtc_attach->enabled &&
9254 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9255 							    0, &vpos, &hpos, NULL,
9256 							    NULL, &pcrtc->hwmode)
9257 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9258 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9259 			(int)(target_vblank -
9260 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9261 			usleep_range(1000, 1100);
9262 		}
9263 
9264 		/**
9265 		 * Prepare the flip event for the pageflip interrupt to handle.
9266 		 *
9267 		 * This only works in the case where we've already turned on the
9268 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9269 		 * from 0 -> n planes we have to skip a hardware generated event
9270 		 * and rely on sending it from software.
9271 		 */
9272 		if (acrtc_attach->base.state->event &&
9273 		    acrtc_state->active_planes > 0 &&
9274 		    !acrtc_state->force_dpms_off) {
9275 			drm_crtc_vblank_get(pcrtc);
9276 
9277 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9278 
9279 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9280 			prepare_flip_isr(acrtc_attach);
9281 
9282 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9283 		}
9284 
9285 		if (acrtc_state->stream) {
9286 			if (acrtc_state->freesync_vrr_info_changed)
9287 				bundle->stream_update.vrr_infopacket =
9288 					&acrtc_state->stream->vrr_infopacket;
9289 		}
9290 	}
9291 
9292 	/* Update the planes if changed or disable if we don't have any. */
9293 	if ((planes_count || acrtc_state->active_planes == 0) &&
9294 		acrtc_state->stream) {
9295 #if defined(CONFIG_DRM_AMD_DC_DCN)
9296 		/*
9297 		 * If PSR or idle optimizations are enabled then flush out
9298 		 * any pending work before hardware programming.
9299 		 */
9300 		if (dm->vblank_control_workqueue)
9301 			flush_workqueue(dm->vblank_control_workqueue);
9302 #endif
9303 
9304 		bundle->stream_update.stream = acrtc_state->stream;
9305 		if (new_pcrtc_state->mode_changed) {
9306 			bundle->stream_update.src = acrtc_state->stream->src;
9307 			bundle->stream_update.dst = acrtc_state->stream->dst;
9308 		}
9309 
9310 		if (new_pcrtc_state->color_mgmt_changed) {
9311 			/*
9312 			 * TODO: This isn't fully correct since we've actually
9313 			 * already modified the stream in place.
9314 			 */
9315 			bundle->stream_update.gamut_remap =
9316 				&acrtc_state->stream->gamut_remap_matrix;
9317 			bundle->stream_update.output_csc_transform =
9318 				&acrtc_state->stream->csc_color_matrix;
9319 			bundle->stream_update.out_transfer_func =
9320 				acrtc_state->stream->out_transfer_func;
9321 		}
9322 
9323 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9324 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9325 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9326 
9327 		/*
9328 		 * If FreeSync state on the stream has changed then we need to
9329 		 * re-adjust the min/max bounds now that DC doesn't handle this
9330 		 * as part of commit.
9331 		 */
9332 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9333 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9334 			dc_stream_adjust_vmin_vmax(
9335 				dm->dc, acrtc_state->stream,
9336 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9337 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9338 		}
9339 		mutex_lock(&dm->dc_lock);
9340 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9341 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9342 			amdgpu_dm_psr_disable(acrtc_state->stream);
9343 
9344 		dc_commit_updates_for_stream(dm->dc,
9345 						     bundle->surface_updates,
9346 						     planes_count,
9347 						     acrtc_state->stream,
9348 						     &bundle->stream_update,
9349 						     dc_state);
9350 
9351 		/**
9352 		 * Enable or disable the interrupts on the backend.
9353 		 *
9354 		 * Most pipes are put into power gating when unused.
9355 		 *
9356 		 * When power gating is enabled on a pipe we lose the
9357 		 * interrupt enablement state when power gating is disabled.
9358 		 *
9359 		 * So we need to update the IRQ control state in hardware
9360 		 * whenever the pipe turns on (since it could be previously
9361 		 * power gated) or off (since some pipes can't be power gated
9362 		 * on some ASICs).
9363 		 */
9364 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9365 			dm_update_pflip_irq_state(drm_to_adev(dev),
9366 						  acrtc_attach);
9367 
9368 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9369 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9370 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9371 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9372 
9373 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9374 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9375 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9376 			struct amdgpu_dm_connector *aconn =
9377 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9378 
9379 			if (aconn->psr_skip_count > 0)
9380 				aconn->psr_skip_count--;
9381 
9382 			/* Allow PSR when skip count is 0. */
9383 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9384 		} else {
9385 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9386 		}
9387 
9388 		mutex_unlock(&dm->dc_lock);
9389 	}
9390 
9391 	/*
9392 	 * Update cursor state *after* programming all the planes.
9393 	 * This avoids redundant programming in the case where we're going
9394 	 * to be disabling a single plane - those pipes are being disabled.
9395 	 */
9396 	if (acrtc_state->active_planes)
9397 		amdgpu_dm_commit_cursors(state);
9398 
9399 cleanup:
9400 	kfree(bundle);
9401 }
9402 
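/*
 * Notify the audio component about ELD changes: first signal removals for
 * connectors that changed CRTC or went through a modeset, then signal
 * additions for connectors whose new stream has a valid status.
 */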
9403 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9404 				   struct drm_atomic_state *state)
9405 {
9406 	struct amdgpu_device *adev = drm_to_adev(dev);
9407 	struct amdgpu_dm_connector *aconnector;
9408 	struct drm_connector *connector;
9409 	struct drm_connector_state *old_con_state, *new_con_state;
9410 	struct drm_crtc_state *new_crtc_state;
9411 	struct dm_crtc_state *new_dm_crtc_state;
9412 	const struct dc_stream_status *status;
9413 	int i, inst;
9414 
9415 	/* Notify device removals. */
9416 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9417 		if (old_con_state->crtc != new_con_state->crtc) {
9418 			/* CRTC changes require notification. */
9419 			goto notify;
9420 		}
9421 
9422 		if (!new_con_state->crtc)
9423 			continue;
9424 
9425 		new_crtc_state = drm_atomic_get_new_crtc_state(
9426 			state, new_con_state->crtc);
9427 
9428 		if (!new_crtc_state)
9429 			continue;
9430 
9431 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9432 			continue;
9433 
9434 	notify:
9435 		aconnector = to_amdgpu_dm_connector(connector);
9436 
9437 		mutex_lock(&adev->dm.audio_lock);
9438 		inst = aconnector->audio_inst;
9439 		aconnector->audio_inst = -1;
9440 		mutex_unlock(&adev->dm.audio_lock);
9441 
9442 		amdgpu_dm_audio_eld_notify(adev, inst);
9443 	}
9444 
9445 	/* Notify audio device additions. */
9446 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9447 		if (!new_con_state->crtc)
9448 			continue;
9449 
9450 		new_crtc_state = drm_atomic_get_new_crtc_state(
9451 			state, new_con_state->crtc);
9452 
9453 		if (!new_crtc_state)
9454 			continue;
9455 
9456 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9457 			continue;
9458 
9459 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9460 		if (!new_dm_crtc_state->stream)
9461 			continue;
9462 
9463 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9464 		if (!status)
9465 			continue;
9466 
9467 		aconnector = to_amdgpu_dm_connector(connector);
9468 
9469 		mutex_lock(&adev->dm.audio_lock);
9470 		inst = status->audio_inst;
9471 		aconnector->audio_inst = inst;
9472 		mutex_unlock(&adev->dm.audio_lock);
9473 
9474 		amdgpu_dm_audio_eld_notify(adev, inst);
9475 	}
9476 }
9477 
9478 /*
9479  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9480  * @crtc_state: the DRM CRTC state
9481  * @stream_state: the DC stream state.
9482  *
9483  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9484  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9485  */
9486 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9487 						struct dc_stream_state *stream_state)
9488 {
9489 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9490 }
9491 
9492 /**
9493  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9494  * @state: The atomic state to commit
9495  *
9496  * This will tell DC to commit the constructed DC state from atomic_check,
9497  * programming the hardware. Any failure here implies a hardware failure, since
9498  * atomic check should have filtered anything non-kosher.
9499  */
9500 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9501 {
9502 	struct drm_device *dev = state->dev;
9503 	struct amdgpu_device *adev = drm_to_adev(dev);
9504 	struct amdgpu_display_manager *dm = &adev->dm;
9505 	struct dm_atomic_state *dm_state;
9506 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9507 	uint32_t i, j;
9508 	struct drm_crtc *crtc;
9509 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9510 	unsigned long flags;
9511 	bool wait_for_vblank = true;
9512 	struct drm_connector *connector;
9513 	struct drm_connector_state *old_con_state, *new_con_state;
9514 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9515 	int crtc_disable_count = 0;
9516 	bool mode_set_reset_required = false;
9517 
9518 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9519 
9520 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9521 
9522 	dm_state = dm_atomic_get_new_state(state);
9523 	if (dm_state && dm_state->context) {
9524 		dc_state = dm_state->context;
9525 	} else {
9526 		/* No state changes, retain current state. */
9527 		dc_state_temp = dc_create_state(dm->dc);
9528 		ASSERT(dc_state_temp);
9529 		dc_state = dc_state_temp;
9530 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9531 	}
9532 
9533 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9534 				       new_crtc_state, i) {
9535 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9536 
9537 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9538 
9539 		if (old_crtc_state->active &&
9540 		    (!new_crtc_state->active ||
9541 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9542 			manage_dm_interrupts(adev, acrtc, false);
9543 			dc_stream_release(dm_old_crtc_state->stream);
9544 		}
9545 	}
9546 
9547 	drm_atomic_helper_calc_timestamping_constants(state);
9548 
9549 	/* update changed items */
9550 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9551 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9552 
9553 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9554 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9555 
9556 		DRM_DEBUG_ATOMIC(
9557 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9558 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9559 			"connectors_changed:%d\n",
9560 			acrtc->crtc_id,
9561 			new_crtc_state->enable,
9562 			new_crtc_state->active,
9563 			new_crtc_state->planes_changed,
9564 			new_crtc_state->mode_changed,
9565 			new_crtc_state->active_changed,
9566 			new_crtc_state->connectors_changed);
9567 
9568 		/* Disable cursor if disabling crtc */
9569 		if (old_crtc_state->active && !new_crtc_state->active) {
9570 			struct dc_cursor_position position;
9571 
9572 			memset(&position, 0, sizeof(position));
9573 			mutex_lock(&dm->dc_lock);
9574 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9575 			mutex_unlock(&dm->dc_lock);
9576 		}
9577 
9578 		/* Copy all transient state flags into dc state */
9579 		if (dm_new_crtc_state->stream) {
9580 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9581 							    dm_new_crtc_state->stream);
9582 		}
9583 
9584 		/* Handles the headless hotplug case, updating new_state and
9585 		 * aconnector as needed.
9586 		 */
9587 
9588 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9589 
9590 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9591 
9592 			if (!dm_new_crtc_state->stream) {
9593 				/*
9594 				 * This can happen because of problems with
9595 				 * delivery of userspace notifications.
9596 				 * In that case userspace tries to set a mode
9597 				 * on a display which is in fact disconnected,
9598 				 * and dc_sink on the aconnector is NULL.
9599 				 * We expect a mode reset to come soon.
9600 				 *
9601 				 * This can also happen when an unplug occurs
9602 				 * during the resume sequence.
9603 				 *
9604 				 * In this case, we want to pretend we still
9605 				 * have a sink to keep the pipe running so that
9606 				 * hw state is consistent with the sw state.
9607 				 */
9608 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9609 						__func__, acrtc->base.base.id);
9610 				continue;
9611 			}
9612 
9613 			if (dm_old_crtc_state->stream)
9614 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9615 
9616 			pm_runtime_get_noresume(dev->dev);
9617 
9618 			acrtc->enabled = true;
9619 			acrtc->hw_mode = new_crtc_state->mode;
9620 			crtc->hwmode = new_crtc_state->mode;
9621 			mode_set_reset_required = true;
9622 		} else if (modereset_required(new_crtc_state)) {
9623 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9624 			/* i.e. reset mode */
9625 			if (dm_old_crtc_state->stream)
9626 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9627 
9628 			mode_set_reset_required = true;
9629 		}
9630 	} /* for_each_crtc_in_state() */
9631 
9632 	if (dc_state) {
9633 		/* if there is a mode set or reset, disable eDP PSR */
9634 		if (mode_set_reset_required) {
9635 #if defined(CONFIG_DRM_AMD_DC_DCN)
9636 			if (dm->vblank_control_workqueue)
9637 				flush_workqueue(dm->vblank_control_workqueue);
9638 #endif
9639 			amdgpu_dm_psr_disable_all(dm);
9640 		}
9641 
9642 		dm_enable_per_frame_crtc_master_sync(dc_state);
9643 		mutex_lock(&dm->dc_lock);
9644 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9645 #if defined(CONFIG_DRM_AMD_DC_DCN)
9646 		/* Allow idle optimization when vblank count is 0 for display off */
9647 		if (dm->active_vblank_irq_count == 0)
9648 			dc_allow_idle_optimizations(dm->dc, true);
9649 #endif
9650 		mutex_unlock(&dm->dc_lock);
9651 	}
9652 
9653 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9654 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9655 
9656 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9657 
9658 		if (dm_new_crtc_state->stream != NULL) {
9659 			const struct dc_stream_status *status =
9660 					dc_stream_get_status(dm_new_crtc_state->stream);
9661 
9662 			if (!status)
9663 				status = dc_stream_get_status_from_state(dc_state,
9664 									 dm_new_crtc_state->stream);
9665 			if (!status)
9666 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9667 			else
9668 				acrtc->otg_inst = status->primary_otg_inst;
9669 		}
9670 	}
9671 #ifdef CONFIG_DRM_AMD_DC_HDCP
9672 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9673 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9674 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9675 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9676 
9677 		new_crtc_state = NULL;
9678 
9679 		if (acrtc)
9680 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9681 
9682 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9683 
9684 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9685 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9686 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9687 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9688 			dm_new_con_state->update_hdcp = true;
9689 			continue;
9690 		}
9691 
9692 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9693 			hdcp_update_display(
9694 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9695 				new_con_state->hdcp_content_type,
9696 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9697 	}
9698 #endif
9699 
9700 	/* Handle connector state changes */
9701 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9702 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9703 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9704 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9705 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9706 		struct dc_stream_update stream_update;
9707 		struct dc_info_packet hdr_packet;
9708 		struct dc_stream_status *status = NULL;
9709 		bool abm_changed, hdr_changed, scaling_changed;
9710 
9711 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9712 		memset(&stream_update, 0, sizeof(stream_update));
9713 
9714 		if (acrtc) {
9715 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9716 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9717 		}
9718 
9719 		/* Skip any modesets/resets */
9720 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9721 			continue;
9722 
9723 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9724 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9725 
9726 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9727 							     dm_old_con_state);
9728 
9729 		abm_changed = dm_new_crtc_state->abm_level !=
9730 			      dm_old_crtc_state->abm_level;
9731 
9732 		hdr_changed =
9733 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9734 
9735 		if (!scaling_changed && !abm_changed && !hdr_changed)
9736 			continue;
9737 
9738 		stream_update.stream = dm_new_crtc_state->stream;
9739 		if (scaling_changed) {
9740 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9741 					dm_new_con_state, dm_new_crtc_state->stream);
9742 
9743 			stream_update.src = dm_new_crtc_state->stream->src;
9744 			stream_update.dst = dm_new_crtc_state->stream->dst;
9745 		}
9746 
9747 		if (abm_changed) {
9748 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9749 
9750 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9751 		}
9752 
9753 		if (hdr_changed) {
9754 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9755 			stream_update.hdr_static_metadata = &hdr_packet;
9756 		}
9757 
9758 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9759 
9760 		if (WARN_ON(!status))
9761 			continue;
9762 
9763 		WARN_ON(!status->plane_count);
9764 
9765 		/*
9766 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9767 		 * Here we create an empty update on each plane.
9768 		 * To fix this, DC should permit updating only stream properties.
9769 		 */
9770 		for (j = 0; j < status->plane_count; j++)
9771 			dummy_updates[j].surface = status->plane_states[0];
9772 
9773 
9774 		mutex_lock(&dm->dc_lock);
9775 		dc_commit_updates_for_stream(dm->dc,
9776 						     dummy_updates,
9777 						     status->plane_count,
9778 						     dm_new_crtc_state->stream,
9779 						     &stream_update,
9780 						     dc_state);
9781 		mutex_unlock(&dm->dc_lock);
9782 	}
9783 
9784 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9785 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9786 				      new_crtc_state, i) {
9787 		if (old_crtc_state->active && !new_crtc_state->active)
9788 			crtc_disable_count++;
9789 
9790 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9791 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9792 
9793 		/* For freesync config update on crtc state and params for irq */
9794 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9795 
9796 		/* Handle vrr on->off / off->on transitions */
9797 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9798 						dm_new_crtc_state);
9799 	}
9800 
9801 	/**
9802 	 * Enable interrupts for CRTCs that are newly enabled or went through
9803 	 * a modeset. This is intentionally deferred until after the front end
9804 	 * state has been modified, so that the OTG is already on and the IRQ
9805 	 * handlers do not access stale or invalid state.
9806 	 */
9807 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9808 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9809 #ifdef CONFIG_DEBUG_FS
9810 		bool configure_crc = false;
9811 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9812 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9813 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9814 #endif
9815 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9816 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9817 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9818 #endif
9819 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9820 
9821 		if (new_crtc_state->active &&
9822 		    (!old_crtc_state->active ||
9823 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9824 			dc_stream_retain(dm_new_crtc_state->stream);
9825 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9826 			manage_dm_interrupts(adev, acrtc, true);
9827 
9828 #ifdef CONFIG_DEBUG_FS
9829 			/**
9830 			 * Frontend may have changed so reapply the CRC capture
9831 			 * settings for the stream.
9832 			 */
9833 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9834 
9835 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9836 				configure_crc = true;
9837 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9838 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9839 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9840 					acrtc->dm_irq_params.crc_window.update_win = true;
9841 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9842 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9843 					crc_rd_wrk->crtc = crtc;
9844 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9845 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9846 				}
9847 #endif
9848 			}
9849 
9850 			if (configure_crc)
9851 				if (amdgpu_dm_crtc_configure_crc_source(
9852 					crtc, dm_new_crtc_state, cur_crc_src))
9853 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9854 #endif
9855 		}
9856 	}
9857 
9858 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9859 		if (new_crtc_state->async_flip)
9860 			wait_for_vblank = false;
9861 
9862 	/* update planes when needed per crtc*/
9863 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9864 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9865 
9866 		if (dm_new_crtc_state->stream)
9867 			amdgpu_dm_commit_planes(state, dc_state, dev,
9868 						dm, crtc, wait_for_vblank);
9869 	}
9870 
9871 	/* Update audio instances for each connector. */
9872 	amdgpu_dm_commit_audio(dev, state);
9873 
9874 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9875 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9876 	/* restore the backlight level */
9877 	for (i = 0; i < dm->num_of_edps; i++) {
9878 		if (dm->backlight_dev[i] &&
9879 		    (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9880 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9881 	}
9882 #endif
9883 	/*
9884 	 * send a vblank event for all events not handled in flip and
9885 	 * mark the event as consumed for drm_atomic_helper_commit_hw_done()
9886 	 */
9887 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9888 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9889 
9890 		if (new_crtc_state->event)
9891 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9892 
9893 		new_crtc_state->event = NULL;
9894 	}
9895 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9896 
9897 	/* Signal HW programming completion */
9898 	drm_atomic_helper_commit_hw_done(state);
9899 
9900 	if (wait_for_vblank)
9901 		drm_atomic_helper_wait_for_flip_done(dev, state);
9902 
9903 	drm_atomic_helper_cleanup_planes(dev, state);
9904 
9905 	/* return the stolen vga memory back to VRAM */
9906 	if (!adev->mman.keep_stolen_vga_memory)
9907 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9908 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9909 
9910 	/*
9911 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9912 	 * so we can put the GPU into runtime suspend if we're not driving any
9913 	 * displays anymore
9914 	 */
9915 	for (i = 0; i < crtc_disable_count; i++)
9916 		pm_runtime_put_autosuspend(dev->dev);
9917 	pm_runtime_mark_last_busy(dev->dev);
9918 
9919 	if (dc_state_temp)
9920 		dc_release_state(dc_state_temp);
9921 }
9922 
9923 
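/*
 * Build and commit a minimal atomic state (connector, its CRTC and the
 * primary plane) with mode_changed forced, in order to restore the previous
 * display configuration without waiting for a userspace modeset.
 */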
9924 static int dm_force_atomic_commit(struct drm_connector *connector)
9925 {
9926 	int ret = 0;
9927 	struct drm_device *ddev = connector->dev;
9928 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9929 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9930 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9931 	struct drm_connector_state *conn_state;
9932 	struct drm_crtc_state *crtc_state;
9933 	struct drm_plane_state *plane_state;
9934 
9935 	if (!state)
9936 		return -ENOMEM;
9937 
9938 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9939 
9940 	/* Construct an atomic state to restore previous display setting */
9941 
9942 	/*
9943 	 * Attach connectors to drm_atomic_state
9944 	 */
9945 	conn_state = drm_atomic_get_connector_state(state, connector);
9946 
9947 	ret = PTR_ERR_OR_ZERO(conn_state);
9948 	if (ret)
9949 		goto out;
9950 
	/* Attach crtc to drm_atomic_state */
9952 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9953 
9954 	ret = PTR_ERR_OR_ZERO(crtc_state);
9955 	if (ret)
9956 		goto out;
9957 
9958 	/* force a restore */
9959 	crtc_state->mode_changed = true;
9960 
9961 	/* Attach plane to drm_atomic_state */
9962 	plane_state = drm_atomic_get_plane_state(state, plane);
9963 
9964 	ret = PTR_ERR_OR_ZERO(plane_state);
9965 	if (ret)
9966 		goto out;
9967 
9968 	/* Call commit internally with the state we just constructed */
9969 	ret = drm_atomic_commit(state);
9970 
9971 out:
9972 	drm_atomic_state_put(state);
9973 	if (ret)
9974 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9975 
9976 	return ret;
9977 }
9978 
9979 /*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
9983  */
9984 void dm_restore_drm_connector_state(struct drm_device *dev,
9985 				    struct drm_connector *connector)
9986 {
9987 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9988 	struct amdgpu_crtc *disconnected_acrtc;
9989 	struct dm_crtc_state *acrtc_state;
9990 
9991 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9992 		return;
9993 
9994 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9995 	if (!disconnected_acrtc)
9996 		return;
9997 
9998 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9999 	if (!acrtc_state->stream)
10000 		return;
10001 
10002 	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
10006 	 */
10007 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10008 		dm_force_atomic_commit(&aconnector->base);
10009 }
10010 
10011 /*
 * Grabs all modesetting locks to serialize against any blocking commits and
 * waits for completion of all non-blocking commits.
10014  */
10015 static int do_aquire_global_lock(struct drm_device *dev,
10016 				 struct drm_atomic_state *state)
10017 {
10018 	struct drm_crtc *crtc;
10019 	struct drm_crtc_commit *commit;
10020 	long ret;
10021 
10022 	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we take here will get
	 * released too.
10026 	 */
10027 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10028 	if (ret)
10029 		return ret;
10030 
10031 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10032 		spin_lock(&crtc->commit_lock);
10033 		commit = list_first_entry_or_null(&crtc->commit_list,
10034 				struct drm_crtc_commit, commit_entry);
10035 		if (commit)
10036 			drm_crtc_commit_get(commit);
10037 		spin_unlock(&crtc->commit_lock);
10038 
10039 		if (!commit)
10040 			continue;
10041 
10042 		/*
		 * Make sure all pending HW programming has completed and all
		 * page flips are done, waiting up to 10 seconds for each.
10045 		 */
10046 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10047 
10048 		if (ret > 0)
10049 			ret = wait_for_completion_interruptible_timeout(
10050 					&commit->flip_done, 10*HZ);
10051 
10052 		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);
10055 
10056 		drm_crtc_commit_put(commit);
10057 	}
10058 
10059 	return ret < 0 ? ret : 0;
10060 }
10061 
10062 static void get_freesync_config_for_crtc(
10063 	struct dm_crtc_state *new_crtc_state,
10064 	struct dm_connector_state *new_con_state)
10065 {
10066 	struct mod_freesync_config config = {0};
10067 	struct amdgpu_dm_connector *aconnector =
10068 			to_amdgpu_dm_connector(new_con_state->base.connector);
10069 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10070 	int vrefresh = drm_mode_vrefresh(mode);
10071 	bool fs_vid_mode = false;
10072 
10073 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10074 					vrefresh >= aconnector->min_vfreq &&
10075 					vrefresh <= aconnector->max_vfreq;
10076 
10077 	if (new_crtc_state->vrr_supported) {
10078 		new_crtc_state->stream->ignore_msa_timing_param = true;
10079 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10080 
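		/*
		 * The freesync module expects refresh rates in micro-Hz, so
		 * the connector's Hz limits are scaled by 1,000,000 below
		 * (e.g. an illustrative 48-144 Hz panel becomes
		 * 48,000,000-144,000,000 uHz).
		 */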
10081 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10082 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10083 		config.vsif_supported = true;
10084 		config.btr = true;
10085 
10086 		if (fs_vid_mode) {
10087 			config.state = VRR_STATE_ACTIVE_FIXED;
10088 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10089 			goto out;
10090 		} else if (new_crtc_state->base.vrr_enabled) {
10091 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10092 		} else {
10093 			config.state = VRR_STATE_INACTIVE;
10094 		}
10095 	}
10096 out:
10097 	new_crtc_state->freesync_config = config;
10098 }
10099 
10100 static void reset_freesync_config_for_crtc(
10101 	struct dm_crtc_state *new_crtc_state)
10102 {
10103 	new_crtc_state->vrr_supported = false;
10104 
10105 	memset(&new_crtc_state->vrr_infopacket, 0,
10106 	       sizeof(new_crtc_state->vrr_infopacket));
10107 }
10108 
10109 static bool
10110 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10111 				 struct drm_crtc_state *new_crtc_state)
10112 {
10113 	struct drm_display_mode old_mode, new_mode;
10114 
10115 	if (!old_crtc_state || !new_crtc_state)
10116 		return false;
10117 
10118 	old_mode = old_crtc_state->mode;
10119 	new_mode = new_crtc_state->mode;
10120 
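	/*
	 * For FreeSync video modes only the vertical timing is expected to
	 * change (a front porch adjustment): vtotal, vsync_start and
	 * vsync_end must differ while all horizontal fields and the vsync
	 * pulse width (vsync_end - vsync_start) stay the same.
	 */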
10121 	if (old_mode.clock       == new_mode.clock &&
10122 	    old_mode.hdisplay    == new_mode.hdisplay &&
10123 	    old_mode.vdisplay    == new_mode.vdisplay &&
10124 	    old_mode.htotal      == new_mode.htotal &&
10125 	    old_mode.vtotal      != new_mode.vtotal &&
10126 	    old_mode.hsync_start == new_mode.hsync_start &&
10127 	    old_mode.vsync_start != new_mode.vsync_start &&
10128 	    old_mode.hsync_end   == new_mode.hsync_end &&
10129 	    old_mode.vsync_end   != new_mode.vsync_end &&
10130 	    old_mode.hskew       == new_mode.hskew &&
10131 	    old_mode.vscan       == new_mode.vscan &&
10132 	    (old_mode.vsync_end - old_mode.vsync_start) ==
10133 	    (new_mode.vsync_end - new_mode.vsync_start))
10134 		return true;
10135 
10136 	return false;
10137 }
10138 
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10140 	uint64_t num, den, res;
10141 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10142 
10143 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10144 
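	/*
	 * Fixed refresh rate in micro-Hz:
	 *   refresh_uhz = (pixel clock in kHz * 1000 * 1000000) / (htotal * vtotal)
	 * As an illustrative example, a 148500 kHz clock with a 2200 x 1125
	 * total raster works out to 60,000,000 uHz (60 Hz).
	 */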
10145 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10146 	den = (unsigned long long)new_crtc_state->mode.htotal *
10147 	      (unsigned long long)new_crtc_state->mode.vtotal;
10148 
10149 	res = div_u64(num, den);
10150 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10151 }
10152 
10153 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10154 				struct drm_atomic_state *state,
10155 				struct drm_crtc *crtc,
10156 				struct drm_crtc_state *old_crtc_state,
10157 				struct drm_crtc_state *new_crtc_state,
10158 				bool enable,
10159 				bool *lock_and_validation_needed)
10160 {
10161 	struct dm_atomic_state *dm_state = NULL;
10162 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10163 	struct dc_stream_state *new_stream;
10164 	int ret = 0;
10165 
10166 	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set; update changed items.
10169 	 */
10170 	struct amdgpu_crtc *acrtc = NULL;
10171 	struct amdgpu_dm_connector *aconnector = NULL;
10172 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10173 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10174 
10175 	new_stream = NULL;
10176 
10177 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10178 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10179 	acrtc = to_amdgpu_crtc(crtc);
10180 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10181 
10182 	/* TODO This hack should go away */
10183 	if (aconnector && enable) {
		/* Make sure a fake sink is created in the plug-in scenario */
10185 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10186 							    &aconnector->base);
10187 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10188 							    &aconnector->base);
10189 
10190 		if (IS_ERR(drm_new_conn_state)) {
10191 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10192 			goto fail;
10193 		}
10194 
10195 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10196 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10197 
10198 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10199 			goto skip_modeset;
10200 
10201 		new_stream = create_validate_stream_for_sink(aconnector,
10202 							     &new_crtc_state->mode,
10203 							     dm_new_conn_state,
10204 							     dm_old_crtc_state->stream);
10205 
10206 		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3. In this case it is not an
		 * error: the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
10211 		 */
10212 
10213 		if (!new_stream) {
10214 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10215 					__func__, acrtc->base.base.id);
10216 			ret = -ENOMEM;
10217 			goto fail;
10218 		}
10219 
10220 		/*
10221 		 * TODO: Check VSDB bits to decide whether this should
10222 		 * be enabled or not.
10223 		 */
10224 		new_stream->triggered_crtc_reset.enabled =
10225 			dm->force_timing_sync;
10226 
10227 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10228 
10229 		ret = fill_hdr_info_packet(drm_new_conn_state,
10230 					   &new_stream->hdr_static_metadata);
10231 		if (ret)
10232 			goto fail;
10233 
10234 		/*
10235 		 * If we already removed the old stream from the context
10236 		 * (and set the new stream to NULL) then we can't reuse
10237 		 * the old stream even if the stream and scaling are unchanged.
10238 		 * We'll hit the BUG_ON and black screen.
10239 		 *
10240 		 * TODO: Refactor this function to allow this check to work
10241 		 * in all conditions.
10242 		 */
10243 		if (amdgpu_freesync_vid_mode &&
10244 		    dm_new_crtc_state->stream &&
10245 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10246 			goto skip_modeset;
10247 
10248 		if (dm_new_crtc_state->stream &&
10249 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10250 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10251 			new_crtc_state->mode_changed = false;
10252 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10253 					 new_crtc_state->mode_changed);
10254 		}
10255 	}
10256 
10257 	/* mode_changed flag may get updated above, need to check again */
10258 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10259 		goto skip_modeset;
10260 
10261 	DRM_DEBUG_ATOMIC(
10262 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
10264 		"connectors_changed:%d\n",
10265 		acrtc->crtc_id,
10266 		new_crtc_state->enable,
10267 		new_crtc_state->active,
10268 		new_crtc_state->planes_changed,
10269 		new_crtc_state->mode_changed,
10270 		new_crtc_state->active_changed,
10271 		new_crtc_state->connectors_changed);
10272 
10273 	/* Remove stream for any changed/disabled CRTC */
10274 	if (!enable) {
10275 
10276 		if (!dm_old_crtc_state->stream)
10277 			goto skip_modeset;
10278 
10279 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10280 		    is_timing_unchanged_for_freesync(new_crtc_state,
10281 						     old_crtc_state)) {
10282 			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required for front porch change, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
10287 
10288 			set_freesync_fixed_config(dm_new_crtc_state);
10289 
10290 			goto skip_modeset;
10291 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10292 			   is_freesync_video_mode(&new_crtc_state->mode,
10293 						  aconnector)) {
10294 			struct drm_display_mode *high_mode;
10295 
10296 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10297 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10298 				set_freesync_fixed_config(dm_new_crtc_state);
10299 			}
10300 		}
10301 
10302 		ret = dm_atomic_get_state(state, &dm_state);
10303 		if (ret)
10304 			goto fail;
10305 
10306 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10307 				crtc->base.id);
10308 
10309 		/* i.e. reset mode */
10310 		if (dc_remove_stream_from_ctx(
10311 				dm->dc,
10312 				dm_state->context,
10313 				dm_old_crtc_state->stream) != DC_OK) {
10314 			ret = -EINVAL;
10315 			goto fail;
10316 		}
10317 
10318 		dc_stream_release(dm_old_crtc_state->stream);
10319 		dm_new_crtc_state->stream = NULL;
10320 
10321 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10322 
10323 		*lock_and_validation_needed = true;
10324 
10325 	} else {/* Add stream for any updated/enabled CRTC */
10326 		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when newly added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of that.
10330 		 */
10331 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10332 			goto skip_modeset;
10333 
10334 		if (modereset_required(new_crtc_state))
10335 			goto skip_modeset;
10336 
10337 		if (modeset_required(new_crtc_state, new_stream,
10338 				     dm_old_crtc_state->stream)) {
10339 
10340 			WARN_ON(dm_new_crtc_state->stream);
10341 
10342 			ret = dm_atomic_get_state(state, &dm_state);
10343 			if (ret)
10344 				goto fail;
10345 
10346 			dm_new_crtc_state->stream = new_stream;
10347 
10348 			dc_stream_retain(new_stream);
10349 
10350 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10351 					 crtc->base.id);
10352 
10353 			if (dc_add_stream_to_ctx(
10354 					dm->dc,
10355 					dm_state->context,
10356 					dm_new_crtc_state->stream) != DC_OK) {
10357 				ret = -EINVAL;
10358 				goto fail;
10359 			}
10360 
10361 			*lock_and_validation_needed = true;
10362 		}
10363 	}
10364 
10365 skip_modeset:
10366 	/* Release extra reference */
10367 	if (new_stream)
		dc_stream_release(new_stream);
10369 
10370 	/*
10371 	 * We want to do dc stream updates that do not require a
10372 	 * full modeset below.
10373 	 */
10374 	if (!(enable && aconnector && new_crtc_state->active))
10375 		return 0;
10376 	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling a CRTC (the stream has just
	 *    been added to the dc context, or is already in the context),
	 * 2. it has a valid connector attached, and
	 * 3. it is currently active and enabled.
10382 	 * => The dc stream state currently exists.
10383 	 */
10384 	BUG_ON(dm_new_crtc_state->stream == NULL);
10385 
10386 	/* Scaling or underscan settings */
10387 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10388 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10389 		update_stream_scaling_settings(
10390 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10391 
10392 	/* ABM settings */
10393 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10394 
10395 	/*
10396 	 * Color management settings. We also update color properties
10397 	 * when a modeset is needed, to ensure it gets reprogrammed.
10398 	 */
10399 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10400 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10401 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10402 		if (ret)
10403 			goto fail;
10404 	}
10405 
10406 	/* Update Freesync settings. */
10407 	get_freesync_config_for_crtc(dm_new_crtc_state,
10408 				     dm_new_conn_state);
10409 
10410 	return ret;
10411 
10412 fail:
10413 	if (new_stream)
10414 		dc_stream_release(new_stream);
10415 	return ret;
10416 }
10417 
10418 static bool should_reset_plane(struct drm_atomic_state *state,
10419 			       struct drm_plane *plane,
10420 			       struct drm_plane_state *old_plane_state,
10421 			       struct drm_plane_state *new_plane_state)
10422 {
10423 	struct drm_plane *other;
10424 	struct drm_plane_state *old_other_state, *new_other_state;
10425 	struct drm_crtc_state *new_crtc_state;
10426 	int i;
10427 
10428 	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
10432 	 */
10433 	if (state->allow_modeset)
10434 		return true;
10435 
10436 	/* Exit early if we know that we're adding or removing the plane. */
10437 	if (old_plane_state->crtc != new_plane_state->crtc)
10438 		return true;
10439 
10440 	/* old crtc == new_crtc == NULL, plane not in context. */
10441 	if (!new_plane_state->crtc)
10442 		return false;
10443 
10444 	new_crtc_state =
10445 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10446 
10447 	if (!new_crtc_state)
10448 		return true;
10449 
10450 	/* CRTC Degamma changes currently require us to recreate planes. */
10451 	if (new_crtc_state->color_mgmt_changed)
10452 		return true;
10453 
10454 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10455 		return true;
10456 
10457 	/*
10458 	 * If there are any new primary or overlay planes being added or
10459 	 * removed then the z-order can potentially change. To ensure
10460 	 * correct z-order and pipe acquisition the current DC architecture
10461 	 * requires us to remove and recreate all existing planes.
10462 	 *
10463 	 * TODO: Come up with a more elegant solution for this.
10464 	 */
10465 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10466 		struct amdgpu_framebuffer *old_afb, *new_afb;
10467 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10468 			continue;
10469 
10470 		if (old_other_state->crtc != new_plane_state->crtc &&
10471 		    new_other_state->crtc != new_plane_state->crtc)
10472 			continue;
10473 
10474 		if (old_other_state->crtc != new_other_state->crtc)
10475 			return true;
10476 
10477 		/* Src/dst size and scaling updates. */
10478 		if (old_other_state->src_w != new_other_state->src_w ||
10479 		    old_other_state->src_h != new_other_state->src_h ||
10480 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10481 		    old_other_state->crtc_h != new_other_state->crtc_h)
10482 			return true;
10483 
10484 		/* Rotation / mirroring updates. */
10485 		if (old_other_state->rotation != new_other_state->rotation)
10486 			return true;
10487 
10488 		/* Blending updates. */
10489 		if (old_other_state->pixel_blend_mode !=
10490 		    new_other_state->pixel_blend_mode)
10491 			return true;
10492 
10493 		/* Alpha updates. */
10494 		if (old_other_state->alpha != new_other_state->alpha)
10495 			return true;
10496 
10497 		/* Colorspace changes. */
10498 		if (old_other_state->color_range != new_other_state->color_range ||
10499 		    old_other_state->color_encoding != new_other_state->color_encoding)
10500 			return true;
10501 
10502 		/* Framebuffer checks fall at the end. */
10503 		if (!old_other_state->fb || !new_other_state->fb)
10504 			continue;
10505 
10506 		/* Pixel format changes can require bandwidth updates. */
10507 		if (old_other_state->fb->format != new_other_state->fb->format)
10508 			return true;
10509 
10510 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10511 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10512 
10513 		/* Tiling and DCC changes also require bandwidth updates. */
10514 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10515 		    old_afb->base.modifier != new_afb->base.modifier)
10516 			return true;
10517 	}
10518 
10519 	return false;
10520 }
10521 
10522 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10523 			      struct drm_plane_state *new_plane_state,
10524 			      struct drm_framebuffer *fb)
10525 {
10526 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10527 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10528 	unsigned int pitch;
10529 	bool linear;
10530 
10531 	if (fb->width > new_acrtc->max_cursor_width ||
10532 	    fb->height > new_acrtc->max_cursor_height) {
10533 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10534 				 new_plane_state->fb->width,
10535 				 new_plane_state->fb->height);
10536 		return -EINVAL;
10537 	}
10538 	if (new_plane_state->src_w != fb->width << 16 ||
10539 	    new_plane_state->src_h != fb->height << 16) {
10540 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10541 		return -EINVAL;
10542 	}
10543 
10544 	/* Pitch in pixels */
10545 	pitch = fb->pitches[0] / fb->format->cpp[0];
10546 
10547 	if (fb->width != pitch) {
10548 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10549 				 fb->width, pitch);
10550 		return -EINVAL;
10551 	}
10552 
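	/*
	 * The cursor plane only accepts FB pitches of 64, 128 or 256 pixels
	 * (checked below). As an illustrative example, a 64x64 ARGB8888
	 * cursor has pitches[0] = 256 bytes and cpp = 4, i.e. a 64 px pitch.
	 */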
10553 	switch (pitch) {
10554 	case 64:
10555 	case 128:
10556 	case 256:
10557 		/* FB pitch is supported by cursor plane */
10558 		break;
10559 	default:
10560 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10561 		return -EINVAL;
10562 	}
10563 
	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
10566 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10567 		if (adev->family < AMDGPU_FAMILY_AI) {
10568 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10569 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10570 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10571 		} else {
10572 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10573 		}
10574 		if (!linear) {
10575 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10576 			return -EINVAL;
10577 		}
10578 	}
10579 
10580 	return 0;
10581 }
10582 
10583 static int dm_update_plane_state(struct dc *dc,
10584 				 struct drm_atomic_state *state,
10585 				 struct drm_plane *plane,
10586 				 struct drm_plane_state *old_plane_state,
10587 				 struct drm_plane_state *new_plane_state,
10588 				 bool enable,
10589 				 bool *lock_and_validation_needed)
10590 {
10591 
10592 	struct dm_atomic_state *dm_state = NULL;
10593 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10594 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10595 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10596 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10597 	struct amdgpu_crtc *new_acrtc;
10598 	bool needs_reset;
10599 	int ret = 0;
10600 
10601 
10602 	new_plane_crtc = new_plane_state->crtc;
10603 	old_plane_crtc = old_plane_state->crtc;
10604 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10605 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10606 
10607 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10608 		if (!enable || !new_plane_crtc ||
10609 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10610 			return 0;
10611 
10612 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10613 
10614 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10615 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10616 			return -EINVAL;
10617 		}
10618 
10619 		if (new_plane_state->fb) {
10620 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10621 						 new_plane_state->fb);
10622 			if (ret)
10623 				return ret;
10624 		}
10625 
10626 		return 0;
10627 	}
10628 
10629 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10630 					 new_plane_state);
10631 
10632 	/* Remove any changed/removed planes */
10633 	if (!enable) {
10634 		if (!needs_reset)
10635 			return 0;
10636 
10637 		if (!old_plane_crtc)
10638 			return 0;
10639 
10640 		old_crtc_state = drm_atomic_get_old_crtc_state(
10641 				state, old_plane_crtc);
10642 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10643 
10644 		if (!dm_old_crtc_state->stream)
10645 			return 0;
10646 
10647 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10648 				plane->base.id, old_plane_crtc->base.id);
10649 
10650 		ret = dm_atomic_get_state(state, &dm_state);
10651 		if (ret)
10652 			return ret;
10653 
10654 		if (!dc_remove_plane_from_context(
10655 				dc,
10656 				dm_old_crtc_state->stream,
10657 				dm_old_plane_state->dc_state,
10658 				dm_state->context)) {
10659 
10660 			return -EINVAL;
10661 		}
10662 
10663 
10664 		dc_plane_state_release(dm_old_plane_state->dc_state);
10665 		dm_new_plane_state->dc_state = NULL;
10666 
10667 		*lock_and_validation_needed = true;
10668 
10669 	} else { /* Add new planes */
10670 		struct dc_plane_state *dc_new_plane_state;
10671 
10672 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10673 			return 0;
10674 
10675 		if (!new_plane_crtc)
10676 			return 0;
10677 
10678 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10679 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10680 
10681 		if (!dm_new_crtc_state->stream)
10682 			return 0;
10683 
10684 		if (!needs_reset)
10685 			return 0;
10686 
10687 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10688 		if (ret)
10689 			return ret;
10690 
10691 		WARN_ON(dm_new_plane_state->dc_state);
10692 
10693 		dc_new_plane_state = dc_create_plane_state(dc);
10694 		if (!dc_new_plane_state)
10695 			return -ENOMEM;
10696 
10697 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10698 				 plane->base.id, new_plane_crtc->base.id);
10699 
10700 		ret = fill_dc_plane_attributes(
10701 			drm_to_adev(new_plane_crtc->dev),
10702 			dc_new_plane_state,
10703 			new_plane_state,
10704 			new_crtc_state);
10705 		if (ret) {
10706 			dc_plane_state_release(dc_new_plane_state);
10707 			return ret;
10708 		}
10709 
10710 		ret = dm_atomic_get_state(state, &dm_state);
10711 		if (ret) {
10712 			dc_plane_state_release(dc_new_plane_state);
10713 			return ret;
10714 		}
10715 
10716 		/*
10717 		 * Any atomic check errors that occur after this will
10718 		 * not need a release. The plane state will be attached
10719 		 * to the stream, and therefore part of the atomic
10720 		 * state. It'll be released when the atomic state is
10721 		 * cleaned.
10722 		 */
10723 		if (!dc_add_plane_to_context(
10724 				dc,
10725 				dm_new_crtc_state->stream,
10726 				dc_new_plane_state,
10727 				dm_state->context)) {
10728 
10729 			dc_plane_state_release(dc_new_plane_state);
10730 			return -EINVAL;
10731 		}
10732 
10733 		dm_new_plane_state->dc_state = dc_new_plane_state;
10734 
		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
10738 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10739 
10740 		*lock_and_validation_needed = true;
10741 	}
10742 
10743 
10744 	return ret;
10745 }
10746 
10747 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10748 				       int *src_w, int *src_h)
10749 {
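	/*
	 * plane_state->src_w/src_h are 16.16 fixed-point values, hence the
	 * >> 16 below; a 90/270 degree rotation swaps the oriented width
	 * and height.
	 */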
10750 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10751 	case DRM_MODE_ROTATE_90:
10752 	case DRM_MODE_ROTATE_270:
10753 		*src_w = plane_state->src_h >> 16;
10754 		*src_h = plane_state->src_w >> 16;
10755 		break;
10756 	case DRM_MODE_ROTATE_0:
10757 	case DRM_MODE_ROTATE_180:
10758 	default:
10759 		*src_w = plane_state->src_w >> 16;
10760 		*src_h = plane_state->src_h >> 16;
10761 		break;
10762 	}
10763 }
10764 
10765 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10766 				struct drm_crtc *crtc,
10767 				struct drm_crtc_state *new_crtc_state)
10768 {
10769 	struct drm_plane *cursor = crtc->cursor, *underlying;
10770 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
10771 	int i;
10772 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10773 	int cursor_src_w, cursor_src_h;
10774 	int underlying_src_w, underlying_src_h;
10775 
	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it is going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * scaling matches the underlying planes'.
	 */
10780 
10781 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
	if (!new_cursor_state || !new_cursor_state->fb)
		return 0;
10785 
10786 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10787 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10788 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
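	/*
	 * Scales are in thousandths: as an illustrative example, a 64x64
	 * cursor FB shown at 64x64 on screen yields a scale of 1000 (1.0x),
	 * which must match the scale of the underlying plane below.
	 */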
10789 
10790 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10791 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
10792 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10793 			continue;
10794 
10795 		/* Ignore disabled planes */
10796 		if (!new_underlying_state->fb)
10797 			continue;
10798 
10799 		dm_get_oriented_plane_size(new_underlying_state,
10800 					   &underlying_src_w, &underlying_src_h);
10801 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10802 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10803 
10804 		if (cursor_scale_w != underlying_scale_w ||
10805 		    cursor_scale_h != underlying_scale_h) {
10806 			drm_dbg_atomic(crtc->dev,
10807 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10808 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10809 			return -EINVAL;
10810 		}
10811 
10812 		/* If this plane covers the whole CRTC, no need to check planes underneath */
10813 		if (new_underlying_state->crtc_x <= 0 &&
10814 		    new_underlying_state->crtc_y <= 0 &&
10815 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10816 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10817 			break;
10818 	}
10819 
10820 	return 0;
10821 }
10822 
10823 #if defined(CONFIG_DRM_AMD_DC_DCN)
10824 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10825 {
10826 	struct drm_connector *connector;
10827 	struct drm_connector_state *conn_state;
10828 	struct amdgpu_dm_connector *aconnector = NULL;
10829 	int i;
10830 	for_each_new_connector_in_state(state, connector, conn_state, i) {
10831 		if (conn_state->crtc != crtc)
10832 			continue;
10833 
10834 		aconnector = to_amdgpu_dm_connector(connector);
10835 		if (!aconnector->port || !aconnector->mst_port)
10836 			aconnector = NULL;
10837 		else
10838 			break;
10839 	}
10840 
10841 	if (!aconnector)
10842 		return 0;
10843 
10844 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10845 }
10846 #endif
10847 
10848 /**
10849  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10850  * @dev: The DRM device
 * @state: The atomic state to check and validate
10852  *
10853  * Validate that the given atomic state is programmable by DC into hardware.
10854  * This involves constructing a &struct dc_state reflecting the new hardware
10855  * state we wish to commit, then querying DC to see if it is programmable. It's
10856  * important not to modify the existing DC state. Otherwise, atomic_check
10857  * may unexpectedly commit hardware changes.
10858  *
10859  * When validating the DC state, it's important that the right locks are
 * acquired. For the full update case, which removes/adds/updates streams on
 * one CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full update commit will wait for completion of any
 * outstanding flip using DRM's synchronization events.
10864  *
10865  * Note that DM adds the affected connectors for all CRTCs in state, when that
10866  * might not seem necessary. This is because DC stream creation requires the
10867  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10868  * be possible but non-trivial - a possible TODO item.
10869  *
 * Return: 0 on success, or a negative error code if validation failed.
10871  */
10872 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10873 				  struct drm_atomic_state *state)
10874 {
10875 	struct amdgpu_device *adev = drm_to_adev(dev);
10876 	struct dm_atomic_state *dm_state = NULL;
10877 	struct dc *dc = adev->dm.dc;
10878 	struct drm_connector *connector;
10879 	struct drm_connector_state *old_con_state, *new_con_state;
10880 	struct drm_crtc *crtc;
10881 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10882 	struct drm_plane *plane;
10883 	struct drm_plane_state *old_plane_state, *new_plane_state;
10884 	enum dc_status status;
10885 	int ret, i;
10886 	bool lock_and_validation_needed = false;
10887 	struct dm_crtc_state *dm_old_crtc_state;
10888 #if defined(CONFIG_DRM_AMD_DC_DCN)
10889 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10890 	struct drm_dp_mst_topology_state *mst_state;
10891 	struct drm_dp_mst_topology_mgr *mgr;
10892 #endif
10893 
10894 	trace_amdgpu_dm_atomic_check_begin(state);
10895 
10896 	ret = drm_atomic_helper_check_modeset(dev, state);
10897 	if (ret) {
10898 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10899 		goto fail;
10900 	}
10901 
10902 	/* Check connector changes */
10903 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10904 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10905 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10906 
10907 		/* Skip connectors that are disabled or part of modeset already. */
10908 		if (!old_con_state->crtc && !new_con_state->crtc)
10909 			continue;
10910 
10911 		if (!new_con_state->crtc)
10912 			continue;
10913 
10914 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10915 		if (IS_ERR(new_crtc_state)) {
10916 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10917 			ret = PTR_ERR(new_crtc_state);
10918 			goto fail;
10919 		}
10920 
10921 		if (dm_old_con_state->abm_level !=
10922 		    dm_new_con_state->abm_level)
10923 			new_crtc_state->connectors_changed = true;
10924 	}
10925 
10926 #if defined(CONFIG_DRM_AMD_DC_DCN)
10927 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10928 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10929 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10930 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10931 				if (ret) {
10932 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10933 					goto fail;
10934 				}
10935 			}
10936 		}
10937 	}
10938 #endif
10939 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10940 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10941 
10942 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10943 		    !new_crtc_state->color_mgmt_changed &&
10944 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10945 			dm_old_crtc_state->dsc_force_changed == false)
10946 			continue;
10947 
10948 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10949 		if (ret) {
10950 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10951 			goto fail;
10952 		}
10953 
10954 		if (!new_crtc_state->enable)
10955 			continue;
10956 
10957 		ret = drm_atomic_add_affected_connectors(state, crtc);
10958 		if (ret) {
10959 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10960 			goto fail;
10961 		}
10962 
10963 		ret = drm_atomic_add_affected_planes(state, crtc);
10964 		if (ret) {
10965 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10966 			goto fail;
10967 		}
10968 
10969 		if (dm_old_crtc_state->dsc_force_changed)
10970 			new_crtc_state->mode_changed = true;
10971 	}
10972 
10973 	/*
10974 	 * Add all primary and overlay planes on the CRTC to the state
10975 	 * whenever a plane is enabled to maintain correct z-ordering
10976 	 * and to enable fast surface updates.
10977 	 */
10978 	drm_for_each_crtc(crtc, dev) {
10979 		bool modified = false;
10980 
10981 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10982 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10983 				continue;
10984 
10985 			if (new_plane_state->crtc == crtc ||
10986 			    old_plane_state->crtc == crtc) {
10987 				modified = true;
10988 				break;
10989 			}
10990 		}
10991 
10992 		if (!modified)
10993 			continue;
10994 
10995 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10996 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10997 				continue;
10998 
10999 			new_plane_state =
11000 				drm_atomic_get_plane_state(state, plane);
11001 
11002 			if (IS_ERR(new_plane_state)) {
11003 				ret = PTR_ERR(new_plane_state);
11004 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11005 				goto fail;
11006 			}
11007 		}
11008 	}
11009 
	/* Remove existing planes if they are modified */
11011 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11012 		ret = dm_update_plane_state(dc, state, plane,
11013 					    old_plane_state,
11014 					    new_plane_state,
11015 					    false,
11016 					    &lock_and_validation_needed);
11017 		if (ret) {
11018 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11019 			goto fail;
11020 		}
11021 	}
11022 
11023 	/* Disable all crtcs which require disable */
11024 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11025 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11026 					   old_crtc_state,
11027 					   new_crtc_state,
11028 					   false,
11029 					   &lock_and_validation_needed);
11030 		if (ret) {
11031 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11032 			goto fail;
11033 		}
11034 	}
11035 
11036 	/* Enable all crtcs which require enable */
11037 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11038 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11039 					   old_crtc_state,
11040 					   new_crtc_state,
11041 					   true,
11042 					   &lock_and_validation_needed);
11043 		if (ret) {
11044 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11045 			goto fail;
11046 		}
11047 	}
11048 
11049 	/* Add new/modified planes */
11050 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11051 		ret = dm_update_plane_state(dc, state, plane,
11052 					    old_plane_state,
11053 					    new_plane_state,
11054 					    true,
11055 					    &lock_and_validation_needed);
11056 		if (ret) {
11057 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11058 			goto fail;
11059 		}
11060 	}
11061 
11062 	/* Run this here since we want to validate the streams we created */
11063 	ret = drm_atomic_helper_check_planes(dev, state);
11064 	if (ret) {
11065 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11066 		goto fail;
11067 	}
11068 
11069 	/* Check cursor planes scaling */
11070 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11071 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11072 		if (ret) {
11073 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11074 			goto fail;
11075 		}
11076 	}
11077 
11078 	if (state->legacy_cursor_update) {
11079 		/*
11080 		 * This is a fast cursor update coming from the plane update
11081 		 * helper, check if it can be done asynchronously for better
11082 		 * performance.
11083 		 */
11084 		state->async_update =
11085 			!drm_atomic_helper_async_check(dev, state);
11086 
11087 		/*
11088 		 * Skip the remaining global validation if this is an async
11089 		 * update. Cursor updates can be done without affecting
11090 		 * state or bandwidth calcs and this avoids the performance
11091 		 * penalty of locking the private state object and
11092 		 * allocating a new dc_state.
11093 		 */
11094 		if (state->async_update)
11095 			return 0;
11096 	}
11097 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Removed scaling changes validation due to inability to commit
	 * a new stream into the context w/o causing a full reset. Need to
	 * decide how to handle this.
	 */
11103 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11104 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11105 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11106 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11107 
11108 		/* Skip any modesets/resets */
11109 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11110 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11111 			continue;
11112 
		/* Skip anything that is not a scaling or underscan change */
11114 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11115 			continue;
11116 
11117 		lock_and_validation_needed = true;
11118 	}
11119 
11120 #if defined(CONFIG_DRM_AMD_DC_DCN)
11121 	/* set the slot info for each mst_state based on the link encoding format */
11122 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11123 		struct amdgpu_dm_connector *aconnector;
11124 		struct drm_connector *connector;
11125 		struct drm_connector_list_iter iter;
11126 		u8 link_coding_cap;
11127 
		if (!mgr->mst_state)
11129 			continue;
11130 
11131 		drm_connector_list_iter_begin(dev, &iter);
11132 		drm_for_each_connector_iter(connector, &iter) {
11133 			int id = connector->index;
11134 
11135 			if (id == mst_state->mgr->conn_base_id) {
11136 				aconnector = to_amdgpu_dm_connector(connector);
11137 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11138 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11139 
11140 				break;
11141 			}
11142 		}
		drm_connector_list_iter_end(&iter);
	}
11146 #endif
	/*
11148 	 * Streams and planes are reset when there are changes that affect
11149 	 * bandwidth. Anything that affects bandwidth needs to go through
11150 	 * DC global validation to ensure that the configuration can be applied
11151 	 * to hardware.
11152 	 *
11153 	 * We have to currently stall out here in atomic_check for outstanding
11154 	 * commits to finish in this case because our IRQ handlers reference
11155 	 * DRM state directly - we can end up disabling interrupts too early
11156 	 * if we don't.
11157 	 *
11158 	 * TODO: Remove this stall and drop DM state private objects.
11159 	 */
11160 	if (lock_and_validation_needed) {
11161 		ret = dm_atomic_get_state(state, &dm_state);
11162 		if (ret) {
11163 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11164 			goto fail;
11165 		}
11166 
11167 		ret = do_aquire_global_lock(dev, state);
11168 		if (ret) {
11169 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11170 			goto fail;
11171 		}
11172 
11173 #if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}
11178 
11179 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11180 		if (ret) {
11181 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11182 			goto fail;
11183 		}
11184 #endif
11185 
11186 		/*
11187 		 * Perform validation of MST topology in the state:
11188 		 * We need to perform MST atomic check before calling
11189 		 * dc_validate_global_state(), or there is a chance
11190 		 * to get stuck in an infinite loop and hang eventually.
11191 		 */
11192 		ret = drm_dp_mst_atomic_check(state);
11193 		if (ret) {
11194 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11195 			goto fail;
11196 		}
11197 		status = dc_validate_global_state(dc, dm_state->context, true);
11198 		if (status != DC_OK) {
11199 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11200 				       dc_status_to_str(status), status);
11201 			ret = -EINVAL;
11202 			goto fail;
11203 		}
11204 	} else {
11205 		/*
11206 		 * The commit is a fast update. Fast updates shouldn't change
11207 		 * the DC context, affect global validation, and can have their
11208 		 * commit work done in parallel with other commits not touching
11209 		 * the same resource. If we have a new DC context as part of
11210 		 * the DM atomic state from validation we need to free it and
11211 		 * retain the existing one instead.
11212 		 *
11213 		 * Furthermore, since the DM atomic state only contains the DC
11214 		 * context and can safely be annulled, we can free the state
11215 		 * and clear the associated private object now to free
11216 		 * some memory and avoid a possible use-after-free later.
11217 		 */
11218 
11219 		for (i = 0; i < state->num_private_objs; i++) {
11220 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11221 
11222 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11223 				int j = state->num_private_objs-1;
11224 
11225 				dm_atomic_destroy_state(obj,
11226 						state->private_objs[i].state);
11227 
11228 				/* If i is not at the end of the array then the
11229 				 * last element needs to be moved to where i was
11230 				 * before the array can safely be truncated.
11231 				 */
11232 				if (i != j)
11233 					state->private_objs[i] =
11234 						state->private_objs[j];
11235 
11236 				state->private_objs[j].ptr = NULL;
11237 				state->private_objs[j].state = NULL;
11238 				state->private_objs[j].old_state = NULL;
11239 				state->private_objs[j].new_state = NULL;
11240 
11241 				state->num_private_objs = j;
11242 				break;
11243 			}
11244 		}
11245 	}
11246 
11247 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11249 		struct dm_crtc_state *dm_new_crtc_state =
11250 			to_dm_crtc_state(new_crtc_state);
11251 
11252 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11253 							 UPDATE_TYPE_FULL :
11254 							 UPDATE_TYPE_FAST;
11255 	}
11256 
11257 	/* Must be success */
11258 	WARN_ON(ret);
11259 
11260 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11261 
11262 	return ret;
11263 
11264 fail:
11265 	if (ret == -EDEADLK)
11266 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11267 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11268 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11269 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11271 
11272 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11273 
11274 	return ret;
11275 }
11276 
11277 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11278 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11279 {
11280 	uint8_t dpcd_data;
11281 	bool capable = false;
11282 
11283 	if (amdgpu_dm_connector->dc_link &&
11284 		dm_helpers_dp_read_dpcd(
11285 				NULL,
11286 				amdgpu_dm_connector->dc_link,
11287 				DP_DOWN_STREAM_PORT_COUNT,
11288 				&dpcd_data,
11289 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11291 	}
11292 
11293 	return capable;
11294 }
11295 
11296 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11297 		unsigned int offset,
11298 		unsigned int total_length,
11299 		uint8_t *data,
11300 		unsigned int length,
11301 		struct amdgpu_hdmi_vsdb_info *vsdb)
11302 {
11303 	bool res;
11304 	union dmub_rb_cmd cmd;
11305 	struct dmub_cmd_send_edid_cea *input;
11306 	struct dmub_cmd_edid_cea_output *output;
11307 
11308 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11309 		return false;
11310 
11311 	memset(&cmd, 0, sizeof(cmd));
11312 
11313 	input = &cmd.edid_cea.data.input;
11314 
11315 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11316 	cmd.edid_cea.header.sub_type = 0;
11317 	cmd.edid_cea.header.payload_bytes =
11318 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11319 	input->offset = offset;
11320 	input->length = length;
11321 	input->cea_total_length = total_length;
11322 	memcpy(input->payload, data, length);
11323 
11324 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11325 	if (!res) {
11326 		DRM_ERROR("EDID CEA parser failed\n");
11327 		return false;
11328 	}
11329 
11330 	output = &cmd.edid_cea.data.output;
11331 
11332 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11333 		if (!output->ack.success) {
11334 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11335 					output->ack.offset);
11336 		}
11337 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11338 		if (!output->amd_vsdb.vsdb_found)
11339 			return false;
11340 
11341 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11342 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11343 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11344 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11345 	} else {
11346 		DRM_WARN("Unknown EDID CEA parser results\n");
11347 		return false;
11348 	}
11349 
11350 	return true;
11351 }
11352 
11353 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11354 		uint8_t *edid_ext, int len,
11355 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11356 {
11357 	int i;
11358 
11359 	/* send extension block to DMCU for parsing */
11360 	for (i = 0; i < len; i += 8) {
11361 		bool res;
11362 		int offset;
11363 
		/* send 8 bytes at a time */
11365 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11366 			return false;
11367 
		if (i + 8 == len) {
			/* EDID block has been sent completely, expect the result */
11370 			int version, min_rate, max_rate;
11371 
11372 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11373 			if (res) {
11374 				/* amd vsdb found */
11375 				vsdb_info->freesync_supported = 1;
11376 				vsdb_info->amd_vsdb_version = version;
11377 				vsdb_info->min_refresh_rate_hz = min_rate;
11378 				vsdb_info->max_refresh_rate_hz = max_rate;
11379 				return true;
11380 			}
11381 			/* not amd vsdb */
11382 			return false;
11383 		}
11384 
		/* check for ack */
11386 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11387 		if (!res)
11388 			return false;
11389 	}
11390 
11391 	return false;
11392 }
11393 
11394 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11395 		uint8_t *edid_ext, int len,
11396 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11397 {
11398 	int i;
11399 
	/* send extension block to DMUB for parsing */
11401 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11403 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11404 			return false;
11405 	}
11406 
11407 	return vsdb_info->freesync_supported;
11408 }
11409 
11410 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11411 		uint8_t *edid_ext, int len,
11412 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11413 {
11414 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11415 
11416 	if (adev->dm.dmub_srv)
11417 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11418 	else
11419 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11420 }
11421 
11422 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11423 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11424 {
11425 	uint8_t *edid_ext = NULL;
11426 	int i;
11427 	bool valid_vsdb_found = false;
11428 
11429 	/*----- drm_find_cea_extension() -----*/
11430 	/* No EDID or EDID extensions */
11431 	if (edid == NULL || edid->extensions == 0)
11432 		return -ENODEV;
11433 
11434 	/* Find CEA extension */
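	/*
	 * Each extension block is EDID_LENGTH (128) bytes long and follows
	 * the base block, hence the (i + 1) offset below.
	 */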
11435 	for (i = 0; i < edid->extensions; i++) {
11436 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11437 		if (edid_ext[0] == CEA_EXT)
11438 			break;
11439 	}
11440 
11441 	if (i == edid->extensions)
11442 		return -ENODEV;
11443 
11444 	/*----- cea_db_offsets() -----*/
11445 	if (edid_ext[0] != CEA_EXT)
11446 		return -ENODEV;
11447 
11448 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11449 
11450 	return valid_vsdb_found ? i : -ENODEV;
11451 }
11452 
11453 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11454 					struct edid *edid)
11455 {
11456 	int i = 0;
11457 	struct detailed_timing *timing;
11458 	struct detailed_non_pixel *data;
11459 	struct detailed_data_monitor_range *range;
11460 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11461 			to_amdgpu_dm_connector(connector);
11462 	struct dm_connector_state *dm_con_state = NULL;
11463 	struct dc_sink *sink;
11464 
11465 	struct drm_device *dev = connector->dev;
11466 	struct amdgpu_device *adev = drm_to_adev(dev);
11467 	bool freesync_capable = false;
11468 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11469 
11470 	if (!connector->state) {
11471 		DRM_ERROR("%s - Connector has no state", __func__);
11472 		goto update;
11473 	}
11474 
11475 	sink = amdgpu_dm_connector->dc_sink ?
11476 		amdgpu_dm_connector->dc_sink :
11477 		amdgpu_dm_connector->dc_em_sink;
11478 
11479 	if (!edid || !sink) {
11480 		dm_con_state = to_dm_connector_state(connector->state);
11481 
11482 		amdgpu_dm_connector->min_vfreq = 0;
11483 		amdgpu_dm_connector->max_vfreq = 0;
11484 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11485 		connector->display_info.monitor_range.min_vfreq = 0;
11486 		connector->display_info.monitor_range.max_vfreq = 0;
11487 		freesync_capable = false;
11488 
11489 		goto update;
11490 	}
11491 
11492 	dm_con_state = to_dm_connector_state(connector->state);
11493 
11494 	if (!adev->dm.freesync_module)
11495 		goto update;
11496 
11497 
11498 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11499 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11500 		bool edid_check_required = false;
11501 
11502 		if (edid) {
11503 			edid_check_required = is_dp_capable_without_timing_msa(
11504 						adev->dm.dc,
11505 						amdgpu_dm_connector);
11506 		}
11507 
		if (edid_check_required && (edid->version > 1 ||
11509 		   (edid->version == 1 && edid->revision > 1))) {
11510 			for (i = 0; i < 4; i++) {
11511 
11512 				timing	= &edid->detailed_timings[i];
11513 				data	= &timing->data.other_data;
11514 				range	= &data->data.range;
11515 				/*
11516 				 * Check if monitor has continuous frequency mode
11517 				 */
11518 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11519 					continue;
11520 				/*
11521 				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF secondary curve and CVT are not
				 * supported.
11525 				 */
11526 				if (range->flags != 1)
11527 					continue;
11528 
11529 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11530 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
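				/*
				 * The EDID range descriptor stores the max
				 * pixel clock in units of 10 MHz, hence the
				 * multiplication by 10 to get MHz below.
				 */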
11531 				amdgpu_dm_connector->pixel_clock_mhz =
11532 					range->pixel_clock_mhz * 10;
11533 
11534 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11535 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11536 
11537 				break;
11538 			}
11539 
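			/*
			 * Only advertise FreeSync when the panel reports a
			 * usable refresh range (more than 10 Hz wide).
			 */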
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11545 		}
11546 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11547 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11548 		if (i >= 0 && vsdb_info.freesync_supported) {
11549 			timing  = &edid->detailed_timings[i];
11550 			data    = &timing->data.other_data;
11551 
11552 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11553 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11554 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11555 				freesync_capable = true;
11556 
11557 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11558 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11559 		}
11560 	}
11561 
11562 update:
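	/*
	 * Publish the result: cache it in the DM connector state and mirror
	 * it in the DRM "vrr_capable" connector property, if one exists.
	 */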
11563 	if (dm_con_state)
11564 		dm_con_state->freesync_capable = freesync_capable;
11565 
11566 	if (connector->vrr_capable_property)
11567 		drm_connector_set_vrr_capable_property(connector,
11568 						       freesync_capable);
11569 }
11570 
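/*
 * Apply the current force_timing_sync setting to every stream in the active
 * DC state and ask DC to re-synchronize CRTC timing across them, all under
 * the DC lock.
 */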
11571 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11572 {
11573 	struct amdgpu_device *adev = drm_to_adev(dev);
11574 	struct dc *dc = adev->dm.dc;
11575 	int i;
11576 
11577 	mutex_lock(&adev->dm.dc_lock);
11578 	if (dc->current_state) {
11579 		for (i = 0; i < dc->current_state->stream_count; ++i)
11580 			dc->current_state->streams[i]
11581 				->triggered_crtc_reset.enabled =
11582 				adev->dm.force_timing_sync;
11583 
11584 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11585 		dc_trigger_sync(dc, dc->current_state);
11586 	}
11587 	mutex_unlock(&adev->dm.dc_lock);
11588 }
11589 
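/*
 * DM register write helper: write the value through the CGS interface and
 * record the access with the amdgpu_dc_wreg trace point.
 */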
11590 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11591 		       uint32_t value, const char *func_name)
11592 {
11593 #ifdef DM_CHECK_ADDR_0
11594 	if (address == 0) {
		DC_ERR("invalid register write; address = 0\n");
11596 		return;
11597 	}
11598 #endif
11599 	cgs_write_register(ctx->cgs_device, address, value);
11600 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11601 }
11602 
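/*
 * DM register read helper: read through the CGS interface and record the
 * access with the amdgpu_dc_rreg trace point.
 */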
11603 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11604 			  const char *func_name)
11605 {
11606 	uint32_t value;
11607 #ifdef DM_CHECK_ADDR_0
11608 	if (address == 0) {
11609 		DC_ERR("invalid register read; address = 0\n");
11610 		return 0;
11611 	}
11612 #endif
11613 
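	/*
	 * Register reads are not expected while the DMUB register-offload
	 * helper is gathering writes; complain and return a dummy value.
	 */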
11614 	if (ctx->dmub_srv &&
11615 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11616 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11617 		ASSERT(false);
11618 		return 0;
11619 	}
11620 
11621 	value = cgs_read_register(ctx->cgs_device, address);
11622 
11623 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11624 
11625 	return value;
11626 }
11627 
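/*
 * Translate the status of a DMUB AUX or SET_CONFIG transaction into a return
 * value and an *operation_result code: a successful AUX access returns the
 * reply length, a successful SET_CONFIG returns 0, and everything else
 * returns -1 with an error code in *operation_result.
 */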
11628 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11629 						struct dc_context *ctx,
11630 						uint8_t status_type,
11631 						uint32_t *operation_result)
11632 {
11633 	struct amdgpu_device *adev = ctx->driver_context;
11634 	int return_status = -1;
11635 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11636 
11637 	if (is_cmd_aux) {
11638 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11639 			return_status = p_notify->aux_reply.length;
11640 			*operation_result = p_notify->result;
11641 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11642 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11643 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11644 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11645 		} else {
11646 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11647 		}
11648 	} else {
11649 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11650 			return_status = 0;
11651 			*operation_result = p_notify->sc_status;
11652 		} else {
11653 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11654 		}
11655 	}
11656 
11657 	return return_status;
11658 }
11659 
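/*
 * Synchronous wrapper around the asynchronous DMUB AUX / SET_CONFIG
 * interface: submit the request and, in the common case, wait up to 10
 * seconds for the dmub_aux_transfer_done completion before converting the
 * notification into a result via amdgpu_dm_set_dmub_async_sync_status().
 */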
11660 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11661 	unsigned int link_index, void *cmd_payload, void *operation_result)
11662 {
11663 	struct amdgpu_device *adev = ctx->driver_context;
11664 	int ret = 0;
11665 
11666 	if (is_cmd_aux) {
11667 		dc_process_dmub_aux_transfer_async(ctx->dc,
11668 			link_index, (struct aux_payload *)cmd_payload);
11669 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11670 					(struct set_config_cmd_payload *)cmd_payload,
11671 					adev->dm.dmub_notify)) {
11672 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11673 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11674 					(uint32_t *)operation_result);
11675 	}
11676 
11677 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11678 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
11680 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11681 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11682 				(uint32_t *)operation_result);
11683 	}
11684 
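	/*
	 * For a successful AUX transaction, copy the reply command and any
	 * ACKed read data from the DMUB notification back into the caller's
	 * payload.
	 */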
11685 	if (is_cmd_aux) {
11686 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11687 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11688 
11689 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11690 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11691 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11692 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11693 				       adev->dm.dmub_notify->aux_reply.length);
11694 			}
11695 		}
11696 	}
11697 
11698 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11699 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11700 			(uint32_t *)operation_result);
11701 }
11702 
11703 /*
11704  * Check whether seamless boot is supported.
11705  *
11706  * So far we only support seamless boot on CHIP_VANGOGH.
11707  * If everything goes well, we may consider expanding
11708  * seamless boot to other ASICs.
11709  */
11710 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11711 {
11712 	switch (adev->asic_type) {
11713 	case CHIP_VANGOGH:
11714 		if (!adev->mman.keep_stolen_vga_memory)
11715 			return true;
11716 		break;
11717 	default:
11718 		break;
11719 	}
11720 
11721 	return false;
11722 }
11723