1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 #include <linux/dmi.h>
76 
77 #include <drm/display/drm_dp_mst_helper.h>
78 #include <drm/display/drm_hdmi_helper.h>
79 #include <drm/drm_atomic.h>
80 #include <drm/drm_atomic_uapi.h>
81 #include <drm/drm_atomic_helper.h>
82 #include <drm/drm_fb_helper.h>
83 #include <drm/drm_fourcc.h>
84 #include <drm/drm_edid.h>
85 #include <drm/drm_vblank.h>
86 #include <drm/drm_audio_component.h>
87 
88 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
89 
90 #include "dcn/dcn_1_0_offset.h"
91 #include "dcn/dcn_1_0_sh_mask.h"
92 #include "soc15_hw_ip.h"
93 #include "soc15_common.h"
94 #include "vega10_ip_offset.h"
95 
98 #include "gc/gc_11_0_0_offset.h"
99 #include "gc/gc_11_0_0_sh_mask.h"
100 
101 #include "modules/inc/mod_freesync.h"
102 #include "modules/power/power_helpers.h"
103 #include "modules/inc/mod_info_packet.h"
104 
105 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
107 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
108 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
109 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
110 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
111 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
112 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
113 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
114 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
115 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
116 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
117 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
118 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
119 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
120 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
121 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
122 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
123 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
124 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
125 
126 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
127 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
128 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
129 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
130 
131 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
132 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
133 
134 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
135 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
136 
137 /* Number of bytes in PSP header for firmware. */
138 #define PSP_HEADER_BYTES 0x100
139 
140 /* Number of bytes in PSP footer for firmware. */
141 #define PSP_FOOTER_BYTES 0x100
142 
143 /**
144  * DOC: overview
145  *
146  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
147  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
148  * requests into DC requests, and DC responses into DRM responses.
149  *
150  * The root control structure is &struct amdgpu_display_manager.
151  */
152 
153 /* basic init/fini API */
154 static int amdgpu_dm_init(struct amdgpu_device *adev);
155 static void amdgpu_dm_fini(struct amdgpu_device *adev);
156 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
157 
158 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
159 {
160 	switch (link->dpcd_caps.dongle_type) {
161 	case DISPLAY_DONGLE_NONE:
162 		return DRM_MODE_SUBCONNECTOR_Native;
163 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
164 		return DRM_MODE_SUBCONNECTOR_VGA;
165 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
166 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
167 		return DRM_MODE_SUBCONNECTOR_DVID;
168 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
169 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
170 		return DRM_MODE_SUBCONNECTOR_HDMIA;
171 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
172 	default:
173 		return DRM_MODE_SUBCONNECTOR_Unknown;
174 	}
175 }
176 
177 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
178 {
179 	struct dc_link *link = aconnector->dc_link;
180 	struct drm_connector *connector = &aconnector->base;
181 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
182 
183 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
184 		return;
185 
186 	if (aconnector->dc_sink)
187 		subconnector = get_subconnector_type(link);
188 
189 	drm_object_property_set_value(&connector->base,
190 			connector->dev->mode_config.dp_subconnector_property,
191 			subconnector);
192 }
193 
194 /*
195  * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
197  * drm_encoder, drm_mode_config
198  *
199  * Returns 0 on success
200  */
201 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
202 /* removes and deallocates the drm structures, created by the above function */
203 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
204 
205 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
206 				struct drm_plane *plane,
207 				unsigned long possible_crtcs,
208 				const struct dc_plane_cap *plane_cap);
209 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
210 			       struct drm_plane *plane,
211 			       uint32_t link_index);
212 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
213 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
214 				    uint32_t link_index,
215 				    struct amdgpu_encoder *amdgpu_encoder);
216 static int amdgpu_dm_encoder_init(struct drm_device *dev,
217 				  struct amdgpu_encoder *aencoder,
218 				  uint32_t link_index);
219 
220 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
221 
222 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
223 
224 static int amdgpu_dm_atomic_check(struct drm_device *dev,
225 				  struct drm_atomic_state *state);
226 
227 static void handle_cursor_update(struct drm_plane *plane,
228 				 struct drm_plane_state *old_plane_state);
229 
230 static const struct drm_format_info *
231 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
232 
233 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
234 static void handle_hpd_rx_irq(void *param);
235 
236 static bool
237 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
238 				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: which CRTC to get the counter from
 *
 * Return: counter for vertical blanks, or 0 on error.
 */
252 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
253 {
254 	if (crtc >= adev->mode_info.num_crtc)
255 		return 0;
256 	else {
257 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
258 
259 		if (acrtc->dm_irq_params.stream == NULL) {
260 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
261 				  crtc);
262 			return 0;
263 		}
264 
265 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
266 	}
267 }
268 
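/*
 * dm_crtc_get_scanoutpos() - Return the current scanout position for a CRTC.
 *
 * Queries DC for the stream's scanout position and packs vblank start/end
 * into @vbl and the current h/v position into @position, both in the
 * low-16/high-16 register format expected by the base driver.
 */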
269 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
270 				  u32 *vbl, u32 *position)
271 {
272 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
273 
274 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
275 		return -EINVAL;
276 	else {
277 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
278 
279 		if (acrtc->dm_irq_params.stream ==  NULL) {
280 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
281 				  crtc);
282 			return 0;
283 		}
284 
285 		/*
286 		 * TODO rework base driver to use values directly.
287 		 * for now parse it back into reg-format
288 		 */
289 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
290 					 &v_blank_start,
291 					 &v_blank_end,
292 					 &h_position,
293 					 &v_position);
294 
295 		*position = v_position | (h_position << 16);
296 		*vbl = v_blank_start | (v_blank_end << 16);
297 	}
298 
299 	return 0;
300 }
301 
302 static bool dm_is_idle(void *handle)
303 {
304 	/* XXX todo */
305 	return true;
306 }
307 
308 static int dm_wait_for_idle(void *handle)
309 {
310 	/* XXX todo */
311 	return 0;
312 }
313 
314 static bool dm_check_soft_reset(void *handle)
315 {
316 	return false;
317 }
318 
319 static int dm_soft_reset(void *handle)
320 {
321 	/* XXX todo */
322 	return 0;
323 }
324 
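/*
 * Map an OTG instance back to the amdgpu_crtc that drives it. Falls back to
 * the first CRTC (with a warning) when the instance is -1, and returns NULL
 * if no CRTC matches.
 */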
325 static struct amdgpu_crtc *
326 get_crtc_by_otg_inst(struct amdgpu_device *adev,
327 		     int otg_inst)
328 {
329 	struct drm_device *dev = adev_to_drm(adev);
330 	struct drm_crtc *crtc;
331 	struct amdgpu_crtc *amdgpu_crtc;
332 
333 	if (WARN_ON(otg_inst == -1))
334 		return adev->mode_info.crtcs[0];
335 
336 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
337 		amdgpu_crtc = to_amdgpu_crtc(crtc);
338 
339 		if (amdgpu_crtc->otg_inst == otg_inst)
340 			return amdgpu_crtc;
341 	}
342 
343 	return NULL;
344 }
345 
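/*
 * VRR is considered active when the freesync state is ACTIVE_VARIABLE or
 * ACTIVE_FIXED. This variant reads the IRQ-side parameters and is meant to
 * be called from interrupt handlers.
 */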
346 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
347 {
348 	return acrtc->dm_irq_params.freesync_config.state ==
349 		       VRR_STATE_ACTIVE_VARIABLE ||
350 	       acrtc->dm_irq_params.freesync_config.state ==
351 		       VRR_STATE_ACTIVE_FIXED;
352 }
353 
354 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
355 {
356 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
357 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
358 }
359 
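/*
 * A DC timing adjustment is needed when entering fixed-rate VRR or whenever
 * the VRR active state differs between the old and new CRTC state.
 */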
360 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
361 					      struct dm_crtc_state *new_state)
362 {
363 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
364 		return true;
365 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
366 		return true;
367 	else
368 		return false;
369 }
370 
371 /**
372  * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: used for determining the CRTC instance
374  *
375  * Handles the pageflip interrupt by notifying all interested parties
376  * that the pageflip has been completed.
377  */
378 static void dm_pflip_high_irq(void *interrupt_params)
379 {
380 	struct amdgpu_crtc *amdgpu_crtc;
381 	struct common_irq_params *irq_params = interrupt_params;
382 	struct amdgpu_device *adev = irq_params->adev;
383 	unsigned long flags;
384 	struct drm_pending_vblank_event *e;
385 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
386 	bool vrr_active;
387 
388 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
389 
390 	/* IRQ could occur when in initial stage */
391 	/* TODO work and BO cleanup */
392 	if (amdgpu_crtc == NULL) {
393 		DC_LOG_PFLIP("CRTC is null, returning.\n");
394 		return;
395 	}
396 
397 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
398 
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
401 						 amdgpu_crtc->pflip_status,
402 						 AMDGPU_FLIP_SUBMITTED,
403 						 amdgpu_crtc->crtc_id,
404 						 amdgpu_crtc);
405 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
406 		return;
407 	}
408 
409 	/* page flip completed. */
410 	e = amdgpu_crtc->event;
411 	amdgpu_crtc->event = NULL;
412 
413 	WARN_ON(!e);
414 
415 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
416 
417 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
418 	if (!vrr_active ||
419 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
420 				      &v_blank_end, &hpos, &vpos) ||
421 	    (vpos < v_blank_start)) {
422 		/* Update to correct count and vblank timestamp if racing with
423 		 * vblank irq. This also updates to the correct vblank timestamp
424 		 * even in VRR mode, as scanout is past the front-porch atm.
425 		 */
426 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
427 
428 		/* Wake up userspace by sending the pageflip event with proper
429 		 * count and timestamp of vblank of flip completion.
430 		 */
431 		if (e) {
432 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
433 
434 			/* Event sent, so done with vblank for this flip */
435 			drm_crtc_vblank_put(&amdgpu_crtc->base);
436 		}
437 	} else if (e) {
438 		/* VRR active and inside front-porch: vblank count and
439 		 * timestamp for pageflip event will only be up to date after
440 		 * drm_crtc_handle_vblank() has been executed from late vblank
441 		 * irq handler after start of back-porch (vline 0). We queue the
442 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
443 		 * updated timestamp and count, once it runs after us.
444 		 *
445 		 * We need to open-code this instead of using the helper
446 		 * drm_crtc_arm_vblank_event(), as that helper would
447 		 * call drm_crtc_accurate_vblank_count(), which we must
448 		 * not call in VRR mode while we are in front-porch!
449 		 */
450 
451 		/* sequence will be replaced by real count during send-out. */
452 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
453 		e->pipe = amdgpu_crtc->crtc_id;
454 
455 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
456 		e = NULL;
457 	}
458 
	/* Keep track of the vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of the
	 * vblank in which the pageflip completed, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is enabled.
	 */
464 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
465 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
466 
467 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
468 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
469 
470 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
471 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
472 		     vrr_active, (int) !e);
473 }
474 
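/*
 * Forward the vblank event to DRM core and, under the event lock, send any
 * pending completion event for commits that did not involve a page flip
 * (e.g. cursor-only updates).
 */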
475 static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
476 {
477 	struct drm_crtc *crtc = &acrtc->base;
478 	struct drm_device *dev = crtc->dev;
479 	unsigned long flags;
480 
481 	drm_crtc_handle_vblank(crtc);
482 
483 	spin_lock_irqsave(&dev->event_lock, flags);
484 
485 	/* Send completion event for cursor-only commits */
486 	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
487 		drm_crtc_send_vblank_event(crtc, acrtc->event);
488 		drm_crtc_vblank_put(crtc);
489 		acrtc->event = NULL;
490 	}
491 
492 	spin_unlock_irqrestore(&dev->event_lock, flags);
493 }
494 
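/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Tracks the measured refresh rate and, when VRR is active, performs core
 * vblank handling after the end of front-porch, including BTR processing
 * for pre-DCE12 ASICs.
 */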
495 static void dm_vupdate_high_irq(void *interrupt_params)
496 {
497 	struct common_irq_params *irq_params = interrupt_params;
498 	struct amdgpu_device *adev = irq_params->adev;
499 	struct amdgpu_crtc *acrtc;
500 	struct drm_device *drm_dev;
501 	struct drm_vblank_crtc *vblank;
502 	ktime_t frame_duration_ns, previous_timestamp;
503 	unsigned long flags;
504 	int vrr_active;
505 
506 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
507 
508 	if (acrtc) {
509 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
510 		drm_dev = acrtc->base.dev;
511 		vblank = &drm_dev->vblank[acrtc->base.index];
512 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
513 		frame_duration_ns = vblank->time - previous_timestamp;
514 
515 		if (frame_duration_ns > 0) {
516 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
517 						frame_duration_ns,
518 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
519 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
520 		}
521 
522 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
523 			      acrtc->crtc_id,
524 			      vrr_active);
525 
		/* Core vblank handling is done here after the end of front-porch
		 * in VRR mode, as vblank timestamping only gives valid results
		 * once scanout is past the front-porch. This also delivers any
		 * page-flip completion events that were queued to us because a
		 * pageflip happened inside the front-porch.
		 */
532 		if (vrr_active) {
533 			dm_crtc_handle_vblank(acrtc);
534 
535 			/* BTR processing for pre-DCE12 ASICs */
536 			if (acrtc->dm_irq_params.stream &&
537 			    adev->family < AMDGPU_FAMILY_AI) {
538 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
539 				mod_freesync_handle_v_update(
540 				    adev->dm.freesync_module,
541 				    acrtc->dm_irq_params.stream,
542 				    &acrtc->dm_irq_params.vrr_params);
543 
544 				dc_stream_adjust_vmin_vmax(
545 				    adev->dm.dc,
546 				    acrtc->dm_irq_params.stream,
547 				    &acrtc->dm_irq_params.vrr_params.adjust);
548 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
549 			}
550 		}
551 	}
552 }
553 
554 /**
555  * dm_crtc_high_irq() - Handles CRTC interrupt
556  * @interrupt_params: used for determining the CRTC instance
557  *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
559  * event handler.
560  */
561 static void dm_crtc_high_irq(void *interrupt_params)
562 {
563 	struct common_irq_params *irq_params = interrupt_params;
564 	struct amdgpu_device *adev = irq_params->adev;
565 	struct amdgpu_crtc *acrtc;
566 	unsigned long flags;
567 	int vrr_active;
568 
569 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
570 	if (!acrtc)
571 		return;
572 
573 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
574 
575 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
576 		      vrr_active, acrtc->dm_irq_params.active_planes);
577 
578 	/**
579 	 * Core vblank handling at start of front-porch is only possible
580 	 * in non-vrr mode, as only there vblank timestamping will give
581 	 * valid results while done in front-porch. Otherwise defer it
582 	 * to dm_vupdate_high_irq after end of front-porch.
583 	 */
584 	if (!vrr_active)
585 		dm_crtc_handle_vblank(acrtc);
586 
587 	/**
588 	 * Following stuff must happen at start of vblank, for crc
589 	 * computation and below-the-range btr support in vrr mode.
590 	 */
591 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
592 
593 	/* BTR updates need to happen before VUPDATE on Vega and above. */
594 	if (adev->family < AMDGPU_FAMILY_AI)
595 		return;
596 
597 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
598 
599 	if (acrtc->dm_irq_params.stream &&
600 	    acrtc->dm_irq_params.vrr_params.supported &&
601 	    acrtc->dm_irq_params.freesync_config.state ==
602 		    VRR_STATE_ACTIVE_VARIABLE) {
603 		mod_freesync_handle_v_update(adev->dm.freesync_module,
604 					     acrtc->dm_irq_params.stream,
605 					     &acrtc->dm_irq_params.vrr_params);
606 
607 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
608 					   &acrtc->dm_irq_params.vrr_params.adjust);
609 	}
610 
611 	/*
612 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
613 	 * In that case, pageflip completion interrupts won't fire and pageflip
614 	 * completion events won't get delivered. Prevent this by sending
615 	 * pending pageflip events from here if a flip is still pending.
616 	 *
617 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
618 	 * avoid race conditions between flip programming and completion,
619 	 * which could cause too early flip completion events.
620 	 */
621 	if (adev->family >= AMDGPU_FAMILY_RV &&
622 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
623 	    acrtc->dm_irq_params.active_planes == 0) {
624 		if (acrtc->event) {
625 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
626 			acrtc->event = NULL;
627 			drm_crtc_vblank_put(&acrtc->base);
628 		}
629 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
630 	}
631 
632 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
633 }
634 
635 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
636 /**
637  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
638  * DCN generation ASICs
639  * @interrupt_params: interrupt parameters
640  *
641  * Used to set crc window/read out crc value at vertical line 0 position
642  */
643 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
644 {
645 	struct common_irq_params *irq_params = interrupt_params;
646 	struct amdgpu_device *adev = irq_params->adev;
647 	struct amdgpu_crtc *acrtc;
648 
649 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
650 
651 	if (!acrtc)
652 		return;
653 
654 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
655 }
656 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
657 
658 /**
659  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
660  * @adev: amdgpu_device pointer
661  * @notify: dmub notification structure
662  *
 * DMUB AUX or SET_CONFIG command completion processing callback.
 * Copies the DMUB notification to DM so it can be read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
666  */
667 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
668 					struct dmub_notification *notify)
669 {
670 	if (adev->dm.dmub_notify)
671 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
672 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
673 		complete(&adev->dm.dmub_aux_transfer_done);
674 }
675 
676 /**
677  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
678  * @adev: amdgpu_device pointer
679  * @notify: dmub notification structure
680  *
 * DMUB HPD interrupt processing callback. Looks up the connector that owns
 * the reported link index and calls a helper to do the processing.
683  */
684 static void dmub_hpd_callback(struct amdgpu_device *adev,
685 			      struct dmub_notification *notify)
686 {
687 	struct amdgpu_dm_connector *aconnector;
688 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
689 	struct drm_connector *connector;
690 	struct drm_connector_list_iter iter;
691 	struct dc_link *link;
692 	uint8_t link_index = 0;
693 	struct drm_device *dev;
694 
695 	if (adev == NULL)
696 		return;
697 
698 	if (notify == NULL) {
699 		DRM_ERROR("DMUB HPD callback notification was NULL");
700 		return;
701 	}
702 
	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
705 		return;
706 	}
707 
708 	link_index = notify->link_index;
709 	link = adev->dm.dc->links[link_index];
710 	dev = adev->dm.ddev;
711 
712 	drm_connector_list_iter_begin(dev, &iter);
713 	drm_for_each_connector_iter(connector, &iter) {
714 		aconnector = to_amdgpu_dm_connector(connector);
715 		if (link && aconnector->dc_link == link) {
716 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
717 			hpd_aconnector = aconnector;
718 			break;
719 		}
720 	}
721 	drm_connector_list_iter_end(&iter);
722 
723 	if (hpd_aconnector) {
724 		if (notify->type == DMUB_NOTIFICATION_HPD)
725 			handle_hpd_irq_helper(hpd_aconnector);
726 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
727 			handle_hpd_rx_irq(hpd_aconnector);
728 	}
729 }
730 
731 /**
732  * register_dmub_notify_callback - Sets callback for DMUB notify
733  * @adev: amdgpu_device pointer
734  * @type: Type of dmub notification
735  * @callback: Dmub interrupt callback function
736  * @dmub_int_thread_offload: offload indicator
737  *
 * API to register a DMUB callback handler for a DMUB notification.
 * Also sets an indicator for whether the callback processing should be
 * offloaded to the DMUB interrupt handling thread.
741  * Return: true if successfully registered, false if there is existing registration
742  */
743 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
744 					  enum dmub_notification_type type,
745 					  dmub_notify_interrupt_callback_t callback,
746 					  bool dmub_int_thread_offload)
747 {
748 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
749 		adev->dm.dmub_callback[type] = callback;
750 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
751 	} else
752 		return false;
753 
754 	return true;
755 }
756 
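/*
 * Deferred DMUB HPD work: dispatch the saved notification to the registered
 * callback outside of interrupt context, then free the work item.
 */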
757 static void dm_handle_hpd_work(struct work_struct *work)
758 {
759 	struct dmub_hpd_work *dmub_hpd_wrk;
760 
761 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
762 
763 	if (!dmub_hpd_wrk->dmub_notify) {
764 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
765 		return;
766 	}
767 
768 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
769 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
770 		dmub_hpd_wrk->dmub_notify);
771 	}
772 
773 	kfree(dmub_hpd_wrk->dmub_notify);
774 	kfree(dmub_hpd_wrk);
775 
776 }
777 
778 #define DMUB_TRACE_MAX_READ 64
779 /**
780  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
781  * @interrupt_params: used for determining the Outbox instance
782  *
 * Handles the Outbox interrupt by draining pending DMUB notifications and
 * DMUB trace buffer entries.
785  */
786 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
787 {
788 	struct dmub_notification notify;
789 	struct common_irq_params *irq_params = interrupt_params;
790 	struct amdgpu_device *adev = irq_params->adev;
791 	struct amdgpu_display_manager *dm = &adev->dm;
792 	struct dmcub_trace_buf_entry entry = { 0 };
793 	uint32_t count = 0;
794 	struct dmub_hpd_work *dmub_hpd_wrk;
795 	struct dc_link *plink = NULL;
796 
797 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
798 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
799 
800 		do {
801 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
802 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
803 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
804 				continue;
805 			}
806 			if (!dm->dmub_callback[notify.type]) {
807 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
808 				continue;
809 			}
810 			if (dm->dmub_thread_offload[notify.type] == true) {
811 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
812 				if (!dmub_hpd_wrk) {
813 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
814 					return;
815 				}
816 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
817 				if (!dmub_hpd_wrk->dmub_notify) {
818 					kfree(dmub_hpd_wrk);
819 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
820 					return;
821 				}
822 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
825 				dmub_hpd_wrk->adev = adev;
826 				if (notify.type == DMUB_NOTIFICATION_HPD) {
827 					plink = adev->dm.dc->links[notify.link_index];
828 					if (plink) {
829 						plink->hpd_status =
830 							notify.hpd_status == DP_HPD_PLUG;
831 					}
832 				}
833 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
834 			} else {
835 				dm->dmub_callback[notify.type](adev, &notify);
836 			}
837 		} while (notify.pending_notification);
838 	}
839 
840 
841 	do {
842 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
843 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
844 							entry.param0, entry.param1);
845 
846 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
847 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
848 		} else
849 			break;
850 
851 		count++;
852 
853 	} while (count <= DMUB_TRACE_MAX_READ);
854 
855 	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
857 }
858 
859 static int dm_set_clockgating_state(void *handle,
860 		  enum amd_clockgating_state state)
861 {
862 	return 0;
863 }
864 
865 static int dm_set_powergating_state(void *handle,
866 		  enum amd_powergating_state state)
867 {
868 	return 0;
869 }
870 
871 /* Prototypes of private functions */
static int dm_early_init(void *handle);
873 
874 /* Allocate memory for FBC compressed data  */
875 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
876 {
877 	struct drm_device *dev = connector->dev;
878 	struct amdgpu_device *adev = drm_to_adev(dev);
879 	struct dm_compressor_info *compressor = &adev->dm.compressor;
880 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
881 	struct drm_display_mode *mode;
882 	unsigned long max_size = 0;
883 
884 	if (adev->dm.dc->fbc_compressor == NULL)
885 		return;
886 
887 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
888 		return;
889 
890 	if (compressor->bo_ptr)
891 		return;
892 
893 
894 	list_for_each_entry(mode, &connector->modes, head) {
895 		if (max_size < mode->htotal * mode->vtotal)
896 			max_size = mode->htotal * mode->vtotal;
897 	}
898 
899 	if (max_size) {
900 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
901 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
902 			    &compressor->gpu_addr, &compressor->cpu_addr);
903 
904 		if (r)
905 			DRM_ERROR("DM: Failed to initialize FBC\n");
906 		else {
907 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
908 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
909 		}
910 
911 	}
912 
913 }
914 
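/*
 * Audio component callback: find the connector whose audio instance matches
 * @port and copy its ELD into @buf. Returns the ELD size and reports whether
 * audio is enabled on that port via @enabled.
 */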
915 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
916 					  int pipe, bool *enabled,
917 					  unsigned char *buf, int max_bytes)
918 {
919 	struct drm_device *dev = dev_get_drvdata(kdev);
920 	struct amdgpu_device *adev = drm_to_adev(dev);
921 	struct drm_connector *connector;
922 	struct drm_connector_list_iter conn_iter;
923 	struct amdgpu_dm_connector *aconnector;
924 	int ret = 0;
925 
926 	*enabled = false;
927 
928 	mutex_lock(&adev->dm.audio_lock);
929 
930 	drm_connector_list_iter_begin(dev, &conn_iter);
931 	drm_for_each_connector_iter(connector, &conn_iter) {
932 		aconnector = to_amdgpu_dm_connector(connector);
933 		if (aconnector->audio_inst != port)
934 			continue;
935 
936 		*enabled = true;
937 		ret = drm_eld_size(connector->eld);
938 		memcpy(buf, connector->eld, min(max_bytes, ret));
939 
940 		break;
941 	}
942 	drm_connector_list_iter_end(&conn_iter);
943 
944 	mutex_unlock(&adev->dm.audio_lock);
945 
946 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
947 
948 	return ret;
949 }
950 
951 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
952 	.get_eld = amdgpu_dm_audio_component_get_eld,
953 };
954 
955 static int amdgpu_dm_audio_component_bind(struct device *kdev,
956 				       struct device *hda_kdev, void *data)
957 {
958 	struct drm_device *dev = dev_get_drvdata(kdev);
959 	struct amdgpu_device *adev = drm_to_adev(dev);
960 	struct drm_audio_component *acomp = data;
961 
962 	acomp->ops = &amdgpu_dm_audio_component_ops;
963 	acomp->dev = kdev;
964 	adev->dm.audio_component = acomp;
965 
966 	return 0;
967 }
968 
969 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
970 					  struct device *hda_kdev, void *data)
971 {
972 	struct drm_device *dev = dev_get_drvdata(kdev);
973 	struct amdgpu_device *adev = drm_to_adev(dev);
974 	struct drm_audio_component *acomp = data;
975 
976 	acomp->ops = NULL;
977 	acomp->dev = NULL;
978 	adev->dm.audio_component = NULL;
979 }
980 
981 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
982 	.bind	= amdgpu_dm_audio_component_bind,
983 	.unbind	= amdgpu_dm_audio_component_unbind,
984 };
985 
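/*
 * Register the DM audio component and initialize per-pin audio state from
 * DC's audio resource pool.
 */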
986 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
987 {
988 	int i, ret;
989 
990 	if (!amdgpu_audio)
991 		return 0;
992 
993 	adev->mode_info.audio.enabled = true;
994 
995 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
996 
997 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
998 		adev->mode_info.audio.pin[i].channels = -1;
999 		adev->mode_info.audio.pin[i].rate = -1;
1000 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1001 		adev->mode_info.audio.pin[i].status_bits = 0;
1002 		adev->mode_info.audio.pin[i].category_code = 0;
1003 		adev->mode_info.audio.pin[i].connected = false;
1004 		adev->mode_info.audio.pin[i].id =
1005 			adev->dm.dc->res_pool->audios[i]->inst;
1006 		adev->mode_info.audio.pin[i].offset = 0;
1007 	}
1008 
1009 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1010 	if (ret < 0)
1011 		return ret;
1012 
1013 	adev->dm.audio_registered = true;
1014 
1015 	return 0;
1016 }
1017 
1018 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
1019 {
1020 	if (!amdgpu_audio)
1021 		return;
1022 
1023 	if (!adev->mode_info.audio.enabled)
1024 		return;
1025 
1026 	if (adev->dm.audio_registered) {
1027 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1028 		adev->dm.audio_registered = false;
1029 	}
1030 
1031 	/* TODO: Disable audio? */
1032 
1033 	adev->mode_info.audio.enabled = false;
1034 }
1035 
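/* Notify the bound audio component that the ELD for the given pin changed. */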
static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1037 {
1038 	struct drm_audio_component *acomp = adev->dm.audio_component;
1039 
1040 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1041 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1042 
1043 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1044 						 pin, -1);
1045 	}
1046 }
1047 
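/*
 * Copy the DMUB firmware, VBIOS and scratch regions into framebuffer memory
 * and bring up the DMCUB hardware. Returns 0 if DMUB is unsupported or was
 * initialized successfully, and a negative error code otherwise.
 */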
1048 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1049 {
1050 	const struct dmcub_firmware_header_v1_0 *hdr;
1051 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1052 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1053 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1054 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1055 	struct abm *abm = adev->dm.dc->res_pool->abm;
1056 	struct dmub_srv_hw_params hw_params;
1057 	enum dmub_status status;
1058 	const unsigned char *fw_inst_const, *fw_bss_data;
1059 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1060 	bool has_hw_support;
1061 
1062 	if (!dmub_srv)
1063 		/* DMUB isn't supported on the ASIC. */
1064 		return 0;
1065 
1066 	if (!fb_info) {
1067 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1068 		return -EINVAL;
1069 	}
1070 
1071 	if (!dmub_fw) {
1072 		/* Firmware required for DMUB support. */
1073 		DRM_ERROR("No firmware provided for DMUB.\n");
1074 		return -EINVAL;
1075 	}
1076 
1077 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1078 	if (status != DMUB_STATUS_OK) {
1079 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1080 		return -EINVAL;
1081 	}
1082 
1083 	if (!has_hw_support) {
1084 		DRM_INFO("DMUB unsupported on ASIC\n");
1085 		return 0;
1086 	}
1087 
1088 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1089 	status = dmub_srv_hw_reset(dmub_srv);
1090 	if (status != DMUB_STATUS_OK)
1091 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1092 
1093 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1094 
1095 	fw_inst_const = dmub_fw->data +
1096 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1097 			PSP_HEADER_BYTES;
1098 
1099 	fw_bss_data = dmub_fw->data +
1100 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1101 		      le32_to_cpu(hdr->inst_const_bytes);
1102 
1103 	/* Copy firmware and bios info into FB memory. */
1104 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1105 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1106 
1107 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1108 
1109 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1110 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1111 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1112 	 * will be done by dm_dmub_hw_init
1113 	 */
1114 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1115 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1116 				fw_inst_const_size);
1117 	}
1118 
1119 	if (fw_bss_data_size)
1120 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1121 		       fw_bss_data, fw_bss_data_size);
1122 
1123 	/* Copy firmware bios info into FB memory. */
1124 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1125 	       adev->bios_size);
1126 
1127 	/* Reset regions that need to be reset. */
1128 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1129 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1130 
1131 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1132 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1133 
1134 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1135 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1136 
1137 	/* Initialize hardware. */
1138 	memset(&hw_params, 0, sizeof(hw_params));
1139 	hw_params.fb_base = adev->gmc.fb_start;
1140 	hw_params.fb_offset = adev->gmc.aper_base;
1141 
1142 	/* backdoor load firmware and trigger dmub running */
1143 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1144 		hw_params.load_inst_const = true;
1145 
1146 	if (dmcu)
1147 		hw_params.psp_version = dmcu->psp_version;
1148 
1149 	for (i = 0; i < fb_info->num_fb; ++i)
1150 		hw_params.fb[i] = &fb_info->fb[i];
1151 
1152 	switch (adev->ip_versions[DCE_HWIP][0]) {
1153 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1154 		hw_params.dpia_supported = true;
1155 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1156 		break;
1157 	default:
1158 		break;
1159 	}
1160 
1161 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1162 	if (status != DMUB_STATUS_OK) {
1163 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1164 		return -EINVAL;
1165 	}
1166 
1167 	/* Wait for firmware load to finish. */
1168 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1169 	if (status != DMUB_STATUS_OK)
1170 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1171 
1172 	/* Init DMCU and ABM if available. */
1173 	if (dmcu && abm) {
1174 		dmcu->funcs->dmcu_init(dmcu);
1175 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1176 	}
1177 
1178 	if (!adev->dm.dc->ctx->dmub_srv)
1179 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1180 	if (!adev->dm.dc->ctx->dmub_srv) {
1181 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1182 		return -ENOMEM;
1183 	}
1184 
1185 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1186 		 adev->dm.dmcub_fw_version);
1187 
1188 	return 0;
1189 }
1190 
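/*
 * On resume, redo the full DMUB hardware init only if DMUB is not already
 * running; otherwise just wait for the firmware auto-load to finish.
 */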
1191 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1192 {
1193 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1194 	enum dmub_status status;
1195 	bool init;
1196 
1197 	if (!dmub_srv) {
1198 		/* DMUB isn't supported on the ASIC. */
1199 		return;
1200 	}
1201 
1202 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1203 	if (status != DMUB_STATUS_OK)
1204 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1205 
1206 	if (status == DMUB_STATUS_OK && init) {
1207 		/* Wait for firmware load to finish. */
1208 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1209 		if (status != DMUB_STATUS_OK)
1210 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1211 	} else {
1212 		/* Perform the full hardware initialization. */
1213 		dm_dmub_hw_init(adev);
1214 	}
1215 }
1216 
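/*
 * Build a dc_phy_addr_space_config from the GMC view of memory: system
 * aperture, AGP aperture and GART page table addresses, converted to the
 * granularity DC expects.
 */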
1217 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1218 {
1219 	uint64_t pt_base;
1220 	uint32_t logical_addr_low;
1221 	uint32_t logical_addr_high;
1222 	uint32_t agp_base, agp_bot, agp_top;
1223 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1224 
1225 	memset(pa_config, 0, sizeof(*pa_config));
1226 
1227 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1228 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1229 
1230 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1231 		/*
		 * Raven2 has a HW issue that prevents it from using the VRAM
		 * that lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a
		 * workaround, increase the system aperture high address (by 1)
		 * to get rid of the VM fault and hardware hang.
1236 		 */
1237 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1238 	else
1239 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1240 
1241 	agp_base = 0;
1242 	agp_bot = adev->gmc.agp_start >> 24;
1243 	agp_top = adev->gmc.agp_end >> 24;
1244 
1245 
1246 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1247 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1248 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1249 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1250 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1251 	page_table_base.low_part = lower_32_bits(pt_base);
1252 
1253 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1254 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1255 
	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1257 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1258 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1259 
1260 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1261 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1262 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1263 
1264 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1265 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1266 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1267 
1268 	pa_config->is_hvm_enabled = 0;
1269 
1270 }
1271 
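/*
 * Worker that tracks how many CRTCs have vblank interrupts enabled, allows
 * idle optimizations (MALL) only when none do, and enables/disables PSR
 * based on the OS vblank requirements.
 */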
1272 static void vblank_control_worker(struct work_struct *work)
1273 {
1274 	struct vblank_control_work *vblank_work =
1275 		container_of(work, struct vblank_control_work, work);
1276 	struct amdgpu_display_manager *dm = vblank_work->dm;
1277 
1278 	mutex_lock(&dm->dc_lock);
1279 
1280 	if (vblank_work->enable)
1281 		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
1283 		dm->active_vblank_irq_count--;
1284 
1285 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1286 
1287 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1288 
1289 	/*
1290 	 * Control PSR based on vblank requirements from OS
1291 	 *
1292 	 * If panel supports PSR SU, there's no need to disable PSR when OS is
1293 	 * submitting fast atomic commits (we infer this by whether the OS
1294 	 * requests vblank events). Fast atomic commits will simply trigger a
1295 	 * full-frame-update (FFU); a specific case of selective-update (SU)
1296 	 * where the SU region is the full hactive*vactive region. See
1297 	 * fill_dc_dirty_rects().
1298 	 */
1299 	if (vblank_work->stream && vblank_work->stream->link) {
1300 		if (vblank_work->enable) {
1301 			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
1302 			    vblank_work->stream->link->psr_settings.psr_allow_active)
1303 				amdgpu_dm_psr_disable(vblank_work->stream);
1304 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1305 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1306 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1307 			amdgpu_dm_psr_enable(vblank_work->stream);
1308 		}
1309 	}
1310 
1311 	mutex_unlock(&dm->dc_lock);
1312 
1313 	dc_stream_release(vblank_work->stream);
1314 
1315 	kfree(vblank_work);
1316 }
1317 
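/*
 * Offloaded HPD RX handling: re-detect the sink and, when not in GPU reset,
 * service automated test requests or recover from DP link loss under the
 * DC lock.
 */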
1318 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1319 {
1320 	struct hpd_rx_irq_offload_work *offload_work;
1321 	struct amdgpu_dm_connector *aconnector;
1322 	struct dc_link *dc_link;
1323 	struct amdgpu_device *adev;
1324 	enum dc_connection_type new_connection_type = dc_connection_none;
1325 	unsigned long flags;
1326 
1327 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1328 	aconnector = offload_work->offload_wq->aconnector;
1329 
1330 	if (!aconnector) {
1331 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1332 		goto skip;
1333 	}
1334 
1335 	adev = drm_to_adev(aconnector->base.dev);
1336 	dc_link = aconnector->dc_link;
1337 
1338 	mutex_lock(&aconnector->hpd_lock);
1339 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1340 		DRM_ERROR("KMS: Failed to detect connector\n");
1341 	mutex_unlock(&aconnector->hpd_lock);
1342 
1343 	if (new_connection_type == dc_connection_none)
1344 		goto skip;
1345 
1346 	if (amdgpu_in_reset(adev))
1347 		goto skip;
1348 
1349 	mutex_lock(&adev->dm.dc_lock);
1350 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1351 		dc_link_dp_handle_automated_test(dc_link);
1352 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1353 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1354 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1355 		dc_link_dp_handle_link_loss(dc_link);
1356 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1357 		offload_work->offload_wq->is_handling_link_loss = false;
1358 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1359 	}
1360 	mutex_unlock(&adev->dm.dc_lock);
1361 
1362 skip:
1363 	kfree(offload_work);
1364 
1365 }
1366 
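/*
 * Allocate one single-threaded HPD RX offload workqueue per DC link, along
 * with its offload lock.
 */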
1367 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1368 {
1369 	int max_caps = dc->caps.max_links;
1370 	int i = 0;
1371 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1372 
1373 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1374 
1375 	if (!hpd_rx_offload_wq)
1376 		return NULL;
1377 
1378 
1379 	for (i = 0; i < max_caps; i++) {
1380 		hpd_rx_offload_wq[i].wq =
1381 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1382 
1383 		if (hpd_rx_offload_wq[i].wq == NULL) {
1384 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1385 			return NULL;
1386 		}
1387 
1388 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1389 	}
1390 
1391 	return hpd_rx_offload_wq;
1392 }
1393 
1394 struct amdgpu_stutter_quirk {
1395 	u16 chip_vendor;
1396 	u16 chip_device;
1397 	u16 subsys_vendor;
1398 	u16 subsys_device;
1399 	u8 revision;
1400 };
1401 
1402 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1403 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1404 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1405 	{ 0, 0, 0, 0, 0 },
1406 };
1407 
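/*
 * Return true if the PCI device matches an entry in the stutter quirk list
 * above, in which case memory stutter mode should be disabled.
 */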
1408 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1409 {
1410 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1411 
1412 	while (p && p->chip_device != 0) {
1413 		if (pdev->vendor == p->chip_vendor &&
1414 		    pdev->device == p->chip_device &&
1415 		    pdev->subsystem_vendor == p->subsys_vendor &&
1416 		    pdev->subsystem_device == p->subsys_device &&
1417 		    pdev->revision == p->revision) {
1418 			return true;
1419 		}
1420 		++p;
1421 	}
1422 	return false;
1423 }
1424 
1425 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1426 	{
1427 		.matches = {
1428 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1429 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1430 		},
1431 	},
1432 	{
1433 		.matches = {
1434 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1435 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1436 		},
1437 	},
1438 	{
1439 		.matches = {
1440 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1441 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1442 		},
1443 	},
1444 	{}
1445 };
1446 
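/* Apply DMI-based quirks; currently only the AUX HPD disconnect quirk. */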
1447 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1448 {
1449 	const struct dmi_system_id *dmi_id;
1450 
1451 	dm->aux_hpd_discon_quirk = false;
1452 
1453 	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1454 	if (dmi_id) {
1455 		dm->aux_hpd_discon_quirk = true;
1456 		DRM_INFO("aux_hpd_discon_quirk attached\n");
1457 	}
1458 }
1459 
1460 static int amdgpu_dm_init(struct amdgpu_device *adev)
1461 {
1462 	struct dc_init_data init_data;
1463 #ifdef CONFIG_DRM_AMD_DC_HDCP
1464 	struct dc_callback_init init_params;
1465 #endif
1466 	int r;
1467 
1468 	adev->dm.ddev = adev_to_drm(adev);
1469 	adev->dm.adev = adev;
1470 
1471 	/* Zero all the fields */
1472 	memset(&init_data, 0, sizeof(init_data));
1473 #ifdef CONFIG_DRM_AMD_DC_HDCP
1474 	memset(&init_params, 0, sizeof(init_params));
1475 #endif
1476 
1477 	mutex_init(&adev->dm.dc_lock);
1478 	mutex_init(&adev->dm.audio_lock);
1479 	spin_lock_init(&adev->dm.vblank_lock);
1480 
	if (amdgpu_dm_irq_init(adev)) {
1482 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1483 		goto error;
1484 	}
1485 
1486 	init_data.asic_id.chip_family = adev->family;
1487 
1488 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1489 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1490 	init_data.asic_id.chip_id = adev->pdev->device;
1491 
1492 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1493 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1494 	init_data.asic_id.atombios_base_address =
1495 		adev->mode_info.atom_context->bios;
1496 
1497 	init_data.driver = adev;
1498 
1499 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1500 
1501 	if (!adev->dm.cgs_device) {
1502 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1503 		goto error;
1504 	}
1505 
1506 	init_data.cgs_device = adev->dm.cgs_device;
1507 
1508 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1509 
1510 	switch (adev->ip_versions[DCE_HWIP][0]) {
1511 	case IP_VERSION(2, 1, 0):
1512 		switch (adev->dm.dmcub_fw_version) {
1513 		case 0: /* development */
1514 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1515 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1516 			init_data.flags.disable_dmcu = false;
1517 			break;
1518 		default:
1519 			init_data.flags.disable_dmcu = true;
1520 		}
1521 		break;
1522 	case IP_VERSION(2, 0, 3):
1523 		init_data.flags.disable_dmcu = true;
1524 		break;
1525 	default:
1526 		break;
1527 	}
1528 
1529 	switch (adev->asic_type) {
1530 	case CHIP_CARRIZO:
1531 	case CHIP_STONEY:
1532 		init_data.flags.gpu_vm_support = true;
1533 		break;
1534 	default:
1535 		switch (adev->ip_versions[DCE_HWIP][0]) {
1536 		case IP_VERSION(1, 0, 0):
1537 		case IP_VERSION(1, 0, 1):
1538 			/* enable S/G on PCO and RV2 */
1539 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1540 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1541 				init_data.flags.gpu_vm_support = true;
1542 			break;
1543 		case IP_VERSION(2, 1, 0):
1544 		case IP_VERSION(3, 0, 1):
1545 		case IP_VERSION(3, 1, 2):
1546 		case IP_VERSION(3, 1, 3):
1547 		case IP_VERSION(3, 1, 5):
1548 		case IP_VERSION(3, 1, 6):
1549 			init_data.flags.gpu_vm_support = true;
1550 			break;
1551 		default:
1552 			break;
1553 		}
1554 		break;
1555 	}
1556 
1557 	if (init_data.flags.gpu_vm_support)
1558 		adev->mode_info.gpu_vm_support = true;
1559 
1560 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1561 		init_data.flags.fbc_support = true;
1562 
1563 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1564 		init_data.flags.multi_mon_pp_mclk_switch = true;
1565 
1566 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1567 		init_data.flags.disable_fractional_pwm = true;
1568 
1569 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1570 		init_data.flags.edp_no_power_sequencing = true;
1571 
1572 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1573 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1574 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1575 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1576 
1577 	init_data.flags.seamless_boot_edp_requested = false;
1578 
1579 	if (check_seamless_boot_capability(adev)) {
1580 		init_data.flags.seamless_boot_edp_requested = true;
1581 		init_data.flags.allow_seamless_boot_optimization = true;
1582 		DRM_INFO("Seamless boot condition check passed\n");
1583 	}
1584 
1585 	init_data.flags.enable_mipi_converter_optimization = true;
1586 
1587 	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1588 	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1589 
1590 	INIT_LIST_HEAD(&adev->dm.da_list);
1591 
1592 	retrieve_dmi_info(&adev->dm);
1593 
1594 	/* Display Core create. */
1595 	adev->dm.dc = dc_create(&init_data);
1596 
1597 	if (adev->dm.dc) {
1598 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1599 	} else {
1600 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1601 		goto error;
1602 	}
1603 
1604 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1605 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1606 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1607 	}
1608 
1609 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1611 	if (dm_should_disable_stutter(adev->pdev))
1612 		adev->dm.dc->debug.disable_stutter = true;
1613 
1614 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1615 		adev->dm.dc->debug.disable_stutter = true;
1616 
1617 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1618 		adev->dm.dc->debug.disable_dsc = true;
1619 		adev->dm.dc->debug.disable_dsc_edp = true;
1620 	}
1621 
1622 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1623 		adev->dm.dc->debug.disable_clock_gate = true;
1624 
1625 	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1626 		adev->dm.dc->debug.force_subvp_mclk_switch = true;
1627 
1628 	r = dm_dmub_hw_init(adev);
1629 	if (r) {
1630 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1631 		goto error;
1632 	}
1633 
1634 	dc_hardware_init(adev->dm.dc);
1635 
1636 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1637 	if (!adev->dm.hpd_rx_offload_wq) {
1638 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1639 		goto error;
1640 	}
1641 
1642 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1643 		struct dc_phy_addr_space_config pa_config;
1644 
1645 		mmhub_read_system_context(adev, &pa_config);
1646 
1647 		// Call the DC init_memory func
1648 		dc_setup_system_context(adev->dm.dc, &pa_config);
1649 	}
1650 
1651 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1652 	if (!adev->dm.freesync_module) {
1653 		DRM_ERROR(
1654 		"amdgpu: failed to initialize freesync_module.\n");
1655 	} else
1656 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1657 				adev->dm.freesync_module);
1658 
1659 	amdgpu_dm_init_color_mod();
1660 
1661 	if (adev->dm.dc->caps.max_links > 0) {
1662 		adev->dm.vblank_control_workqueue =
1663 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1664 		if (!adev->dm.vblank_control_workqueue)
1665 			DRM_ERROR("amdgpu: failed to initialize vblank_control_workqueue.\n");
1666 	}
1667 
1668 #ifdef CONFIG_DRM_AMD_DC_HDCP
1669 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1670 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1671 
1672 		if (!adev->dm.hdcp_workqueue)
1673 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1674 		else
1675 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1676 
1677 		dc_init_callbacks(adev->dm.dc, &init_params);
1678 	}
1679 #endif
1680 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1681 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1682 #endif
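	/*
	 * When the DMUB supports outbox notifications, allocate the notification
	 * buffer and delayed HPD workqueue, and register the AUX-reply and HPD
	 * callbacks that will service those notifications.
	 */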
1683 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1684 		init_completion(&adev->dm.dmub_aux_transfer_done);
1685 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1686 		if (!adev->dm.dmub_notify) {
1687 			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1688 			goto error;
1689 		}
1690 
1691 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1692 		if (!adev->dm.delayed_hpd_wq) {
1693 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1694 			goto error;
1695 		}
1696 
1697 		amdgpu_dm_outbox_init(adev);
1698 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1699 			dmub_aux_setconfig_callback, false)) {
1700 			DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
1701 			goto error;
1702 		}
1703 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1704 			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
1705 			goto error;
1706 		}
1707 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1708 			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
1709 			goto error;
1710 		}
1711 	}
1712 
1713 	if (amdgpu_dm_initialize_drm_device(adev)) {
1714 		DRM_ERROR(
1715 		"amdgpu: failed to initialize sw for display support.\n");
1716 		goto error;
1717 	}
1718 
1719 	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1720 	 * It is expected that DMUB will resend any pending notifications at this point, for
1721 	 * example HPD from DPIA.
1722 	 */
1723 	if (dc_is_dmub_outbox_supported(adev->dm.dc))
1724 		dc_enable_dmub_outbox(adev->dm.dc);
1725 
1726 	/* create fake encoders for MST */
1727 	dm_dp_create_fake_mst_encoders(adev);
1728 
1729 	/* TODO: Add_display_info? */
1730 
1731 	/* TODO use dynamic cursor width */
1732 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1733 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1734 
1735 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1736 		DRM_ERROR(
1737 		"amdgpu: failed to initialize vblank support.\n");
1738 		goto error;
1739 	}
1740 
1741 
1742 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1743 
1744 	return 0;
1745 error:
1746 	amdgpu_dm_fini(adev);
1747 
1748 	return -EINVAL;
1749 }
1750 
1751 static int amdgpu_dm_early_fini(void *handle)
1752 {
1753 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1754 
1755 	amdgpu_dm_audio_fini(adev);
1756 
1757 	return 0;
1758 }
1759 
1760 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1761 {
1762 	int i;
1763 
1764 	if (adev->dm.vblank_control_workqueue) {
1765 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1766 		adev->dm.vblank_control_workqueue = NULL;
1767 	}
1768 
1769 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1770 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1771 	}
1772 
1773 	amdgpu_dm_destroy_drm_device(&adev->dm);
1774 
1775 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1776 	if (adev->dm.crc_rd_wrk) {
1777 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1778 		kfree(adev->dm.crc_rd_wrk);
1779 		adev->dm.crc_rd_wrk = NULL;
1780 	}
1781 #endif
1782 #ifdef CONFIG_DRM_AMD_DC_HDCP
1783 	if (adev->dm.hdcp_workqueue) {
1784 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1785 		adev->dm.hdcp_workqueue = NULL;
1786 	}
1787 
1788 	if (adev->dm.dc)
1789 		dc_deinit_callbacks(adev->dm.dc);
1790 #endif
1791 
1792 	if (adev->dm.dc)
1793 		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1794 	if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1795 		kfree(adev->dm.dmub_notify);
1796 		adev->dm.dmub_notify = NULL;
1797 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1798 		adev->dm.delayed_hpd_wq = NULL;
1799 	}
1800 
1801 	if (adev->dm.dmub_bo)
1802 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1803 				      &adev->dm.dmub_bo_gpu_addr,
1804 				      &adev->dm.dmub_bo_cpu_addr);
1805 
1806 	if (adev->dm.hpd_rx_offload_wq) {
1807 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1808 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1809 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1810 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1811 			}
1812 		}
1813 
1814 		kfree(adev->dm.hpd_rx_offload_wq);
1815 		adev->dm.hpd_rx_offload_wq = NULL;
1816 	}
1817 
1818 	/* DC Destroy TODO: Replace destroy DAL */
1819 	if (adev->dm.dc)
1820 		dc_destroy(&adev->dm.dc);
1821 	/*
1822 	 * TODO: pageflip, vblank interrupt
1823 	 *
1824 	 * amdgpu_dm_irq_fini(adev);
1825 	 */
1826 
1827 	if (adev->dm.cgs_device) {
1828 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1829 		adev->dm.cgs_device = NULL;
1830 	}
1831 	if (adev->dm.freesync_module) {
1832 		mod_freesync_destroy(adev->dm.freesync_module);
1833 		adev->dm.freesync_module = NULL;
1834 	}
1835 
1836 	mutex_destroy(&adev->dm.audio_lock);
1837 	mutex_destroy(&adev->dm.dc_lock);
1840 }
1841 
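/*
 * Request and validate the optional DMCU firmware and register its ERAM and
 * INTV sections for PSP loading; ASICs that do not use DMCU return 0 early.
 */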
1842 static int load_dmcu_fw(struct amdgpu_device *adev)
1843 {
1844 	const char *fw_name_dmcu = NULL;
1845 	int r;
1846 	const struct dmcu_firmware_header_v1_0 *hdr;
1847 
1848 	switch (adev->asic_type) {
1849 #if defined(CONFIG_DRM_AMD_DC_SI)
1850 	case CHIP_TAHITI:
1851 	case CHIP_PITCAIRN:
1852 	case CHIP_VERDE:
1853 	case CHIP_OLAND:
1854 #endif
1855 	case CHIP_BONAIRE:
1856 	case CHIP_HAWAII:
1857 	case CHIP_KAVERI:
1858 	case CHIP_KABINI:
1859 	case CHIP_MULLINS:
1860 	case CHIP_TONGA:
1861 	case CHIP_FIJI:
1862 	case CHIP_CARRIZO:
1863 	case CHIP_STONEY:
1864 	case CHIP_POLARIS11:
1865 	case CHIP_POLARIS10:
1866 	case CHIP_POLARIS12:
1867 	case CHIP_VEGAM:
1868 	case CHIP_VEGA10:
1869 	case CHIP_VEGA12:
1870 	case CHIP_VEGA20:
1871 		return 0;
1872 	case CHIP_NAVI12:
1873 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1874 		break;
1875 	case CHIP_RAVEN:
1876 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1877 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1878 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1879 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1880 		else
1881 			return 0;
1882 		break;
1883 	default:
1884 		switch (adev->ip_versions[DCE_HWIP][0]) {
1885 		case IP_VERSION(2, 0, 2):
1886 		case IP_VERSION(2, 0, 3):
1887 		case IP_VERSION(2, 0, 0):
1888 		case IP_VERSION(2, 1, 0):
1889 		case IP_VERSION(3, 0, 0):
1890 		case IP_VERSION(3, 0, 2):
1891 		case IP_VERSION(3, 0, 3):
1892 		case IP_VERSION(3, 0, 1):
1893 		case IP_VERSION(3, 1, 2):
1894 		case IP_VERSION(3, 1, 3):
1895 		case IP_VERSION(3, 1, 5):
1896 		case IP_VERSION(3, 1, 6):
1897 		case IP_VERSION(3, 2, 0):
1898 		case IP_VERSION(3, 2, 1):
1899 			return 0;
1900 		default:
1901 			break;
1902 		}
1903 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1904 		return -EINVAL;
1905 	}
1906 
1907 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1908 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1909 		return 0;
1910 	}
1911 
1912 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1913 	if (r == -ENOENT) {
1914 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1915 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1916 		adev->dm.fw_dmcu = NULL;
1917 		return 0;
1918 	}
1919 	if (r) {
1920 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1921 			fw_name_dmcu);
1922 		return r;
1923 	}
1924 
1925 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1926 	if (r) {
1927 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1928 			fw_name_dmcu);
1929 		release_firmware(adev->dm.fw_dmcu);
1930 		adev->dm.fw_dmcu = NULL;
1931 		return r;
1932 	}
1933 
1934 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1935 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1936 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1937 	adev->firmware.fw_size +=
1938 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1939 
1940 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1941 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1942 	adev->firmware.fw_size +=
1943 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1944 
1945 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1946 
1947 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1948 
1949 	return 0;
1950 }
1951 
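/*
 * Register access callbacks handed to the DMUB service; both route through
 * the DC context's register helpers.
 */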
1952 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1953 {
1954 	struct amdgpu_device *adev = ctx;
1955 
1956 	return dm_read_reg(adev->dm.dc->ctx, address);
1957 }
1958 
1959 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1960 				     uint32_t value)
1961 {
1962 	struct amdgpu_device *adev = ctx;
1963 
1964 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1965 }
1966 
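/*
 * Pick the DMUB firmware for this ASIC, create the DMUB service, and carve
 * the service's memory regions out of a dedicated VRAM buffer.
 */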
1967 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1968 {
1969 	struct dmub_srv_create_params create_params;
1970 	struct dmub_srv_region_params region_params;
1971 	struct dmub_srv_region_info region_info;
1972 	struct dmub_srv_fb_params fb_params;
1973 	struct dmub_srv_fb_info *fb_info;
1974 	struct dmub_srv *dmub_srv;
1975 	const struct dmcub_firmware_header_v1_0 *hdr;
1976 	const char *fw_name_dmub;
1977 	enum dmub_asic dmub_asic;
1978 	enum dmub_status status;
1979 	int r;
1980 
1981 	switch (adev->ip_versions[DCE_HWIP][0]) {
1982 	case IP_VERSION(2, 1, 0):
1983 		dmub_asic = DMUB_ASIC_DCN21;
1984 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1985 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1986 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1987 		break;
1988 	case IP_VERSION(3, 0, 0):
1989 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1990 			dmub_asic = DMUB_ASIC_DCN30;
1991 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1992 		} else {
1993 			dmub_asic = DMUB_ASIC_DCN30;
1994 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1995 		}
1996 		break;
1997 	case IP_VERSION(3, 0, 1):
1998 		dmub_asic = DMUB_ASIC_DCN301;
1999 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
2000 		break;
2001 	case IP_VERSION(3, 0, 2):
2002 		dmub_asic = DMUB_ASIC_DCN302;
2003 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
2004 		break;
2005 	case IP_VERSION(3, 0, 3):
2006 		dmub_asic = DMUB_ASIC_DCN303;
2007 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
2008 		break;
2009 	case IP_VERSION(3, 1, 2):
2010 	case IP_VERSION(3, 1, 3):
2011 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2012 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
2013 		break;
2014 	case IP_VERSION(3, 1, 5):
2015 		dmub_asic = DMUB_ASIC_DCN315;
2016 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
2017 		break;
2018 	case IP_VERSION(3, 1, 6):
2019 		dmub_asic = DMUB_ASIC_DCN316;
2020 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
2021 		break;
2022 	case IP_VERSION(3, 2, 0):
2023 		dmub_asic = DMUB_ASIC_DCN32;
2024 		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
2025 		break;
2026 	case IP_VERSION(3, 2, 1):
2027 		dmub_asic = DMUB_ASIC_DCN321;
2028 		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
2029 		break;
2030 	default:
2031 		/* ASIC doesn't support DMUB. */
2032 		return 0;
2033 	}
2034 
2035 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
2036 	if (r) {
2037 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
2038 		return 0;
2039 	}
2040 
2041 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
2042 	if (r) {
2043 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
2044 		return 0;
2045 	}
2046 
2047 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2048 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2049 
2050 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2051 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2052 			AMDGPU_UCODE_ID_DMCUB;
2053 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2054 			adev->dm.dmub_fw;
2055 		adev->firmware.fw_size +=
2056 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2057 
2058 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2059 			 adev->dm.dmcub_fw_version);
2060 	}
2061 
2062 
2063 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2064 	dmub_srv = adev->dm.dmub_srv;
2065 
2066 	if (!dmub_srv) {
2067 		DRM_ERROR("Failed to allocate DMUB service!\n");
2068 		return -ENOMEM;
2069 	}
2070 
2071 	memset(&create_params, 0, sizeof(create_params));
2072 	create_params.user_ctx = adev;
2073 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2074 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2075 	create_params.asic = dmub_asic;
2076 
2077 	/* Create the DMUB service. */
2078 	status = dmub_srv_create(dmub_srv, &create_params);
2079 	if (status != DMUB_STATUS_OK) {
2080 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2081 		return -EINVAL;
2082 	}
2083 
2084 	/* Calculate the size of all the regions for the DMUB service. */
2085 	memset(&region_params, 0, sizeof(region_params));
2086 
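	/*
	 * The firmware's instruction/constant section is wrapped by a PSP header
	 * and footer: exclude both from the region size and skip the header when
	 * pointing at the data inside the loaded image.
	 */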
2087 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2088 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2089 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2090 	region_params.vbios_size = adev->bios_size;
2091 	region_params.fw_bss_data = region_params.bss_data_size ?
2092 		adev->dm.dmub_fw->data +
2093 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2094 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2095 	region_params.fw_inst_const =
2096 		adev->dm.dmub_fw->data +
2097 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2098 		PSP_HEADER_BYTES;
2099 
2100 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2101 					   &region_info);
2102 
2103 	if (status != DMUB_STATUS_OK) {
2104 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2105 		return -EINVAL;
2106 	}
2107 
2108 	/*
2109 	 * Allocate a framebuffer based on the total size of all the regions.
2110 	 * TODO: Move this into GART.
2111 	 */
2112 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2113 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2114 				    &adev->dm.dmub_bo_gpu_addr,
2115 				    &adev->dm.dmub_bo_cpu_addr);
2116 	if (r)
2117 		return r;
2118 
2119 	/* Rebase the regions on the framebuffer address. */
2120 	memset(&fb_params, 0, sizeof(fb_params));
2121 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2122 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2123 	fb_params.region_info = &region_info;
2124 
2125 	adev->dm.dmub_fb_info =
2126 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2127 	fb_info = adev->dm.dmub_fb_info;
2128 
2129 	if (!fb_info) {
2130 		DRM_ERROR(
2131 			"Failed to allocate framebuffer info for DMUB service!\n");
2132 		return -ENOMEM;
2133 	}
2134 
2135 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2136 	if (status != DMUB_STATUS_OK) {
2137 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2138 		return -EINVAL;
2139 	}
2140 
2141 	return 0;
2142 }
2143 
2144 static int dm_sw_init(void *handle)
2145 {
2146 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2147 	int r;
2148 
2149 	r = dm_dmub_sw_init(adev);
2150 	if (r)
2151 		return r;
2152 
2153 	return load_dmcu_fw(adev);
2154 }
2155 
2156 static int dm_sw_fini(void *handle)
2157 {
2158 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2159 
2160 	kfree(adev->dm.dmub_fb_info);
2161 	adev->dm.dmub_fb_info = NULL;
2162 
2163 	if (adev->dm.dmub_srv) {
2164 		dmub_srv_destroy(adev->dm.dmub_srv);
2165 		adev->dm.dmub_srv = NULL;
2166 	}
2167 
2168 	release_firmware(adev->dm.dmub_fw);
2169 	adev->dm.dmub_fw = NULL;
2170 
2171 	release_firmware(adev->dm.fw_dmcu);
2172 	adev->dm.fw_dmcu = NULL;
2173 
2174 	return 0;
2175 }
2176 
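/*
 * Start MST topology management on every connector whose link reports an MST
 * branch device.
 */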
2177 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2178 {
2179 	struct amdgpu_dm_connector *aconnector;
2180 	struct drm_connector *connector;
2181 	struct drm_connector_list_iter iter;
2182 	int ret = 0;
2183 
2184 	drm_connector_list_iter_begin(dev, &iter);
2185 	drm_for_each_connector_iter(connector, &iter) {
2186 		aconnector = to_amdgpu_dm_connector(connector);
2187 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2188 		    aconnector->mst_mgr.aux) {
2189 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2190 					 aconnector,
2191 					 aconnector->base.base.id);
2192 
2193 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2194 			if (ret < 0) {
2195 				DRM_ERROR("DM_MST: Failed to start MST\n");
2196 				aconnector->dc_link->type =
2197 					dc_connection_single;
2198 				break;
2199 			}
2200 		}
2201 	}
2202 	drm_connector_list_iter_end(&iter);
2203 
2204 	return ret;
2205 }
2206 
2207 static int dm_late_init(void *handle)
2208 {
2209 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2210 
2211 	struct dmcu_iram_parameters params;
2212 	unsigned int linear_lut[16];
2213 	int i;
2214 	struct dmcu *dmcu = NULL;
2215 
2216 	dmcu = adev->dm.dc->res_pool->dmcu;
2217 
2218 	for (i = 0; i < 16; i++)
2219 		linear_lut[i] = 0xFFFF * i / 15;
2220 
2221 	params.set = 0;
2222 	params.backlight_ramping_override = false;
2223 	params.backlight_ramping_start = 0xCCCC;
2224 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2225 	params.backlight_lut_array_size = 16;
2226 	params.backlight_lut_array = linear_lut;
2227 
2228 	/* Min backlight level after ABM reduction; don't allow below 1%:
2229 	 * 0xFFFF x 0.01 = 0x28F
2230 	 */
2231 	params.min_abm_backlight = 0x28F;
2232 	/* In the case where abm is implemented on dmcub,
2233 	 * dmcu object will be null.
2234 	 * ABM 2.4 and up are implemented on dmcub.
2235 	 */
2236 	if (dmcu) {
2237 		if (!dmcu_load_iram(dmcu, params))
2238 			return -EINVAL;
2239 	} else if (adev->dm.dc->ctx->dmub_srv) {
2240 		struct dc_link *edp_links[MAX_NUM_EDP];
2241 		int edp_num;
2242 
2243 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2244 		for (i = 0; i < edp_num; i++) {
2245 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2246 				return -EINVAL;
2247 		}
2248 	}
2249 
2250 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2251 }
2252 
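/*
 * Suspend or resume the MST topology managers of all root MST connectors. If
 * a manager fails to resume, stop topology management on that link and raise
 * a hotplug event so userspace re-probes.
 */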
2253 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2254 {
2255 	struct amdgpu_dm_connector *aconnector;
2256 	struct drm_connector *connector;
2257 	struct drm_connector_list_iter iter;
2258 	struct drm_dp_mst_topology_mgr *mgr;
2259 	int ret;
2260 	bool need_hotplug = false;
2261 
2262 	drm_connector_list_iter_begin(dev, &iter);
2263 	drm_for_each_connector_iter(connector, &iter) {
2264 		aconnector = to_amdgpu_dm_connector(connector);
2265 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2266 		    aconnector->mst_port)
2267 			continue;
2268 
2269 		mgr = &aconnector->mst_mgr;
2270 
2271 		if (suspend) {
2272 			drm_dp_mst_topology_mgr_suspend(mgr);
2273 		} else {
2274 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2275 			if (ret < 0) {
2276 				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2277 					aconnector->dc_link);
2278 				need_hotplug = true;
2279 			}
2280 		}
2281 	}
2282 	drm_connector_list_iter_end(&iter);
2283 
2284 	if (need_hotplug)
2285 		drm_kms_helper_hotplug_event(dev);
2286 }
2287 
2288 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2289 {
2290 	int ret = 0;
2291 
2292 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2293 	 * on the Windows driver dc implementation.
2294 	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2295 	 * should be passed to smu during boot up and resume from s3.
2296 	 * boot up: dc calculates dcn watermark clock settings within dc_create,
2297 	 * dcn20_resource_construct
2298 	 * then calls pplib functions below to pass the settings to smu:
2299 	 * smu_set_watermarks_for_clock_ranges
2300 	 * smu_set_watermarks_table
2301 	 * navi10_set_watermarks_table
2302 	 * smu_write_watermarks_table
2303 	 *
2304 	 * For Renoir, clock settings of dcn watermarks are also fixed values.
2305 	 * dc has implemented a different flow for the Windows driver:
2306 	 * dc_hardware_init / dc_set_power_state
2307 	 * dcn10_init_hw
2308 	 * notify_wm_ranges
2309 	 * set_wm_ranges
2310 	 * -- Linux
2311 	 * smu_set_watermarks_for_clock_ranges
2312 	 * renoir_set_watermarks_table
2313 	 * smu_write_watermarks_table
2314 	 *
2315 	 * For Linux,
2316 	 * dc_hardware_init -> amdgpu_dm_init
2317 	 * dc_set_power_state --> dm_resume
2318 	 *
2319 	 * therefore, this function applies to navi10/12/14 but not to
2320 	 * Renoir.
2321 	 */
2322 	switch (adev->ip_versions[DCE_HWIP][0]) {
2323 	case IP_VERSION(2, 0, 2):
2324 	case IP_VERSION(2, 0, 0):
2325 		break;
2326 	default:
2327 		return 0;
2328 	}
2329 
2330 	ret = amdgpu_dpm_write_watermarks_table(adev);
2331 	if (ret) {
2332 		DRM_ERROR("Failed to update WMTABLE!\n");
2333 		return ret;
2334 	}
2335 
2336 	return 0;
2337 }
2338 
2339 /**
2340  * dm_hw_init() - Initialize DC device
2341  * @handle: The base driver device containing the amdgpu_dm device.
2342  *
2343  * Initialize the &struct amdgpu_display_manager device. This involves calling
2344  * the initializers of each DM component, then populating the struct with them.
2345  *
2346  * Although the function implies hardware initialization, both hardware and
2347  * software are initialized here. Splitting them out to their relevant init
2348  * hooks is a future TODO item.
2349  *
2350  * Some notable things that are initialized here:
2351  *
2352  * - Display Core, both software and hardware
2353  * - DC modules that we need (freesync and color management)
2354  * - DRM software states
2355  * - Interrupt sources and handlers
2356  * - Vblank support
2357  * - Debug FS entries, if enabled
2358  */
2359 static int dm_hw_init(void *handle)
2360 {
2361 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2362 	/* Create DAL display manager */
2363 	amdgpu_dm_init(adev);
2364 	amdgpu_dm_hpd_init(adev);
2365 
2366 	return 0;
2367 }
2368 
2369 /**
2370  * dm_hw_fini() - Teardown DC device
2371  * @handle: The base driver device containing the amdgpu_dm device.
2372  *
2373  * Teardown components within &struct amdgpu_display_manager that require
2374  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2375  * were loaded. Also flush IRQ workqueues and disable them.
2376  */
2377 static int dm_hw_fini(void *handle)
2378 {
2379 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2380 
2381 	amdgpu_dm_hpd_fini(adev);
2382 
2383 	amdgpu_dm_irq_fini(adev);
2384 	amdgpu_dm_fini(adev);
2385 	return 0;
2386 }
2387 
2388 
2389 static int dm_enable_vblank(struct drm_crtc *crtc);
2390 static void dm_disable_vblank(struct drm_crtc *crtc);
2391 
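/*
 * Enable or disable pageflip and vblank interrupts for every stream in the
 * given DC state that still has planes attached; used around GPU reset to
 * quiesce and later restore display interrupts.
 */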
2392 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2393 				 struct dc_state *state, bool enable)
2394 {
2395 	enum dc_irq_source irq_source;
2396 	struct amdgpu_crtc *acrtc;
2397 	int rc = -EBUSY;
2398 	int i = 0;
2399 
2400 	for (i = 0; i < state->stream_count; i++) {
2401 		acrtc = get_crtc_by_otg_inst(
2402 				adev, state->stream_status[i].primary_otg_inst);
2403 
2404 		if (acrtc && state->stream_status[i].plane_count != 0) {
2405 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2406 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2407 			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2408 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2409 			if (rc)
2410 				DRM_WARN("Failed to %s pflip interrupts\n",
2411 					 enable ? "enable" : "disable");
2412 
2413 			if (enable) {
2414 				rc = dm_enable_vblank(&acrtc->base);
2415 				if (rc)
2416 					DRM_WARN("Failed to enable vblank interrupts\n");
2417 			} else {
2418 				dm_disable_vblank(&acrtc->base);
2419 			}
2420 
2421 		}
2422 	}
2423 
2424 }
2425 
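/*
 * Build a copy of the current DC state with every stream and its planes
 * removed, then commit it so the hardware ends up driving zero streams.
 */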
2426 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2427 {
2428 	struct dc_state *context = NULL;
2429 	enum dc_status res = DC_ERROR_UNEXPECTED;
2430 	int i;
2431 	struct dc_stream_state *del_streams[MAX_PIPES];
2432 	int del_streams_count = 0;
2433 
2434 	memset(del_streams, 0, sizeof(del_streams));
2435 
2436 	context = dc_create_state(dc);
2437 	if (context == NULL)
2438 		goto context_alloc_fail;
2439 
2440 	dc_resource_state_copy_construct_current(dc, context);
2441 
2442 	/* First remove from context all streams */
2443 	for (i = 0; i < context->stream_count; i++) {
2444 		struct dc_stream_state *stream = context->streams[i];
2445 
2446 		del_streams[del_streams_count++] = stream;
2447 	}
2448 
2449 	/* Remove all planes for removed streams and then remove the streams */
2450 	for (i = 0; i < del_streams_count; i++) {
2451 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2452 			res = DC_FAIL_DETACH_SURFACES;
2453 			goto fail;
2454 		}
2455 
2456 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2457 		if (res != DC_OK)
2458 			goto fail;
2459 	}
2460 
2461 	res = dc_commit_state(dc, context);
2462 
2463 fail:
2464 	dc_release_state(context);
2465 
2466 context_alloc_fail:
2467 	return res;
2468 }
2469 
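/* Flush any pending HPD RX offload work on every link before suspending. */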
2470 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2471 {
2472 	int i;
2473 
2474 	if (dm->hpd_rx_offload_wq) {
2475 		for (i = 0; i < dm->dc->caps.max_links; i++)
2476 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2477 	}
2478 }
2479 
2480 static int dm_suspend(void *handle)
2481 {
2482 	struct amdgpu_device *adev = handle;
2483 	struct amdgpu_display_manager *dm = &adev->dm;
2484 	int ret = 0;
2485 
2486 	if (amdgpu_in_reset(adev)) {
2487 		mutex_lock(&dm->dc_lock);
2488 
2489 		dc_allow_idle_optimizations(adev->dm.dc, false);
2490 
2491 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2492 
2493 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2494 
2495 		amdgpu_dm_commit_zero_streams(dm->dc);
2496 
2497 		amdgpu_dm_irq_suspend(adev);
2498 
2499 		hpd_rx_irq_work_suspend(dm);
2500 
2501 		return ret;
2502 	}
2503 
2504 	WARN_ON(adev->dm.cached_state);
2505 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2506 
2507 	s3_handle_mst(adev_to_drm(adev), true);
2508 
2509 	amdgpu_dm_irq_suspend(adev);
2510 
2511 	hpd_rx_irq_work_suspend(dm);
2512 
2513 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2514 
2515 	return 0;
2516 }
2517 
2518 struct amdgpu_dm_connector *
2519 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2520 					     struct drm_crtc *crtc)
2521 {
2522 	uint32_t i;
2523 	struct drm_connector_state *new_con_state;
2524 	struct drm_connector *connector;
2525 	struct drm_crtc *crtc_from_state;
2526 
2527 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2528 		crtc_from_state = new_con_state->crtc;
2529 
2530 		if (crtc_from_state == crtc)
2531 			return to_amdgpu_dm_connector(connector);
2532 	}
2533 
2534 	return NULL;
2535 }
2536 
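/*
 * Fake a link detection for forced connectors: mark the link as disconnected,
 * create a sink matching the connector's signal type and attempt to read a
 * local EDID for it.
 */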
2537 static void emulated_link_detect(struct dc_link *link)
2538 {
2539 	struct dc_sink_init_data sink_init_data = { 0 };
2540 	struct display_sink_capability sink_caps = { 0 };
2541 	enum dc_edid_status edid_status;
2542 	struct dc_context *dc_ctx = link->ctx;
2543 	struct dc_sink *sink = NULL;
2544 	struct dc_sink *prev_sink = NULL;
2545 
2546 	link->type = dc_connection_none;
2547 	prev_sink = link->local_sink;
2548 
2549 	if (prev_sink)
2550 		dc_sink_release(prev_sink);
2551 
2552 	switch (link->connector_signal) {
2553 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2554 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2555 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2556 		break;
2557 	}
2558 
2559 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2560 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2561 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2562 		break;
2563 	}
2564 
2565 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2566 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2567 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2568 		break;
2569 	}
2570 
2571 	case SIGNAL_TYPE_LVDS: {
2572 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2573 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2574 		break;
2575 	}
2576 
2577 	case SIGNAL_TYPE_EDP: {
2578 		sink_caps.transaction_type =
2579 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2580 		sink_caps.signal = SIGNAL_TYPE_EDP;
2581 		break;
2582 	}
2583 
2584 	case SIGNAL_TYPE_DISPLAY_PORT: {
2585 		sink_caps.transaction_type =
2586 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2587 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2588 		break;
2589 	}
2590 
2591 	default:
2592 		DC_ERROR("Invalid connector type! signal:%d\n",
2593 			link->connector_signal);
2594 		return;
2595 	}
2596 
2597 	sink_init_data.link = link;
2598 	sink_init_data.sink_signal = sink_caps.signal;
2599 
2600 	sink = dc_sink_create(&sink_init_data);
2601 	if (!sink) {
2602 		DC_ERROR("Failed to create sink!\n");
2603 		return;
2604 	}
2605 
2606 	/* dc_sink_create returns a new reference */
2607 	link->local_sink = sink;
2608 
2609 	edid_status = dm_helpers_read_local_edid(
2610 			link->ctx,
2611 			link,
2612 			sink);
2613 
2614 	if (edid_status != EDID_OK)
2615 		DC_ERROR("Failed to read EDID\n");
2616 
2617 }
2618 
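/*
 * Re-commit every stream from the cached DC state with full surface updates
 * so the displays are completely reprogrammed after a GPU reset.
 */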
2619 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2620 				     struct amdgpu_display_manager *dm)
2621 {
2622 	struct {
2623 		struct dc_surface_update surface_updates[MAX_SURFACES];
2624 		struct dc_plane_info plane_infos[MAX_SURFACES];
2625 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2626 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2627 		struct dc_stream_update stream_update;
2628 	} *bundle;
2629 	int k, m;
2630 
2631 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2632 
2633 	if (!bundle) {
2634 		dm_error("Failed to allocate update bundle\n");
2635 		goto cleanup;
2636 	}
2637 
2638 	for (k = 0; k < dc_state->stream_count; k++) {
2639 		bundle->stream_update.stream = dc_state->streams[k];
2640 
2641 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2642 			bundle->surface_updates[m].surface =
2643 				dc_state->stream_status->plane_states[m];
2644 			bundle->surface_updates[m].surface->force_full_update =
2645 				true;
2646 		}
2647 		dc_commit_updates_for_stream(
2648 			dm->dc, bundle->surface_updates,
2649 			dc_state->stream_status->plane_count,
2650 			dc_state->streams[k], &bundle->stream_update, dc_state);
2651 	}
2652 
2653 cleanup:
2654 	kfree(bundle);
2655 
2656 	return;
2657 }
2658 
2659 static int dm_resume(void *handle)
2660 {
2661 	struct amdgpu_device *adev = handle;
2662 	struct drm_device *ddev = adev_to_drm(adev);
2663 	struct amdgpu_display_manager *dm = &adev->dm;
2664 	struct amdgpu_dm_connector *aconnector;
2665 	struct drm_connector *connector;
2666 	struct drm_connector_list_iter iter;
2667 	struct drm_crtc *crtc;
2668 	struct drm_crtc_state *new_crtc_state;
2669 	struct dm_crtc_state *dm_new_crtc_state;
2670 	struct drm_plane *plane;
2671 	struct drm_plane_state *new_plane_state;
2672 	struct dm_plane_state *dm_new_plane_state;
2673 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2674 	enum dc_connection_type new_connection_type = dc_connection_none;
2675 	struct dc_state *dc_state;
2676 	int i, r, j;
2677 
2678 	if (amdgpu_in_reset(adev)) {
2679 		dc_state = dm->cached_dc_state;
2680 
2681 		/*
2682 		 * The dc->current_state is backed up into dm->cached_dc_state
2683 		 * before we commit 0 streams.
2684 		 *
2685 		 * DC will clear link encoder assignments on the real state
2686 		 * but the changes won't propagate over to the copy we made
2687 		 * before the 0 streams commit.
2688 		 *
2689 		 * DC expects that link encoder assignments are *not* valid
2690 		 * when committing a state, so as a workaround we can copy
2691 		 * off of the current state.
2692 		 *
2693 		 * We lose the previous assignments, but we had already
2694 		 * commit 0 streams anyway.
2695 		 */
2696 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2697 
2698 		r = dm_dmub_hw_init(adev);
2699 		if (r)
2700 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2701 
2702 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2703 		dc_resume(dm->dc);
2704 
2705 		amdgpu_dm_irq_resume_early(adev);
2706 
2707 		for (i = 0; i < dc_state->stream_count; i++) {
2708 			dc_state->streams[i]->mode_changed = true;
2709 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2710 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2711 					= 0xffffffff;
2712 			}
2713 		}
2714 
2715 		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2716 			amdgpu_dm_outbox_init(adev);
2717 			dc_enable_dmub_outbox(adev->dm.dc);
2718 		}
2719 
2720 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2721 
2722 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2723 
2724 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2725 
2726 		dc_release_state(dm->cached_dc_state);
2727 		dm->cached_dc_state = NULL;
2728 
2729 		amdgpu_dm_irq_resume_late(adev);
2730 
2731 		mutex_unlock(&dm->dc_lock);
2732 
2733 		return 0;
2734 	}
2735 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2736 	dc_release_state(dm_state->context);
2737 	dm_state->context = dc_create_state(dm->dc);
2738 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2739 	dc_resource_state_construct(dm->dc, dm_state->context);
2740 
2741 	/* Before powering on DC we need to re-initialize DMUB. */
2742 	dm_dmub_hw_resume(adev);
2743 
2744 	/* Re-enable outbox interrupts for DPIA. */
2745 	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2746 		amdgpu_dm_outbox_init(adev);
2747 		dc_enable_dmub_outbox(adev->dm.dc);
2748 	}
2749 
2750 	/* power on hardware */
2751 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2752 
2753 	/* program HPD filter */
2754 	dc_resume(dm->dc);
2755 
2756 	/*
2757 	 * early enable HPD Rx IRQ, should be done before set mode as short
2758 	 * pulse interrupts are used for MST
2759 	 */
2760 	amdgpu_dm_irq_resume_early(adev);
2761 
2762 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2763 	s3_handle_mst(ddev, false);
2764 
2765 	/* Do detection*/
2766 	drm_connector_list_iter_begin(ddev, &iter);
2767 	drm_for_each_connector_iter(connector, &iter) {
2768 		aconnector = to_amdgpu_dm_connector(connector);
2769 
2770 		/*
2771 		 * this is the case when traversing through already created
2772 		 * MST connectors, should be skipped
2773 		 */
2774 		if (aconnector->dc_link &&
2775 		    aconnector->dc_link->type == dc_connection_mst_branch)
2776 			continue;
2777 
2778 		mutex_lock(&aconnector->hpd_lock);
2779 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2780 			DRM_ERROR("KMS: Failed to detect connector\n");
2781 
2782 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2783 			emulated_link_detect(aconnector->dc_link);
2784 		} else {
2785 			mutex_lock(&dm->dc_lock);
2786 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2787 			mutex_unlock(&dm->dc_lock);
2788 		}
2789 
2790 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2791 			aconnector->fake_enable = false;
2792 
2793 		if (aconnector->dc_sink)
2794 			dc_sink_release(aconnector->dc_sink);
2795 		aconnector->dc_sink = NULL;
2796 		amdgpu_dm_update_connector_after_detect(aconnector);
2797 		mutex_unlock(&aconnector->hpd_lock);
2798 	}
2799 	drm_connector_list_iter_end(&iter);
2800 
2801 	/* Force mode set in atomic commit */
2802 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2803 		new_crtc_state->active_changed = true;
2804 
2805 	/*
2806 	 * atomic_check is expected to create the dc states. We need to release
2807 	 * them here, since they were duplicated as part of the suspend
2808 	 * procedure.
2809 	 */
2810 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2811 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2812 		if (dm_new_crtc_state->stream) {
2813 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2814 			dc_stream_release(dm_new_crtc_state->stream);
2815 			dm_new_crtc_state->stream = NULL;
2816 		}
2817 	}
2818 
2819 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2820 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2821 		if (dm_new_plane_state->dc_state) {
2822 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2823 			dc_plane_state_release(dm_new_plane_state->dc_state);
2824 			dm_new_plane_state->dc_state = NULL;
2825 		}
2826 	}
2827 
2828 	drm_atomic_helper_resume(ddev, dm->cached_state);
2829 
2830 	dm->cached_state = NULL;
2831 
2832 	amdgpu_dm_irq_resume_late(adev);
2833 
2834 	amdgpu_dm_smu_write_watermarks_table(adev);
2835 
2836 	return 0;
2837 }
2838 
2839 /**
2840  * DOC: DM Lifecycle
2841  *
2842  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2843  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2844  * the base driver's device list to be initialized and torn down accordingly.
2845  *
2846  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2847  */
2848 
2849 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2850 	.name = "dm",
2851 	.early_init = dm_early_init,
2852 	.late_init = dm_late_init,
2853 	.sw_init = dm_sw_init,
2854 	.sw_fini = dm_sw_fini,
2855 	.early_fini = amdgpu_dm_early_fini,
2856 	.hw_init = dm_hw_init,
2857 	.hw_fini = dm_hw_fini,
2858 	.suspend = dm_suspend,
2859 	.resume = dm_resume,
2860 	.is_idle = dm_is_idle,
2861 	.wait_for_idle = dm_wait_for_idle,
2862 	.check_soft_reset = dm_check_soft_reset,
2863 	.soft_reset = dm_soft_reset,
2864 	.set_clockgating_state = dm_set_clockgating_state,
2865 	.set_powergating_state = dm_set_powergating_state,
2866 };
2867 
2868 const struct amdgpu_ip_block_version dm_ip_block =
2869 {
2870 	.type = AMD_IP_BLOCK_TYPE_DCE,
2871 	.major = 1,
2872 	.minor = 0,
2873 	.rev = 0,
2874 	.funcs = &amdgpu_dm_funcs,
2875 };
2876 
2877 
2878 /**
2879  * DOC: atomic
2880  *
2881  * *WIP*
2882  */
2883 
2884 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2885 	.fb_create = amdgpu_display_user_framebuffer_create,
2886 	.get_format_info = amd_get_format_info,
2887 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2888 	.atomic_check = amdgpu_dm_atomic_check,
2889 	.atomic_commit = drm_atomic_helper_commit,
2890 };
2891 
2892 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2893 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2894 };
2895 
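/*
 * Derive eDP backlight capabilities from the sink's extended DPCD caps and
 * HDR metadata: decide whether AUX backlight control is used and compute the
 * AUX min/max input signal from the reported luminance values.
 */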
2896 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2897 {
2898 	u32 max_avg, min_cll, max, min, q, r;
2899 	struct amdgpu_dm_backlight_caps *caps;
2900 	struct amdgpu_display_manager *dm;
2901 	struct drm_connector *conn_base;
2902 	struct amdgpu_device *adev;
2903 	struct dc_link *link = NULL;
2904 	static const u8 pre_computed_values[] = {
2905 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2906 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2907 	int i;
2908 
2909 	if (!aconnector || !aconnector->dc_link)
2910 		return;
2911 
2912 	link = aconnector->dc_link;
2913 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2914 		return;
2915 
2916 	conn_base = &aconnector->base;
2917 	adev = drm_to_adev(conn_base->dev);
2918 	dm = &adev->dm;
2919 	for (i = 0; i < dm->num_of_edps; i++) {
2920 		if (link == dm->backlight_link[i])
2921 			break;
2922 	}
2923 	if (i >= dm->num_of_edps)
2924 		return;
2925 	caps = &dm->backlight_caps[i];
2926 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2927 	caps->aux_support = false;
2928 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2929 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2930 
2931 	if (caps->ext_caps->bits.oled == 1 /*||
2932 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2933 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2934 		caps->aux_support = true;
2935 
2936 	if (amdgpu_backlight == 0)
2937 		caps->aux_support = false;
2938 	else if (amdgpu_backlight == 1)
2939 		caps->aux_support = true;
2940 
2941 	/* From the specification (CTA-861-G), for calculating the maximum
2942 	 * luminance we need to use:
2943 	 *	Luminance = 50*2**(CV/32)
2944 	 * Where CV is a one-byte value.
2945 	 * For calculating this expression we may need floating point precision;
2946 	 * to avoid this complexity level, we take advantage that CV is divided
2947 	 * by a constant. From Euclid's division algorithm, we know that CV
2948 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2949 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2950 	 * need to pre-compute the value of r/32. For pre-computing the values
2951 	 * we just used the following Ruby line:
2952 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2953 	 * The results of the above expressions can be verified at
2954 	 * pre_computed_values.
2955 	 */
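	/*
	 * For example, max_avg = 64 gives q = 2 and r = 0, so
	 * max = (1 << 2) * pre_computed_values[0] = 4 * 50 = 200, which matches
	 * 50 * 2**(64/32) = 200 from the formula above.
	 */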
2956 	q = max_avg >> 5;
2957 	r = max_avg % 32;
2958 	max = (1 << q) * pre_computed_values[r];
2959 
2960 	// min luminance: maxLum * (CV/255)^2 / 100
2961 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2962 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2963 
2964 	caps->aux_max_input_signal = max;
2965 	caps->aux_min_input_signal = min;
2966 }
2967 
2968 void amdgpu_dm_update_connector_after_detect(
2969 		struct amdgpu_dm_connector *aconnector)
2970 {
2971 	struct drm_connector *connector = &aconnector->base;
2972 	struct drm_device *dev = connector->dev;
2973 	struct dc_sink *sink;
2974 
2975 	/* MST handled by drm_mst framework */
2976 	if (aconnector->mst_mgr.mst_state)
2977 		return;
2978 
2979 	sink = aconnector->dc_link->local_sink;
2980 	if (sink)
2981 		dc_sink_retain(sink);
2982 
2983 	/*
2984 	 * EDID mgmt connector gets its first update only in the mode_valid hook,
2985 	 * and then the connector sink is set to either a fake or a physical sink
2986 	 * depending on link status. Skip if already done during boot.
2987 	 */
2988 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2989 			&& aconnector->dc_em_sink) {
2990 
2991 		/*
2992 		 * For S3 resume with headless use em_sink to fake a stream
2993 		 * because on resume connector->sink is set to NULL
2994 		 */
2995 		mutex_lock(&dev->mode_config.mutex);
2996 
2997 		if (sink) {
2998 			if (aconnector->dc_sink) {
2999 				amdgpu_dm_update_freesync_caps(connector, NULL);
3000 				/*
3001 				 * retain and release below are used to
3002 				 * bump up refcount for sink because the link doesn't point
3003 				 * to it anymore after disconnect, so on next crtc to connector
3004 				 * reshuffle by UMD we will get into unwanted dc_sink release
3005 				 */
3006 				dc_sink_release(aconnector->dc_sink);
3007 			}
3008 			aconnector->dc_sink = sink;
3009 			dc_sink_retain(aconnector->dc_sink);
3010 			amdgpu_dm_update_freesync_caps(connector,
3011 					aconnector->edid);
3012 		} else {
3013 			amdgpu_dm_update_freesync_caps(connector, NULL);
3014 			if (!aconnector->dc_sink) {
3015 				aconnector->dc_sink = aconnector->dc_em_sink;
3016 				dc_sink_retain(aconnector->dc_sink);
3017 			}
3018 		}
3019 
3020 		mutex_unlock(&dev->mode_config.mutex);
3021 
3022 		if (sink)
3023 			dc_sink_release(sink);
3024 		return;
3025 	}
3026 
3027 	/*
3028 	 * TODO: temporary guard to look for proper fix
3029 	 * if this sink is MST sink, we should not do anything
3030 	 */
3031 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3032 		dc_sink_release(sink);
3033 		return;
3034 	}
3035 
3036 	if (aconnector->dc_sink == sink) {
3037 		/*
3038 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3039 		 * Do nothing!!
3040 		 */
3041 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3042 				aconnector->connector_id);
3043 		if (sink)
3044 			dc_sink_release(sink);
3045 		return;
3046 	}
3047 
3048 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3049 		aconnector->connector_id, aconnector->dc_sink, sink);
3050 
3051 	mutex_lock(&dev->mode_config.mutex);
3052 
3053 	/*
3054 	 * 1. Update status of the drm connector
3055 	 * 2. Send an event and let userspace tell us what to do
3056 	 */
3057 	if (sink) {
3058 		/*
3059 		 * TODO: check if we still need the S3 mode update workaround.
3060 		 * If yes, put it here.
3061 		 */
3062 		if (aconnector->dc_sink) {
3063 			amdgpu_dm_update_freesync_caps(connector, NULL);
3064 			dc_sink_release(aconnector->dc_sink);
3065 		}
3066 
3067 		aconnector->dc_sink = sink;
3068 		dc_sink_retain(aconnector->dc_sink);
3069 		if (sink->dc_edid.length == 0) {
3070 			aconnector->edid = NULL;
3071 			if (aconnector->dc_link->aux_mode) {
3072 				drm_dp_cec_unset_edid(
3073 					&aconnector->dm_dp_aux.aux);
3074 			}
3075 		} else {
3076 			aconnector->edid =
3077 				(struct edid *)sink->dc_edid.raw_edid;
3078 
3079 			if (aconnector->dc_link->aux_mode)
3080 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3081 						    aconnector->edid);
3082 		}
3083 
3084 		drm_connector_update_edid_property(connector, aconnector->edid);
3085 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3086 		update_connector_ext_caps(aconnector);
3087 	} else {
3088 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3089 		amdgpu_dm_update_freesync_caps(connector, NULL);
3090 		drm_connector_update_edid_property(connector, NULL);
3091 		aconnector->num_modes = 0;
3092 		dc_sink_release(aconnector->dc_sink);
3093 		aconnector->dc_sink = NULL;
3094 		aconnector->edid = NULL;
3095 #ifdef CONFIG_DRM_AMD_DC_HDCP
3096 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3097 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3098 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3099 #endif
3100 	}
3101 
3102 	mutex_unlock(&dev->mode_config.mutex);
3103 
3104 	update_subconnector_property(aconnector);
3105 
3106 	if (sink)
3107 		dc_sink_release(sink);
3108 }
3109 
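/*
 * Common HPD (long pulse) handling: re-detect the link, update the connector
 * state and, for user-forced connectors with nothing attached, fall back to
 * an emulated detection before sending a hotplug event to userspace.
 */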
3110 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3111 {
3112 	struct drm_connector *connector = &aconnector->base;
3113 	struct drm_device *dev = connector->dev;
3114 	enum dc_connection_type new_connection_type = dc_connection_none;
3115 	struct amdgpu_device *adev = drm_to_adev(dev);
3116 #ifdef CONFIG_DRM_AMD_DC_HDCP
3117 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3118 #endif
3119 	bool ret = false;
3120 
3121 	if (adev->dm.disable_hpd_irq)
3122 		return;
3123 
3124 	/*
3125 	 * In case of failure or MST no need to update connector status or notify the OS
3126 	 * since (for MST case) MST does this in its own context.
3127 	 */
3128 	mutex_lock(&aconnector->hpd_lock);
3129 
3130 #ifdef CONFIG_DRM_AMD_DC_HDCP
3131 	if (adev->dm.hdcp_workqueue) {
3132 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3133 		dm_con_state->update_hdcp = true;
3134 	}
3135 #endif
3136 	if (aconnector->fake_enable)
3137 		aconnector->fake_enable = false;
3138 
3139 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3140 		DRM_ERROR("KMS: Failed to detect connector\n");
3141 
3142 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3143 		emulated_link_detect(aconnector->dc_link);
3144 
3145 		drm_modeset_lock_all(dev);
3146 		dm_restore_drm_connector_state(dev, connector);
3147 		drm_modeset_unlock_all(dev);
3148 
3149 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3150 			drm_kms_helper_connector_hotplug_event(connector);
3151 	} else {
3152 		mutex_lock(&adev->dm.dc_lock);
3153 		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3154 		mutex_unlock(&adev->dm.dc_lock);
3155 		if (ret) {
3156 			amdgpu_dm_update_connector_after_detect(aconnector);
3157 
3158 			drm_modeset_lock_all(dev);
3159 			dm_restore_drm_connector_state(dev, connector);
3160 			drm_modeset_unlock_all(dev);
3161 
3162 			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3163 				drm_kms_helper_connector_hotplug_event(connector);
3164 		}
3165 	}
3166 	mutex_unlock(&aconnector->hpd_lock);
3167 
3168 }
3169 
3170 static void handle_hpd_irq(void *param)
3171 {
3172 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3173 
3174 	handle_hpd_irq_helper(aconnector);
3175 
3176 }
3177 
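/*
 * Drain pending MST sideband messages: read the sink count / ESI DPCD range,
 * hand the IRQ to the MST manager and ACK it, repeating until no new IRQ is
 * reported or max_process_count is reached.
 */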
3178 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3179 {
3180 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3181 	uint8_t dret;
3182 	bool new_irq_handled = false;
3183 	int dpcd_addr;
3184 	int dpcd_bytes_to_read;
3185 
3186 	const int max_process_count = 30;
3187 	int process_count = 0;
3188 
3189 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3190 
3191 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3192 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3193 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3194 		dpcd_addr = DP_SINK_COUNT;
3195 	} else {
3196 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3197 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3198 		dpcd_addr = DP_SINK_COUNT_ESI;
3199 	}
3200 
3201 	dret = drm_dp_dpcd_read(
3202 		&aconnector->dm_dp_aux.aux,
3203 		dpcd_addr,
3204 		esi,
3205 		dpcd_bytes_to_read);
3206 
3207 	while (dret == dpcd_bytes_to_read &&
3208 		process_count < max_process_count) {
3209 		uint8_t retry;
3210 		dret = 0;
3211 
3212 		process_count++;
3213 
3214 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3215 		/* handle HPD short pulse irq */
3216 		if (aconnector->mst_mgr.mst_state)
3217 			drm_dp_mst_hpd_irq(
3218 				&aconnector->mst_mgr,
3219 				esi,
3220 				&new_irq_handled);
3221 
3222 		if (new_irq_handled) {
3223 			/* ACK at DPCD to notify downstream */
3224 			const int ack_dpcd_bytes_to_write =
3225 				dpcd_bytes_to_read - 1;
3226 
3227 			for (retry = 0; retry < 3; retry++) {
3228 				uint8_t wret;
3229 
3230 				wret = drm_dp_dpcd_write(
3231 					&aconnector->dm_dp_aux.aux,
3232 					dpcd_addr + 1,
3233 					&esi[1],
3234 					ack_dpcd_bytes_to_write);
3235 				if (wret == ack_dpcd_bytes_to_write)
3236 					break;
3237 			}
3238 
3239 			/* check if there is new irq to be handled */
3240 			dret = drm_dp_dpcd_read(
3241 				&aconnector->dm_dp_aux.aux,
3242 				dpcd_addr,
3243 				esi,
3244 				dpcd_bytes_to_read);
3245 
3246 			new_irq_handled = false;
3247 		} else {
3248 			break;
3249 		}
3250 	}
3251 
3252 	if (process_count == max_process_count)
3253 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3254 }
3255 
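/* Package the HPD RX IRQ data and queue it on the link's offload workqueue. */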
3256 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3257 							union hpd_irq_data hpd_irq_data)
3258 {
3259 	struct hpd_rx_irq_offload_work *offload_work =
3260 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3261 
3262 	if (!offload_work) {
3263 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3264 		return;
3265 	}
3266 
3267 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3268 	offload_work->data = hpd_irq_data;
3269 	offload_work->offload_wq = offload_wq;
3270 
3271 	queue_work(offload_wq->wq, &offload_work->work);
3272 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3273 }
3274 
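/*
 * HPD RX (short pulse) handler: let DC process the IRQ data, offload
 * automated-test and link-loss handling to the per-link workqueue, handle MST
 * sideband messages inline, and re-detect the link when a downstream port
 * status change is reported on a non-MST connector.
 */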
3275 static void handle_hpd_rx_irq(void *param)
3276 {
3277 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3278 	struct drm_connector *connector = &aconnector->base;
3279 	struct drm_device *dev = connector->dev;
3280 	struct dc_link *dc_link = aconnector->dc_link;
3281 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3282 	bool result = false;
3283 	enum dc_connection_type new_connection_type = dc_connection_none;
3284 	struct amdgpu_device *adev = drm_to_adev(dev);
3285 	union hpd_irq_data hpd_irq_data;
3286 	bool link_loss = false;
3287 	bool has_left_work = false;
3288 	int idx = aconnector->base.index;
3289 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3290 
3291 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3292 
3293 	if (adev->dm.disable_hpd_irq)
3294 		return;
3295 
3296 	/*
3297 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
3298 	 * conflict; once an i2c helper is implemented, this mutex should be
3299 	 * retired.
3300 	 */
3301 	mutex_lock(&aconnector->hpd_lock);
3302 
3303 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3304 						&link_loss, true, &has_left_work);
3305 
3306 	if (!has_left_work)
3307 		goto out;
3308 
3309 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3310 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3311 		goto out;
3312 	}
3313 
3314 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3315 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3316 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3317 			dm_handle_mst_sideband_msg(aconnector);
3318 			goto out;
3319 		}
3320 
3321 		if (link_loss) {
3322 			bool skip = false;
3323 
3324 			spin_lock(&offload_wq->offload_lock);
3325 			skip = offload_wq->is_handling_link_loss;
3326 
3327 			if (!skip)
3328 				offload_wq->is_handling_link_loss = true;
3329 
3330 			spin_unlock(&offload_wq->offload_lock);
3331 
3332 			if (!skip)
3333 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3334 
3335 			goto out;
3336 		}
3337 	}
3338 
3339 out:
3340 	if (result && !is_mst_root_connector) {
3341 		/* Downstream Port status changed. */
3342 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3343 			DRM_ERROR("KMS: Failed to detect connector\n");
3344 
3345 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3346 			emulated_link_detect(dc_link);
3347 
3348 			if (aconnector->fake_enable)
3349 				aconnector->fake_enable = false;
3350 
3351 			amdgpu_dm_update_connector_after_detect(aconnector);
3352 
3353 
3354 			drm_modeset_lock_all(dev);
3355 			dm_restore_drm_connector_state(dev, connector);
3356 			drm_modeset_unlock_all(dev);
3357 
3358 			drm_kms_helper_connector_hotplug_event(connector);
3359 		} else {
3360 			bool ret = false;
3361 
3362 			mutex_lock(&adev->dm.dc_lock);
3363 			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3364 			mutex_unlock(&adev->dm.dc_lock);
3365 
3366 			if (ret) {
3367 				if (aconnector->fake_enable)
3368 					aconnector->fake_enable = false;
3369 
3370 				amdgpu_dm_update_connector_after_detect(aconnector);
3371 
3372 				drm_modeset_lock_all(dev);
3373 				dm_restore_drm_connector_state(dev, connector);
3374 				drm_modeset_unlock_all(dev);
3375 
3376 				drm_kms_helper_connector_hotplug_event(connector);
3377 			}
3378 		}
3379 	}
3380 #ifdef CONFIG_DRM_AMD_DC_HDCP
3381 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3382 		if (adev->dm.hdcp_workqueue)
3383 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3384 	}
3385 #endif
3386 
3387 	if (dc_link->type != dc_connection_mst_branch)
3388 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3389 
3390 	mutex_unlock(&aconnector->hpd_lock);
3391 }
3392 
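/*
 * Walk the connector list and hook each connector's HPD and HPD RX (DP short
 * pulse) interrupt sources up to the handlers above.
 */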
3393 static void register_hpd_handlers(struct amdgpu_device *adev)
3394 {
3395 	struct drm_device *dev = adev_to_drm(adev);
3396 	struct drm_connector *connector;
3397 	struct amdgpu_dm_connector *aconnector;
3398 	const struct dc_link *dc_link;
3399 	struct dc_interrupt_params int_params = {0};
3400 
3401 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3402 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3403 
	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {
3407 		aconnector = to_amdgpu_dm_connector(connector);
3408 		dc_link = aconnector->dc_link;
3409 
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3411 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3412 			int_params.irq_source = dc_link->irq_source_hpd;
3413 
3414 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3415 					handle_hpd_irq,
3416 					(void *) aconnector);
3417 		}
3418 
		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3421 			/* Also register for DP short pulse (hpd_rx). */
3422 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;
3424 
3425 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3426 					handle_hpd_rx_irq,
3427 					(void *) aconnector);
3428 
3429 			if (adev->dm.hpd_rx_offload_wq)
3430 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3431 					aconnector;
3432 		}
3433 	}
3434 }
3435 
3436 #if defined(CONFIG_DRM_AMD_DC_SI)
3437 /* Register IRQ sources and initialize IRQ callbacks */
3438 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3439 {
3440 	struct dc *dc = adev->dm.dc;
3441 	struct common_irq_params *c_irq_params;
3442 	struct dc_interrupt_params int_params = {0};
3443 	int r;
3444 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3446 
3447 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3448 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3449 
3450 	/*
3451 	 * Actions of amdgpu_irq_add_id():
3452 	 * 1. Register a set() function with base driver.
3453 	 *    Base driver will call set() function to enable/disable an
3454 	 *    interrupt in DC hardware.
3455 	 * 2. Register amdgpu_dm_irq_handler().
3456 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3457 	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3460 
3461 	/* Use VBLANK interrupt */
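	/* The legacy DCE6 vblank interrupt source IDs are 1-based, hence i + 1 below. */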
3462 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3464 		if (r) {
3465 			DRM_ERROR("Failed to add crtc irq id!\n");
3466 			return r;
3467 		}
3468 
3469 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3470 		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i + 1, 0);
3472 
3473 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3474 
3475 		c_irq_params->adev = adev;
3476 		c_irq_params->irq_src = int_params.irq_source;
3477 
3478 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3479 				dm_crtc_high_irq, c_irq_params);
3480 	}
3481 
3482 	/* Use GRPH_PFLIP interrupt */
3483 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3484 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3485 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3486 		if (r) {
3487 			DRM_ERROR("Failed to add page flip irq id!\n");
3488 			return r;
3489 		}
3490 
3491 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3492 		int_params.irq_source =
3493 			dc_interrupt_to_irq_source(dc, i, 0);
3494 
3495 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3496 
3497 		c_irq_params->adev = adev;
3498 		c_irq_params->irq_src = int_params.irq_source;
3499 
3500 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
3504 
3505 	/* HPD */
3506 	r = amdgpu_irq_add_id(adev, client_id,
3507 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3508 	if (r) {
3509 		DRM_ERROR("Failed to add hpd irq id!\n");
3510 		return r;
3511 	}
3512 
3513 	register_hpd_handlers(adev);
3514 
3515 	return 0;
3516 }
3517 #endif
3518 
3519 /* Register IRQ sources and initialize IRQ callbacks */
3520 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3521 {
3522 	struct dc *dc = adev->dm.dc;
3523 	struct common_irq_params *c_irq_params;
3524 	struct dc_interrupt_params int_params = {0};
3525 	int r;
3526 	int i;
	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3528 
3529 	if (adev->family >= AMDGPU_FAMILY_AI)
3530 		client_id = SOC15_IH_CLIENTID_DCE;
3531 
3532 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3533 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3534 
3535 	/*
3536 	 * Actions of amdgpu_irq_add_id():
3537 	 * 1. Register a set() function with base driver.
3538 	 *    Base driver will call set() function to enable/disable an
3539 	 *    interrupt in DC hardware.
3540 	 * 2. Register amdgpu_dm_irq_handler().
3541 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3542 	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */
3545 
3546 	/* Use VBLANK interrupt */
3547 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3548 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3549 		if (r) {
3550 			DRM_ERROR("Failed to add crtc irq id!\n");
3551 			return r;
3552 		}
3553 
3554 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3555 		int_params.irq_source =
3556 			dc_interrupt_to_irq_source(dc, i, 0);
3557 
3558 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3559 
3560 		c_irq_params->adev = adev;
3561 		c_irq_params->irq_src = int_params.irq_source;
3562 
3563 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3564 				dm_crtc_high_irq, c_irq_params);
3565 	}
3566 
3567 	/* Use VUPDATE interrupt */
3568 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3569 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3570 		if (r) {
3571 			DRM_ERROR("Failed to add vupdate irq id!\n");
3572 			return r;
3573 		}
3574 
3575 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3576 		int_params.irq_source =
3577 			dc_interrupt_to_irq_source(dc, i, 0);
3578 
3579 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3580 
3581 		c_irq_params->adev = adev;
3582 		c_irq_params->irq_src = int_params.irq_source;
3583 
3584 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3585 				dm_vupdate_high_irq, c_irq_params);
3586 	}
3587 
3588 	/* Use GRPH_PFLIP interrupt */
3589 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3590 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3591 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3592 		if (r) {
3593 			DRM_ERROR("Failed to add page flip irq id!\n");
3594 			return r;
3595 		}
3596 
3597 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3598 		int_params.irq_source =
3599 			dc_interrupt_to_irq_source(dc, i, 0);
3600 
3601 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3602 
3603 		c_irq_params->adev = adev;
3604 		c_irq_params->irq_src = int_params.irq_source;
3605 
3606 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
3610 
3611 	/* HPD */
3612 	r = amdgpu_irq_add_id(adev, client_id,
3613 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3614 	if (r) {
3615 		DRM_ERROR("Failed to add hpd irq id!\n");
3616 		return r;
3617 	}
3618 
3619 	register_hpd_handlers(adev);
3620 
3621 	return 0;
3622 }
3623 
3624 /* Register IRQ sources and initialize IRQ callbacks */
3625 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3626 {
3627 	struct dc *dc = adev->dm.dc;
3628 	struct common_irq_params *c_irq_params;
3629 	struct dc_interrupt_params int_params = {0};
3630 	int r;
3631 	int i;
3632 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3633 	static const unsigned int vrtl_int_srcid[] = {
3634 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3635 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3636 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3637 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3638 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3639 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3640 	};
3641 #endif
3642 
3643 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3644 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3645 
3646 	/*
3647 	 * Actions of amdgpu_irq_add_id():
3648 	 * 1. Register a set() function with base driver.
3649 	 *    Base driver will call set() function to enable/disable an
3650 	 *    interrupt in DC hardware.
3651 	 * 2. Register amdgpu_dm_irq_handler().
3652 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3653 	 *    coming from DC hardware.
3654 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3655 	 *    for acknowledging and handling.
3656 	 */
3657 
3658 	/* Use VSTARTUP interrupt */
3659 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3660 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3661 			i++) {
3662 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3663 
3664 		if (r) {
3665 			DRM_ERROR("Failed to add crtc irq id!\n");
3666 			return r;
3667 		}
3668 
3669 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3670 		int_params.irq_source =
3671 			dc_interrupt_to_irq_source(dc, i, 0);
3672 
3673 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3674 
3675 		c_irq_params->adev = adev;
3676 		c_irq_params->irq_src = int_params.irq_source;
3677 
3678 		amdgpu_dm_irq_register_interrupt(
3679 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3680 	}
3681 
3682 	/* Use otg vertical line interrupt */
3683 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3684 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3685 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3686 				vrtl_int_srcid[i], &adev->vline0_irq);
3687 
3688 		if (r) {
3689 			DRM_ERROR("Failed to add vline0 irq id!\n");
3690 			return r;
3691 		}
3692 
3693 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3694 		int_params.irq_source =
3695 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3696 
3697 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3698 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3699 			break;
3700 		}
3701 
3702 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3703 					- DC_IRQ_SOURCE_DC1_VLINE0];
3704 
3705 		c_irq_params->adev = adev;
3706 		c_irq_params->irq_src = int_params.irq_source;
3707 
3708 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3709 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3710 	}
3711 #endif
3712 
3713 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3714 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3715 	 * to trigger at end of each vblank, regardless of state of the lock,
3716 	 * matching DCE behaviour.
3717 	 */
3718 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3719 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3720 	     i++) {
3721 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3722 
3723 		if (r) {
3724 			DRM_ERROR("Failed to add vupdate irq id!\n");
3725 			return r;
3726 		}
3727 
3728 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3729 		int_params.irq_source =
3730 			dc_interrupt_to_irq_source(dc, i, 0);
3731 
3732 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3733 
3734 		c_irq_params->adev = adev;
3735 		c_irq_params->irq_src = int_params.irq_source;
3736 
3737 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3738 				dm_vupdate_high_irq, c_irq_params);
3739 	}
3740 
3741 	/* Use GRPH_PFLIP interrupt */
3742 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3743 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3744 			i++) {
3745 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3746 		if (r) {
3747 			DRM_ERROR("Failed to add page flip irq id!\n");
3748 			return r;
3749 		}
3750 
3751 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3752 		int_params.irq_source =
3753 			dc_interrupt_to_irq_source(dc, i, 0);
3754 
3755 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3756 
3757 		c_irq_params->adev = adev;
3758 		c_irq_params->irq_src = int_params.irq_source;
3759 
3760 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}
3764 
3765 	/* HPD */
3766 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3767 			&adev->hpd_irq);
3768 	if (r) {
3769 		DRM_ERROR("Failed to add hpd irq id!\n");
3770 		return r;
3771 	}
3772 
3773 	register_hpd_handlers(adev);
3774 
3775 	return 0;
3776 }
3777 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3778 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3779 {
3780 	struct dc *dc = adev->dm.dc;
3781 	struct common_irq_params *c_irq_params;
3782 	struct dc_interrupt_params int_params = {0};
3783 	int r, i;
3784 
3785 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3786 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3787 
3788 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3789 			&adev->dmub_outbox_irq);
3790 	if (r) {
3791 		DRM_ERROR("Failed to add outbox irq id!\n");
3792 		return r;
3793 	}
3794 
3795 	if (dc->ctx->dmub_srv) {
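		/* Route the DMUB outbox1 low-priority ready interrupt to the DM handler. */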
3796 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3797 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3798 		int_params.irq_source =
3799 		dc_interrupt_to_irq_source(dc, i, 0);
3800 
3801 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3802 
3803 		c_irq_params->adev = adev;
3804 		c_irq_params->irq_src = int_params.irq_source;
3805 
3806 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3807 				dm_dmub_outbox1_low_irq, c_irq_params);
3808 	}
3809 
3810 	return 0;
3811 }
3812 
3813 /*
3814  * Acquires the lock for the atomic state object and returns
3815  * the new atomic state.
3816  *
3817  * This should only be called during atomic check.
3818  */
3819 int dm_atomic_get_state(struct drm_atomic_state *state,
3820 			struct dm_atomic_state **dm_state)
3821 {
3822 	struct drm_device *dev = state->dev;
3823 	struct amdgpu_device *adev = drm_to_adev(dev);
3824 	struct amdgpu_display_manager *dm = &adev->dm;
3825 	struct drm_private_state *priv_state;
3826 
3827 	if (*dm_state)
3828 		return 0;
3829 
3830 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3831 	if (IS_ERR(priv_state))
3832 		return PTR_ERR(priv_state);
3833 
3834 	*dm_state = to_dm_atomic_state(priv_state);
3835 
3836 	return 0;
3837 }
3838 
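/*
 * Returns the new DM private state from @state if one is present, or NULL if
 * it has not been acquired for this atomic update.
 */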
3839 static struct dm_atomic_state *
3840 dm_atomic_get_new_state(struct drm_atomic_state *state)
3841 {
3842 	struct drm_device *dev = state->dev;
3843 	struct amdgpu_device *adev = drm_to_adev(dev);
3844 	struct amdgpu_display_manager *dm = &adev->dm;
3845 	struct drm_private_obj *obj;
3846 	struct drm_private_state *new_obj_state;
3847 	int i;
3848 
3849 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3850 		if (obj->funcs == dm->atomic_obj.funcs)
3851 			return to_dm_atomic_state(new_obj_state);
3852 	}
3853 
3854 	return NULL;
3855 }
3856 
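/*
 * Duplicate the DM private state, copying the current DC state context so the
 * new state can be modified independently of the committed one.
 */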
3857 static struct drm_private_state *
3858 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3859 {
3860 	struct dm_atomic_state *old_state, *new_state;
3861 
3862 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3863 	if (!new_state)
3864 		return NULL;
3865 
3866 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3867 
3868 	old_state = to_dm_atomic_state(obj->state);
3869 
3870 	if (old_state && old_state->context)
3871 		new_state->context = dc_copy_state(old_state->context);
3872 
3873 	if (!new_state->context) {
3874 		kfree(new_state);
3875 		return NULL;
3876 	}
3877 
3878 	return &new_state->base;
3879 }
3880 
3881 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3882 				    struct drm_private_state *state)
3883 {
3884 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3885 
3886 	if (dm_state && dm_state->context)
3887 		dc_release_state(dm_state->context);
3888 
3889 	kfree(dm_state);
3890 }
3891 
3892 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3893 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3894 	.atomic_destroy_state = dm_atomic_destroy_state,
3895 };
3896 
3897 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3898 {
3899 	struct dm_atomic_state *state;
3900 	int r;
3901 
3902 	adev->mode_info.mode_config_initialized = true;
3903 
3904 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3905 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3906 
3907 	adev_to_drm(adev)->mode_config.max_width = 16384;
3908 	adev_to_drm(adev)->mode_config.max_height = 16384;
3909 
3910 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3911 	/* disable prefer shadow for now due to hibernation issues */
3912 	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3913 	/* indicates support for immediate flip */
3914 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3915 
3916 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3917 
3918 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3919 	if (!state)
3920 		return -ENOMEM;
3921 
3922 	state->context = dc_create_state(adev->dm.dc);
3923 	if (!state->context) {
3924 		kfree(state);
3925 		return -ENOMEM;
3926 	}
3927 
3928 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3929 
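	/* Expose the DC state as a DRM private object so it follows atomic check/commit. */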
3930 	drm_atomic_private_obj_init(adev_to_drm(adev),
3931 				    &adev->dm.atomic_obj,
3932 				    &state->base,
3933 				    &dm_atomic_state_funcs);
3934 
3935 	r = amdgpu_display_modeset_create_props(adev);
3936 	if (r) {
3937 		dc_release_state(state->context);
3938 		kfree(state);
3939 		return r;
3940 	}
3941 
3942 	r = amdgpu_dm_audio_init(adev);
3943 	if (r) {
3944 		dc_release_state(state->context);
3945 		kfree(state);
3946 		return r;
3947 	}
3948 
3949 	return 0;
3950 }
3951 
3952 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3953 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3954 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3955 
3956 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3957 					    int bl_idx)
3958 {
3959 #if defined(CONFIG_ACPI)
3960 	struct amdgpu_dm_backlight_caps caps;
3961 
3962 	memset(&caps, 0, sizeof(caps));
3963 
3964 	if (dm->backlight_caps[bl_idx].caps_valid)
3965 		return;
3966 
3967 	amdgpu_acpi_get_backlight_caps(&caps);
3968 	if (caps.caps_valid) {
3969 		dm->backlight_caps[bl_idx].caps_valid = true;
3970 		if (caps.aux_support)
3971 			return;
3972 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3973 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3974 	} else {
3975 		dm->backlight_caps[bl_idx].min_input_signal =
3976 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3977 		dm->backlight_caps[bl_idx].max_input_signal =
3978 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3979 	}
3980 #else
3981 	if (dm->backlight_caps[bl_idx].aux_support)
3982 		return;
3983 
3984 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3985 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3986 #endif
3987 }
3988 
3989 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
				unsigned int *min, unsigned int *max)
3991 {
3992 	if (!caps)
3993 		return 0;
3994 
3995 	if (caps->aux_support) {
3996 		// Firmware limits are in nits, DC API wants millinits.
3997 		*max = 1000 * caps->aux_max_input_signal;
3998 		*min = 1000 * caps->aux_min_input_signal;
3999 	} else {
4000 		// Firmware limits are 8-bit, PWM control is 16-bit.
4001 		*max = 0x101 * caps->max_input_signal;
4002 		*min = 0x101 * caps->min_input_signal;
4003 	}
4004 	return 1;
4005 }
4006 
4007 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4008 					uint32_t brightness)
4009 {
	unsigned int min, max;
4011 
4012 	if (!get_brightness_range(caps, &min, &max))
4013 		return brightness;
4014 
4015 	// Rescale 0..255 to min..max
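	// (a user level of 0 maps to min, AMDGPU_MAX_BL_LEVEL maps to max)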
4016 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4017 				       AMDGPU_MAX_BL_LEVEL);
4018 }
4019 
4020 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4021 				      uint32_t brightness)
4022 {
	unsigned int min, max;
4024 
4025 	if (!get_brightness_range(caps, &min, &max))
4026 		return brightness;
4027 
4028 	if (brightness < min)
4029 		return 0;
4030 	// Rescale min..max to 0..255
4031 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4032 				 max - min);
4033 }
4034 
4035 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4036 					 int bl_idx,
4037 					 u32 user_brightness)
4038 {
4039 	struct amdgpu_dm_backlight_caps caps;
4040 	struct dc_link *link;
4041 	u32 brightness;
4042 	bool rc;
4043 
4044 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4045 	caps = dm->backlight_caps[bl_idx];
4046 
4047 	dm->brightness[bl_idx] = user_brightness;
4048 	/* update scratch register */
4049 	if (bl_idx == 0)
4050 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4051 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4052 	link = (struct dc_link *)dm->backlight_link[bl_idx];
4053 
4054 	/* Change brightness based on AUX property */
4055 	if (caps.aux_support) {
4056 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
4057 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4058 		if (!rc)
4059 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4060 	} else {
4061 		rc = dc_link_set_backlight_level(link, brightness, 0);
4062 		if (!rc)
4063 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4064 	}
4065 
4066 	if (rc)
4067 		dm->actual_brightness[bl_idx] = user_brightness;
4068 }
4069 
4070 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4071 {
4072 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4073 	int i;
4074 
4075 	for (i = 0; i < dm->num_of_edps; i++) {
4076 		if (bd == dm->backlight_dev[i])
4077 			break;
4078 	}
4079 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4080 		i = 0;
4081 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4082 
4083 	return 0;
4084 }
4085 
4086 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4087 					 int bl_idx)
4088 {
4089 	struct amdgpu_dm_backlight_caps caps;
4090 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4091 
4092 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4093 	caps = dm->backlight_caps[bl_idx];
4094 
4095 	if (caps.aux_support) {
4096 		u32 avg, peak;
4097 		bool rc;
4098 
4099 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4100 		if (!rc)
4101 			return dm->brightness[bl_idx];
4102 		return convert_brightness_to_user(&caps, avg);
4103 	} else {
4104 		int ret = dc_link_get_backlight_level(link);
4105 
4106 		if (ret == DC_ERROR_UNEXPECTED)
4107 			return dm->brightness[bl_idx];
4108 		return convert_brightness_to_user(&caps, ret);
4109 	}
4110 }
4111 
4112 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4113 {
4114 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4115 	int i;
4116 
4117 	for (i = 0; i < dm->num_of_edps; i++) {
4118 		if (bd == dm->backlight_dev[i])
4119 			break;
4120 	}
4121 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4122 		i = 0;
4123 	return amdgpu_dm_backlight_get_level(dm, i);
4124 }
4125 
4126 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4127 	.options = BL_CORE_SUSPENDRESUME,
4128 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4129 	.update_status	= amdgpu_dm_backlight_update_status,
4130 };
4131 
4132 static void
4133 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4134 {
4135 	char bl_name[16];
4136 	struct backlight_properties props = { 0 };
4137 
4138 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4139 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4140 
4141 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4142 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4143 	props.type = BACKLIGHT_RAW;
4144 
4145 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4146 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4147 
4148 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4149 								       adev_to_drm(dm->adev)->dev,
4150 								       dm,
4151 								       &amdgpu_dm_backlight_ops,
4152 								       &props);
4153 
4154 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4155 		DRM_ERROR("DM: Backlight registration failed!\n");
4156 	else
4157 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4158 }
4159 
4160 static int initialize_plane(struct amdgpu_display_manager *dm,
4161 			    struct amdgpu_mode_info *mode_info, int plane_id,
4162 			    enum drm_plane_type plane_type,
4163 			    const struct dc_plane_cap *plane_cap)
4164 {
4165 	struct drm_plane *plane;
4166 	unsigned long possible_crtcs;
4167 	int ret = 0;
4168 
4169 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4170 	if (!plane) {
4171 		DRM_ERROR("KMS: Failed to allocate plane\n");
4172 		return -ENOMEM;
4173 	}
4174 	plane->type = plane_type;
4175 
4176 	/*
4177 	 * HACK: IGT tests expect that the primary plane for a CRTC
4178 	 * can only have one possible CRTC. Only expose support for
	 * any CRTC on planes that will not be used as a primary
	 * plane - i.e. overlay or underlay planes.
4181 	 */
4182 	possible_crtcs = 1 << plane_id;
4183 	if (plane_id >= dm->dc->caps.max_streams)
4184 		possible_crtcs = 0xff;
4185 
4186 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4187 
4188 	if (ret) {
4189 		DRM_ERROR("KMS: Failed to initialize plane\n");
4190 		kfree(plane);
4191 		return ret;
4192 	}
4193 
4194 	if (mode_info)
4195 		mode_info->planes[plane_id] = plane;
4196 
4197 	return ret;
4198 }
4199 
4200 
4201 static void register_backlight_device(struct amdgpu_display_manager *dm,
4202 				      struct dc_link *link)
4203 {
4204 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4205 	    link->type != dc_connection_none) {
4206 		/*
		 * Even if registration fails, we should continue with
		 * DM initialization because not having backlight control
		 * is better than a black screen.
4210 		 */
4211 		if (!dm->backlight_dev[dm->num_of_edps])
4212 			amdgpu_dm_register_backlight_device(dm);
4213 
4214 		if (dm->backlight_dev[dm->num_of_edps]) {
4215 			dm->backlight_link[dm->num_of_edps] = link;
4216 			dm->num_of_edps++;
4217 		}
4218 	}
4219 }
4220 
4221 
4222 /*
4223  * In this architecture, the association
4224  * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
4227  *
4228  * Returns 0 on success
4229  */
4230 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4231 {
4232 	struct amdgpu_display_manager *dm = &adev->dm;
4233 	int32_t i;
4234 	struct amdgpu_dm_connector *aconnector = NULL;
4235 	struct amdgpu_encoder *aencoder = NULL;
4236 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4237 	uint32_t link_cnt;
4238 	int32_t primary_planes;
4239 	enum dc_connection_type new_connection_type = dc_connection_none;
4240 	const struct dc_plane_cap *plane;
4241 	bool psr_feature_enabled = false;
4242 
4243 	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual number of CRTCs in use */
4245 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4246 
4247 	link_cnt = dm->dc->caps.max_links;
4248 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4249 		DRM_ERROR("DM: Failed to initialize mode config\n");
4250 		return -EINVAL;
4251 	}
4252 
4253 	/* There is one primary plane per CRTC */
4254 	primary_planes = dm->dc->caps.max_streams;
4255 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4256 
4257 	/*
4258 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4259 	 * Order is reversed to match iteration order in atomic check.
4260 	 */
4261 	for (i = (primary_planes - 1); i >= 0; i--) {
4262 		plane = &dm->dc->caps.planes[i];
4263 
4264 		if (initialize_plane(dm, mode_info, i,
4265 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4266 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4267 			goto fail;
4268 		}
4269 	}
4270 
4271 	/*
4272 	 * Initialize overlay planes, index starting after primary planes.
4273 	 * These planes have a higher DRM index than the primary planes since
4274 	 * they should be considered as having a higher z-order.
4275 	 * Order is reversed to match iteration order in atomic check.
4276 	 *
4277 	 * Only support DCN for now, and only expose one so we don't encourage
4278 	 * userspace to use up all the pipes.
4279 	 */
4280 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4281 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4282 
4283 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4284 			continue;
4285 
4286 		if (!plane->blends_with_above || !plane->blends_with_below)
4287 			continue;
4288 
4289 		if (!plane->pixel_format_support.argb8888)
4290 			continue;
4291 
4292 		if (initialize_plane(dm, NULL, primary_planes + i,
4293 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4294 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4295 			goto fail;
4296 		}
4297 
4298 		/* Only create one overlay plane. */
4299 		break;
4300 	}
4301 
4302 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4303 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4304 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4305 			goto fail;
4306 		}
4307 
4308 	/* Use Outbox interrupt */
4309 	switch (adev->ip_versions[DCE_HWIP][0]) {
4310 	case IP_VERSION(3, 0, 0):
4311 	case IP_VERSION(3, 1, 2):
4312 	case IP_VERSION(3, 1, 3):
4313 	case IP_VERSION(3, 1, 5):
4314 	case IP_VERSION(3, 1, 6):
4315 	case IP_VERSION(3, 2, 0):
4316 	case IP_VERSION(3, 2, 1):
4317 	case IP_VERSION(2, 1, 0):
4318 		if (register_outbox_irq_handlers(dm->adev)) {
4319 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4320 			goto fail;
4321 		}
4322 		break;
4323 	default:
4324 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4325 			      adev->ip_versions[DCE_HWIP][0]);
4326 	}
4327 
4328 	/* Determine whether to enable PSR support by default. */
4329 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4330 		switch (adev->ip_versions[DCE_HWIP][0]) {
4331 		case IP_VERSION(3, 1, 2):
4332 		case IP_VERSION(3, 1, 3):
4333 		case IP_VERSION(3, 1, 5):
4334 		case IP_VERSION(3, 1, 6):
4335 		case IP_VERSION(3, 2, 0):
4336 		case IP_VERSION(3, 2, 1):
4337 			psr_feature_enabled = true;
4338 			break;
4339 		default:
4340 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4341 			break;
4342 		}
4343 	}
4344 
4345 	/* loops over all connectors on the board */
4346 	for (i = 0; i < link_cnt; i++) {
4347 		struct dc_link *link = NULL;
4348 
4349 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4350 			DRM_ERROR(
4351 				"KMS: Cannot support more than %d display indexes\n",
4352 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4353 			continue;
4354 		}
4355 
4356 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4357 		if (!aconnector)
4358 			goto fail;
4359 
4360 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4361 		if (!aencoder)
4362 			goto fail;
4363 
4364 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4365 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4366 			goto fail;
4367 		}
4368 
4369 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4370 			DRM_ERROR("KMS: Failed to initialize connector\n");
4371 			goto fail;
4372 		}
4373 
4374 		link = dc_get_link_at_index(dm->dc, i);
4375 
4376 		if (!dc_link_detect_sink(link, &new_connection_type))
4377 			DRM_ERROR("KMS: Failed to detect connector\n");
4378 
4379 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4380 			emulated_link_detect(link);
4381 			amdgpu_dm_update_connector_after_detect(aconnector);
4382 		} else {
4383 			bool ret = false;
4384 
4385 			mutex_lock(&dm->dc_lock);
4386 			ret = dc_link_detect(link, DETECT_REASON_BOOT);
4387 			mutex_unlock(&dm->dc_lock);
4388 
4389 			if (ret) {
4390 				amdgpu_dm_update_connector_after_detect(aconnector);
4391 				register_backlight_device(dm, link);
4392 
4393 				if (dm->num_of_edps)
4394 					update_connector_ext_caps(aconnector);
4395 
4396 				if (psr_feature_enabled)
4397 					amdgpu_dm_set_psr_caps(link);
4398 
4399 				/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4400 				 * PSR is also supported.
4401 				 */
4402 				if (link->psr_settings.psr_feature_enabled)
4403 					adev_to_drm(adev)->vblank_disable_immediate = false;
4404 			}
4405 		}
4406 	}
4407 
4408 	/* Software is initialized. Now we can register interrupt handlers. */
4409 	switch (adev->asic_type) {
4410 #if defined(CONFIG_DRM_AMD_DC_SI)
4411 	case CHIP_TAHITI:
4412 	case CHIP_PITCAIRN:
4413 	case CHIP_VERDE:
4414 	case CHIP_OLAND:
4415 		if (dce60_register_irq_handlers(dm->adev)) {
4416 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4417 			goto fail;
4418 		}
4419 		break;
4420 #endif
4421 	case CHIP_BONAIRE:
4422 	case CHIP_HAWAII:
4423 	case CHIP_KAVERI:
4424 	case CHIP_KABINI:
4425 	case CHIP_MULLINS:
4426 	case CHIP_TONGA:
4427 	case CHIP_FIJI:
4428 	case CHIP_CARRIZO:
4429 	case CHIP_STONEY:
4430 	case CHIP_POLARIS11:
4431 	case CHIP_POLARIS10:
4432 	case CHIP_POLARIS12:
4433 	case CHIP_VEGAM:
4434 	case CHIP_VEGA10:
4435 	case CHIP_VEGA12:
4436 	case CHIP_VEGA20:
4437 		if (dce110_register_irq_handlers(dm->adev)) {
4438 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4439 			goto fail;
4440 		}
4441 		break;
4442 	default:
4443 		switch (adev->ip_versions[DCE_HWIP][0]) {
4444 		case IP_VERSION(1, 0, 0):
4445 		case IP_VERSION(1, 0, 1):
4446 		case IP_VERSION(2, 0, 2):
4447 		case IP_VERSION(2, 0, 3):
4448 		case IP_VERSION(2, 0, 0):
4449 		case IP_VERSION(2, 1, 0):
4450 		case IP_VERSION(3, 0, 0):
4451 		case IP_VERSION(3, 0, 2):
4452 		case IP_VERSION(3, 0, 3):
4453 		case IP_VERSION(3, 0, 1):
4454 		case IP_VERSION(3, 1, 2):
4455 		case IP_VERSION(3, 1, 3):
4456 		case IP_VERSION(3, 1, 5):
4457 		case IP_VERSION(3, 1, 6):
4458 		case IP_VERSION(3, 2, 0):
4459 		case IP_VERSION(3, 2, 1):
4460 			if (dcn10_register_irq_handlers(dm->adev)) {
4461 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4462 				goto fail;
4463 			}
4464 			break;
4465 		default:
4466 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4467 					adev->ip_versions[DCE_HWIP][0]);
4468 			goto fail;
4469 		}
4470 		break;
4471 	}
4472 
4473 	return 0;
4474 fail:
4475 	kfree(aencoder);
4476 	kfree(aconnector);
4477 
4478 	return -EINVAL;
4479 }
4480 
4481 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4482 {
4483 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4485 }
4486 
4487 /******************************************************************************
4488  * amdgpu_display_funcs functions
4489  *****************************************************************************/
4490 
4491 /*
4492  * dm_bandwidth_update - program display watermarks
4493  *
4494  * @adev: amdgpu_device pointer
4495  *
4496  * Calculate and program the display watermarks and line buffer allocation.
4497  */
4498 static void dm_bandwidth_update(struct amdgpu_device *adev)
4499 {
4500 	/* TODO: implement later */
4501 }
4502 
4503 static const struct amdgpu_display_funcs dm_display_funcs = {
4504 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4505 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4506 	.backlight_set_level = NULL, /* never called for DC */
4507 	.backlight_get_level = NULL, /* never called for DC */
4508 	.hpd_sense = NULL,/* called unconditionally */
4509 	.hpd_set_polarity = NULL, /* called unconditionally */
4510 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4511 	.page_flip_get_scanoutpos =
4512 		dm_crtc_get_scanoutpos,/* called unconditionally */
4513 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4514 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4515 };
4516 
4517 #if defined(CONFIG_DEBUG_KERNEL_DC)
4518 
4519 static ssize_t s3_debug_store(struct device *device,
4520 			      struct device_attribute *attr,
4521 			      const char *buf,
4522 			      size_t count)
4523 {
4524 	int ret;
4525 	int s3_state;
4526 	struct drm_device *drm_dev = dev_get_drvdata(device);
4527 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4528 
4529 	ret = kstrtoint(buf, 0, &s3_state);
4530 
4531 	if (ret == 0) {
4532 		if (s3_state) {
4533 			dm_resume(adev);
4534 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
		} else {
			dm_suspend(adev);
		}
4537 	}
4538 
4539 	return ret == 0 ? count : 0;
4540 }
4541 
4542 DEVICE_ATTR_WO(s3_debug);
4543 
4544 #endif
4545 
4546 static int dm_early_init(void *handle)
4547 {
4548 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4549 
4550 	switch (adev->asic_type) {
4551 #if defined(CONFIG_DRM_AMD_DC_SI)
4552 	case CHIP_TAHITI:
4553 	case CHIP_PITCAIRN:
4554 	case CHIP_VERDE:
4555 		adev->mode_info.num_crtc = 6;
4556 		adev->mode_info.num_hpd = 6;
4557 		adev->mode_info.num_dig = 6;
4558 		break;
4559 	case CHIP_OLAND:
4560 		adev->mode_info.num_crtc = 2;
4561 		adev->mode_info.num_hpd = 2;
4562 		adev->mode_info.num_dig = 2;
4563 		break;
4564 #endif
4565 	case CHIP_BONAIRE:
4566 	case CHIP_HAWAII:
4567 		adev->mode_info.num_crtc = 6;
4568 		adev->mode_info.num_hpd = 6;
4569 		adev->mode_info.num_dig = 6;
4570 		break;
4571 	case CHIP_KAVERI:
4572 		adev->mode_info.num_crtc = 4;
4573 		adev->mode_info.num_hpd = 6;
4574 		adev->mode_info.num_dig = 7;
4575 		break;
4576 	case CHIP_KABINI:
4577 	case CHIP_MULLINS:
4578 		adev->mode_info.num_crtc = 2;
4579 		adev->mode_info.num_hpd = 6;
4580 		adev->mode_info.num_dig = 6;
4581 		break;
4582 	case CHIP_FIJI:
4583 	case CHIP_TONGA:
4584 		adev->mode_info.num_crtc = 6;
4585 		adev->mode_info.num_hpd = 6;
4586 		adev->mode_info.num_dig = 7;
4587 		break;
4588 	case CHIP_CARRIZO:
4589 		adev->mode_info.num_crtc = 3;
4590 		adev->mode_info.num_hpd = 6;
4591 		adev->mode_info.num_dig = 9;
4592 		break;
4593 	case CHIP_STONEY:
4594 		adev->mode_info.num_crtc = 2;
4595 		adev->mode_info.num_hpd = 6;
4596 		adev->mode_info.num_dig = 9;
4597 		break;
4598 	case CHIP_POLARIS11:
4599 	case CHIP_POLARIS12:
4600 		adev->mode_info.num_crtc = 5;
4601 		adev->mode_info.num_hpd = 5;
4602 		adev->mode_info.num_dig = 5;
4603 		break;
4604 	case CHIP_POLARIS10:
4605 	case CHIP_VEGAM:
4606 		adev->mode_info.num_crtc = 6;
4607 		adev->mode_info.num_hpd = 6;
4608 		adev->mode_info.num_dig = 6;
4609 		break;
4610 	case CHIP_VEGA10:
4611 	case CHIP_VEGA12:
4612 	case CHIP_VEGA20:
4613 		adev->mode_info.num_crtc = 6;
4614 		adev->mode_info.num_hpd = 6;
4615 		adev->mode_info.num_dig = 6;
4616 		break;
4617 	default:
4618 
4619 		switch (adev->ip_versions[DCE_HWIP][0]) {
4620 		case IP_VERSION(2, 0, 2):
4621 		case IP_VERSION(3, 0, 0):
4622 			adev->mode_info.num_crtc = 6;
4623 			adev->mode_info.num_hpd = 6;
4624 			adev->mode_info.num_dig = 6;
4625 			break;
4626 		case IP_VERSION(2, 0, 0):
4627 		case IP_VERSION(3, 0, 2):
4628 			adev->mode_info.num_crtc = 5;
4629 			adev->mode_info.num_hpd = 5;
4630 			adev->mode_info.num_dig = 5;
4631 			break;
4632 		case IP_VERSION(2, 0, 3):
4633 		case IP_VERSION(3, 0, 3):
4634 			adev->mode_info.num_crtc = 2;
4635 			adev->mode_info.num_hpd = 2;
4636 			adev->mode_info.num_dig = 2;
4637 			break;
4638 		case IP_VERSION(1, 0, 0):
4639 		case IP_VERSION(1, 0, 1):
4640 		case IP_VERSION(3, 0, 1):
4641 		case IP_VERSION(2, 1, 0):
4642 		case IP_VERSION(3, 1, 2):
4643 		case IP_VERSION(3, 1, 3):
4644 		case IP_VERSION(3, 1, 5):
4645 		case IP_VERSION(3, 1, 6):
4646 		case IP_VERSION(3, 2, 0):
4647 		case IP_VERSION(3, 2, 1):
4648 			adev->mode_info.num_crtc = 4;
4649 			adev->mode_info.num_hpd = 4;
4650 			adev->mode_info.num_dig = 4;
4651 			break;
4652 		default:
4653 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4654 					adev->ip_versions[DCE_HWIP][0]);
4655 			return -EINVAL;
4656 		}
4657 		break;
4658 	}
4659 
4660 	amdgpu_dm_set_irq_funcs(adev);
4661 
4662 	if (adev->mode_info.funcs == NULL)
4663 		adev->mode_info.funcs = &dm_display_funcs;
4664 
4665 	/*
4666 	 * Note: Do NOT change adev->audio_endpt_rreg and
4667 	 * adev->audio_endpt_wreg because they are initialised in
4668 	 * amdgpu_device_init()
4669 	 */
4670 #if defined(CONFIG_DEBUG_KERNEL_DC)
4671 	device_create_file(
4672 		adev_to_drm(adev)->dev,
4673 		&dev_attr_s3_debug);
4674 #endif
4675 
4676 	return 0;
4677 }
4678 
4679 static bool modeset_required(struct drm_crtc_state *crtc_state,
4680 			     struct dc_stream_state *new_stream,
4681 			     struct dc_stream_state *old_stream)
4682 {
4683 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4684 }
4685 
4686 static bool modereset_required(struct drm_crtc_state *crtc_state)
4687 {
4688 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4689 }
4690 
4691 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4692 {
4693 	drm_encoder_cleanup(encoder);
4694 	kfree(encoder);
4695 }
4696 
4697 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4698 	.destroy = amdgpu_dm_encoder_destroy,
4699 };
4700 
4701 
4702 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4703 					 struct drm_framebuffer *fb,
4704 					 int *min_downscale, int *max_upscale)
4705 {
4706 	struct amdgpu_device *adev = drm_to_adev(dev);
4707 	struct dc *dc = adev->dm.dc;
4708 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4709 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4710 
4711 	switch (fb->format->format) {
4712 	case DRM_FORMAT_P010:
4713 	case DRM_FORMAT_NV12:
4714 	case DRM_FORMAT_NV21:
4715 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4716 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4717 		break;
4718 
4719 	case DRM_FORMAT_XRGB16161616F:
4720 	case DRM_FORMAT_ARGB16161616F:
4721 	case DRM_FORMAT_XBGR16161616F:
4722 	case DRM_FORMAT_ABGR16161616F:
4723 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4724 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4725 		break;
4726 
4727 	default:
4728 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4729 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4730 		break;
4731 	}
4732 
4733 	/*
	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
	 * only a scaling factor of 1.0 == 1000 units is accepted.
4736 	 */
4737 	if (*max_upscale == 1)
4738 		*max_upscale = 1000;
4739 
4740 	if (*min_downscale == 1)
4741 		*min_downscale = 1000;
4742 }
4743 
4744 
4745 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4746 				const struct drm_plane_state *state,
4747 				struct dc_scaling_info *scaling_info)
4748 {
4749 	int scale_w, scale_h, min_downscale, max_upscale;
4750 
4751 	memset(scaling_info, 0, sizeof(*scaling_info));
4752 
	/* Source is fixed-point 16.16, but we ignore the fractional part for now... */
4754 	scaling_info->src_rect.x = state->src_x >> 16;
4755 	scaling_info->src_rect.y = state->src_y >> 16;
4756 
4757 	/*
4758 	 * For reasons we don't (yet) fully understand a non-zero
4759 	 * src_y coordinate into an NV12 buffer can cause a
4760 	 * system hang on DCN1x.
4761 	 * To avoid hangs (and maybe be overly cautious)
4762 	 * let's reject both non-zero src_x and src_y.
4763 	 *
4764 	 * We currently know of only one use-case to reproduce a
4765 	 * scenario with non-zero src_x and src_y for NV12, which
4766 	 * is to gesture the YouTube Android app into full screen
4767 	 * on ChromeOS.
4768 	 */
4769 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4770 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4771 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4772 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4773 		return -EINVAL;
4774 
4775 	scaling_info->src_rect.width = state->src_w >> 16;
4776 	if (scaling_info->src_rect.width == 0)
4777 		return -EINVAL;
4778 
4779 	scaling_info->src_rect.height = state->src_h >> 16;
4780 	if (scaling_info->src_rect.height == 0)
4781 		return -EINVAL;
4782 
4783 	scaling_info->dst_rect.x = state->crtc_x;
4784 	scaling_info->dst_rect.y = state->crtc_y;
4785 
4786 	if (state->crtc_w == 0)
4787 		return -EINVAL;
4788 
4789 	scaling_info->dst_rect.width = state->crtc_w;
4790 
4791 	if (state->crtc_h == 0)
4792 		return -EINVAL;
4793 
4794 	scaling_info->dst_rect.height = state->crtc_h;
4795 
4796 	/* DRM doesn't specify clipping on destination output. */
4797 	scaling_info->clip_rect = scaling_info->dst_rect;
4798 
4799 	/* Validate scaling per-format with DC plane caps */
4800 	if (state->plane && state->plane->dev && state->fb) {
4801 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4802 					     &min_downscale, &max_upscale);
4803 	} else {
4804 		min_downscale = 250;
4805 		max_upscale = 16000;
4806 	}
4807 
4808 	scale_w = scaling_info->dst_rect.width * 1000 /
4809 		  scaling_info->src_rect.width;
4810 
4811 	if (scale_w < min_downscale || scale_w > max_upscale)
4812 		return -EINVAL;
4813 
4814 	scale_h = scaling_info->dst_rect.height * 1000 /
4815 		  scaling_info->src_rect.height;
4816 
4817 	if (scale_h < min_downscale || scale_h > max_upscale)
4818 		return -EINVAL;
4819 
4820 	/*
4821 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4822 	 * assume reasonable defaults based on the format.
4823 	 */
4824 
4825 	return 0;
4826 }
4827 
4828 static void
4829 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4830 				 uint64_t tiling_flags)
4831 {
4832 	/* Fill GFX8 params */
4833 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4834 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4835 
4836 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4837 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4838 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4839 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4840 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4841 
4842 		/* XXX fix me for VI */
4843 		tiling_info->gfx8.num_banks = num_banks;
4844 		tiling_info->gfx8.array_mode =
4845 				DC_ARRAY_2D_TILED_THIN1;
4846 		tiling_info->gfx8.tile_split = tile_split;
4847 		tiling_info->gfx8.bank_width = bankw;
4848 		tiling_info->gfx8.bank_height = bankh;
4849 		tiling_info->gfx8.tile_aspect = mtaspect;
4850 		tiling_info->gfx8.tile_mode =
4851 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4852 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4853 			== DC_ARRAY_1D_TILED_THIN1) {
4854 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4855 	}
4856 
4857 	tiling_info->gfx8.pipe_config =
4858 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4859 }
4860 
4861 static void
4862 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4863 				  union dc_tiling_info *tiling_info)
4864 {
4865 	tiling_info->gfx9.num_pipes =
4866 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4867 	tiling_info->gfx9.num_banks =
4868 		adev->gfx.config.gb_addr_config_fields.num_banks;
4869 	tiling_info->gfx9.pipe_interleave =
4870 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4871 	tiling_info->gfx9.num_shader_engines =
4872 		adev->gfx.config.gb_addr_config_fields.num_se;
4873 	tiling_info->gfx9.max_compressed_frags =
4874 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4875 	tiling_info->gfx9.num_rb_per_se =
4876 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4877 	tiling_info->gfx9.shaderEnable = 1;
4878 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4879 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4880 }
4881 
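/*
 * Validate the requested DCC parameters against the DC compression
 * capabilities reported for this format, surface size and swizzle mode.
 */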
4882 static int
4883 validate_dcc(struct amdgpu_device *adev,
4884 	     const enum surface_pixel_format format,
4885 	     const enum dc_rotation_angle rotation,
4886 	     const union dc_tiling_info *tiling_info,
4887 	     const struct dc_plane_dcc_param *dcc,
4888 	     const struct dc_plane_address *address,
4889 	     const struct plane_size *plane_size)
4890 {
4891 	struct dc *dc = adev->dm.dc;
4892 	struct dc_dcc_surface_param input;
4893 	struct dc_surface_dcc_cap output;
4894 
4895 	memset(&input, 0, sizeof(input));
4896 	memset(&output, 0, sizeof(output));
4897 
4898 	if (!dcc->enable)
4899 		return 0;
4900 
4901 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4902 	    !dc->cap_funcs.get_dcc_compression_cap)
4903 		return -EINVAL;
4904 
4905 	input.format = format;
4906 	input.surface_size.width = plane_size->surface_size.width;
4907 	input.surface_size.height = plane_size->surface_size.height;
4908 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4909 
4910 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4911 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4912 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4913 		input.scan = SCAN_DIRECTION_VERTICAL;
4914 
4915 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4916 		return -EINVAL;
4917 
4918 	if (!output.capable)
4919 		return -EINVAL;
4920 
4921 	if (dcc->independent_64b_blks == 0 &&
4922 	    output.grph.rgb.independent_64b_blks != 0)
4923 		return -EINVAL;
4924 
4925 	return 0;
4926 }
4927 
4928 static bool
4929 modifier_has_dcc(uint64_t modifier)
4930 {
4931 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4932 }
4933 
4934 static unsigned
4935 modifier_gfx9_swizzle_mode(uint64_t modifier)
4936 {
4937 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4938 		return 0;
4939 
4940 	return AMD_FMT_MOD_GET(TILE, modifier);
4941 }
4942 
4943 static const struct drm_format_info *
4944 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4945 {
4946 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4947 }
4948 
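/*
 * Fill GFX9+ tiling parameters from a format modifier, overriding the device
 * defaults with the pipe/bank/packer layout encoded in the modifier.
 */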
4949 static void
4950 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4951 				    union dc_tiling_info *tiling_info,
4952 				    uint64_t modifier)
4953 {
4954 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4955 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4956 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4957 	unsigned int pipes_log2;
4958 
4959 	pipes_log2 = min(5u, mod_pipe_xor_bits);
4960 
4961 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4962 
4963 	if (!IS_AMD_FMT_MOD(modifier))
4964 		return;
4965 
4966 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4967 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4968 
4969 	if (adev->family >= AMDGPU_FAMILY_NV) {
4970 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4971 	} else {
4972 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4973 
4974 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4975 	}
4976 }
4977 
4978 enum dm_micro_swizzle {
4979 	MICRO_SWIZZLE_Z = 0,
4980 	MICRO_SWIZZLE_S = 1,
4981 	MICRO_SWIZZLE_D = 2,
4982 	MICRO_SWIZZLE_R = 3
4983 };
4984 
4985 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4986 					  uint32_t format,
4987 					  uint64_t modifier)
4988 {
4989 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4990 	const struct drm_format_info *info = drm_format_info(format);
4991 	int i;
4992 
4993 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4994 
4995 	if (!info)
4996 		return false;
4997 
4998 	/*
4999 	 * We always have to allow these modifiers:
5000 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
5001 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
5002 	 */
5003 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
5004 	    modifier == DRM_FORMAT_MOD_INVALID) {
5005 		return true;
5006 	}
5007 
5008 	/* Check that the modifier is on the list of the plane's supported modifiers. */
5009 	for (i = 0; i < plane->modifier_count; i++) {
5010 		if (modifier == plane->modifiers[i])
5011 			break;
5012 	}
5013 	if (i == plane->modifier_count)
5014 		return false;
5015 
5016 	/*
5017 	 * For D swizzle the canonical modifier depends on the bpp, so check
5018 	 * it here.
5019 	 */
5020 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
5021 	    adev->family >= AMDGPU_FAMILY_NV) {
5022 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
5023 			return false;
5024 	}
5025 
5026 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5027 	    info->cpp[0] < 8)
5028 		return false;
5029 
5030 	if (modifier_has_dcc(modifier)) {
5031 		/* Per radeonsi comments 16/64 bpp are more complicated. */
5032 		if (info->cpp[0] != 4)
5033 			return false;
		/*
		 * We support multi-planar formats, but not when combined with
		 * additional DCC metadata planes.
		 */
5036 		if (info->num_planes > 1)
5037 			return false;
5038 	}
5039 
5040 	return true;
5041 }
5042 
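/*
 * Append @mod to the modifier list, doubling the allocation when it is full.
 * On allocation failure the list is freed and *mods is set to NULL.
 */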
5043 static void
5044 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5045 {
5046 	if (!*mods)
5047 		return;
5048 
5049 	if (*cap - *size < 1) {
5050 		uint64_t new_cap = *cap * 2;
5051 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5052 
5053 		if (!new_mods) {
5054 			kfree(*mods);
5055 			*mods = NULL;
5056 			return;
5057 		}
5058 
5059 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5060 		kfree(*mods);
5061 		*mods = new_mods;
5062 		*cap = new_cap;
5063 	}
5064 
5065 	(*mods)[*size] = mod;
5066 	*size += 1;
5067 }
5068 
5069 static void
5070 add_gfx9_modifiers(const struct amdgpu_device *adev,
5071 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5072 {
5073 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5074 	int pipe_xor_bits = min(8, pipes +
5075 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5076 	int bank_xor_bits = min(8 - pipe_xor_bits,
5077 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5078 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);

5082 	if (adev->family == AMDGPU_FAMILY_RV) {
5083 		/* Raven2 and later */
5084 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5085 
5086 		/*
5087 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5088 		 * doesn't support _D on DCN
5089 		 */
5090 
5091 		if (has_constant_encode) {
5092 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5093 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5094 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5095 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5096 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5097 				    AMD_FMT_MOD_SET(DCC, 1) |
5098 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5099 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5100 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5101 		}
5102 
5103 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5104 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5105 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5106 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5107 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5108 			    AMD_FMT_MOD_SET(DCC, 1) |
5109 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5110 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5111 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5112 
5113 		if (has_constant_encode) {
5114 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5115 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5116 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5117 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5118 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5119 				    AMD_FMT_MOD_SET(DCC, 1) |
5120 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5121 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5125 				    AMD_FMT_MOD_SET(RB, rb) |
5126 				    AMD_FMT_MOD_SET(PIPE, pipes));
5127 		}
5128 
5129 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5130 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5131 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5132 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5133 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5134 			    AMD_FMT_MOD_SET(DCC, 1) |
5135 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5136 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5137 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5138 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5139 			    AMD_FMT_MOD_SET(RB, rb) |
5140 			    AMD_FMT_MOD_SET(PIPE, pipes));
5141 	}
5142 
5143 	/*
5144 	 * Only supported for 64bpp on Raven, will be filtered on format in
5145 	 * dm_plane_format_mod_supported.
5146 	 */
5147 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5148 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5149 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5150 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5151 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5152 
5153 	if (adev->family == AMDGPU_FAMILY_RV) {
5154 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5155 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5156 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5157 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5158 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5159 	}
5160 
5161 	/*
5162 	 * Only supported for 64bpp on Raven, will be filtered on format in
5163 	 * dm_plane_format_mod_supported.
5164 	 */
5165 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5166 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5167 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5168 
5169 	if (adev->family == AMDGPU_FAMILY_RV) {
5170 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5171 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5172 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5173 	}
5174 }
5175 
5176 static void
5177 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5178 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5179 {
5180 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5181 
5182 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5183 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5184 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5185 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5186 		    AMD_FMT_MOD_SET(DCC, 1) |
5187 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5188 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5189 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5190 
5191 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5192 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5193 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5194 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5195 		    AMD_FMT_MOD_SET(DCC, 1) |
5196 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5197 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5198 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5199 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5200 
5201 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5202 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5203 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5204 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5205 
5206 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5207 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5208 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5209 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5210 
5211 
5212 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5213 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5214 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5215 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5216 
5217 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5218 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5219 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5220 }
5221 
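/*
 * GFX10.3 (RB+) modifiers. Compared to GFX10.1 these also encode the number
 * of packers and add independent-128B DCC variants.
 */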
5222 static void
5223 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5224 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5225 {
5226 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5227 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5228 
5229 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5230 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5231 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5232 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5233 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5234 		    AMD_FMT_MOD_SET(DCC, 1) |
5235 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5236 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5237 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5238 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5239 
5240 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5241 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5242 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5243 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5244 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5245 		    AMD_FMT_MOD_SET(DCC, 1) |
5246 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5247 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5248 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5249 
5250 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5251 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5252 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5253 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5254 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5255 		    AMD_FMT_MOD_SET(DCC, 1) |
5256 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5257 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5258 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5259 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5260 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5261 
5262 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5263 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5264 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5265 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5266 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5267 		    AMD_FMT_MOD_SET(DCC, 1) |
5268 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5269 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5270 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5271 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5272 
5273 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5274 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5275 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5276 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5277 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5278 
5279 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5280 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5281 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5282 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5283 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5284 
5285 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5286 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5287 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5288 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5289 
5290 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5291 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5292 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5293 }
5294 
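/*
 * GFX11 modifiers. gb_addr_config is read from the register directly because
 * the cached gb_addr_config_fields are not populated yet (see the TODO
 * below). The preferred R_X swizzle (256K or 64K, depending on the pipe
 * count) is added first, with DCC variants for best compression and for 4K
 * and larger displays.
 */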
5295 static void
5296 add_gfx11_modifiers(struct amdgpu_device *adev,
5297 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5298 {
5299 	int num_pipes = 0;
5300 	int pipe_xor_bits = 0;
5301 	int num_pkrs = 0;
5302 	int pkrs = 0;
5303 	u32 gb_addr_config;
5304 	u8 i = 0;
5305 	unsigned swizzle_r_x;
5306 	uint64_t modifier_r_x;
5307 	uint64_t modifier_dcc_best;
5308 	uint64_t modifier_dcc_4k;
5309 
	/*
	 * TODO: GFX11 IP HW init hasn't finished yet, so reading
	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} returns zero.
	 */
5312 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5313 	ASSERT(gb_addr_config != 0);
5314 
5315 	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5316 	pkrs = ilog2(num_pkrs);
5317 	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5318 	pipe_xor_bits = ilog2(num_pipes);
5319 
5320 	for (i = 0; i < 2; i++) {
5321 		/* Insert the best one first. */
5322 		/* R_X swizzle modes are the best for rendering and DCC requires them. */
5323 		if (num_pipes > 16)
5324 			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5325 		else
5326 			swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
5327 
5328 		modifier_r_x = AMD_FMT_MOD |
5329 			       AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5330 			       AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5331 			       AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5332 			       AMD_FMT_MOD_SET(PACKERS, pkrs);
5333 
5334 		/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5335 		modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5336 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5337 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5338 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5339 
5340 		/* DCC settings for 4K and greater resolutions. (required by display hw) */
5341 		modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
5342 				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5343 				  AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5344 				  AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5345 
5346 		add_modifier(mods, size, capacity, modifier_dcc_best);
5347 		add_modifier(mods, size, capacity, modifier_dcc_4k);
5348 
5349 		add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5350 		add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5351 
5352 		add_modifier(mods, size, capacity, modifier_r_x);
5353 	}
5354 
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5358 }
5359 
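/*
 * Build the list of format modifiers advertised for a plane. Cursor planes
 * only get LINEAR; otherwise the family-specific helper adds the tiled (and
 * DCC) modifiers, followed by LINEAR and the DRM_FORMAT_MOD_INVALID
 * terminator. Pre-GFX9 parts advertise no modifiers at all.
 */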
5360 static int
5361 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5362 {
5363 	uint64_t size = 0, capacity = 128;
5364 	*mods = NULL;
5365 
5366 	/* We have not hooked up any pre-GFX9 modifiers. */
5367 	if (adev->family < AMDGPU_FAMILY_AI)
5368 		return 0;
5369 
5370 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5371 
5372 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5373 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5374 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5375 		return *mods ? 0 : -ENOMEM;
5376 	}
5377 
5378 	switch (adev->family) {
5379 	case AMDGPU_FAMILY_AI:
5380 	case AMDGPU_FAMILY_RV:
5381 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5382 		break;
5383 	case AMDGPU_FAMILY_NV:
5384 	case AMDGPU_FAMILY_VGH:
5385 	case AMDGPU_FAMILY_YC:
5386 	case AMDGPU_FAMILY_GC_10_3_6:
5387 	case AMDGPU_FAMILY_GC_10_3_7:
5388 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5389 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5390 		else
5391 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5392 		break;
5393 	case AMDGPU_FAMILY_GC_11_0_0:
5394 		add_gfx11_modifiers(adev, mods, &size, &capacity);
5395 		break;
5396 	}
5397 
5398 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5399 
5400 	/* INVALID marks the end of the list. */
5401 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5402 
5403 	if (!*mods)
5404 		return -ENOMEM;
5405 
5406 	return 0;
5407 }
5408 
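/*
 * Derive the GFX9+ tiling and DCC plane attributes from the framebuffer
 * modifier: swizzle mode, DCC metadata pitch/address and the independent
 * block size, then validate the resulting DCC configuration with DC.
 */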
5409 static int
5410 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5411 					  const struct amdgpu_framebuffer *afb,
5412 					  const enum surface_pixel_format format,
5413 					  const enum dc_rotation_angle rotation,
5414 					  const struct plane_size *plane_size,
5415 					  union dc_tiling_info *tiling_info,
5416 					  struct dc_plane_dcc_param *dcc,
5417 					  struct dc_plane_address *address,
5418 					  const bool force_disable_dcc)
5419 {
5420 	const uint64_t modifier = afb->base.modifier;
5421 	int ret = 0;
5422 
5423 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5424 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5425 
5426 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5427 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5428 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5429 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5430 
5431 		dcc->enable = 1;
5432 		dcc->meta_pitch = afb->base.pitches[1];
5433 		dcc->independent_64b_blks = independent_64b_blks;
5434 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5435 			if (independent_64b_blks && independent_128b_blks)
5436 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5437 			else if (independent_128b_blks)
5438 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5439 			else if (independent_64b_blks && !independent_128b_blks)
5440 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5441 			else
5442 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5443 		} else {
5444 			if (independent_64b_blks)
5445 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5446 			else
5447 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5448 		}
5449 
5450 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5451 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5452 	}
5453 
5454 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5455 	if (ret)
5456 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5457 
5458 	return ret;
5459 }
5460 
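/*
 * Fill the DC surface size, pitch and address info for a framebuffer. RGB
 * formats use a single graphics address; video formats get separate luma and
 * chroma (semi-planar) addresses. Tiling and DCC are then derived from the
 * modifier on GFX9+ or from the legacy tiling flags on older ASICs.
 */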
5461 static int
5462 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5463 			     const struct amdgpu_framebuffer *afb,
5464 			     const enum surface_pixel_format format,
5465 			     const enum dc_rotation_angle rotation,
5466 			     const uint64_t tiling_flags,
5467 			     union dc_tiling_info *tiling_info,
5468 			     struct plane_size *plane_size,
5469 			     struct dc_plane_dcc_param *dcc,
5470 			     struct dc_plane_address *address,
5471 			     bool tmz_surface,
5472 			     bool force_disable_dcc)
5473 {
5474 	const struct drm_framebuffer *fb = &afb->base;
5475 	int ret;
5476 
5477 	memset(tiling_info, 0, sizeof(*tiling_info));
5478 	memset(plane_size, 0, sizeof(*plane_size));
5479 	memset(dcc, 0, sizeof(*dcc));
5480 	memset(address, 0, sizeof(*address));
5481 
5482 	address->tmz_surface = tmz_surface;
5483 
5484 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5485 		uint64_t addr = afb->address + fb->offsets[0];
5486 
5487 		plane_size->surface_size.x = 0;
5488 		plane_size->surface_size.y = 0;
5489 		plane_size->surface_size.width = fb->width;
5490 		plane_size->surface_size.height = fb->height;
5491 		plane_size->surface_pitch =
5492 			fb->pitches[0] / fb->format->cpp[0];
5493 
5494 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5495 		address->grph.addr.low_part = lower_32_bits(addr);
5496 		address->grph.addr.high_part = upper_32_bits(addr);
5497 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5498 		uint64_t luma_addr = afb->address + fb->offsets[0];
5499 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5500 
5501 		plane_size->surface_size.x = 0;
5502 		plane_size->surface_size.y = 0;
5503 		plane_size->surface_size.width = fb->width;
5504 		plane_size->surface_size.height = fb->height;
5505 		plane_size->surface_pitch =
5506 			fb->pitches[0] / fb->format->cpp[0];
5507 
5508 		plane_size->chroma_size.x = 0;
5509 		plane_size->chroma_size.y = 0;
5510 		/* TODO: set these based on surface format */
5511 		plane_size->chroma_size.width = fb->width / 2;
5512 		plane_size->chroma_size.height = fb->height / 2;
5513 
5514 		plane_size->chroma_pitch =
5515 			fb->pitches[1] / fb->format->cpp[1];
5516 
5517 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5518 		address->video_progressive.luma_addr.low_part =
5519 			lower_32_bits(luma_addr);
5520 		address->video_progressive.luma_addr.high_part =
5521 			upper_32_bits(luma_addr);
5522 		address->video_progressive.chroma_addr.low_part =
5523 			lower_32_bits(chroma_addr);
5524 		address->video_progressive.chroma_addr.high_part =
5525 			upper_32_bits(chroma_addr);
5526 	}
5527 
5528 	if (adev->family >= AMDGPU_FAMILY_AI) {
5529 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5530 								rotation, plane_size,
5531 								tiling_info, dcc,
5532 								address,
5533 								force_disable_dcc);
5534 		if (ret)
5535 			return ret;
5536 	} else {
5537 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5538 	}
5539 
5540 	return 0;
5541 }
5542 
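/*
 * Derive the DC blending attributes from the DRM plane state: per-pixel alpha
 * is only honoured for overlay planes with an alpha-capable format, the blend
 * mode selects pre-multiplied vs. coverage alpha, and the 16-bit DRM plane
 * alpha is reduced to the 8-bit global alpha value.
 */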
5543 static void
5544 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5545 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5546 			       bool *global_alpha, int *global_alpha_value)
5547 {
5548 	*per_pixel_alpha = false;
5549 	*pre_multiplied_alpha = true;
5550 	*global_alpha = false;
5551 	*global_alpha_value = 0xff;
5552 
5553 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5554 		return;
5555 
5556 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5557 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5558 		static const uint32_t alpha_formats[] = {
5559 			DRM_FORMAT_ARGB8888,
5560 			DRM_FORMAT_RGBA8888,
5561 			DRM_FORMAT_ABGR8888,
5562 		};
5563 		uint32_t format = plane_state->fb->format->format;
5564 		unsigned int i;
5565 
5566 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5567 			if (format == alpha_formats[i]) {
5568 				*per_pixel_alpha = true;
5569 				break;
5570 			}
5571 		}
5572 
5573 		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5574 			*pre_multiplied_alpha = false;
5575 	}
5576 
5577 	if (plane_state->alpha < 0xffff) {
5578 		*global_alpha = true;
5579 		*global_alpha_value = plane_state->alpha >> 8;
5580 	}
5581 }
5582 
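/*
 * Map the DRM YCbCr encoding and range properties to a DC color space. RGB
 * surfaces always use sRGB; BT.2020 is only supported in full range.
 */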
5583 static int
5584 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5585 			    const enum surface_pixel_format format,
5586 			    enum dc_color_space *color_space)
5587 {
5588 	bool full_range;
5589 
5590 	*color_space = COLOR_SPACE_SRGB;
5591 
5592 	/* DRM color properties only affect non-RGB formats. */
5593 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5594 		return 0;
5595 
5596 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5597 
5598 	switch (plane_state->color_encoding) {
5599 	case DRM_COLOR_YCBCR_BT601:
5600 		if (full_range)
5601 			*color_space = COLOR_SPACE_YCBCR601;
5602 		else
5603 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5604 		break;
5605 
5606 	case DRM_COLOR_YCBCR_BT709:
5607 		if (full_range)
5608 			*color_space = COLOR_SPACE_YCBCR709;
5609 		else
5610 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5611 		break;
5612 
5613 	case DRM_COLOR_YCBCR_BT2020:
5614 		if (full_range)
5615 			*color_space = COLOR_SPACE_2020_YCBCR;
5616 		else
5617 			return -EINVAL;
5618 		break;
5619 
5620 	default:
5621 		return -EINVAL;
5622 	}
5623 
5624 	return 0;
5625 }
5626 
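/*
 * Translate the DRM plane state into a DC plane_info: surface pixel format,
 * rotation, buffer/tiling/DCC attributes and blending parameters.
 */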
5627 static int
5628 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5629 			    const struct drm_plane_state *plane_state,
5630 			    const uint64_t tiling_flags,
5631 			    struct dc_plane_info *plane_info,
5632 			    struct dc_plane_address *address,
5633 			    bool tmz_surface,
5634 			    bool force_disable_dcc)
5635 {
5636 	const struct drm_framebuffer *fb = plane_state->fb;
5637 	const struct amdgpu_framebuffer *afb =
5638 		to_amdgpu_framebuffer(plane_state->fb);
5639 	int ret;
5640 
5641 	memset(plane_info, 0, sizeof(*plane_info));
5642 
5643 	switch (fb->format->format) {
5644 	case DRM_FORMAT_C8:
5645 		plane_info->format =
5646 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5647 		break;
5648 	case DRM_FORMAT_RGB565:
5649 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5650 		break;
5651 	case DRM_FORMAT_XRGB8888:
5652 	case DRM_FORMAT_ARGB8888:
5653 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5654 		break;
5655 	case DRM_FORMAT_XRGB2101010:
5656 	case DRM_FORMAT_ARGB2101010:
5657 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5658 		break;
5659 	case DRM_FORMAT_XBGR2101010:
5660 	case DRM_FORMAT_ABGR2101010:
5661 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5662 		break;
5663 	case DRM_FORMAT_XBGR8888:
5664 	case DRM_FORMAT_ABGR8888:
5665 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5666 		break;
5667 	case DRM_FORMAT_NV21:
5668 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5669 		break;
5670 	case DRM_FORMAT_NV12:
5671 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5672 		break;
5673 	case DRM_FORMAT_P010:
5674 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5675 		break;
5676 	case DRM_FORMAT_XRGB16161616F:
5677 	case DRM_FORMAT_ARGB16161616F:
5678 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5679 		break;
5680 	case DRM_FORMAT_XBGR16161616F:
5681 	case DRM_FORMAT_ABGR16161616F:
5682 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5683 		break;
5684 	case DRM_FORMAT_XRGB16161616:
5685 	case DRM_FORMAT_ARGB16161616:
5686 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5687 		break;
5688 	case DRM_FORMAT_XBGR16161616:
5689 	case DRM_FORMAT_ABGR16161616:
5690 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5691 		break;
5692 	default:
5693 		DRM_ERROR(
5694 			"Unsupported screen format %p4cc\n",
5695 			&fb->format->format);
5696 		return -EINVAL;
5697 	}
5698 
5699 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5700 	case DRM_MODE_ROTATE_0:
5701 		plane_info->rotation = ROTATION_ANGLE_0;
5702 		break;
5703 	case DRM_MODE_ROTATE_90:
5704 		plane_info->rotation = ROTATION_ANGLE_90;
5705 		break;
5706 	case DRM_MODE_ROTATE_180:
5707 		plane_info->rotation = ROTATION_ANGLE_180;
5708 		break;
5709 	case DRM_MODE_ROTATE_270:
5710 		plane_info->rotation = ROTATION_ANGLE_270;
5711 		break;
5712 	default:
5713 		plane_info->rotation = ROTATION_ANGLE_0;
5714 		break;
5715 	}
5716 
5717 	plane_info->visible = true;
5718 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5719 
5720 	plane_info->layer_index = 0;
5721 
5722 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5723 					  &plane_info->color_space);
5724 	if (ret)
5725 		return ret;
5726 
5727 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5728 					   plane_info->rotation, tiling_flags,
5729 					   &plane_info->tiling_info,
5730 					   &plane_info->plane_size,
5731 					   &plane_info->dcc, address, tmz_surface,
5732 					   force_disable_dcc);
5733 	if (ret)
5734 		return ret;
5735 
5736 	fill_blending_from_plane_state(
5737 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5738 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5739 
5740 	return 0;
5741 }
5742 
5743 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5744 				    struct dc_plane_state *dc_plane_state,
5745 				    struct drm_plane_state *plane_state,
5746 				    struct drm_crtc_state *crtc_state)
5747 {
5748 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5749 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5750 	struct dc_scaling_info scaling_info;
5751 	struct dc_plane_info plane_info;
5752 	int ret;
5753 	bool force_disable_dcc = false;
5754 
5755 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5756 	if (ret)
5757 		return ret;
5758 
5759 	dc_plane_state->src_rect = scaling_info.src_rect;
5760 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5761 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5762 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5763 
5764 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5765 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5766 					  afb->tiling_flags,
5767 					  &plane_info,
5768 					  &dc_plane_state->address,
5769 					  afb->tmz_surface,
5770 					  force_disable_dcc);
5771 	if (ret)
5772 		return ret;
5773 
5774 	dc_plane_state->format = plane_info.format;
5775 	dc_plane_state->color_space = plane_info.color_space;
5777 	dc_plane_state->plane_size = plane_info.plane_size;
5778 	dc_plane_state->rotation = plane_info.rotation;
5779 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5780 	dc_plane_state->stereo_format = plane_info.stereo_format;
5781 	dc_plane_state->tiling_info = plane_info.tiling_info;
5782 	dc_plane_state->visible = plane_info.visible;
5783 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5784 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5785 	dc_plane_state->global_alpha = plane_info.global_alpha;
5786 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5787 	dc_plane_state->dcc = plane_info.dcc;
5788 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5789 	dc_plane_state->flip_int_enabled = true;
5790 
5791 	/*
5792 	 * Always set input transfer function, since plane state is refreshed
5793 	 * every time.
5794 	 */
5795 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5796 	if (ret)
5797 		return ret;
5798 
5799 	return 0;
5800 }
5801 
5802 /**
5803  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5804  *
5805  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5806  *         remote fb
5807  * @old_plane_state: Old state of @plane
5808  * @new_plane_state: New state of @plane
5809  * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5811  *
5812  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5813  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5814  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5815  * amdgpu_dm's.
5816  *
5817  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5818  * plane with regions that require flushing to the eDP remote buffer. In
5819  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5820  * implicitly provide damage clips without any client support via the plane
5821  * bounds.
5822  *
 * Today, amdgpu_dm only supports the MPO and cursor use cases.
5824  *
5825  * TODO: Also enable for FB_DAMAGE_CLIPS
5826  */
5827 static void fill_dc_dirty_rects(struct drm_plane *plane,
5828 				struct drm_plane_state *old_plane_state,
5829 				struct drm_plane_state *new_plane_state,
5830 				struct drm_crtc_state *crtc_state,
5831 				struct dc_flip_addrs *flip_addrs)
5832 {
5833 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5834 	struct rect *dirty_rects = flip_addrs->dirty_rects;
5835 	uint32_t num_clips;
5836 	bool bb_changed;
5837 	bool fb_changed;
5838 	uint32_t i = 0;
5839 
5840 	flip_addrs->dirty_rect_count = 0;
5841 
5842 	/*
	 * Cursor plane has its own dirty rect update interface. See
5844 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5845 	 */
5846 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
5847 		return;
5848 
5849 	/*
	 * Today, we only consider the MPO use case for PSR SU. If MPO is not
	 * requested and there is a plane update, do a full frame update (FFU).
5852 	 */
5853 	if (!dm_crtc_state->mpo_requested) {
5854 		dirty_rects[0].x = 0;
5855 		dirty_rects[0].y = 0;
5856 		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5857 		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5858 		flip_addrs->dirty_rect_count = 1;
5859 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5860 				 new_plane_state->plane->base.id,
5861 				 dm_crtc_state->base.mode.crtc_hdisplay,
5862 				 dm_crtc_state->base.mode.crtc_vdisplay);
5863 		return;
5864 	}
5865 
5866 	/*
5867 	 * MPO is requested. Add entire plane bounding box to dirty rects if
5868 	 * flipped to or damaged.
5869 	 *
5870 	 * If plane is moved or resized, also add old bounding box to dirty
5871 	 * rects.
5872 	 */
5873 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5874 	fb_changed = old_plane_state->fb->base.id !=
5875 		     new_plane_state->fb->base.id;
5876 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5877 		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
5878 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
5879 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
5880 
5881 	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5882 			 new_plane_state->plane->base.id,
5883 			 bb_changed, fb_changed, num_clips);
5884 
5885 	if (num_clips || fb_changed || bb_changed) {
5886 		dirty_rects[i].x = new_plane_state->crtc_x;
5887 		dirty_rects[i].y = new_plane_state->crtc_y;
5888 		dirty_rects[i].width = new_plane_state->crtc_w;
5889 		dirty_rects[i].height = new_plane_state->crtc_h;
5890 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5891 				 new_plane_state->plane->base.id,
5892 				 dirty_rects[i].x, dirty_rects[i].y,
5893 				 dirty_rects[i].width, dirty_rects[i].height);
5894 		i += 1;
5895 	}
5896 
5897 	/* Add old plane bounding-box if plane is moved or resized */
5898 	if (bb_changed) {
5899 		dirty_rects[i].x = old_plane_state->crtc_x;
5900 		dirty_rects[i].y = old_plane_state->crtc_y;
5901 		dirty_rects[i].width = old_plane_state->crtc_w;
5902 		dirty_rects[i].height = old_plane_state->crtc_h;
5903 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5904 				old_plane_state->plane->base.id,
5905 				dirty_rects[i].x, dirty_rects[i].y,
5906 				dirty_rects[i].width, dirty_rects[i].height);
5907 		i += 1;
5908 	}
5909 
5910 	flip_addrs->dirty_rect_count = i;
5911 }
5912 
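/*
 * Program the stream source and destination rectangles based on the requested
 * mode, the connector's RMX scaling property and any underscan borders.
 */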
5913 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5914 					   const struct dm_connector_state *dm_state,
5915 					   struct dc_stream_state *stream)
5916 {
5917 	enum amdgpu_rmx_type rmx_type;
5918 
	struct rect src = { 0 }; /* viewport in composition space */
5920 	struct rect dst = { 0 }; /* stream addressable area */
5921 
5922 	/* no mode. nothing to be done */
5923 	if (!mode)
5924 		return;
5925 
5926 	/* Full screen scaling by default */
5927 	src.width = mode->hdisplay;
5928 	src.height = mode->vdisplay;
5929 	dst.width = stream->timing.h_addressable;
5930 	dst.height = stream->timing.v_addressable;
5931 
5932 	if (dm_state) {
5933 		rmx_type = dm_state->scaling;
5934 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5935 			if (src.width * dst.height <
5936 					src.height * dst.width) {
5937 				/* height needs less upscaling/more downscaling */
5938 				dst.width = src.width *
5939 						dst.height / src.height;
5940 			} else {
5941 				/* width needs less upscaling/more downscaling */
5942 				dst.height = src.height *
5943 						dst.width / src.width;
5944 			}
5945 		} else if (rmx_type == RMX_CENTER) {
5946 			dst = src;
5947 		}
5948 
5949 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5950 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5951 
5952 		if (dm_state->underscan_enable) {
5953 			dst.x += dm_state->underscan_hborder / 2;
5954 			dst.y += dm_state->underscan_vborder / 2;
5955 			dst.width -= dm_state->underscan_hborder;
5956 			dst.height -= dm_state->underscan_vborder;
5957 		}
5958 	}
5959 
5960 	stream->src = src;
5961 	stream->dst = dst;
5962 
5963 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5964 		      dst.x, dst.y, dst.width, dst.height);
5965 
5966 }
5967 
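/*
 * Pick the stream color depth from the sink's EDID capabilities (using the
 * HDMI HF-VSDB deep color bits for YCbCr 4:2:0), capped by the connector's
 * requested max bpc and rounded down to an even value.
 */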
5968 static enum dc_color_depth
5969 convert_color_depth_from_display_info(const struct drm_connector *connector,
5970 				      bool is_y420, int requested_bpc)
5971 {
5972 	uint8_t bpc;
5973 
5974 	if (is_y420) {
5975 		bpc = 8;
5976 
5977 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5978 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5979 			bpc = 16;
5980 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5981 			bpc = 12;
5982 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5983 			bpc = 10;
5984 	} else {
5985 		bpc = (uint8_t)connector->display_info.bpc;
5986 		/* Assume 8 bpc by default if no bpc is specified. */
5987 		bpc = bpc ? bpc : 8;
5988 	}
5989 
5990 	if (requested_bpc > 0) {
5991 		/*
5992 		 * Cap display bpc based on the user requested value.
5993 		 *
		 * The value for state->max_bpc may not be correctly updated
5995 		 * depending on when the connector gets added to the state
5996 		 * or if this was called outside of atomic check, so it
5997 		 * can't be used directly.
5998 		 */
5999 		bpc = min_t(u8, bpc, requested_bpc);
6000 
6001 		/* Round down to the nearest even number. */
6002 		bpc = bpc - (bpc & 1);
6003 	}
6004 
6005 	switch (bpc) {
6006 	case 0:
6007 		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing
6011 		 */
6012 		return COLOR_DEPTH_888;
6013 	case 6:
6014 		return COLOR_DEPTH_666;
6015 	case 8:
6016 		return COLOR_DEPTH_888;
6017 	case 10:
6018 		return COLOR_DEPTH_101010;
6019 	case 12:
6020 		return COLOR_DEPTH_121212;
6021 	case 14:
6022 		return COLOR_DEPTH_141414;
6023 	case 16:
6024 		return COLOR_DEPTH_161616;
6025 	default:
6026 		return COLOR_DEPTH_UNDEFINED;
6027 	}
6028 }
6029 
6030 static enum dc_aspect_ratio
6031 get_aspect_ratio(const struct drm_display_mode *mode_in)
6032 {
6033 	/* 1-1 mapping, since both enums follow the HDMI spec. */
6034 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
6035 }
6036 
6037 static enum dc_color_space
6038 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
6039 {
6040 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
6041 
6042 	switch (dc_crtc_timing->pixel_encoding)	{
6043 	case PIXEL_ENCODING_YCBCR422:
6044 	case PIXEL_ENCODING_YCBCR444:
6045 	case PIXEL_ENCODING_YCBCR420:
6046 	{
6047 		/*
		 * 27.03 MHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; use YCbCr709 above and
		 * YCbCr601 below that pixel clock.
6051 		 */
6052 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
6053 			if (dc_crtc_timing->flags.Y_ONLY)
6054 				color_space =
6055 					COLOR_SPACE_YCBCR709_LIMITED;
6056 			else
6057 				color_space = COLOR_SPACE_YCBCR709;
6058 		} else {
6059 			if (dc_crtc_timing->flags.Y_ONLY)
6060 				color_space =
6061 					COLOR_SPACE_YCBCR601_LIMITED;
6062 			else
6063 				color_space = COLOR_SPACE_YCBCR601;
6064 		}
6065 
6066 	}
6067 	break;
6068 	case PIXEL_ENCODING_RGB:
6069 		color_space = COLOR_SPACE_SRGB;
6070 		break;
6071 
6072 	default:
6073 		WARN_ON(1);
6074 		break;
6075 	}
6076 
6077 	return color_space;
6078 }
6079 
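/*
 * Lower the colour depth step by step until the depth-adjusted pixel clock
 * fits within the sink's maximum TMDS clock. Returns false if no valid HDMI
 * depth fits.
 */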
6080 static bool adjust_colour_depth_from_display_info(
6081 	struct dc_crtc_timing *timing_out,
6082 	const struct drm_display_info *info)
6083 {
6084 	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
6087 		normalized_clk = timing_out->pix_clk_100hz / 10;
6088 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6089 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6090 			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
6092 		switch (depth) {
6093 		case COLOR_DEPTH_888:
6094 			break;
6095 		case COLOR_DEPTH_101010:
6096 			normalized_clk = (normalized_clk * 30) / 24;
6097 			break;
6098 		case COLOR_DEPTH_121212:
6099 			normalized_clk = (normalized_clk * 36) / 24;
6100 			break;
6101 		case COLOR_DEPTH_161616:
6102 			normalized_clk = (normalized_clk * 48) / 24;
6103 			break;
6104 		default:
6105 			/* The above depths are the only ones valid for HDMI. */
6106 			return false;
6107 		}
6108 		if (normalized_clk <= info->max_tmds_clock) {
6109 			timing_out->display_color_depth = depth;
6110 			return true;
6111 		}
6112 	} while (--depth > COLOR_DEPTH_666);
6113 	return false;
6114 }
6115 
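/*
 * Translate a DRM display mode and connector info into the DC stream timing:
 * pixel encoding, color depth, VIC/HDMI-VIC, sync polarities and the full
 * timing. FreeSync video modes use the mode's base timing rather than the
 * CRTC-adjusted timing. HDMI sinks may be dropped to YCbCr 4:2:0 when the
 * TMDS clock limit would otherwise be exceeded.
 */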
6116 static void fill_stream_properties_from_drm_display_mode(
6117 	struct dc_stream_state *stream,
6118 	const struct drm_display_mode *mode_in,
6119 	const struct drm_connector *connector,
6120 	const struct drm_connector_state *connector_state,
6121 	const struct dc_stream_state *old_stream,
6122 	int requested_bpc)
6123 {
6124 	struct dc_crtc_timing *timing_out = &stream->timing;
6125 	const struct drm_display_info *info = &connector->display_info;
6126 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6127 	struct hdmi_vendor_infoframe hv_frame;
6128 	struct hdmi_avi_infoframe avi_frame;
6129 
6130 	memset(&hv_frame, 0, sizeof(hv_frame));
6131 	memset(&avi_frame, 0, sizeof(avi_frame));
6132 
6133 	timing_out->h_border_left = 0;
6134 	timing_out->h_border_right = 0;
6135 	timing_out->v_border_top = 0;
6136 	timing_out->v_border_bottom = 0;
6137 	/* TODO: un-hardcode */
6138 	if (drm_mode_is_420_only(info, mode_in)
6139 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6140 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6141 	else if (drm_mode_is_420_also(info, mode_in)
6142 			&& aconnector->force_yuv420_output)
6143 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6144 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6145 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6146 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6147 	else
6148 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6149 
6150 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6151 	timing_out->display_color_depth = convert_color_depth_from_display_info(
6152 		connector,
6153 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6154 		requested_bpc);
6155 	timing_out->scan_type = SCANNING_TYPE_NODATA;
6156 	timing_out->hdmi_vic = 0;
6157 
	if (old_stream) {
6159 		timing_out->vic = old_stream->timing.vic;
6160 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6161 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6162 	} else {
6163 		timing_out->vic = drm_match_cea_mode(mode_in);
6164 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6165 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6166 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6167 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6168 	}
6169 
6170 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6171 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6172 		timing_out->vic = avi_frame.video_code;
6173 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6174 		timing_out->hdmi_vic = hv_frame.vic;
6175 	}
6176 
6177 	if (is_freesync_video_mode(mode_in, aconnector)) {
6178 		timing_out->h_addressable = mode_in->hdisplay;
6179 		timing_out->h_total = mode_in->htotal;
6180 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6181 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6182 		timing_out->v_total = mode_in->vtotal;
6183 		timing_out->v_addressable = mode_in->vdisplay;
6184 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6185 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6186 		timing_out->pix_clk_100hz = mode_in->clock * 10;
6187 	} else {
6188 		timing_out->h_addressable = mode_in->crtc_hdisplay;
6189 		timing_out->h_total = mode_in->crtc_htotal;
6190 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6191 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6192 		timing_out->v_total = mode_in->crtc_vtotal;
6193 		timing_out->v_addressable = mode_in->crtc_vdisplay;
6194 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6195 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6196 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6197 	}
6198 
6199 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6200 
6201 	stream->output_color_space = get_output_color_space(timing_out);
6202 
6203 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6204 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6205 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6206 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6207 		    drm_mode_is_420_also(info, mode_in) &&
6208 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6209 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6210 			adjust_colour_depth_from_display_info(timing_out, info);
6211 		}
6212 	}
6213 }
6214 
6215 static void fill_audio_info(struct audio_info *audio_info,
6216 			    const struct drm_connector *drm_connector,
6217 			    const struct dc_sink *dc_sink)
6218 {
6219 	int i = 0;
6220 	int cea_revision = 0;
6221 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6222 
6223 	audio_info->manufacture_id = edid_caps->manufacturer_id;
6224 	audio_info->product_id = edid_caps->product_id;
6225 
6226 	cea_revision = drm_connector->display_info.cea_rev;
6227 
6228 	strscpy(audio_info->display_name,
6229 		edid_caps->display_name,
6230 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6231 
6232 	if (cea_revision >= 3) {
6233 		audio_info->mode_count = edid_caps->audio_mode_count;
6234 
6235 		for (i = 0; i < audio_info->mode_count; ++i) {
6236 			audio_info->modes[i].format_code =
6237 					(enum audio_format_code)
6238 					(edid_caps->audio_modes[i].format_code);
6239 			audio_info->modes[i].channel_count =
6240 					edid_caps->audio_modes[i].channel_count;
6241 			audio_info->modes[i].sample_rates.all =
6242 					edid_caps->audio_modes[i].sample_rate;
6243 			audio_info->modes[i].sample_size =
6244 					edid_caps->audio_modes[i].sample_size;
6245 		}
6246 	}
6247 
6248 	audio_info->flags.all = edid_caps->speaker_flags;
6249 
	/* TODO: We only check the progressive mode; check the interlaced mode too. */
6251 	if (drm_connector->latency_present[0]) {
6252 		audio_info->video_latency = drm_connector->video_latency[0];
6253 		audio_info->audio_latency = drm_connector->audio_latency[0];
6254 	}
6255 
6256 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6257 
6258 }
6259 
6260 static void
6261 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6262 				      struct drm_display_mode *dst_mode)
6263 {
6264 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6265 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6266 	dst_mode->crtc_clock = src_mode->crtc_clock;
6267 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6268 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6269 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6270 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6271 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6272 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6273 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6274 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6275 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6276 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6277 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6278 }
6279 
6280 static void
6281 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6282 					const struct drm_display_mode *native_mode,
6283 					bool scale_enabled)
6284 {
6285 	if (scale_enabled) {
6286 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6287 	} else if (native_mode->clock == drm_mode->clock &&
6288 			native_mode->htotal == drm_mode->htotal &&
6289 			native_mode->vtotal == drm_mode->vtotal) {
6290 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6291 	} else {
		/* no scaling nor amdgpu-inserted mode, no need to patch */
6293 	}
6294 }
6295 
6296 static struct dc_sink *
6297 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6298 {
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
6302 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6303 
6304 	sink = dc_sink_create(&sink_init_data);
6305 	if (!sink) {
6306 		DRM_ERROR("Failed to create sink!\n");
6307 		return NULL;
6308 	}
6309 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6310 
6311 	return sink;
6312 }
6313 
6314 static void set_multisync_trigger_params(
6315 		struct dc_stream_state *stream)
6316 {
6317 	struct dc_stream_state *master = NULL;
6318 
6319 	if (stream->triggered_crtc_reset.enabled) {
6320 		master = stream->triggered_crtc_reset.event_source;
6321 		stream->triggered_crtc_reset.event =
6322 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6323 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6324 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6325 	}
6326 }
6327 
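/*
 * Pick the multisync-enabled stream with the highest refresh rate as the
 * master and make it the CRTC-reset event source for every stream in the set.
 */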
6328 static void set_master_stream(struct dc_stream_state *stream_set[],
6329 			      int stream_count)
6330 {
6331 	int j, highest_rfr = 0, master_stream = 0;
6332 
6333 	for (j = 0;  j < stream_count; j++) {
6334 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6335 			int refresh_rate = 0;
6336 
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6339 			if (refresh_rate > highest_rfr) {
6340 				highest_rfr = refresh_rate;
6341 				master_stream = j;
6342 			}
6343 		}
6344 	}
6345 	for (j = 0;  j < stream_count; j++) {
6346 		if (stream_set[j])
6347 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6348 	}
6349 }
6350 
6351 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6352 {
6353 	int i = 0;
6354 	struct dc_stream_state *stream;
6355 
6356 	if (context->stream_count < 2)
6357 		return;
6358 	for (i = 0; i < context->stream_count ; i++) {
6359 		if (!context->streams[i])
6360 			continue;
6361 		/*
6362 		 * TODO: add a function to read AMD VSDB bits and set
6363 		 * crtc_sync_master.multi_sync_enabled flag
6364 		 * For now it's set to false
6365 		 */
6366 	}
6367 
6368 	set_master_stream(context->streams, context->stream_count);
6369 
6370 	for (i = 0; i < context->stream_count ; i++) {
6371 		stream = context->streams[i];
6372 
6373 		if (!stream)
6374 			continue;
6375 
6376 		set_multisync_trigger_params(stream);
6377 	}
6378 }
6379 
6380 #if defined(CONFIG_DRM_AMD_DC_DCN)
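/*
 * Read and parse the sink's DSC decoder capabilities from the DPCD for DP and
 * eDP sinks (native or behind a DP-to-HDMI converter).
 */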
6381 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6382 							struct dc_sink *sink, struct dc_stream_state *stream,
6383 							struct dsc_dec_dpcd_caps *dsc_caps)
6384 {
6385 	stream->timing.flags.DSC = 0;
6386 	dsc_caps->is_dsc_supported = false;
6387 
6388 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6389 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6390 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6391 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6392 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6393 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6394 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6395 				dsc_caps);
6396 	}
6397 }
6398 
6399 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6400 				    struct dc_sink *sink, struct dc_stream_state *stream,
6401 				    struct dsc_dec_dpcd_caps *dsc_caps,
6402 				    uint32_t max_dsc_target_bpp_limit_override)
6403 {
6404 	const struct dc_link_settings *verified_link_cap = NULL;
6405 	uint32_t link_bw_in_kbps;
6406 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6407 	struct dc *dc = sink->ctx->dc;
6408 	struct dc_dsc_bw_range bw_range = {0};
6409 	struct dc_dsc_config dsc_cfg = {0};
6410 
6411 	verified_link_cap = dc_link_get_link_cap(stream->link);
6412 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6413 	edp_min_bpp_x16 = 8 * 16;
6414 	edp_max_bpp_x16 = 8 * 16;
6415 
6416 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6417 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6418 
6419 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6420 		edp_min_bpp_x16 = edp_max_bpp_x16;
6421 
6422 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6423 				dc->debug.dsc_min_slice_height_override,
6424 				edp_min_bpp_x16, edp_max_bpp_x16,
6425 				dsc_caps,
6426 				&stream->timing,
6427 				&bw_range)) {
6428 
6429 		if (bw_range.max_kbps < link_bw_in_kbps) {
6430 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6431 					dsc_caps,
6432 					dc->debug.dsc_min_slice_height_override,
6433 					max_dsc_target_bpp_limit_override,
6434 					0,
6435 					&stream->timing,
6436 					&dsc_cfg)) {
6437 				stream->timing.dsc_cfg = dsc_cfg;
6438 				stream->timing.flags.DSC = 1;
6439 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6440 			}
6441 			return;
6442 		}
6443 	}
6444 
6445 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6446 				dsc_caps,
6447 				dc->debug.dsc_min_slice_height_override,
6448 				max_dsc_target_bpp_limit_override,
6449 				link_bw_in_kbps,
6450 				&stream->timing,
6451 				&dsc_cfg)) {
6452 		stream->timing.dsc_cfg = dsc_cfg;
6453 		stream->timing.flags.DSC = 1;
6454 	}
6455 }
6456 
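/*
 * Decide whether to enable DSC for the stream: eDP panels use the dedicated
 * eDP policy, DP SST sinks enable DSC when a config fits the link bandwidth,
 * and DP-to-HDMI PCONs enable it when the timing exceeds the link bandwidth.
 * debugfs overrides (force enable, slice counts, bpp) are applied last.
 */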
6457 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6458 										struct dc_sink *sink, struct dc_stream_state *stream,
6459 										struct dsc_dec_dpcd_caps *dsc_caps)
6460 {
6461 	struct drm_connector *drm_connector = &aconnector->base;
6462 	uint32_t link_bandwidth_kbps;
6463 	uint32_t max_dsc_target_bpp_limit_override = 0;
6464 	struct dc *dc = sink->ctx->dc;
6465 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6466 	uint32_t dsc_max_supported_bw_in_kbps;
6467 
6468 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6469 							dc_link_get_link_cap(aconnector->dc_link));
6470 
6471 	if (stream->link && stream->link->local_sink)
6472 		max_dsc_target_bpp_limit_override =
6473 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6474 
6475 	/* Set DSC policy according to dsc_clock_en */
6476 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6477 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6478 
6479 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6480 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6481 
6482 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6483 
6484 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6485 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6486 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6487 						dsc_caps,
6488 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6489 						max_dsc_target_bpp_limit_override,
6490 						link_bandwidth_kbps,
6491 						&stream->timing,
6492 						&stream->timing.dsc_cfg)) {
6493 				stream->timing.flags.DSC = 1;
6494 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6495 								 __func__, drm_connector->name);
6496 			}
6497 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6498 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6499 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6500 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6501 
6502 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6503 					max_supported_bw_in_kbps > 0 &&
6504 					dsc_max_supported_bw_in_kbps > 0)
6505 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6506 						dsc_caps,
6507 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6508 						max_dsc_target_bpp_limit_override,
6509 						dsc_max_supported_bw_in_kbps,
6510 						&stream->timing,
6511 						&stream->timing.dsc_cfg)) {
6512 					stream->timing.flags.DSC = 1;
6513 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6514 									 __func__, drm_connector->name);
6515 				}
6516 		}
6517 	}
6518 
6519 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6520 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6521 		stream->timing.flags.DSC = 1;
6522 
6523 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6524 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6525 
6526 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6527 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6528 
6529 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6530 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6531 }
6532 #endif /* CONFIG_DRM_AMD_DC_DCN */
6533 
6534 /**
6535  * DOC: FreeSync Video
6536  *
6537  * When a userspace application wants to play a video, the content follows a
6538  * standard format definition that usually specifies the FPS for that format.
6539  * The below list illustrates some video format and the expected FPS,
6540  * respectively:
6541  *
6542  * - TV/NTSC (23.976 FPS)
6543  * - Cinema (24 FPS)
6544  * - TV/PAL (25 FPS)
6545  * - TV/NTSC (29.97 FPS)
6546  * - TV/NTSC (30 FPS)
6547  * - Cinema HFR (48 FPS)
6548  * - TV/PAL (50 FPS)
6549  * - Commonly used (60 FPS)
6550  * - Multiples of 24 (48,72,96,120 FPS)
6551  *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
6555  * rate. Such a switch will happen seamlessly, without screen blanking or
6556  * reprogramming of the output in any other way. If the userspace requests a
6557  * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid a blink during the
6559  * transition. For example, the video player can change the modesetting from
6560  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6561  * causing any display blink. This same concept can be applied to a mode
6562  * setting change.
6563  */
6564 static struct drm_display_mode *
6565 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6566 			  bool use_probed_modes)
6567 {
6568 	struct drm_display_mode *m, *m_pref = NULL;
6569 	u16 current_refresh, highest_refresh;
6570 	struct list_head *list_head = use_probed_modes ?
6571 						    &aconnector->base.probed_modes :
6572 						    &aconnector->base.modes;
6573 
6574 	if (aconnector->freesync_vid_base.clock != 0)
6575 		return &aconnector->freesync_vid_base;
6576 
6577 	/* Find the preferred mode */
6578 	list_for_each_entry(m, list_head, head) {
6579 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6580 			m_pref = m;
6581 			break;
6582 		}
6583 	}
6584 
6585 	if (!m_pref) {
6586 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6587 		m_pref = list_first_entry_or_null(
6588 			&aconnector->base.modes, struct drm_display_mode, head);
6589 		if (!m_pref) {
6590 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6591 			return NULL;
6592 		}
6593 	}
6594 
6595 	highest_refresh = drm_mode_vrefresh(m_pref);
6596 
6597 	/*
6598 	 * Find the mode with highest refresh rate with same resolution.
6599 	 * For some monitors, preferred mode is not the mode with highest
6600 	 * supported refresh rate.
6601 	 */
6602 	list_for_each_entry(m, list_head, head) {
6603 		current_refresh  = drm_mode_vrefresh(m);
6604 
6605 		if (m->hdisplay == m_pref->hdisplay &&
6606 		    m->vdisplay == m_pref->vdisplay &&
6607 		    highest_refresh < current_refresh) {
6608 			highest_refresh = current_refresh;
6609 			m_pref = m;
6610 		}
6611 	}
6612 
6613 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6614 	return m_pref;
6615 }
6616 
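/*
 * Check whether @mode differs from the FreeSync video base mode of
 * @aconnector only by an extended front porch (a longer vertical blank),
 * which is what a seamless FreeSync refresh rate switch requires.
 */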
6617 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6618 				   struct amdgpu_dm_connector *aconnector)
6619 {
6620 	struct drm_display_mode *high_mode;
6621 	int timing_diff;
6622 
6623 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6624 	if (!high_mode || !mode)
6625 		return false;
6626 
6627 	timing_diff = high_mode->vtotal - mode->vtotal;
6628 
6629 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6630 	    high_mode->hdisplay != mode->hdisplay ||
6631 	    high_mode->vdisplay != mode->vdisplay ||
6632 	    high_mode->hsync_start != mode->hsync_start ||
6633 	    high_mode->hsync_end != mode->hsync_end ||
6634 	    high_mode->htotal != mode->htotal ||
6635 	    high_mode->hskew != mode->hskew ||
6636 	    high_mode->vscan != mode->vscan ||
6637 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6638 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6639 		return false;
6640 	else
6641 		return true;
6642 }
6643 
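/*
 * Build a dc_stream_state for @aconnector from @drm_mode, applying the
 * connector state (scaling, requested bpc), FreeSync video timings, the SST
 * DSC policy and the PSR/VSC infopacket setup. Returns NULL on failure; the
 * caller owns the reference on the returned stream.
 */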
6644 static struct dc_stream_state *
6645 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6646 		       const struct drm_display_mode *drm_mode,
6647 		       const struct dm_connector_state *dm_state,
6648 		       const struct dc_stream_state *old_stream,
6649 		       int requested_bpc)
6650 {
6651 	struct drm_display_mode *preferred_mode = NULL;
6652 	struct drm_connector *drm_connector;
6653 	const struct drm_connector_state *con_state =
6654 		dm_state ? &dm_state->base : NULL;
6655 	struct dc_stream_state *stream = NULL;
6656 	struct drm_display_mode mode = *drm_mode;
6657 	struct drm_display_mode saved_mode;
6658 	struct drm_display_mode *freesync_mode = NULL;
6659 	bool native_mode_found = false;
6660 	bool recalculate_timing = false;
6661 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6662 	int mode_refresh;
6663 	int preferred_refresh = 0;
6664 #if defined(CONFIG_DRM_AMD_DC_DCN)
6665 	struct dsc_dec_dpcd_caps dsc_caps;
6666 #endif
6667 	struct dc_sink *sink = NULL;
6668 
6669 	memset(&saved_mode, 0, sizeof(saved_mode));
6670 
6671 	if (aconnector == NULL) {
6672 		DRM_ERROR("aconnector is NULL!\n");
6673 		return stream;
6674 	}
6675 
6676 	drm_connector = &aconnector->base;
6677 
6678 	if (!aconnector->dc_sink) {
6679 		sink = create_fake_sink(aconnector);
6680 		if (!sink)
6681 			return stream;
6682 	} else {
6683 		sink = aconnector->dc_sink;
6684 		dc_sink_retain(sink);
6685 	}
6686 
6687 	stream = dc_create_stream_for_sink(sink);
6688 
6689 	if (stream == NULL) {
6690 		DRM_ERROR("Failed to create stream for sink!\n");
6691 		goto finish;
6692 	}
6693 
6694 	stream->dm_stream_context = aconnector;
6695 
6696 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6697 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6698 
6699 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6700 		/* Search for preferred mode */
6701 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6702 			native_mode_found = true;
6703 			break;
6704 		}
6705 	}
6706 	if (!native_mode_found)
6707 		preferred_mode = list_first_entry_or_null(
6708 				&aconnector->base.modes,
6709 				struct drm_display_mode,
6710 				head);
6711 
6712 	mode_refresh = drm_mode_vrefresh(&mode);
6713 
6714 	if (preferred_mode == NULL) {
6715 		/*
6716 		 * This may not be an error, the use case is when we have no
6717 		 * usermode calls to reset and set mode upon hotplug. In this
6718 		 * case, we call set mode ourselves to restore the previous mode
6719 		 * and the modelist may not be filled in in time.
6720 		 */
6721 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6722 	} else {
6723 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6724 		if (recalculate_timing) {
6725 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6726 			drm_mode_copy(&saved_mode, &mode);
6727 			drm_mode_copy(&mode, freesync_mode);
6728 		} else {
6729 			decide_crtc_timing_for_drm_display_mode(
6730 				&mode, preferred_mode, scale);
6731 
6732 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6733 		}
6734 	}
6735 
6736 	if (recalculate_timing)
6737 		drm_mode_set_crtcinfo(&saved_mode, 0);
6738 	else if (!dm_state)
6739 		drm_mode_set_crtcinfo(&mode, 0);
6740 
6741 	/*
6742 	 * If scaling is enabled and the refresh rate didn't change,
6743 	 * we copy the VIC and polarities of the old timings.
6744 	 */
6745 	if (!scale || mode_refresh != preferred_refresh)
6746 		fill_stream_properties_from_drm_display_mode(
6747 			stream, &mode, &aconnector->base, con_state, NULL,
6748 			requested_bpc);
6749 	else
6750 		fill_stream_properties_from_drm_display_mode(
6751 			stream, &mode, &aconnector->base, con_state, old_stream,
6752 			requested_bpc);
6753 
6754 #if defined(CONFIG_DRM_AMD_DC_DCN)
6755 	/* SST DSC determination policy */
6756 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6757 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6758 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6759 #endif
6760 
6761 	update_stream_scaling_settings(&mode, dm_state, stream);
6762 
6763 	fill_audio_info(
6764 		&stream->audio_info,
6765 		drm_connector,
6766 		sink);
6767 
6768 	update_stream_signal(stream, sink);
6769 
6770 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6771 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6772 
6773 	if (stream->link->psr_settings.psr_feature_enabled) {
6774 		//
6775 		// should decide stream support vsc sdp colorimetry capability
6776 		// before building vsc info packet
6777 		//
6778 		stream->use_vsc_sdp_for_colorimetry = false;
6779 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6780 			stream->use_vsc_sdp_for_colorimetry =
6781 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6782 		} else {
6783 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6784 				stream->use_vsc_sdp_for_colorimetry = true;
6785 		}
6786 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6787 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6788 
6789 	}
6790 finish:
6791 	dc_sink_release(sink);
6792 
6793 	return stream;
6794 }
6795 
6796 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6797 {
6798 	drm_crtc_cleanup(crtc);
6799 	kfree(crtc);
6800 }
6801 
6802 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6803 				  struct drm_crtc_state *state)
6804 {
6805 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6806 
6807 	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6808 	if (cur->stream)
6809 		dc_stream_release(cur->stream);
6810 
6811 
6812 	__drm_atomic_helper_crtc_destroy_state(state);
6813 
6814 
6815 	kfree(state);
6816 }
6817 
6818 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6819 {
6820 	struct dm_crtc_state *state;
6821 
6822 	if (crtc->state)
6823 		dm_crtc_destroy_state(crtc, crtc->state);
6824 
6825 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6826 	if (WARN_ON(!state))
6827 		return;
6828 
6829 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6830 }
6831 
6832 static struct drm_crtc_state *
6833 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6834 {
6835 	struct dm_crtc_state *state, *cur;
6836 
6837 	if (WARN_ON(!crtc->state))
6838 		return NULL;
6839 
6840 	cur = to_dm_crtc_state(crtc->state);
6841 
6842 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6843 	if (!state)
6844 		return NULL;
6845 
6846 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6847 
6848 	if (cur->stream) {
6849 		state->stream = cur->stream;
6850 		dc_stream_retain(state->stream);
6851 	}
6852 
6853 	state->active_planes = cur->active_planes;
6854 	state->vrr_infopacket = cur->vrr_infopacket;
6855 	state->abm_level = cur->abm_level;
6856 	state->vrr_supported = cur->vrr_supported;
6857 	state->freesync_config = cur->freesync_config;
6858 	state->cm_has_degamma = cur->cm_has_degamma;
6859 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6860 	state->mpo_requested = cur->mpo_requested;
6861 	/* TODO: Duplicate dc_stream once the stream object is flattened */
6862 
6863 	return &state->base;
6864 }
6865 
6866 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6867 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6868 {
6869 	crtc_debugfs_init(crtc);
6870 
6871 	return 0;
6872 }
6873 #endif
6874 
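/* Enable or disable the VUPDATE interrupt for the OTG instance backing @crtc. */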
6875 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6876 {
6877 	enum dc_irq_source irq_source;
6878 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6879 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6880 	int rc;
6881 
6882 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6883 
6884 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6885 
6886 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6887 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6888 	return rc;
6889 }
6890 
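/*
 * Enable or disable the VBLANK interrupt for @crtc. In VRR mode the VUPDATE
 * interrupt is toggled along with it, and the remaining work is handed off to
 * the vblank control workqueue.
 */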
6891 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6892 {
6893 	enum dc_irq_source irq_source;
6894 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6895 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6896 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6897 	struct amdgpu_display_manager *dm = &adev->dm;
6898 	struct vblank_control_work *work;
6899 	int rc = 0;
6900 
6901 	if (enable) {
6902 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6903 		if (amdgpu_dm_vrr_active(acrtc_state))
6904 			rc = dm_set_vupdate_irq(crtc, true);
6905 	} else {
6906 		/* vblank irq off -> vupdate irq off */
6907 		rc = dm_set_vupdate_irq(crtc, false);
6908 	}
6909 
6910 	if (rc)
6911 		return rc;
6912 
6913 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6914 
6915 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6916 		return -EBUSY;
6917 
6918 	if (amdgpu_in_reset(adev))
6919 		return 0;
6920 
6921 	if (dm->vblank_control_workqueue) {
6922 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6923 		if (!work)
6924 			return -ENOMEM;
6925 
6926 		INIT_WORK(&work->work, vblank_control_worker);
6927 		work->dm = dm;
6928 		work->acrtc = acrtc;
6929 		work->enable = enable;
6930 
6931 		if (acrtc_state->stream) {
6932 			dc_stream_retain(acrtc_state->stream);
6933 			work->stream = acrtc_state->stream;
6934 		}
6935 
6936 		queue_work(dm->vblank_control_workqueue, &work->work);
6937 	}
6938 
6939 	return 0;
6940 }
6941 
6942 static int dm_enable_vblank(struct drm_crtc *crtc)
6943 {
6944 	return dm_set_vblank(crtc, true);
6945 }
6946 
6947 static void dm_disable_vblank(struct drm_crtc *crtc)
6948 {
6949 	dm_set_vblank(crtc, false);
6950 }
6951 
6952 /* Only the options currently available for the driver are implemented */
6953 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6954 	.reset = dm_crtc_reset_state,
6955 	.destroy = amdgpu_dm_crtc_destroy,
6956 	.set_config = drm_atomic_helper_set_config,
6957 	.page_flip = drm_atomic_helper_page_flip,
6958 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6959 	.atomic_destroy_state = dm_crtc_destroy_state,
6960 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6961 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6962 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6963 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6964 	.enable_vblank = dm_enable_vblank,
6965 	.disable_vblank = dm_disable_vblank,
6966 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6967 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6968 	.late_register = amdgpu_dm_crtc_late_register,
6969 #endif
6970 };
6971 
6972 static enum drm_connector_status
6973 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6974 {
6975 	bool connected;
6976 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6977 
6978 	/*
6979 	 * Notes:
6980 	 * 1. This interface is NOT called in the context of the HPD irq.
6981 	 * 2. This interface *is called* in the context of a user-mode ioctl,
6982 	 *    which makes it a bad place for *any* MST-related activity.
6983 	 */
6984 
6985 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6986 	    !aconnector->fake_enable)
6987 		connected = (aconnector->dc_sink != NULL);
6988 	else
6989 		connected = (aconnector->base.force == DRM_FORCE_ON);
6990 
6991 	update_subconnector_property(aconnector);
6992 
6993 	return (connected ? connector_status_connected :
6994 			connector_status_disconnected);
6995 }
6996 
6997 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6998 					    struct drm_connector_state *connector_state,
6999 					    struct drm_property *property,
7000 					    uint64_t val)
7001 {
7002 	struct drm_device *dev = connector->dev;
7003 	struct amdgpu_device *adev = drm_to_adev(dev);
7004 	struct dm_connector_state *dm_old_state =
7005 		to_dm_connector_state(connector->state);
7006 	struct dm_connector_state *dm_new_state =
7007 		to_dm_connector_state(connector_state);
7008 
7009 	int ret = -EINVAL;
7010 
7011 	if (property == dev->mode_config.scaling_mode_property) {
7012 		enum amdgpu_rmx_type rmx_type;
7013 
7014 		switch (val) {
7015 		case DRM_MODE_SCALE_CENTER:
7016 			rmx_type = RMX_CENTER;
7017 			break;
7018 		case DRM_MODE_SCALE_ASPECT:
7019 			rmx_type = RMX_ASPECT;
7020 			break;
7021 		case DRM_MODE_SCALE_FULLSCREEN:
7022 			rmx_type = RMX_FULL;
7023 			break;
7024 		case DRM_MODE_SCALE_NONE:
7025 		default:
7026 			rmx_type = RMX_OFF;
7027 			break;
7028 		}
7029 
7030 		if (dm_old_state->scaling == rmx_type)
7031 			return 0;
7032 
7033 		dm_new_state->scaling = rmx_type;
7034 		ret = 0;
7035 	} else if (property == adev->mode_info.underscan_hborder_property) {
7036 		dm_new_state->underscan_hborder = val;
7037 		ret = 0;
7038 	} else if (property == adev->mode_info.underscan_vborder_property) {
7039 		dm_new_state->underscan_vborder = val;
7040 		ret = 0;
7041 	} else if (property == adev->mode_info.underscan_property) {
7042 		dm_new_state->underscan_enable = val;
7043 		ret = 0;
7044 	} else if (property == adev->mode_info.abm_level_property) {
7045 		dm_new_state->abm_level = val;
7046 		ret = 0;
7047 	}
7048 
7049 	return ret;
7050 }
7051 
7052 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7053 					    const struct drm_connector_state *state,
7054 					    struct drm_property *property,
7055 					    uint64_t *val)
7056 {
7057 	struct drm_device *dev = connector->dev;
7058 	struct amdgpu_device *adev = drm_to_adev(dev);
7059 	struct dm_connector_state *dm_state =
7060 		to_dm_connector_state(state);
7061 	int ret = -EINVAL;
7062 
7063 	if (property == dev->mode_config.scaling_mode_property) {
7064 		switch (dm_state->scaling) {
7065 		case RMX_CENTER:
7066 			*val = DRM_MODE_SCALE_CENTER;
7067 			break;
7068 		case RMX_ASPECT:
7069 			*val = DRM_MODE_SCALE_ASPECT;
7070 			break;
7071 		case RMX_FULL:
7072 			*val = DRM_MODE_SCALE_FULLSCREEN;
7073 			break;
7074 		case RMX_OFF:
7075 		default:
7076 			*val = DRM_MODE_SCALE_NONE;
7077 			break;
7078 		}
7079 		ret = 0;
7080 	} else if (property == adev->mode_info.underscan_hborder_property) {
7081 		*val = dm_state->underscan_hborder;
7082 		ret = 0;
7083 	} else if (property == adev->mode_info.underscan_vborder_property) {
7084 		*val = dm_state->underscan_vborder;
7085 		ret = 0;
7086 	} else if (property == adev->mode_info.underscan_property) {
7087 		*val = dm_state->underscan_enable;
7088 		ret = 0;
7089 	} else if (property == adev->mode_info.abm_level_property) {
7090 		*val = dm_state->abm_level;
7091 		ret = 0;
7092 	}
7093 
7094 	return ret;
7095 }
7096 
7097 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7098 {
7099 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7100 
7101 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7102 }
7103 
7104 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7105 {
7106 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7107 	const struct dc_link *link = aconnector->dc_link;
7108 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7109 	struct amdgpu_display_manager *dm = &adev->dm;
7110 	int i;
7111 
7112 	/*
7113 	 * Call only if mst_mgr was initialized before, since it's not done
7114 	 * for all connector types.
7115 	 */
7116 	if (aconnector->mst_mgr.dev)
7117 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7118 
7119 	for (i = 0; i < dm->num_of_edps; i++) {
7120 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7121 			backlight_device_unregister(dm->backlight_dev[i]);
7122 			dm->backlight_dev[i] = NULL;
7123 		}
7124 	}
7125 
7126 	if (aconnector->dc_em_sink)
7127 		dc_sink_release(aconnector->dc_em_sink);
7128 	aconnector->dc_em_sink = NULL;
7129 	if (aconnector->dc_sink)
7130 		dc_sink_release(aconnector->dc_sink);
7131 	aconnector->dc_sink = NULL;
7132 
7133 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7134 	drm_connector_unregister(connector);
7135 	drm_connector_cleanup(connector);
7136 	if (aconnector->i2c) {
7137 		i2c_del_adapter(&aconnector->i2c->base);
7138 		kfree(aconnector->i2c);
7139 	}
7140 	kfree(aconnector->dm_dp_aux.aux.name);
7141 
7142 	kfree(connector);
7143 }
7144 
7145 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7146 {
7147 	struct dm_connector_state *state =
7148 		to_dm_connector_state(connector->state);
7149 
7150 	if (connector->state)
7151 		__drm_atomic_helper_connector_destroy_state(connector->state);
7152 
7153 	kfree(state);
7154 
7155 	state = kzalloc(sizeof(*state), GFP_KERNEL);
7156 
7157 	if (state) {
7158 		state->scaling = RMX_OFF;
7159 		state->underscan_enable = false;
7160 		state->underscan_hborder = 0;
7161 		state->underscan_vborder = 0;
7162 		state->base.max_requested_bpc = 8;
7163 		state->vcpi_slots = 0;
7164 		state->pbn = 0;
7165 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7166 			state->abm_level = amdgpu_dm_abm_level;
7167 
7168 		__drm_atomic_helper_connector_reset(connector, &state->base);
7169 	}
7170 }
7171 
7172 struct drm_connector_state *
7173 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7174 {
7175 	struct dm_connector_state *state =
7176 		to_dm_connector_state(connector->state);
7177 
7178 	struct dm_connector_state *new_state =
7179 			kmemdup(state, sizeof(*state), GFP_KERNEL);
7180 
7181 	if (!new_state)
7182 		return NULL;
7183 
7184 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7185 
7186 	new_state->freesync_capable = state->freesync_capable;
7187 	new_state->abm_level = state->abm_level;
7188 	new_state->scaling = state->scaling;
7189 	new_state->underscan_enable = state->underscan_enable;
7190 	new_state->underscan_hborder = state->underscan_hborder;
7191 	new_state->underscan_vborder = state->underscan_vborder;
7192 	new_state->vcpi_slots = state->vcpi_slots;
7193 	new_state->pbn = state->pbn;
7194 	return &new_state->base;
7195 }
7196 
7197 static int
7198 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7199 {
7200 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7201 		to_amdgpu_dm_connector(connector);
7202 	int r;
7203 
7204 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7205 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7206 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7207 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7208 		if (r)
7209 			return r;
7210 	}
7211 
7212 #if defined(CONFIG_DEBUG_FS)
7213 	connector_debugfs_init(amdgpu_dm_connector);
7214 #endif
7215 
7216 	return 0;
7217 }
7218 
7219 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7220 	.reset = amdgpu_dm_connector_funcs_reset,
7221 	.detect = amdgpu_dm_connector_detect,
7222 	.fill_modes = drm_helper_probe_single_connector_modes,
7223 	.destroy = amdgpu_dm_connector_destroy,
7224 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7225 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7226 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7227 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7228 	.late_register = amdgpu_dm_connector_late_register,
7229 	.early_unregister = amdgpu_dm_connector_unregister
7230 };
7231 
7232 static int get_modes(struct drm_connector *connector)
7233 {
7234 	return amdgpu_dm_connector_get_modes(connector);
7235 }
7236 
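/*
 * Create an emulated (virtual) dc sink from the connector's EDID blob; used
 * when the connector state is forced and no real sink was detected.
 */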
7237 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7238 {
7239 	struct dc_sink_init_data init_params = {
7240 			.link = aconnector->dc_link,
7241 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7242 	};
7243 	struct edid *edid;
7244 
7245 	if (!aconnector->base.edid_blob_ptr) {
7246 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7247 				aconnector->base.name);
7248 
7249 		aconnector->base.force = DRM_FORCE_OFF;
7250 		aconnector->base.override_edid = false;
7251 		return;
7252 	}
7253 
7254 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7255 
7256 	aconnector->edid = edid;
7257 
7258 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7259 		aconnector->dc_link,
7260 		(uint8_t *)edid,
7261 		(edid->extensions + 1) * EDID_LENGTH,
7262 		&init_params);
7263 
7264 	if (aconnector->base.force == DRM_FORCE_ON) {
7265 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7266 		aconnector->dc_link->local_sink :
7267 		aconnector->dc_em_sink;
7268 		dc_sink_retain(aconnector->dc_sink);
7269 	}
7270 }
7271 
7272 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7273 {
7274 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7275 
7276 	/*
7277 	 * In case of a headless boot with force on for a DP managed connector,
7278 	 * those settings have to be != 0 to get an initial modeset.
7279 	 */
7280 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7281 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7282 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7283 	}
7284 
7285 
7286 	aconnector->base.override_edid = true;
7287 	create_eml_sink(aconnector);
7288 }
7289 
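/*
 * Create a stream for the sink and validate it with DC, retrying with a lower
 * bpc (down to 6) and, if encoder validation still fails, with YCbCr420
 * output forced.
 */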
7290 struct dc_stream_state *
7291 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7292 				const struct drm_display_mode *drm_mode,
7293 				const struct dm_connector_state *dm_state,
7294 				const struct dc_stream_state *old_stream)
7295 {
7296 	struct drm_connector *connector = &aconnector->base;
7297 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7298 	struct dc_stream_state *stream;
7299 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7300 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7301 	enum dc_status dc_result = DC_OK;
7302 
7303 	do {
7304 		stream = create_stream_for_sink(aconnector, drm_mode,
7305 						dm_state, old_stream,
7306 						requested_bpc);
7307 		if (stream == NULL) {
7308 			DRM_ERROR("Failed to create stream for sink!\n");
7309 			break;
7310 		}
7311 
7312 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7313 		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
7314 			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
7315 
7316 		if (dc_result != DC_OK) {
7317 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7318 				      drm_mode->hdisplay,
7319 				      drm_mode->vdisplay,
7320 				      drm_mode->clock,
7321 				      dc_result,
7322 				      dc_status_to_str(dc_result));
7323 
7324 			dc_stream_release(stream);
7325 			stream = NULL;
7326 			requested_bpc -= 2; /* lower bpc to retry validation */
7327 		}
7328 
7329 	} while (stream == NULL && requested_bpc >= 6);
7330 
7331 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7332 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7333 
7334 		aconnector->force_yuv420_output = true;
7335 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7336 						dm_state, old_stream);
7337 		aconnector->force_yuv420_output = false;
7338 	}
7339 
7340 	return stream;
7341 }
7342 
7343 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7344 				   struct drm_display_mode *mode)
7345 {
7346 	int result = MODE_ERROR;
7347 	struct dc_sink *dc_sink;
7348 	/* TODO: Unhardcode stream count */
7349 	struct dc_stream_state *stream;
7350 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7351 
7352 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7353 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7354 		return result;
7355 
7356 	/*
7357 	 * Only run this the first time mode_valid is called to initialize
7358 	 * EDID mgmt
7359 	 */
7360 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7361 		!aconnector->dc_em_sink)
7362 		handle_edid_mgmt(aconnector);
7363 
7364 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7365 
7366 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7367 				aconnector->base.force != DRM_FORCE_ON) {
7368 		DRM_ERROR("dc_sink is NULL!\n");
7369 		goto fail;
7370 	}
7371 
7372 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7373 	if (stream) {
7374 		dc_stream_release(stream);
7375 		result = MODE_OK;
7376 	}
7377 
7378 fail:
7379 	/* TODO: error handling*/
7380 	return result;
7381 }
7382 
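/*
 * Pack the connector's HDR output metadata into a DC info packet, using the
 * HDMI DRM infoframe layout for HDMI sinks and the SDP layout for DP/eDP.
 */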
7383 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7384 				struct dc_info_packet *out)
7385 {
7386 	struct hdmi_drm_infoframe frame;
7387 	unsigned char buf[30]; /* 26 + 4 */
7388 	ssize_t len;
7389 	int ret, i;
7390 
7391 	memset(out, 0, sizeof(*out));
7392 
7393 	if (!state->hdr_output_metadata)
7394 		return 0;
7395 
7396 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7397 	if (ret)
7398 		return ret;
7399 
7400 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7401 	if (len < 0)
7402 		return (int)len;
7403 
7404 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7405 	if (len != 30)
7406 		return -EINVAL;
7407 
7408 	/* Prepare the infopacket for DC. */
7409 	switch (state->connector->connector_type) {
7410 	case DRM_MODE_CONNECTOR_HDMIA:
7411 		out->hb0 = 0x87; /* type */
7412 		out->hb1 = 0x01; /* version */
7413 		out->hb2 = 0x1A; /* length */
7414 		out->sb[0] = buf[3]; /* checksum */
7415 		i = 1;
7416 		break;
7417 
7418 	case DRM_MODE_CONNECTOR_DisplayPort:
7419 	case DRM_MODE_CONNECTOR_eDP:
7420 		out->hb0 = 0x00; /* sdp id, zero */
7421 		out->hb1 = 0x87; /* type */
7422 		out->hb2 = 0x1D; /* payload len - 1 */
7423 		out->hb3 = (0x13 << 2); /* sdp version */
7424 		out->sb[0] = 0x01; /* version */
7425 		out->sb[1] = 0x1A; /* length */
7426 		i = 2;
7427 		break;
7428 
7429 	default:
7430 		return -EINVAL;
7431 	}
7432 
7433 	memcpy(&out->sb[i], &buf[4], 26);
7434 	out->valid = true;
7435 
7436 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7437 		       sizeof(out->sb), false);
7438 
7439 	return 0;
7440 }
7441 
7442 static int
7443 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7444 				 struct drm_atomic_state *state)
7445 {
7446 	struct drm_connector_state *new_con_state =
7447 		drm_atomic_get_new_connector_state(state, conn);
7448 	struct drm_connector_state *old_con_state =
7449 		drm_atomic_get_old_connector_state(state, conn);
7450 	struct drm_crtc *crtc = new_con_state->crtc;
7451 	struct drm_crtc_state *new_crtc_state;
7452 	int ret;
7453 
7454 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7455 
7456 	if (!crtc)
7457 		return 0;
7458 
7459 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7460 		struct dc_info_packet hdr_infopacket;
7461 
7462 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7463 		if (ret)
7464 			return ret;
7465 
7466 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7467 		if (IS_ERR(new_crtc_state))
7468 			return PTR_ERR(new_crtc_state);
7469 
7470 		/*
7471 		 * DC considers the stream backends changed if the
7472 		 * static metadata changes. Forcing the modeset also
7473 		 * gives a simple way for userspace to switch from
7474 		 * 8bpc to 10bpc when setting the metadata to enter
7475 		 * or exit HDR.
7476 		 *
7477 		 * Changing the static metadata after it's been
7478 		 * set is permissible, however. So only force a
7479 		 * modeset if we're entering or exiting HDR.
7480 		 */
7481 		new_crtc_state->mode_changed =
7482 			!old_con_state->hdr_output_metadata ||
7483 			!new_con_state->hdr_output_metadata;
7484 	}
7485 
7486 	return 0;
7487 }
7488 
7489 static const struct drm_connector_helper_funcs
7490 amdgpu_dm_connector_helper_funcs = {
7491 	/*
7492 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7493 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7494 	 * are missing after the user starts lightdm. So we need to renew the
7495 	 * modes list in the get_modes callback, not just return the modes count.
7496 	 */
7497 	.get_modes = get_modes,
7498 	.mode_valid = amdgpu_dm_connector_mode_valid,
7499 	.atomic_check = amdgpu_dm_connector_atomic_check,
7500 };
7501 
7502 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7503 {
7504 }
7505 
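/* Count the non-cursor planes that will be enabled on the CRTC after this commit. */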
7506 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7507 {
7508 	struct drm_atomic_state *state = new_crtc_state->state;
7509 	struct drm_plane *plane;
7510 	int num_active = 0;
7511 
7512 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7513 		struct drm_plane_state *new_plane_state;
7514 
7515 		/* Cursor planes are "fake". */
7516 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7517 			continue;
7518 
7519 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7520 
7521 		if (!new_plane_state) {
7522 			/*
7523 			 * The plane is enabled on the CRTC and hasn't changed
7524 			 * state. This means that it previously passed
7525 			 * validation and is therefore enabled.
7526 			 */
7527 			num_active += 1;
7528 			continue;
7529 		}
7530 
7531 		/* We need a framebuffer to be considered enabled. */
7532 		num_active += (new_plane_state->fb != NULL);
7533 	}
7534 
7535 	return num_active;
7536 }
7537 
7538 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7539 					 struct drm_crtc_state *new_crtc_state)
7540 {
7541 	struct dm_crtc_state *dm_new_crtc_state =
7542 		to_dm_crtc_state(new_crtc_state);
7543 
7544 	dm_new_crtc_state->active_planes = 0;
7545 
7546 	if (!dm_new_crtc_state->stream)
7547 		return;
7548 
7549 	dm_new_crtc_state->active_planes =
7550 		count_crtc_active_planes(new_crtc_state);
7551 }
7552 
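/*
 * Validate the new CRTC state: require the primary plane whenever the CRTC is
 * enabled and run DC stream validation on the attached stream, if any.
 */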
7553 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7554 				       struct drm_atomic_state *state)
7555 {
7556 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7557 									  crtc);
7558 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7559 	struct dc *dc = adev->dm.dc;
7560 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7561 	int ret = -EINVAL;
7562 
7563 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7564 
7565 	dm_update_crtc_active_planes(crtc, crtc_state);
7566 
7567 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7568 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7569 		return ret;
7570 	}
7571 
7572 	/*
7573 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7574 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7575 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7576 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7577 	 */
7578 	if (crtc_state->enable &&
7579 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7580 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7581 		return -EINVAL;
7582 	}
7583 
7584 	/* In some use cases, like reset, no stream is attached */
7585 	if (!dm_crtc_state->stream)
7586 		return 0;
7587 
7588 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7589 		return 0;
7590 
7591 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7592 	return ret;
7593 }
7594 
7595 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7596 				      const struct drm_display_mode *mode,
7597 				      struct drm_display_mode *adjusted_mode)
7598 {
7599 	return true;
7600 }
7601 
7602 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7603 	.disable = dm_crtc_helper_disable,
7604 	.atomic_check = dm_crtc_helper_atomic_check,
7605 	.mode_fixup = dm_crtc_helper_mode_fixup,
7606 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7607 };
7608 
7609 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7610 {
7611 
7612 }
7613 
7614 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7615 {
7616 	switch (display_color_depth) {
7617 	case COLOR_DEPTH_666:
7618 		return 6;
7619 	case COLOR_DEPTH_888:
7620 		return 8;
7621 	case COLOR_DEPTH_101010:
7622 		return 10;
7623 	case COLOR_DEPTH_121212:
7624 		return 12;
7625 	case COLOR_DEPTH_141414:
7626 		return 14;
7627 	case COLOR_DEPTH_161616:
7628 		return 16;
7629 	default:
7630 		break;
7631 	}
7632 	return 0;
7633 }
7634 
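/*
 * For MST connectors, compute the PBN required by the adjusted mode and
 * reserve the corresponding VCPI slots in the MST topology state.
 */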
7635 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7636 					  struct drm_crtc_state *crtc_state,
7637 					  struct drm_connector_state *conn_state)
7638 {
7639 	struct drm_atomic_state *state = crtc_state->state;
7640 	struct drm_connector *connector = conn_state->connector;
7641 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7642 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7643 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7644 	struct drm_dp_mst_topology_mgr *mst_mgr;
7645 	struct drm_dp_mst_port *mst_port;
7646 	enum dc_color_depth color_depth;
7647 	int clock, bpp = 0;
7648 	bool is_y420 = false;
7649 
7650 	if (!aconnector->port || !aconnector->dc_sink)
7651 		return 0;
7652 
7653 	mst_port = aconnector->port;
7654 	mst_mgr = &aconnector->mst_port->mst_mgr;
7655 
7656 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7657 		return 0;
7658 
7659 	if (!state->duplicated) {
7660 		int max_bpc = conn_state->max_requested_bpc;
7661 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7662 				aconnector->force_yuv420_output;
7663 		color_depth = convert_color_depth_from_display_info(connector,
7664 								    is_y420,
7665 								    max_bpc);
7666 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7667 		clock = adjusted_mode->clock;
7668 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7669 	}
7670 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7671 									   mst_mgr,
7672 									   mst_port,
7673 									   dm_new_connector_state->pbn,
7674 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7675 	if (dm_new_connector_state->vcpi_slots < 0) {
7676 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7677 		return dm_new_connector_state->vcpi_slots;
7678 	}
7679 	return 0;
7680 }
7681 
7682 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7683 	.disable = dm_encoder_helper_disable,
7684 	.atomic_check = dm_encoder_helper_atomic_check
7685 };
7686 
7687 #if defined(CONFIG_DRM_AMD_DC_DCN)
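/*
 * Apply the PBN computed by the MST DSC fairness algorithm to each MST
 * connector state and enable or disable DSC on the corresponding port.
 */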
7688 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7689 					    struct dc_state *dc_state,
7690 					    struct dsc_mst_fairness_vars *vars)
7691 {
7692 	struct dc_stream_state *stream = NULL;
7693 	struct drm_connector *connector;
7694 	struct drm_connector_state *new_con_state;
7695 	struct amdgpu_dm_connector *aconnector;
7696 	struct dm_connector_state *dm_conn_state;
7697 	int i, j;
7698 	int vcpi, pbn_div, pbn, slot_num = 0;
7699 
7700 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7701 
7702 		aconnector = to_amdgpu_dm_connector(connector);
7703 
7704 		if (!aconnector->port)
7705 			continue;
7706 
7707 		if (!new_con_state || !new_con_state->crtc)
7708 			continue;
7709 
7710 		dm_conn_state = to_dm_connector_state(new_con_state);
7711 
7712 		for (j = 0; j < dc_state->stream_count; j++) {
7713 			stream = dc_state->streams[j];
7714 			if (!stream)
7715 				continue;
7716 
7717 			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7718 				break;
7719 
7720 			stream = NULL;
7721 		}
7722 
7723 		if (!stream)
7724 			continue;
7725 
7726 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7727 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7728 		for (j = 0; j < dc_state->stream_count; j++) {
7729 			if (vars[j].aconnector == aconnector) {
7730 				pbn = vars[j].pbn;
7731 				break;
7732 			}
7733 		}
7734 
7735 		if (j == dc_state->stream_count)
7736 			continue;
7737 
7738 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7739 
7740 		if (stream->timing.flags.DSC != 1) {
7741 			dm_conn_state->pbn = pbn;
7742 			dm_conn_state->vcpi_slots = slot_num;
7743 
7744 			drm_dp_mst_atomic_enable_dsc(state,
7745 						     aconnector->port,
7746 						     dm_conn_state->pbn,
7747 						     0,
7748 						     false);
7749 			continue;
7750 		}
7751 
7752 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7753 						    aconnector->port,
7754 						    pbn, pbn_div,
7755 						    true);
7756 		if (vcpi < 0)
7757 			return vcpi;
7758 
7759 		dm_conn_state->pbn = pbn;
7760 		dm_conn_state->vcpi_slots = vcpi;
7761 	}
7762 	return 0;
7763 }
7764 #endif
7765 
7766 static void dm_drm_plane_reset(struct drm_plane *plane)
7767 {
7768 	struct dm_plane_state *amdgpu_state = NULL;
7769 
7770 	if (plane->state)
7771 		plane->funcs->atomic_destroy_state(plane, plane->state);
7772 
7773 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7774 	WARN_ON(amdgpu_state == NULL);
7775 
7776 	if (amdgpu_state)
7777 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7778 }
7779 
7780 static struct drm_plane_state *
7781 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7782 {
7783 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7784 
7785 	old_dm_plane_state = to_dm_plane_state(plane->state);
7786 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7787 	if (!dm_plane_state)
7788 		return NULL;
7789 
7790 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7791 
7792 	if (old_dm_plane_state->dc_state) {
7793 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7794 		dc_plane_state_retain(dm_plane_state->dc_state);
7795 	}
7796 
7797 	return &dm_plane_state->base;
7798 }
7799 
7800 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7801 				struct drm_plane_state *state)
7802 {
7803 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7804 
7805 	if (dm_plane_state->dc_state)
7806 		dc_plane_state_release(dm_plane_state->dc_state);
7807 
7808 	drm_atomic_helper_plane_destroy_state(plane, state);
7809 }
7810 
7811 static const struct drm_plane_funcs dm_plane_funcs = {
7812 	.update_plane	= drm_atomic_helper_update_plane,
7813 	.disable_plane	= drm_atomic_helper_disable_plane,
7814 	.destroy	= drm_primary_helper_destroy,
7815 	.reset = dm_drm_plane_reset,
7816 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7817 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7818 	.format_mod_supported = dm_plane_format_mod_supported,
7819 };
7820 
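/*
 * Pin the framebuffer BO in a displayable memory domain, map it into GART and
 * fill in the DC plane buffer attributes for newly created plane states.
 */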
7821 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7822 				      struct drm_plane_state *new_state)
7823 {
7824 	struct amdgpu_framebuffer *afb;
7825 	struct drm_gem_object *obj;
7826 	struct amdgpu_device *adev;
7827 	struct amdgpu_bo *rbo;
7828 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7829 	uint32_t domain;
7830 	int r;
7831 
7832 	if (!new_state->fb) {
7833 		DRM_DEBUG_KMS("No FB bound\n");
7834 		return 0;
7835 	}
7836 
7837 	afb = to_amdgpu_framebuffer(new_state->fb);
7838 	obj = new_state->fb->obj[0];
7839 	rbo = gem_to_amdgpu_bo(obj);
7840 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7841 
7842 	r = amdgpu_bo_reserve(rbo, true);
7843 	if (r) {
7844 		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
7845 		return r;
7846 	}
7847 
7848 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7849 	if (r) {
7850 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7851 		goto error_unlock;
7852 	}
7853 
7854 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7855 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7856 	else
7857 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7858 
7859 	r = amdgpu_bo_pin(rbo, domain);
7860 	if (unlikely(r != 0)) {
7861 		if (r != -ERESTARTSYS)
7862 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7863 		goto error_unlock;
7864 	}
7865 
7866 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7867 	if (unlikely(r != 0)) {
7868 		DRM_ERROR("%p bind failed\n", rbo);
7869 		goto error_unpin;
7870 	}
7871 
7872 	amdgpu_bo_unreserve(rbo);
7873 
7874 	afb->address = amdgpu_bo_gpu_offset(rbo);
7875 
7876 	amdgpu_bo_ref(rbo);
7877 
7878 	/*
7879 	 * We don't do surface updates on planes that have been newly created,
7880 	 * but we also don't have the afb->address during atomic check.
7881 	 *
7882 	 * Fill in buffer attributes depending on the address here, but only on
7883 	 * newly created planes since they're not being used by DC yet and this
7884 	 * won't modify global state.
7885 	 */
7886 	dm_plane_state_old = to_dm_plane_state(plane->state);
7887 	dm_plane_state_new = to_dm_plane_state(new_state);
7888 
7889 	if (dm_plane_state_new->dc_state &&
7890 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7891 		struct dc_plane_state *plane_state =
7892 			dm_plane_state_new->dc_state;
7893 		bool force_disable_dcc = !plane_state->dcc.enable;
7894 
7895 		fill_plane_buffer_attributes(
7896 			adev, afb, plane_state->format, plane_state->rotation,
7897 			afb->tiling_flags,
7898 			&plane_state->tiling_info, &plane_state->plane_size,
7899 			&plane_state->dcc, &plane_state->address,
7900 			afb->tmz_surface, force_disable_dcc);
7901 	}
7902 
7903 	return 0;
7904 
7905 error_unpin:
7906 	amdgpu_bo_unpin(rbo);
7907 
7908 error_unlock:
7909 	amdgpu_bo_unreserve(rbo);
7910 	return r;
7911 }
7912 
7913 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7914 				       struct drm_plane_state *old_state)
7915 {
7916 	struct amdgpu_bo *rbo;
7917 	int r;
7918 
7919 	if (!old_state->fb)
7920 		return;
7921 
7922 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7923 	r = amdgpu_bo_reserve(rbo, false);
7924 	if (unlikely(r)) {
7925 		DRM_ERROR("failed to reserve rbo before unpin\n");
7926 		return;
7927 	}
7928 
7929 	amdgpu_bo_unpin(rbo);
7930 	amdgpu_bo_unreserve(rbo);
7931 	amdgpu_bo_unref(&rbo);
7932 }
7933 
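/*
 * Validate the plane's viewport against the CRTC and check the requested
 * scaling against the min/max factors reported by the DC plane caps.
 */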
7934 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7935 				       struct drm_crtc_state *new_crtc_state)
7936 {
7937 	struct drm_framebuffer *fb = state->fb;
7938 	int min_downscale, max_upscale;
7939 	int min_scale = 0;
7940 	int max_scale = INT_MAX;
7941 
7942 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7943 	if (fb && state->crtc) {
7944 		/* Validate viewport to cover the case when only the position changes */
7945 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7946 			int viewport_width = state->crtc_w;
7947 			int viewport_height = state->crtc_h;
7948 
7949 			if (state->crtc_x < 0)
7950 				viewport_width += state->crtc_x;
7951 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7952 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7953 
7954 			if (state->crtc_y < 0)
7955 				viewport_height += state->crtc_y;
7956 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7957 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7958 
7959 			if (viewport_width < 0 || viewport_height < 0) {
7960 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7961 				return -EINVAL;
7962 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7963 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7964 				return -EINVAL;
7965 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7966 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7967 				return -EINVAL;
7968 			}
7969 
7970 		}
7971 
7972 		/* Get min/max allowed scaling factors from plane caps. */
7973 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7974 					     &min_downscale, &max_upscale);
7975 		/*
7976 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7977 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7978 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7979 		 */
7980 		min_scale = (1000 << 16) / max_upscale;
7981 		max_scale = (1000 << 16) / min_downscale;
7982 	}
7983 
7984 	return drm_atomic_helper_check_plane_state(
7985 		state, new_crtc_state, min_scale, max_scale, true, true);
7986 }
7987 
7988 static int dm_plane_atomic_check(struct drm_plane *plane,
7989 				 struct drm_atomic_state *state)
7990 {
7991 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7992 										 plane);
7993 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7994 	struct dc *dc = adev->dm.dc;
7995 	struct dm_plane_state *dm_plane_state;
7996 	struct dc_scaling_info scaling_info;
7997 	struct drm_crtc_state *new_crtc_state;
7998 	int ret;
7999 
8000 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
8001 
8002 	dm_plane_state = to_dm_plane_state(new_plane_state);
8003 
8004 	if (!dm_plane_state->dc_state)
8005 		return 0;
8006 
8007 	new_crtc_state =
8008 		drm_atomic_get_new_crtc_state(state,
8009 					      new_plane_state->crtc);
8010 	if (!new_crtc_state)
8011 		return -EINVAL;
8012 
8013 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8014 	if (ret)
8015 		return ret;
8016 
8017 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
8018 	if (ret)
8019 		return ret;
8020 
8021 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
8022 		return 0;
8023 
8024 	return -EINVAL;
8025 }
8026 
8027 static int dm_plane_atomic_async_check(struct drm_plane *plane,
8028 				       struct drm_atomic_state *state)
8029 {
8030 	/* Only support async updates on cursor planes. */
8031 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
8032 		return -EINVAL;
8033 
8034 	return 0;
8035 }
8036 
8037 static void dm_plane_atomic_async_update(struct drm_plane *plane,
8038 					 struct drm_atomic_state *state)
8039 {
8040 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
8041 									   plane);
8042 	struct drm_plane_state *old_state =
8043 		drm_atomic_get_old_plane_state(state, plane);
8044 
8045 	trace_amdgpu_dm_atomic_update_cursor(new_state);
8046 
8047 	swap(plane->state->fb, new_state->fb);
8048 
8049 	plane->state->src_x = new_state->src_x;
8050 	plane->state->src_y = new_state->src_y;
8051 	plane->state->src_w = new_state->src_w;
8052 	plane->state->src_h = new_state->src_h;
8053 	plane->state->crtc_x = new_state->crtc_x;
8054 	plane->state->crtc_y = new_state->crtc_y;
8055 	plane->state->crtc_w = new_state->crtc_w;
8056 	plane->state->crtc_h = new_state->crtc_h;
8057 
8058 	handle_cursor_update(plane, old_state);
8059 }
8060 
8061 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
8062 	.prepare_fb = dm_plane_helper_prepare_fb,
8063 	.cleanup_fb = dm_plane_helper_cleanup_fb,
8064 	.atomic_check = dm_plane_atomic_check,
8065 	.atomic_async_check = dm_plane_atomic_async_check,
8066 	.atomic_async_update = dm_plane_atomic_async_update
8067 };
8068 
8069 /*
8070  * TODO: these are currently initialized to rgb formats only.
8071  * For future use cases we should either initialize them dynamically based on
8072  * plane capabilities, or initialize this array to all formats, so the internal
8073  * drm check will succeed, and let DC implement the proper check.
8074  */
8075 static const uint32_t rgb_formats[] = {
8076 	DRM_FORMAT_XRGB8888,
8077 	DRM_FORMAT_ARGB8888,
8078 	DRM_FORMAT_RGBA8888,
8079 	DRM_FORMAT_XRGB2101010,
8080 	DRM_FORMAT_XBGR2101010,
8081 	DRM_FORMAT_ARGB2101010,
8082 	DRM_FORMAT_ABGR2101010,
8083 	DRM_FORMAT_XRGB16161616,
8084 	DRM_FORMAT_XBGR16161616,
8085 	DRM_FORMAT_ARGB16161616,
8086 	DRM_FORMAT_ABGR16161616,
8087 	DRM_FORMAT_XBGR8888,
8088 	DRM_FORMAT_ABGR8888,
8089 	DRM_FORMAT_RGB565,
8090 };
8091 
8092 static const uint32_t overlay_formats[] = {
8093 	DRM_FORMAT_XRGB8888,
8094 	DRM_FORMAT_ARGB8888,
8095 	DRM_FORMAT_RGBA8888,
8096 	DRM_FORMAT_XBGR8888,
8097 	DRM_FORMAT_ABGR8888,
8098 	DRM_FORMAT_RGB565
8099 };
8100 
8101 static const u32 cursor_formats[] = {
8102 	DRM_FORMAT_ARGB8888
8103 };
8104 
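/*
 * Fill @formats with the pixel formats supported by the given plane type,
 * extending the primary plane list with NV12/P010/FP16 when the DC plane
 * caps report support for them. Returns the number of formats written.
 */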
8105 static int get_plane_formats(const struct drm_plane *plane,
8106 			     const struct dc_plane_cap *plane_cap,
8107 			     uint32_t *formats, int max_formats)
8108 {
8109 	int i, num_formats = 0;
8110 
8111 	/*
8112 	 * TODO: Query support for each group of formats directly from
8113 	 * DC plane caps. This will require adding more formats to the
8114 	 * caps list.
8115 	 */
8116 
8117 	switch (plane->type) {
8118 	case DRM_PLANE_TYPE_PRIMARY:
8119 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8120 			if (num_formats >= max_formats)
8121 				break;
8122 
8123 			formats[num_formats++] = rgb_formats[i];
8124 		}
8125 
8126 		if (plane_cap && plane_cap->pixel_format_support.nv12)
8127 			formats[num_formats++] = DRM_FORMAT_NV12;
8128 		if (plane_cap && plane_cap->pixel_format_support.p010)
8129 			formats[num_formats++] = DRM_FORMAT_P010;
8130 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
8131 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8132 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8133 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8134 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8135 		}
8136 		break;
8137 
8138 	case DRM_PLANE_TYPE_OVERLAY:
8139 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8140 			if (num_formats >= max_formats)
8141 				break;
8142 
8143 			formats[num_formats++] = overlay_formats[i];
8144 		}
8145 		break;
8146 
8147 	case DRM_PLANE_TYPE_CURSOR:
8148 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8149 			if (num_formats >= max_formats)
8150 				break;
8151 
8152 			formats[num_formats++] = cursor_formats[i];
8153 		}
8154 		break;
8155 	}
8156 
8157 	return num_formats;
8158 }
8159 
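/*
 * Register a DRM plane with the formats, modifiers and properties (alpha,
 * blend mode, color encoding/range, rotation) derived from the DC plane caps
 * and ASIC capabilities.
 */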
8160 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8161 				struct drm_plane *plane,
8162 				unsigned long possible_crtcs,
8163 				const struct dc_plane_cap *plane_cap)
8164 {
8165 	uint32_t formats[32];
8166 	int num_formats;
8167 	int res = -EPERM;
8168 	unsigned int supported_rotations;
8169 	uint64_t *modifiers = NULL;
8170 
8171 	num_formats = get_plane_formats(plane, plane_cap, formats,
8172 					ARRAY_SIZE(formats));
8173 
8174 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8175 	if (res)
8176 		return res;
8177 
8178 	if (modifiers == NULL)
8179 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8180 
8181 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8182 				       &dm_plane_funcs, formats, num_formats,
8183 				       modifiers, plane->type, NULL);
8184 	kfree(modifiers);
8185 	if (res)
8186 		return res;
8187 
8188 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8189 	    plane_cap && plane_cap->per_pixel_alpha) {
8190 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8191 					  BIT(DRM_MODE_BLEND_PREMULTI) |
8192 					  BIT(DRM_MODE_BLEND_COVERAGE);
8193 
8194 		drm_plane_create_alpha_property(plane);
8195 		drm_plane_create_blend_mode_property(plane, blend_caps);
8196 	}
8197 
8198 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8199 	    plane_cap &&
8200 	    (plane_cap->pixel_format_support.nv12 ||
8201 	     plane_cap->pixel_format_support.p010)) {
8202 		/* This only affects YUV formats. */
8203 		drm_plane_create_color_properties(
8204 			plane,
8205 			BIT(DRM_COLOR_YCBCR_BT601) |
8206 			BIT(DRM_COLOR_YCBCR_BT709) |
8207 			BIT(DRM_COLOR_YCBCR_BT2020),
8208 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8209 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8210 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8211 	}
8212 
8213 	supported_rotations =
8214 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8215 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8216 
8217 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
8218 	    plane->type != DRM_PLANE_TYPE_CURSOR)
8219 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8220 						   supported_rotations);
8221 
8222 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8223 
8224 	/* Create (reset) the plane state */
8225 	if (plane->funcs->reset)
8226 		plane->funcs->reset(plane);
8227 
8228 	return 0;
8229 }
8230 
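/*
 * Create an amdgpu CRTC with its dedicated cursor plane and register it with
 * the DRM core, including color management and legacy gamma support.
 */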
8231 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8232 			       struct drm_plane *plane,
8233 			       uint32_t crtc_index)
8234 {
8235 	struct amdgpu_crtc *acrtc = NULL;
8236 	struct drm_plane *cursor_plane;
8237 
8238 	int res = -ENOMEM;
8239 
8240 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8241 	if (!cursor_plane)
8242 		goto fail;
8243 
8244 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8245 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;
8246 
8247 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8248 	if (!acrtc)
8249 		goto fail;
8250 
8251 	res = drm_crtc_init_with_planes(
8252 			dm->ddev,
8253 			&acrtc->base,
8254 			plane,
8255 			cursor_plane,
8256 			&amdgpu_dm_crtc_funcs, NULL);
8257 
8258 	if (res)
8259 		goto fail;
8260 
8261 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8262 
8263 	/* Create (reset) the plane state */
8264 	if (acrtc->base.funcs->reset)
8265 		acrtc->base.funcs->reset(&acrtc->base);
8266 
8267 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8268 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8269 
8270 	acrtc->crtc_id = crtc_index;
8271 	acrtc->base.enabled = false;
8272 	acrtc->otg_inst = -1;
8273 
8274 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8275 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8276 				   true, MAX_COLOR_LUT_ENTRIES);
8277 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8278 
8279 	return 0;
8280 
8281 fail:
8282 	kfree(acrtc);
8283 	kfree(cursor_plane);
8284 	return res;
8285 }
8286 
8287 
8288 static int to_drm_connector_type(enum signal_type st)
8289 {
8290 	switch (st) {
8291 	case SIGNAL_TYPE_HDMI_TYPE_A:
8292 		return DRM_MODE_CONNECTOR_HDMIA;
8293 	case SIGNAL_TYPE_EDP:
8294 		return DRM_MODE_CONNECTOR_eDP;
8295 	case SIGNAL_TYPE_LVDS:
8296 		return DRM_MODE_CONNECTOR_LVDS;
8297 	case SIGNAL_TYPE_RGB:
8298 		return DRM_MODE_CONNECTOR_VGA;
8299 	case SIGNAL_TYPE_DISPLAY_PORT:
8300 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8301 		return DRM_MODE_CONNECTOR_DisplayPort;
8302 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8303 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8304 		return DRM_MODE_CONNECTOR_DVID;
8305 	case SIGNAL_TYPE_VIRTUAL:
8306 		return DRM_MODE_CONNECTOR_VIRTUAL;
8307 
8308 	default:
8309 		return DRM_MODE_CONNECTOR_Unknown;
8310 	}
8311 }
8312 
8313 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8314 {
8315 	struct drm_encoder *encoder;
8316 
8317 	/* There is only one encoder per connector */
8318 	drm_connector_for_each_possible_encoder(connector, encoder)
8319 		return encoder;
8320 
8321 	return NULL;
8322 }
8323 
8324 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8325 {
8326 	struct drm_encoder *encoder;
8327 	struct amdgpu_encoder *amdgpu_encoder;
8328 
8329 	encoder = amdgpu_dm_connector_to_encoder(connector);
8330 
8331 	if (encoder == NULL)
8332 		return;
8333 
8334 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8335 
8336 	amdgpu_encoder->native_mode.clock = 0;
8337 
8338 	if (!list_empty(&connector->probed_modes)) {
8339 		struct drm_display_mode *preferred_mode = NULL;
8340 
8341 		list_for_each_entry(preferred_mode,
8342 				    &connector->probed_modes,
8343 				    head) {
8344 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8345 				amdgpu_encoder->native_mode = *preferred_mode;
8346 
8347 			break;
8348 		}
8349 
8350 	}
8351 }
8352 
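/*
 * Duplicate the encoder's native mode and override its active width/height
 * and name so it can be offered as an additional, non-preferred mode.
 */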
8353 static struct drm_display_mode *
8354 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8355 			     char *name,
8356 			     int hdisplay, int vdisplay)
8357 {
8358 	struct drm_device *dev = encoder->dev;
8359 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8360 	struct drm_display_mode *mode = NULL;
8361 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8362 
8363 	mode = drm_mode_duplicate(dev, native_mode);
8364 
8365 	if (mode == NULL)
8366 		return NULL;
8367 
8368 	mode->hdisplay = hdisplay;
8369 	mode->vdisplay = vdisplay;
8370 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8371 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8372 
8373 	return mode;
8374 
8375 }
8376 
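/*
 * Add a fixed list of common resolutions that are smaller than the native
 * mode, skipping any that EDID probing already provided.
 */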
8377 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8378 						 struct drm_connector *connector)
8379 {
8380 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8381 	struct drm_display_mode *mode = NULL;
8382 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8383 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8384 				to_amdgpu_dm_connector(connector);
8385 	int i;
8386 	int n;
8387 	struct mode_size {
8388 		char name[DRM_DISPLAY_MODE_LEN];
8389 		int w;
8390 		int h;
8391 	} common_modes[] = {
8392 		{  "640x480",  640,  480},
8393 		{  "800x600",  800,  600},
8394 		{ "1024x768", 1024,  768},
8395 		{ "1280x720", 1280,  720},
8396 		{ "1280x800", 1280,  800},
8397 		{"1280x1024", 1280, 1024},
8398 		{ "1440x900", 1440,  900},
8399 		{"1680x1050", 1680, 1050},
8400 		{"1600x1200", 1600, 1200},
8401 		{"1920x1080", 1920, 1080},
8402 		{"1920x1200", 1920, 1200}
8403 	};
8404 
8405 	n = ARRAY_SIZE(common_modes);
8406 
8407 	for (i = 0; i < n; i++) {
8408 		struct drm_display_mode *curmode = NULL;
8409 		bool mode_existed = false;
8410 
8411 		if (common_modes[i].w > native_mode->hdisplay ||
8412 		    common_modes[i].h > native_mode->vdisplay ||
8413 		   (common_modes[i].w == native_mode->hdisplay &&
8414 		    common_modes[i].h == native_mode->vdisplay))
8415 			continue;
8416 
8417 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8418 			if (common_modes[i].w == curmode->hdisplay &&
8419 			    common_modes[i].h == curmode->vdisplay) {
8420 				mode_existed = true;
8421 				break;
8422 			}
8423 		}
8424 
8425 		if (mode_existed)
8426 			continue;
8427 
8428 		mode = amdgpu_dm_create_common_mode(encoder,
8429 				common_modes[i].name, common_modes[i].w,
8430 				common_modes[i].h);
8431 		if (!mode)
8432 			continue;
8433 
8434 		drm_mode_probed_add(connector, mode);
8435 		amdgpu_dm_connector->num_modes++;
8436 	}
8437 }
8438 
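/*
 * For internal panels (eDP/LVDS), look up and apply any panel orientation
 * quirk based on the native mode dimensions.
 */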
8439 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8440 {
8441 	struct drm_encoder *encoder;
8442 	struct amdgpu_encoder *amdgpu_encoder;
8443 	const struct drm_display_mode *native_mode;
8444 
8445 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8446 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8447 		return;
8448 
8449 	encoder = amdgpu_dm_connector_to_encoder(connector);
8450 	if (!encoder)
8451 		return;
8452 
8453 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8454 
8455 	native_mode = &amdgpu_encoder->native_mode;
8456 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8457 		return;
8458 
8459 	drm_connector_set_panel_orientation_with_quirk(connector,
8460 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8461 						       native_mode->hdisplay,
8462 						       native_mode->vdisplay);
8463 }
8464 
8465 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8466 					      struct edid *edid)
8467 {
8468 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8469 			to_amdgpu_dm_connector(connector);
8470 
8471 	if (edid) {
8472 		/* empty probed_modes */
8473 		INIT_LIST_HEAD(&connector->probed_modes);
8474 		amdgpu_dm_connector->num_modes =
8475 				drm_add_edid_modes(connector, edid);
8476 
		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. Modes later in the
		 * probed mode list could be preferred and of higher
		 * resolution, e.g. 3840x2160 in the base EDID preferred
		 * timing and 4096x2160 as the preferred resolution in a
		 * DID extension block that follows.
		 */
8485 		drm_mode_sort(&connector->probed_modes);
8486 		amdgpu_dm_get_native_mode(connector);
8487 
8488 		/* Freesync capabilities are reset by calling
8489 		 * drm_add_edid_modes() and need to be
8490 		 * restored here.
8491 		 */
8492 		amdgpu_dm_update_freesync_caps(connector, edid);
8493 
8494 		amdgpu_set_panel_orientation(connector);
8495 	} else {
8496 		amdgpu_dm_connector->num_modes = 0;
8497 	}
8498 }
8499 
8500 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8501 			      struct drm_display_mode *mode)
8502 {
8503 	struct drm_display_mode *m;
8504 
8505 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8506 		if (drm_mode_equal(m, mode))
8507 			return true;
8508 	}
8509 
8510 	return false;
8511 }
8512 
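/*
 * Synthesize fixed-refresh variants of the highest-refresh probed mode for
 * a set of common frame rates inside the panel's FreeSync range, by
 * stretching the vertical blanking interval. Returns the number of modes
 * added to the connector's probed mode list.
 */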
8513 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8514 {
8515 	const struct drm_display_mode *m;
8516 	struct drm_display_mode *new_mode;
8517 	uint i;
8518 	uint32_t new_modes_count = 0;
8519 
	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
8532 	static const uint32_t common_rates[] = {
8533 		23976, 24000, 25000, 29970, 30000,
8534 		48000, 50000, 60000, 72000, 96000, 120000
8535 	};
8536 
8537 	/*
8538 	 * Find mode with highest refresh rate with the same resolution
8539 	 * as the preferred mode. Some monitors report a preferred mode
8540 	 * with lower resolution than the highest refresh rate supported.
8541 	 */
8542 
8543 	m = get_highest_refresh_rate_mode(aconnector, true);
8544 	if (!m)
8545 		return 0;
8546 
8547 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8548 		uint64_t target_vtotal, target_vtotal_diff;
8549 		uint64_t num, den;
8550 
8551 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8552 			continue;
8553 
8554 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8555 		    common_rates[i] > aconnector->max_vfreq * 1000)
8556 			continue;
8557 
8558 		num = (unsigned long long)m->clock * 1000 * 1000;
8559 		den = common_rates[i] * (unsigned long long)m->htotal;
8560 		target_vtotal = div_u64(num, den);
8561 		target_vtotal_diff = target_vtotal - m->vtotal;
8562 
8563 		/* Check for illegal modes */
8564 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8565 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8566 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8567 			continue;
8568 
8569 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8570 		if (!new_mode)
8571 			goto out;
8572 
8573 		new_mode->vtotal += (u16)target_vtotal_diff;
8574 		new_mode->vsync_start += (u16)target_vtotal_diff;
8575 		new_mode->vsync_end += (u16)target_vtotal_diff;
8576 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8577 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8578 
8579 		if (!is_duplicate_mode(aconnector, new_mode)) {
8580 			drm_mode_probed_add(&aconnector->base, new_mode);
8581 			new_modes_count += 1;
		} else {
			drm_mode_destroy(aconnector->base.dev, new_mode);
		}
8584 	}
8585  out:
8586 	return new_modes_count;
8587 }
8588 
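/*
 * Only add the synthesized FreeSync modes when the panel reports a usable
 * refresh rate range (more than 10 Hz wide).
 */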
8589 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8590 						   struct edid *edid)
8591 {
8592 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8593 		to_amdgpu_dm_connector(connector);
8594 
8595 	if (!edid)
8596 		return;
8597 
8598 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8599 		amdgpu_dm_connector->num_modes +=
8600 			add_fs_modes(amdgpu_dm_connector);
8601 }
8602 
8603 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8604 {
8605 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8606 			to_amdgpu_dm_connector(connector);
8607 	struct drm_encoder *encoder;
8608 	struct edid *edid = amdgpu_dm_connector->edid;
8609 
8610 	encoder = amdgpu_dm_connector_to_encoder(connector);
8611 
8612 	if (!drm_edid_is_valid(edid)) {
8613 		amdgpu_dm_connector->num_modes =
8614 				drm_add_modes_noedid(connector, 640, 480);
8615 	} else {
8616 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8617 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8618 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8619 	}
8620 	amdgpu_dm_fbc_init(connector);
8621 
8622 	return amdgpu_dm_connector->num_modes;
8623 }
8624 
8625 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8626 				     struct amdgpu_dm_connector *aconnector,
8627 				     int connector_type,
8628 				     struct dc_link *link,
8629 				     int link_index)
8630 {
8631 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8632 
8633 	/*
8634 	 * Some of the properties below require access to state, like bpc.
8635 	 * Allocate some default initial connector state with our reset helper.
8636 	 */
8637 	if (aconnector->base.funcs->reset)
8638 		aconnector->base.funcs->reset(&aconnector->base);
8639 
8640 	aconnector->connector_id = link_index;
8641 	aconnector->dc_link = link;
8642 	aconnector->base.interlace_allowed = false;
8643 	aconnector->base.doublescan_allowed = false;
8644 	aconnector->base.stereo_allowed = false;
8645 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8646 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8647 	aconnector->audio_inst = -1;
8648 	mutex_init(&aconnector->hpd_lock);
8649 
	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
8654 	switch (connector_type) {
8655 	case DRM_MODE_CONNECTOR_HDMIA:
8656 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8657 		aconnector->base.ycbcr_420_allowed =
8658 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8659 		break;
8660 	case DRM_MODE_CONNECTOR_DisplayPort:
8661 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8662 		link->link_enc = link_enc_cfg_get_link_enc(link);
8663 		ASSERT(link->link_enc);
8664 		if (link->link_enc)
8665 			aconnector->base.ycbcr_420_allowed =
8666 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8667 		break;
8668 	case DRM_MODE_CONNECTOR_DVID:
8669 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8670 		break;
8671 	default:
8672 		break;
8673 	}
8674 
8675 	drm_object_attach_property(&aconnector->base.base,
8676 				dm->ddev->mode_config.scaling_mode_property,
8677 				DRM_MODE_SCALE_NONE);
8678 
8679 	drm_object_attach_property(&aconnector->base.base,
8680 				adev->mode_info.underscan_property,
8681 				UNDERSCAN_OFF);
8682 	drm_object_attach_property(&aconnector->base.base,
8683 				adev->mode_info.underscan_hborder_property,
8684 				0);
8685 	drm_object_attach_property(&aconnector->base.base,
8686 				adev->mode_info.underscan_vborder_property,
8687 				0);
8688 
8689 	if (!aconnector->mst_port)
8690 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8691 
	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
8693 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8694 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8695 
8696 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8697 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8698 		drm_object_attach_property(&aconnector->base.base,
8699 				adev->mode_info.abm_level_property, 0);
8700 	}
8701 
8702 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8703 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8704 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8705 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8706 
8707 		if (!aconnector->mst_port)
8708 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8709 
8710 #ifdef CONFIG_DRM_AMD_DC_HDCP
8711 		if (adev->dm.hdcp_workqueue)
8712 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8713 #endif
8714 	}
8715 }
8716 
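/*
 * I2C-over-DDC transfer: translate the i2c_msg array into a DC i2c_command
 * and submit it on the link's DDC line.
 */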
8717 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8718 			      struct i2c_msg *msgs, int num)
8719 {
8720 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8721 	struct ddc_service *ddc_service = i2c->ddc_service;
8722 	struct i2c_command cmd;
8723 	int i;
8724 	int result = -EIO;
8725 
8726 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8727 
8728 	if (!cmd.payloads)
8729 		return result;
8730 
8731 	cmd.number_of_payloads = num;
8732 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8733 	cmd.speed = 100;
8734 
8735 	for (i = 0; i < num; i++) {
8736 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8737 		cmd.payloads[i].address = msgs[i].addr;
8738 		cmd.payloads[i].length = msgs[i].len;
8739 		cmd.payloads[i].data = msgs[i].buf;
8740 	}
8741 
8742 	if (dc_submit_i2c(
8743 			ddc_service->ctx->dc,
8744 			ddc_service->link->link_index,
8745 			&cmd))
8746 		result = num;
8747 
8748 	kfree(cmd.payloads);
8749 	return result;
8750 }
8751 
8752 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8753 {
8754 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8755 }
8756 
8757 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8758 	.master_xfer = amdgpu_dm_i2c_xfer,
8759 	.functionality = amdgpu_dm_i2c_func,
8760 };
8761 
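/*
 * Allocate and initialize an i2c adapter backed by DC's DDC service; the
 * caller registers it with i2c_add_adapter() and owns the allocation.
 */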
8762 static struct amdgpu_i2c_adapter *
8763 create_i2c(struct ddc_service *ddc_service,
8764 	   int link_index,
8765 	   int *res)
8766 {
8767 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8768 	struct amdgpu_i2c_adapter *i2c;
8769 
8770 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8771 	if (!i2c)
8772 		return NULL;
8773 	i2c->base.owner = THIS_MODULE;
8774 	i2c->base.class = I2C_CLASS_DDC;
8775 	i2c->base.dev.parent = &adev->pdev->dev;
8776 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8777 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8778 	i2c_set_adapdata(&i2c->base, i2c);
8779 	i2c->ddc_service = ddc_service;
8780 
8781 	return i2c;
8782 }
8783 
8784 
8785 /*
8786  * Note: this function assumes that dc_link_detect() was called for the
8787  * dc_link which will be represented by this aconnector.
8788  */
8789 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8790 				    struct amdgpu_dm_connector *aconnector,
8791 				    uint32_t link_index,
8792 				    struct amdgpu_encoder *aencoder)
8793 {
8794 	int res = 0;
8795 	int connector_type;
8796 	struct dc *dc = dm->dc;
8797 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8798 	struct amdgpu_i2c_adapter *i2c;
8799 
8800 	link->priv = aconnector;
8801 
8802 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8803 
8804 	i2c = create_i2c(link->ddc, link->link_index, &res);
8805 	if (!i2c) {
8806 		DRM_ERROR("Failed to create i2c adapter data\n");
8807 		return -ENOMEM;
8808 	}
8809 
8810 	aconnector->i2c = i2c;
8811 	res = i2c_add_adapter(&i2c->base);
8812 
8813 	if (res) {
8814 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8815 		goto out_free;
8816 	}
8817 
8818 	connector_type = to_drm_connector_type(link->connector_signal);
8819 
8820 	res = drm_connector_init_with_ddc(
8821 			dm->ddev,
8822 			&aconnector->base,
8823 			&amdgpu_dm_connector_funcs,
8824 			connector_type,
8825 			&i2c->base);
8826 
8827 	if (res) {
8828 		DRM_ERROR("connector_init failed\n");
8829 		aconnector->connector_id = -1;
8830 		goto out_free;
8831 	}
8832 
8833 	drm_connector_helper_add(
8834 			&aconnector->base,
8835 			&amdgpu_dm_connector_helper_funcs);
8836 
8837 	amdgpu_dm_connector_init_helper(
8838 		dm,
8839 		aconnector,
8840 		connector_type,
8841 		link,
8842 		link_index);
8843 
8844 	drm_connector_attach_encoder(
8845 		&aconnector->base, &aencoder->base);
8846 
	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP)
8849 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8850 
8851 out_free:
8852 	if (res) {
8853 		kfree(i2c);
8854 		aconnector->i2c = NULL;
8855 	}
8856 	return res;
8857 }
8858 
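/* Build a possible_crtcs mask covering every instantiated CRTC (up to 6). */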
8859 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8860 {
8861 	switch (adev->mode_info.num_crtc) {
8862 	case 1:
8863 		return 0x1;
8864 	case 2:
8865 		return 0x3;
8866 	case 3:
8867 		return 0x7;
8868 	case 4:
8869 		return 0xf;
8870 	case 5:
8871 		return 0x1f;
8872 	case 6:
8873 	default:
8874 		return 0x3f;
8875 	}
8876 }
8877 
8878 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8879 				  struct amdgpu_encoder *aencoder,
8880 				  uint32_t link_index)
8881 {
8882 	struct amdgpu_device *adev = drm_to_adev(dev);
8883 
8884 	int res = drm_encoder_init(dev,
8885 				   &aencoder->base,
8886 				   &amdgpu_dm_encoder_funcs,
8887 				   DRM_MODE_ENCODER_TMDS,
8888 				   NULL);
8889 
8890 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8891 
8892 	if (!res)
8893 		aencoder->encoder_id = link_index;
8894 	else
8895 		aencoder->encoder_id = -1;
8896 
8897 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8898 
8899 	return res;
8900 }
8901 
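/*
 * Enable or disable the vblank, pageflip and (when secure display is
 * enabled) vline0 interrupts for a single CRTC.
 */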
8902 static void manage_dm_interrupts(struct amdgpu_device *adev,
8903 				 struct amdgpu_crtc *acrtc,
8904 				 bool enable)
8905 {
8906 	/*
8907 	 * We have no guarantee that the frontend index maps to the same
8908 	 * backend index - some even map to more than one.
8909 	 *
8910 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8911 	 */
8912 	int irq_type =
8913 		amdgpu_display_crtc_idx_to_irq_type(
8914 			adev,
8915 			acrtc->crtc_id);
8916 
8917 	if (enable) {
8918 		drm_crtc_vblank_on(&acrtc->base);
8919 		amdgpu_irq_get(
8920 			adev,
8921 			&adev->pageflip_irq,
8922 			irq_type);
8923 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8924 		amdgpu_irq_get(
8925 			adev,
8926 			&adev->vline0_irq,
8927 			irq_type);
8928 #endif
8929 	} else {
8930 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8931 		amdgpu_irq_put(
8932 			adev,
8933 			&adev->vline0_irq,
8934 			irq_type);
8935 #endif
8936 		amdgpu_irq_put(
8937 			adev,
8938 			&adev->pageflip_irq,
8939 			irq_type);
8940 		drm_crtc_vblank_off(&acrtc->base);
8941 	}
8942 }
8943 
8944 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8945 				      struct amdgpu_crtc *acrtc)
8946 {
8947 	int irq_type =
8948 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8949 
	/**
	 * This reads the current state for the IRQ and forces the setting
	 * to be reapplied to hardware.
	 */
8954 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8955 }
8956 
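/*
 * Return true when the scaling mode or underscan borders differ between the
 * old and new connector states.
 */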
8957 static bool
8958 is_scaling_state_different(const struct dm_connector_state *dm_state,
8959 			   const struct dm_connector_state *old_dm_state)
8960 {
8961 	if (dm_state->scaling != old_dm_state->scaling)
8962 		return true;
8963 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8964 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8965 			return true;
8966 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8967 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8968 			return true;
8969 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8970 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8971 		return true;
8972 	return false;
8973 }
8974 
8975 #ifdef CONFIG_DRM_AMD_DC_HDCP
8976 static bool is_content_protection_different(struct drm_connector_state *state,
8977 					    const struct drm_connector_state *old_state,
8978 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8979 {
8980 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8981 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8982 
8983 	/* Handle: Type0/1 change */
8984 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8985 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8986 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8987 		return true;
8988 	}
8989 
	/* CP is being re-enabled, ignore this
8991 	 *
8992 	 * Handles:	ENABLED -> DESIRED
8993 	 */
8994 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8995 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8996 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8997 		return false;
8998 	}
8999 
	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
9001 	 *
9002 	 * Handles:	UNDESIRED -> ENABLED
9003 	 */
9004 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
9005 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
9006 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9007 
9008 	/* Stream removed and re-enabled
9009 	 *
9010 	 * Can sometimes overlap with the HPD case,
9011 	 * thus set update_hdcp to false to avoid
9012 	 * setting HDCP multiple times.
9013 	 *
9014 	 * Handles:	DESIRED -> DESIRED (Special case)
9015 	 */
9016 	if (!(old_state->crtc && old_state->crtc->enabled) &&
9017 		state->crtc && state->crtc->enabled &&
9018 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9019 		dm_con_state->update_hdcp = false;
9020 		return true;
9021 	}
9022 
9023 	/* Hot-plug, headless s3, dpms
9024 	 *
9025 	 * Only start HDCP if the display is connected/enabled.
9026 	 * update_hdcp flag will be set to false until the next
9027 	 * HPD comes in.
9028 	 *
9029 	 * Handles:	DESIRED -> DESIRED (Special case)
9030 	 */
9031 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
9032 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
9033 		dm_con_state->update_hdcp = false;
9034 		return true;
9035 	}
9036 
9037 	/*
9038 	 * Handles:	UNDESIRED -> UNDESIRED
9039 	 *		DESIRED -> DESIRED
9040 	 *		ENABLED -> ENABLED
9041 	 */
9042 	if (old_state->content_protection == state->content_protection)
9043 		return false;
9044 
9045 	/*
9046 	 * Handles:	UNDESIRED -> DESIRED
9047 	 *		DESIRED -> UNDESIRED
9048 	 *		ENABLED -> UNDESIRED
9049 	 */
9050 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
9051 		return true;
9052 
9053 	/*
9054 	 * Handles:	DESIRED -> ENABLED
9055 	 */
9056 	return false;
9057 }
9058 
9059 #endif
9060 static void remove_stream(struct amdgpu_device *adev,
9061 			  struct amdgpu_crtc *acrtc,
9062 			  struct dc_stream_state *stream)
9063 {
9064 	/* this is the update mode case */
9065 
9066 	acrtc->otg_inst = -1;
9067 	acrtc->enabled = false;
9068 }
9069 
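/*
 * Translate the cursor plane's CRTC coordinates into a dc_cursor_position,
 * folding negative coordinates into the hotspot so the cursor can be
 * partially off-screen.
 */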
9070 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
9071 			       struct dc_cursor_position *position)
9072 {
9073 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9074 	int x, y;
9075 	int xorigin = 0, yorigin = 0;
9076 
9077 	if (!crtc || !plane->state->fb)
9078 		return 0;
9079 
9080 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
9081 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
9082 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
9083 			  __func__,
9084 			  plane->state->crtc_w,
9085 			  plane->state->crtc_h);
9086 		return -EINVAL;
9087 	}
9088 
9089 	x = plane->state->crtc_x;
9090 	y = plane->state->crtc_y;
9091 
9092 	if (x <= -amdgpu_crtc->max_cursor_width ||
9093 	    y <= -amdgpu_crtc->max_cursor_height)
9094 		return 0;
9095 
9096 	if (x < 0) {
9097 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9098 		x = 0;
9099 	}
9100 	if (y < 0) {
9101 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9102 		y = 0;
9103 	}
9104 	position->enable = true;
9105 	position->translate_by_source = true;
9106 	position->x = x;
9107 	position->y = y;
9108 	position->x_hotspot = xorigin;
9109 	position->y_hotspot = yorigin;
9110 
9111 	return 0;
9112 }
9113 
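/*
 * Program the DC cursor attributes and position for a cursor plane update,
 * or disable the cursor when it has no framebuffer or moved fully
 * off-screen.
 */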
9114 static void handle_cursor_update(struct drm_plane *plane,
9115 				 struct drm_plane_state *old_plane_state)
9116 {
9117 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
9118 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9119 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9120 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9121 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9122 	uint64_t address = afb ? afb->address : 0;
9123 	struct dc_cursor_position position = {0};
9124 	struct dc_cursor_attributes attributes;
9125 	int ret;
9126 
9127 	if (!plane->state->fb && !old_plane_state->fb)
9128 		return;
9129 
9130 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
9131 		      __func__,
9132 		      amdgpu_crtc->crtc_id,
9133 		      plane->state->crtc_w,
9134 		      plane->state->crtc_h);
9135 
9136 	ret = get_cursor_position(plane, crtc, &position);
9137 	if (ret)
9138 		return;
9139 
9140 	if (!position.enable) {
9141 		/* turn off cursor */
9142 		if (crtc_state && crtc_state->stream) {
9143 			mutex_lock(&adev->dm.dc_lock);
9144 			dc_stream_set_cursor_position(crtc_state->stream,
9145 						      &position);
9146 			mutex_unlock(&adev->dm.dc_lock);
9147 		}
9148 		return;
9149 	}
9150 
9151 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
9152 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
9153 
9154 	memset(&attributes, 0, sizeof(attributes));
9155 	attributes.address.high_part = upper_32_bits(address);
9156 	attributes.address.low_part  = lower_32_bits(address);
9157 	attributes.width             = plane->state->crtc_w;
9158 	attributes.height            = plane->state->crtc_h;
9159 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9160 	attributes.rotation_angle    = 0;
9161 	attributes.attribute_flags.value = 0;
9162 
9163 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9164 
9165 	if (crtc_state->stream) {
9166 		mutex_lock(&adev->dm.dc_lock);
9167 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9168 							 &attributes))
9169 			DRM_ERROR("DC failed to set cursor attributes\n");
9170 
9171 		if (!dc_stream_set_cursor_position(crtc_state->stream,
9172 						   &position))
9173 			DRM_ERROR("DC failed to set cursor position\n");
9174 		mutex_unlock(&adev->dm.dc_lock);
9175 	}
9176 }
9177 
9178 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9179 {
9180 
9181 	assert_spin_locked(&acrtc->base.dev->event_lock);
9182 	WARN_ON(acrtc->event);
9183 
9184 	acrtc->event = acrtc->base.state->event;
9185 
9186 	/* Set the flip status */
9187 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9188 
9189 	/* Mark this event as consumed */
9190 	acrtc->base.state->event = NULL;
9191 
9192 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9193 		     acrtc->crtc_id);
9194 }
9195 
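/*
 * Recompute the VRR parameters and the VRR infopacket around a page flip
 * and store them on the stream and CRTC; for ASICs older than the AI
 * family the vmin/vmax adjustment is also applied here while VRR is active.
 */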
9196 static void update_freesync_state_on_stream(
9197 	struct amdgpu_display_manager *dm,
9198 	struct dm_crtc_state *new_crtc_state,
9199 	struct dc_stream_state *new_stream,
9200 	struct dc_plane_state *surface,
9201 	u32 flip_timestamp_in_us)
9202 {
9203 	struct mod_vrr_params vrr_params;
9204 	struct dc_info_packet vrr_infopacket = {0};
9205 	struct amdgpu_device *adev = dm->adev;
9206 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9207 	unsigned long flags;
9208 	bool pack_sdp_v1_3 = false;
9209 
9210 	if (!new_stream)
9211 		return;
9212 
9213 	/*
9214 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9215 	 * For now it's sufficient to just guard against these conditions.
9216 	 */
9217 
9218 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9219 		return;
9220 
9221 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;
9223 
9224 	if (surface) {
9225 		mod_freesync_handle_preflip(
9226 			dm->freesync_module,
9227 			surface,
9228 			new_stream,
9229 			flip_timestamp_in_us,
9230 			&vrr_params);
9231 
9232 		if (adev->family < AMDGPU_FAMILY_AI &&
9233 		    amdgpu_dm_vrr_active(new_crtc_state)) {
9234 			mod_freesync_handle_v_update(dm->freesync_module,
9235 						     new_stream, &vrr_params);
9236 
9237 			/* Need to call this before the frame ends. */
9238 			dc_stream_adjust_vmin_vmax(dm->dc,
9239 						   new_crtc_state->stream,
9240 						   &vrr_params.adjust);
9241 		}
9242 	}
9243 
9244 	mod_freesync_build_vrr_infopacket(
9245 		dm->freesync_module,
9246 		new_stream,
9247 		&vrr_params,
9248 		PACKET_TYPE_VRR,
9249 		TRANSFER_FUNC_UNKNOWN,
9250 		&vrr_infopacket,
9251 		pack_sdp_v1_3);
9252 
9253 	new_crtc_state->freesync_timing_changed |=
9254 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9255 			&vrr_params.adjust,
9256 			sizeof(vrr_params.adjust)) != 0);
9257 
9258 	new_crtc_state->freesync_vrr_info_changed |=
9259 		(memcmp(&new_crtc_state->vrr_infopacket,
9260 			&vrr_infopacket,
9261 			sizeof(vrr_infopacket)) != 0);
9262 
9263 	acrtc->dm_irq_params.vrr_params = vrr_params;
9264 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9265 
9266 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9267 	new_stream->vrr_infopacket = vrr_infopacket;
9268 
9269 	if (new_crtc_state->freesync_vrr_info_changed)
9270 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9271 			      new_crtc_state->base.crtc->base.id,
9272 			      (int)new_crtc_state->base.vrr_enabled,
9273 			      (int)vrr_params.state);
9274 
9275 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9276 }
9277 
9278 static void update_stream_irq_parameters(
9279 	struct amdgpu_display_manager *dm,
9280 	struct dm_crtc_state *new_crtc_state)
9281 {
9282 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9283 	struct mod_vrr_params vrr_params;
9284 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9285 	struct amdgpu_device *adev = dm->adev;
9286 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9287 	unsigned long flags;
9288 
9289 	if (!new_stream)
9290 		return;
9291 
9292 	/*
9293 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9294 	 * For now it's sufficient to just guard against these conditions.
9295 	 */
9296 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9297 		return;
9298 
9299 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9300 	vrr_params = acrtc->dm_irq_params.vrr_params;
9301 
9302 	if (new_crtc_state->vrr_supported &&
9303 	    config.min_refresh_in_uhz &&
9304 	    config.max_refresh_in_uhz) {
9305 		/*
9306 		 * if freesync compatible mode was set, config.state will be set
9307 		 * in atomic check
9308 		 */
9309 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9310 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9311 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9312 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9313 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9314 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9315 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9316 		} else {
9317 			config.state = new_crtc_state->base.vrr_enabled ?
9318 						     VRR_STATE_ACTIVE_VARIABLE :
9319 						     VRR_STATE_INACTIVE;
9320 		}
9321 	} else {
9322 		config.state = VRR_STATE_UNSUPPORTED;
9323 	}
9324 
9325 	mod_freesync_build_vrr_params(dm->freesync_module,
9326 				      new_stream,
9327 				      &config, &vrr_params);
9328 
9329 	new_crtc_state->freesync_timing_changed |=
9330 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9331 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9332 
9333 	new_crtc_state->freesync_config = config;
9334 	/* Copy state for access from DM IRQ handler */
9335 	acrtc->dm_irq_params.freesync_config = config;
9336 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9337 	acrtc->dm_irq_params.vrr_params = vrr_params;
9338 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9339 }
9340 
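/*
 * Take or drop a vblank reference and toggle the VUPDATE interrupt when a
 * CRTC transitions between fixed and variable refresh rate, so vblank
 * timestamps stay valid inside the variable front porch.
 */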
9341 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9342 					    struct dm_crtc_state *new_state)
9343 {
9344 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9345 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9346 
9347 	if (!old_vrr_active && new_vrr_active) {
9348 		/* Transition VRR inactive -> active:
9349 		 * While VRR is active, we must not disable vblank irq, as a
9350 		 * reenable after disable would compute bogus vblank/pflip
9351 		 * timestamps if it likely happened inside display front-porch.
9352 		 *
9353 		 * We also need vupdate irq for the actual core vblank handling
9354 		 * at end of vblank.
9355 		 */
9356 		dm_set_vupdate_irq(new_state->base.crtc, true);
9357 		drm_crtc_vblank_get(new_state->base.crtc);
9358 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9359 				 __func__, new_state->base.crtc->base.id);
9360 	} else if (old_vrr_active && !new_vrr_active) {
9361 		/* Transition VRR active -> inactive:
9362 		 * Allow vblank irq disable again for fixed refresh rate.
9363 		 */
9364 		dm_set_vupdate_irq(new_state->base.crtc, false);
9365 		drm_crtc_vblank_put(new_state->base.crtc);
9366 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9367 				 __func__, new_state->base.crtc->base.id);
9368 	}
9369 }
9370 
9371 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9372 {
9373 	struct drm_plane *plane;
9374 	struct drm_plane_state *old_plane_state;
9375 	int i;
9376 
9377 	/*
9378 	 * TODO: Make this per-stream so we don't issue redundant updates for
9379 	 * commits with multiple streams.
9380 	 */
9381 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9382 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9383 			handle_cursor_update(plane, old_plane_state);
9384 }
9385 
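/*
 * Build a surface/stream update bundle for every plane on the given CRTC,
 * wait for framebuffer fences, throttle flips against the target vblank,
 * then commit the updates to DC and manage PSR entry/exit.
 */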
9386 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9387 				    struct dc_state *dc_state,
9388 				    struct drm_device *dev,
9389 				    struct amdgpu_display_manager *dm,
9390 				    struct drm_crtc *pcrtc,
9391 				    bool wait_for_vblank)
9392 {
9393 	uint32_t i;
9394 	uint64_t timestamp_ns;
9395 	struct drm_plane *plane;
9396 	struct drm_plane_state *old_plane_state, *new_plane_state;
9397 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9398 	struct drm_crtc_state *new_pcrtc_state =
9399 			drm_atomic_get_new_crtc_state(state, pcrtc);
9400 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9401 	struct dm_crtc_state *dm_old_crtc_state =
9402 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9403 	int planes_count = 0, vpos, hpos;
9404 	long r;
9405 	unsigned long flags;
9406 	struct amdgpu_bo *abo;
9407 	uint32_t target_vblank, last_flip_vblank;
9408 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9409 	bool cursor_update = false;
9410 	bool pflip_present = false;
9411 	struct {
9412 		struct dc_surface_update surface_updates[MAX_SURFACES];
9413 		struct dc_plane_info plane_infos[MAX_SURFACES];
9414 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9415 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9416 		struct dc_stream_update stream_update;
9417 	} *bundle;
9418 
9419 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9420 
9421 	if (!bundle) {
9422 		dm_error("Failed to allocate update bundle\n");
9423 		goto cleanup;
9424 	}
9425 
9426 	/*
9427 	 * Disable the cursor first if we're disabling all the planes.
9428 	 * It'll remain on the screen after the planes are re-enabled
9429 	 * if we don't.
9430 	 */
9431 	if (acrtc_state->active_planes == 0)
9432 		amdgpu_dm_commit_cursors(state);
9433 
9434 	/* update planes when needed */
9435 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9436 		struct drm_crtc *crtc = new_plane_state->crtc;
9437 		struct drm_crtc_state *new_crtc_state;
9438 		struct drm_framebuffer *fb = new_plane_state->fb;
9439 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9440 		bool plane_needs_flip;
9441 		struct dc_plane_state *dc_plane;
9442 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9443 
9444 		/* Cursor plane is handled after stream updates */
9445 		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9446 			if ((fb && crtc == pcrtc) ||
9447 			    (old_plane_state->fb && old_plane_state->crtc == pcrtc))
9448 				cursor_update = true;
9449 
9450 			continue;
9451 		}
9452 
9453 		if (!fb || !crtc || pcrtc != crtc)
9454 			continue;
9455 
9456 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9457 		if (!new_crtc_state->active)
9458 			continue;
9459 
9460 		dc_plane = dm_new_plane_state->dc_state;
9461 
9462 		bundle->surface_updates[planes_count].surface = dc_plane;
9463 		if (new_pcrtc_state->color_mgmt_changed) {
9464 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9465 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9466 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9467 		}
9468 
9469 		fill_dc_scaling_info(dm->adev, new_plane_state,
9470 				     &bundle->scaling_infos[planes_count]);
9471 
9472 		bundle->surface_updates[planes_count].scaling_info =
9473 			&bundle->scaling_infos[planes_count];
9474 
9475 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9476 
9477 		pflip_present = pflip_present || plane_needs_flip;
9478 
9479 		if (!plane_needs_flip) {
9480 			planes_count += 1;
9481 			continue;
9482 		}
9483 
9484 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9485 
9486 		/*
9487 		 * Wait for all fences on this FB. Do limited wait to avoid
9488 		 * deadlock during GPU reset when this fence will not signal
9489 		 * but we hold reservation lock for the BO.
9490 		 */
9491 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9492 					  DMA_RESV_USAGE_WRITE, false,
9493 					  msecs_to_jiffies(5000));
9494 		if (unlikely(r <= 0))
9495 			DRM_ERROR("Waiting for fences timed out!");
9496 
9497 		fill_dc_plane_info_and_addr(
9498 			dm->adev, new_plane_state,
9499 			afb->tiling_flags,
9500 			&bundle->plane_infos[planes_count],
9501 			&bundle->flip_addrs[planes_count].address,
9502 			afb->tmz_surface, false);
9503 
9504 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9505 				 new_plane_state->plane->index,
9506 				 bundle->plane_infos[planes_count].dcc.enable);
9507 
9508 		bundle->surface_updates[planes_count].plane_info =
9509 			&bundle->plane_infos[planes_count];
9510 
9511 		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9512 				    new_crtc_state,
9513 				    &bundle->flip_addrs[planes_count]);
9514 
9515 		/*
9516 		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
9518 		 */
9519 		bundle->flip_addrs[planes_count].flip_immediate =
9520 			crtc->state->async_flip &&
9521 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9522 
9523 		timestamp_ns = ktime_get_ns();
9524 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9525 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9526 		bundle->surface_updates[planes_count].surface = dc_plane;
9527 
9528 		if (!bundle->surface_updates[planes_count].surface) {
9529 			DRM_ERROR("No surface for CRTC: id=%d\n",
9530 					acrtc_attach->crtc_id);
9531 			continue;
9532 		}
9533 
9534 		if (plane == pcrtc->primary)
9535 			update_freesync_state_on_stream(
9536 				dm,
9537 				acrtc_state,
9538 				acrtc_state->stream,
9539 				dc_plane,
9540 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9541 
9542 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9543 				 __func__,
9544 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9545 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9546 
9547 		planes_count += 1;
9548 
9549 	}
9550 
9551 	if (pflip_present) {
9552 		if (!vrr_active) {
9553 			/* Use old throttling in non-vrr fixed refresh rate mode
9554 			 * to keep flip scheduling based on target vblank counts
9555 			 * working in a backwards compatible way, e.g., for
9556 			 * clients using the GLX_OML_sync_control extension or
9557 			 * DRI3/Present extension with defined target_msc.
9558 			 */
9559 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		} else {
9562 			/* For variable refresh rate mode only:
9563 			 * Get vblank of last completed flip to avoid > 1 vrr
9564 			 * flips per video frame by use of throttling, but allow
9565 			 * flip programming anywhere in the possibly large
9566 			 * variable vrr vblank interval for fine-grained flip
9567 			 * timing control and more opportunity to avoid stutter
9568 			 * on late submission of flips.
9569 			 */
9570 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9571 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9572 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9573 		}
9574 
9575 		target_vblank = last_flip_vblank + wait_for_vblank;
9576 
9577 		/*
9578 		 * Wait until we're out of the vertical blank period before the one
9579 		 * targeted by the flip
9580 		 */
9581 		while ((acrtc_attach->enabled &&
9582 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9583 							    0, &vpos, &hpos, NULL,
9584 							    NULL, &pcrtc->hwmode)
9585 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9586 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9587 			(int)(target_vblank -
9588 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9589 			usleep_range(1000, 1100);
9590 		}
9591 
9592 		/**
9593 		 * Prepare the flip event for the pageflip interrupt to handle.
9594 		 *
9595 		 * This only works in the case where we've already turned on the
9596 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9597 		 * from 0 -> n planes we have to skip a hardware generated event
9598 		 * and rely on sending it from software.
9599 		 */
9600 		if (acrtc_attach->base.state->event &&
9601 		    acrtc_state->active_planes > 0) {
9602 			drm_crtc_vblank_get(pcrtc);
9603 
9604 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9605 
9606 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9607 			prepare_flip_isr(acrtc_attach);
9608 
9609 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9610 		}
9611 
9612 		if (acrtc_state->stream) {
9613 			if (acrtc_state->freesync_vrr_info_changed)
9614 				bundle->stream_update.vrr_infopacket =
9615 					&acrtc_state->stream->vrr_infopacket;
9616 		}
9617 	} else if (cursor_update && acrtc_state->active_planes > 0 &&
9618 		   acrtc_attach->base.state->event) {
9619 		drm_crtc_vblank_get(pcrtc);
9620 
9621 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9622 
9623 		acrtc_attach->event = acrtc_attach->base.state->event;
9624 		acrtc_attach->base.state->event = NULL;
9625 
9626 		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9627 	}
9628 
9629 	/* Update the planes if changed or disable if we don't have any. */
9630 	if ((planes_count || acrtc_state->active_planes == 0) &&
9631 		acrtc_state->stream) {
9632 		/*
9633 		 * If PSR or idle optimizations are enabled then flush out
9634 		 * any pending work before hardware programming.
9635 		 */
9636 		if (dm->vblank_control_workqueue)
9637 			flush_workqueue(dm->vblank_control_workqueue);
9638 
9639 		bundle->stream_update.stream = acrtc_state->stream;
9640 		if (new_pcrtc_state->mode_changed) {
9641 			bundle->stream_update.src = acrtc_state->stream->src;
9642 			bundle->stream_update.dst = acrtc_state->stream->dst;
9643 		}
9644 
9645 		if (new_pcrtc_state->color_mgmt_changed) {
9646 			/*
9647 			 * TODO: This isn't fully correct since we've actually
9648 			 * already modified the stream in place.
9649 			 */
9650 			bundle->stream_update.gamut_remap =
9651 				&acrtc_state->stream->gamut_remap_matrix;
9652 			bundle->stream_update.output_csc_transform =
9653 				&acrtc_state->stream->csc_color_matrix;
9654 			bundle->stream_update.out_transfer_func =
9655 				acrtc_state->stream->out_transfer_func;
9656 		}
9657 
9658 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9659 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9660 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9661 
9662 		/*
9663 		 * If FreeSync state on the stream has changed then we need to
9664 		 * re-adjust the min/max bounds now that DC doesn't handle this
9665 		 * as part of commit.
9666 		 */
9667 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9668 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9669 			dc_stream_adjust_vmin_vmax(
9670 				dm->dc, acrtc_state->stream,
9671 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9672 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9673 		}
9674 		mutex_lock(&dm->dc_lock);
9675 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9676 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9677 			amdgpu_dm_psr_disable(acrtc_state->stream);
9678 
9679 		dc_commit_updates_for_stream(dm->dc,
9680 						     bundle->surface_updates,
9681 						     planes_count,
9682 						     acrtc_state->stream,
9683 						     &bundle->stream_update,
9684 						     dc_state);
9685 
9686 		/**
9687 		 * Enable or disable the interrupts on the backend.
9688 		 *
9689 		 * Most pipes are put into power gating when unused.
9690 		 *
9691 		 * When power gating is enabled on a pipe we lose the
9692 		 * interrupt enablement state when power gating is disabled.
9693 		 *
9694 		 * So we need to update the IRQ control state in hardware
9695 		 * whenever the pipe turns on (since it could be previously
9696 		 * power gated) or off (since some pipes can't be power gated
9697 		 * on some ASICs).
9698 		 */
9699 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9700 			dm_update_pflip_irq_state(drm_to_adev(dev),
9701 						  acrtc_attach);
9702 
9703 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9704 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9705 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9706 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9707 
9708 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9709 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9710 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9711 			struct amdgpu_dm_connector *aconn =
9712 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9713 
9714 			if (aconn->psr_skip_count > 0)
9715 				aconn->psr_skip_count--;
9716 
9717 			/* Allow PSR when skip count is 0. */
9718 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9719 
9720 			/*
9721 			 * If sink supports PSR SU, there is no need to rely on
9722 			 * a vblank event disable request to enable PSR. PSR SU
9723 			 * can be enabled immediately once OS demonstrates an
9724 			 * adequate number of fast atomic commits to notify KMD
9725 			 * of update events. See `vblank_control_worker()`.
9726 			 */
9727 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9728 			    acrtc_attach->dm_irq_params.allow_psr_entry &&
9729 			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
9730 				amdgpu_dm_psr_enable(acrtc_state->stream);
9731 		} else {
9732 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9733 		}
9734 
9735 		mutex_unlock(&dm->dc_lock);
9736 	}
9737 
9738 	/*
9739 	 * Update cursor state *after* programming all the planes.
9740 	 * This avoids redundant programming in the case where we're going
9741 	 * to be disabling a single plane - those pipes are being disabled.
9742 	 */
9743 	if (acrtc_state->active_planes)
9744 		amdgpu_dm_commit_cursors(state);
9745 
9746 cleanup:
9747 	kfree(bundle);
9748 }
9749 
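/*
 * Notify the audio component about ELD changes: first for connectors whose
 * CRTC assignment changed or was removed, then for connectors that gained
 * an active stream after a modeset.
 */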
9750 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9751 				   struct drm_atomic_state *state)
9752 {
9753 	struct amdgpu_device *adev = drm_to_adev(dev);
9754 	struct amdgpu_dm_connector *aconnector;
9755 	struct drm_connector *connector;
9756 	struct drm_connector_state *old_con_state, *new_con_state;
9757 	struct drm_crtc_state *new_crtc_state;
9758 	struct dm_crtc_state *new_dm_crtc_state;
9759 	const struct dc_stream_status *status;
9760 	int i, inst;
9761 
9762 	/* Notify device removals. */
9763 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9764 		if (old_con_state->crtc != new_con_state->crtc) {
9765 			/* CRTC changes require notification. */
9766 			goto notify;
9767 		}
9768 
9769 		if (!new_con_state->crtc)
9770 			continue;
9771 
9772 		new_crtc_state = drm_atomic_get_new_crtc_state(
9773 			state, new_con_state->crtc);
9774 
9775 		if (!new_crtc_state)
9776 			continue;
9777 
9778 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9779 			continue;
9780 
9781 	notify:
9782 		aconnector = to_amdgpu_dm_connector(connector);
9783 
9784 		mutex_lock(&adev->dm.audio_lock);
9785 		inst = aconnector->audio_inst;
9786 		aconnector->audio_inst = -1;
9787 		mutex_unlock(&adev->dm.audio_lock);
9788 
9789 		amdgpu_dm_audio_eld_notify(adev, inst);
9790 	}
9791 
9792 	/* Notify audio device additions. */
9793 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9794 		if (!new_con_state->crtc)
9795 			continue;
9796 
9797 		new_crtc_state = drm_atomic_get_new_crtc_state(
9798 			state, new_con_state->crtc);
9799 
9800 		if (!new_crtc_state)
9801 			continue;
9802 
9803 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9804 			continue;
9805 
9806 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9807 		if (!new_dm_crtc_state->stream)
9808 			continue;
9809 
9810 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9811 		if (!status)
9812 			continue;
9813 
9814 		aconnector = to_amdgpu_dm_connector(connector);
9815 
9816 		mutex_lock(&adev->dm.audio_lock);
9817 		inst = status->audio_inst;
9818 		aconnector->audio_inst = inst;
9819 		mutex_unlock(&adev->dm.audio_lock);
9820 
9821 		amdgpu_dm_audio_eld_notify(adev, inst);
9822 	}
9823 }
9824 
/**
9826  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9827  * @crtc_state: the DRM CRTC state
9828  * @stream_state: the DC stream state.
9829  *
9830  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9831  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9832  */
9833 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9834 						struct dc_stream_state *stream_state)
9835 {
9836 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9837 }
9838 
9839 /**
9840  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9841  * @state: The atomic state to commit
9842  *
9843  * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
9845  * atomic check should have filtered anything non-kosher.
9846  */
9847 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9848 {
9849 	struct drm_device *dev = state->dev;
9850 	struct amdgpu_device *adev = drm_to_adev(dev);
9851 	struct amdgpu_display_manager *dm = &adev->dm;
9852 	struct dm_atomic_state *dm_state;
9853 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9854 	uint32_t i, j;
9855 	struct drm_crtc *crtc;
9856 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9857 	unsigned long flags;
9858 	bool wait_for_vblank = true;
9859 	struct drm_connector *connector;
9860 	struct drm_connector_state *old_con_state, *new_con_state;
9861 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9862 	int crtc_disable_count = 0;
9863 	bool mode_set_reset_required = false;
9864 
9865 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9866 
9867 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9868 
9869 	dm_state = dm_atomic_get_new_state(state);
9870 	if (dm_state && dm_state->context) {
9871 		dc_state = dm_state->context;
9872 	} else {
9873 		/* No state changes, retain current state. */
9874 		dc_state_temp = dc_create_state(dm->dc);
9875 		ASSERT(dc_state_temp);
9876 		dc_state = dc_state_temp;
9877 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9878 	}
9879 
9880 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9881 				       new_crtc_state, i) {
9882 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9883 
9884 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9885 
9886 		if (old_crtc_state->active &&
9887 		    (!new_crtc_state->active ||
9888 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9889 			manage_dm_interrupts(adev, acrtc, false);
9890 			dc_stream_release(dm_old_crtc_state->stream);
9891 		}
9892 	}
9893 
9894 	drm_atomic_helper_calc_timestamping_constants(state);
9895 
9896 	/* update changed items */
9897 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9898 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9899 
9900 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9901 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9902 
9903 		drm_dbg_state(state->dev,
9904 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9905 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9906 			"connectors_changed:%d\n",
9907 			acrtc->crtc_id,
9908 			new_crtc_state->enable,
9909 			new_crtc_state->active,
9910 			new_crtc_state->planes_changed,
9911 			new_crtc_state->mode_changed,
9912 			new_crtc_state->active_changed,
9913 			new_crtc_state->connectors_changed);
9914 
9915 		/* Disable cursor if disabling crtc */
9916 		if (old_crtc_state->active && !new_crtc_state->active) {
9917 			struct dc_cursor_position position;
9918 
9919 			memset(&position, 0, sizeof(position));
9920 			mutex_lock(&dm->dc_lock);
9921 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9922 			mutex_unlock(&dm->dc_lock);
9923 		}
9924 
9925 		/* Copy all transient state flags into dc state */
9926 		if (dm_new_crtc_state->stream) {
9927 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9928 							    dm_new_crtc_state->stream);
9929 		}
9930 
9931 		/* handles headless hotplug case, updating new_state and
9932 		 * aconnector as needed
9933 		 */
9934 
9935 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9936 
9937 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9938 
9939 			if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with
				 * userspace notification delivery. In this case
				 * userspace tries to set a mode on a display
				 * that is in fact disconnected; dc_sink is NULL
				 * on the aconnector and we expect a mode reset
				 * to come soon.
				 *
				 * This can also happen when an unplug occurs
				 * while the resume sequence is still running.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
9955 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9956 						__func__, acrtc->base.base.id);
9957 				continue;
9958 			}
9959 
9960 			if (dm_old_crtc_state->stream)
9961 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9962 
9963 			pm_runtime_get_noresume(dev->dev);
9964 
9965 			acrtc->enabled = true;
9966 			acrtc->hw_mode = new_crtc_state->mode;
9967 			crtc->hwmode = new_crtc_state->mode;
9968 			mode_set_reset_required = true;
9969 		} else if (modereset_required(new_crtc_state)) {
9970 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9971 			/* i.e. reset mode */
9972 			if (dm_old_crtc_state->stream)
9973 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9974 
9975 			mode_set_reset_required = true;
9976 		}
9977 	} /* for_each_crtc_in_state() */
9978 
9979 	if (dc_state) {
		/* If there is a mode set or reset, disable eDP PSR. */
9981 		if (mode_set_reset_required) {
9982 			if (dm->vblank_control_workqueue)
9983 				flush_workqueue(dm->vblank_control_workqueue);
9984 
9985 			amdgpu_dm_psr_disable_all(dm);
9986 		}
9987 
9988 		dm_enable_per_frame_crtc_master_sync(dc_state);
9989 		mutex_lock(&dm->dc_lock);
9990 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9991 
9992 		/* Allow idle optimization when vblank count is 0 for display off */
9993 		if (dm->active_vblank_irq_count == 0)
9994 			dc_allow_idle_optimizations(dm->dc, true);
9995 		mutex_unlock(&dm->dc_lock);
9996 	}
9997 
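	/* Record the OTG instance driving each CRTC from its committed stream status. */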
9998 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9999 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10000 
10001 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10002 
10003 		if (dm_new_crtc_state->stream != NULL) {
10004 			const struct dc_stream_status *status =
10005 					dc_stream_get_status(dm_new_crtc_state->stream);
10006 
10007 			if (!status)
10008 				status = dc_stream_get_status_from_state(dc_state,
10009 									 dm_new_crtc_state->stream);
10010 			if (!status)
10011 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
10012 			else
10013 				acrtc->otg_inst = status->primary_otg_inst;
10014 		}
10015 	}
10016 #ifdef CONFIG_DRM_AMD_DC_HDCP
10017 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10018 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10019 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10020 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10021 
10022 		new_crtc_state = NULL;
10023 
10024 		if (acrtc)
10025 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10026 
10027 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10028 
10029 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
10030 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
10031 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
10032 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
10033 			dm_new_con_state->update_hdcp = true;
10034 			continue;
10035 		}
10036 
10037 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
10038 			hdcp_update_display(
10039 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10040 				new_con_state->hdcp_content_type,
10041 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
10042 	}
10043 #endif
10044 
10045 	/* Handle connector state changes */
10046 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10047 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10048 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10049 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10050 		struct dc_surface_update dummy_updates[MAX_SURFACES];
10051 		struct dc_stream_update stream_update;
10052 		struct dc_info_packet hdr_packet;
10053 		struct dc_stream_status *status = NULL;
10054 		bool abm_changed, hdr_changed, scaling_changed;
10055 
10056 		memset(&dummy_updates, 0, sizeof(dummy_updates));
10057 		memset(&stream_update, 0, sizeof(stream_update));
10058 
10059 		if (acrtc) {
10060 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10061 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10062 		}
10063 
10064 		/* Skip any modesets/resets */
10065 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10066 			continue;
10067 
10068 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10069 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10070 
10071 		scaling_changed = is_scaling_state_different(dm_new_con_state,
10072 							     dm_old_con_state);
10073 
10074 		abm_changed = dm_new_crtc_state->abm_level !=
10075 			      dm_old_crtc_state->abm_level;
10076 
10077 		hdr_changed =
10078 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10079 
10080 		if (!scaling_changed && !abm_changed && !hdr_changed)
10081 			continue;
10082 
10083 		stream_update.stream = dm_new_crtc_state->stream;
10084 		if (scaling_changed) {
10085 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10086 					dm_new_con_state, dm_new_crtc_state->stream);
10087 
10088 			stream_update.src = dm_new_crtc_state->stream->src;
10089 			stream_update.dst = dm_new_crtc_state->stream->dst;
10090 		}
10091 
10092 		if (abm_changed) {
10093 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10094 
10095 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
10096 		}
10097 
10098 		if (hdr_changed) {
10099 			fill_hdr_info_packet(new_con_state, &hdr_packet);
10100 			stream_update.hdr_static_metadata = &hdr_packet;
10101 		}
10102 
10103 		status = dc_stream_get_status(dm_new_crtc_state->stream);
10104 
10105 		if (WARN_ON(!status))
10106 			continue;
10107 
10108 		WARN_ON(!status->plane_count);
10109 
10110 		/*
10111 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10112 		 * Here we create an empty update on each plane.
10113 		 * To fix this, DC should permit updating only stream properties.
10114 		 */
10115 		for (j = 0; j < status->plane_count; j++)
10116 			dummy_updates[j].surface = status->plane_states[0];
10117 
10118 
10119 		mutex_lock(&dm->dc_lock);
10120 		dc_commit_updates_for_stream(dm->dc,
10121 						     dummy_updates,
10122 						     status->plane_count,
10123 						     dm_new_crtc_state->stream,
10124 						     &stream_update,
10125 						     dc_state);
10126 		mutex_unlock(&dm->dc_lock);
10127 	}
10128 
10129 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
10130 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10131 				      new_crtc_state, i) {
10132 		if (old_crtc_state->active && !new_crtc_state->active)
10133 			crtc_disable_count++;
10134 
10135 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10136 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10137 
10138 		/* For freesync config update on crtc state and params for irq */
10139 		update_stream_irq_parameters(dm, dm_new_crtc_state);
10140 
10141 		/* Handle vrr on->off / off->on transitions */
10142 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10143 						dm_new_crtc_state);
10144 	}
10145 
	/**
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front end
	 * state has been modified, so the OTG is already on and the IRQ
	 * handlers do not access stale or invalid state.
	 */
10152 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10153 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10154 #ifdef CONFIG_DEBUG_FS
10155 		bool configure_crc = false;
10156 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
10157 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10158 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10159 #endif
10160 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10161 		cur_crc_src = acrtc->dm_irq_params.crc_src;
10162 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10163 #endif
10164 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10165 
10166 		if (new_crtc_state->active &&
10167 		    (!old_crtc_state->active ||
10168 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10169 			dc_stream_retain(dm_new_crtc_state->stream);
10170 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10171 			manage_dm_interrupts(adev, acrtc, true);
10172 
10173 #ifdef CONFIG_DEBUG_FS
10174 			/**
10175 			 * Frontend may have changed so reapply the CRC capture
10176 			 * settings for the stream.
10177 			 */
10178 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10179 
10180 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10181 				configure_crc = true;
10182 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10183 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
10184 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10185 					acrtc->dm_irq_params.crc_window.update_win = true;
10186 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10187 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10188 					crc_rd_wrk->crtc = crtc;
10189 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10190 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10191 				}
10192 #endif
10193 			}
10194 
10195 			if (configure_crc)
10196 				if (amdgpu_dm_crtc_configure_crc_source(
10197 					crtc, dm_new_crtc_state, cur_crc_src))
10198 					DRM_DEBUG_DRIVER("Failed to configure crc source");
10199 #endif
10200 		}
10201 	}
10202 
10203 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10204 		if (new_crtc_state->async_flip)
10205 			wait_for_vblank = false;
10206 
10207 	/* update planes when needed per crtc*/
10208 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10209 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10210 
10211 		if (dm_new_crtc_state->stream)
10212 			amdgpu_dm_commit_planes(state, dc_state, dev,
10213 						dm, crtc, wait_for_vblank);
10214 	}
10215 
10216 	/* Update audio instances for each connector. */
10217 	amdgpu_dm_commit_audio(dev, state);
10218 
10219 	/* restore the backlight level */
10220 	for (i = 0; i < dm->num_of_edps; i++) {
10221 		if (dm->backlight_dev[i] &&
10222 		    (dm->actual_brightness[i] != dm->brightness[i]))
10223 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10224 	}
10225 
	/*
	 * Send a vblank event for any CRTC event not handled in the flip path,
	 * and mark the event consumed for drm_atomic_helper_commit_hw_done().
	 */
10230 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10231 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10232 
10233 		if (new_crtc_state->event)
10234 			drm_send_event_locked(dev, &new_crtc_state->event->base);
10235 
10236 		new_crtc_state->event = NULL;
10237 	}
10238 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10239 
10240 	/* Signal HW programming completion */
10241 	drm_atomic_helper_commit_hw_done(state);
10242 
10243 	if (wait_for_vblank)
10244 		drm_atomic_helper_wait_for_flip_done(dev, state);
10245 
10246 	drm_atomic_helper_cleanup_planes(dev, state);
10247 
	/* Return the stolen VGA memory back to VRAM */
10249 	if (!adev->mman.keep_stolen_vga_memory)
10250 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10251 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10252 
10253 	/*
10254 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10255 	 * so we can put the GPU into runtime suspend if we're not driving any
10256 	 * displays anymore
10257 	 */
10258 	for (i = 0; i < crtc_disable_count; i++)
10259 		pm_runtime_put_autosuspend(dev->dev);
10260 	pm_runtime_mark_last_busy(dev->dev);
10261 
10262 	if (dc_state_temp)
10263 		dc_release_state(dc_state_temp);
10264 }
10265 
10266 
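/*
 * Build a minimal atomic state for the CRTC currently driving @connector
 * (connector, CRTC and primary plane) with mode_changed forced, and commit it
 * to restore the previous display configuration without userspace involvement.
 */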
10267 static int dm_force_atomic_commit(struct drm_connector *connector)
10268 {
10269 	int ret = 0;
10270 	struct drm_device *ddev = connector->dev;
10271 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10272 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10273 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10274 	struct drm_connector_state *conn_state;
10275 	struct drm_crtc_state *crtc_state;
10276 	struct drm_plane_state *plane_state;
10277 
10278 	if (!state)
10279 		return -ENOMEM;
10280 
10281 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10282 
	/* Construct an atomic state to restore the previous display settings */
10284 
10285 	/*
10286 	 * Attach connectors to drm_atomic_state
10287 	 */
10288 	conn_state = drm_atomic_get_connector_state(state, connector);
10289 
10290 	ret = PTR_ERR_OR_ZERO(conn_state);
10291 	if (ret)
10292 		goto out;
10293 
	/* Attach CRTC to drm_atomic_state */
10295 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
10296 
10297 	ret = PTR_ERR_OR_ZERO(crtc_state);
10298 	if (ret)
10299 		goto out;
10300 
10301 	/* force a restore */
10302 	crtc_state->mode_changed = true;
10303 
10304 	/* Attach plane to drm_atomic_state */
10305 	plane_state = drm_atomic_get_plane_state(state, plane);
10306 
10307 	ret = PTR_ERR_OR_ZERO(plane_state);
10308 	if (ret)
10309 		goto out;
10310 
10311 	/* Call commit internally with the state we just constructed */
10312 	ret = drm_atomic_commit(state);
10313 
10314 out:
10315 	drm_atomic_state_put(state);
10316 	if (ret)
10317 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10318 
10319 	return ret;
10320 }
10321 
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
10327 void dm_restore_drm_connector_state(struct drm_device *dev,
10328 				    struct drm_connector *connector)
10329 {
10330 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10331 	struct amdgpu_crtc *disconnected_acrtc;
10332 	struct dm_crtc_state *acrtc_state;
10333 
10334 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10335 		return;
10336 
10337 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10338 	if (!disconnected_acrtc)
10339 		return;
10340 
10341 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10342 	if (!acrtc_state->stream)
10343 		return;
10344 
	/*
	 * If the previous sink has not been released and differs from the
	 * current one, we deduce that we cannot rely on a usermode call to
	 * turn on the display, so we do it here.
	 */
10350 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10351 		dm_force_atomic_commit(&aconnector->base);
10352 }
10353 
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
10358 static int do_aquire_global_lock(struct drm_device *dev,
10359 				 struct drm_atomic_state *state)
10360 {
10361 	struct drm_crtc *crtc;
10362 	struct drm_crtc_commit *commit;
10363 	long ret;
10364 
	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases it, the extra locks we take here are released
	 * too.
	 */
10370 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10371 	if (ret)
10372 		return ret;
10373 
10374 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10375 		spin_lock(&crtc->commit_lock);
10376 		commit = list_first_entry_or_null(&crtc->commit_list,
10377 				struct drm_crtc_commit, commit_entry);
10378 		if (commit)
10379 			drm_crtc_commit_get(commit);
10380 		spin_unlock(&crtc->commit_lock);
10381 
10382 		if (!commit)
10383 			continue;
10384 
		/*
		 * Make sure all pending HW programming has completed and
		 * page flips are done.
		 */
10389 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10390 
10391 		if (ret > 0)
10392 			ret = wait_for_completion_interruptible_timeout(
10393 					&commit->flip_done, 10*HZ);
10394 
10395 		if (ret == 0)
10396 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10397 				  "timed out\n", crtc->base.id, crtc->name);
10398 
10399 		drm_crtc_commit_put(commit);
10400 	}
10401 
10402 	return ret < 0 ? ret : 0;
10403 }
10404 
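/*
 * Derive the freesync/VRR configuration for a CRTC from the connector's
 * supported refresh-rate range and the requested VRR state, and store it in
 * the new CRTC state.
 */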
10405 static void get_freesync_config_for_crtc(
10406 	struct dm_crtc_state *new_crtc_state,
10407 	struct dm_connector_state *new_con_state)
10408 {
10409 	struct mod_freesync_config config = {0};
10410 	struct amdgpu_dm_connector *aconnector =
10411 			to_amdgpu_dm_connector(new_con_state->base.connector);
10412 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10413 	int vrefresh = drm_mode_vrefresh(mode);
10414 	bool fs_vid_mode = false;
10415 
10416 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10417 					vrefresh >= aconnector->min_vfreq &&
10418 					vrefresh <= aconnector->max_vfreq;
10419 
10420 	if (new_crtc_state->vrr_supported) {
10421 		new_crtc_state->stream->ignore_msa_timing_param = true;
10422 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10423 
10424 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10425 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10426 		config.vsif_supported = true;
10427 		config.btr = true;
10428 
10429 		if (fs_vid_mode) {
10430 			config.state = VRR_STATE_ACTIVE_FIXED;
10431 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10432 			goto out;
10433 		} else if (new_crtc_state->base.vrr_enabled) {
10434 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10435 		} else {
10436 			config.state = VRR_STATE_INACTIVE;
10437 		}
10438 	}
10439 out:
10440 	new_crtc_state->freesync_config = config;
10441 }
10442 
10443 static void reset_freesync_config_for_crtc(
10444 	struct dm_crtc_state *new_crtc_state)
10445 {
10446 	new_crtc_state->vrr_supported = false;
10447 
10448 	memset(&new_crtc_state->vrr_infopacket, 0,
10449 	       sizeof(new_crtc_state->vrr_infopacket));
10450 }
10451 
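/*
 * Returns true when only the vertical blanking differs between the old and
 * new modes (same pixel clock, horizontal timing and vsync pulse width), i.e.
 * the change can be absorbed as a front porch adjustment for freesync.
 */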
10452 static bool
10453 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10454 				 struct drm_crtc_state *new_crtc_state)
10455 {
10456 	const struct drm_display_mode *old_mode, *new_mode;
10457 
10458 	if (!old_crtc_state || !new_crtc_state)
10459 		return false;
10460 
10461 	old_mode = &old_crtc_state->mode;
10462 	new_mode = &new_crtc_state->mode;
10463 
10464 	if (old_mode->clock       == new_mode->clock &&
10465 	    old_mode->hdisplay    == new_mode->hdisplay &&
10466 	    old_mode->vdisplay    == new_mode->vdisplay &&
10467 	    old_mode->htotal      == new_mode->htotal &&
10468 	    old_mode->vtotal      != new_mode->vtotal &&
10469 	    old_mode->hsync_start == new_mode->hsync_start &&
10470 	    old_mode->vsync_start != new_mode->vsync_start &&
10471 	    old_mode->hsync_end   == new_mode->hsync_end &&
10472 	    old_mode->vsync_end   != new_mode->vsync_end &&
10473 	    old_mode->hskew       == new_mode->hskew &&
10474 	    old_mode->vscan       == new_mode->vscan &&
10475 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10476 	    (new_mode->vsync_end - new_mode->vsync_start))
10477 		return true;
10478 
10479 	return false;
10480 }
10481 
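/*
 * Latch the current mode's refresh rate as the fixed freesync rate:
 * fixed_refresh_in_uhz = (clock_in_khz * 1000 * 1000000) / (htotal * vtotal).
 */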
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10483 	uint64_t num, den, res;
10484 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10485 
10486 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10487 
10488 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10489 	den = (unsigned long long)new_crtc_state->mode.htotal *
10490 	      (unsigned long long)new_crtc_state->mode.vtotal;
10491 
10492 	res = div_u64(num, den);
10493 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10494 }
10495 
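/*
 * Validate the requested CRTC change and mirror it into the private dc
 * context by adding or removing the corresponding dc stream. Sets
 * *lock_and_validation_needed when the change requires global DC validation.
 */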
10496 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10497 			 struct drm_atomic_state *state,
10498 			 struct drm_crtc *crtc,
10499 			 struct drm_crtc_state *old_crtc_state,
10500 			 struct drm_crtc_state *new_crtc_state,
10501 			 bool enable,
10502 			 bool *lock_and_validation_needed)
10503 {
10504 	struct dm_atomic_state *dm_state = NULL;
10505 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10506 	struct dc_stream_state *new_stream;
10507 	int ret = 0;
10508 
10509 	/*
10510 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10511 	 * update changed items
10512 	 */
10513 	struct amdgpu_crtc *acrtc = NULL;
10514 	struct amdgpu_dm_connector *aconnector = NULL;
10515 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10516 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10517 
10518 	new_stream = NULL;
10519 
10520 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10521 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10522 	acrtc = to_amdgpu_crtc(crtc);
10523 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10524 
10525 	/* TODO This hack should go away */
10526 	if (aconnector && enable) {
10527 		/* Make sure fake sink is created in plug-in scenario */
10528 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10529 							    &aconnector->base);
10530 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10531 							    &aconnector->base);
10532 
10533 		if (IS_ERR(drm_new_conn_state)) {
10534 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10535 			goto fail;
10536 		}
10537 
10538 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10539 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10540 
10541 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10542 			goto skip_modeset;
10543 
10544 		new_stream = create_validate_stream_for_sink(aconnector,
10545 							     &new_crtc_state->mode,
10546 							     dm_new_conn_state,
10547 							     dm_old_crtc_state->stream);
10548 
		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3. In this case it is not an
		 * error; the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */
10555 
10556 		if (!new_stream) {
10557 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10558 					__func__, acrtc->base.base.id);
10559 			ret = -ENOMEM;
10560 			goto fail;
10561 		}
10562 
10563 		/*
10564 		 * TODO: Check VSDB bits to decide whether this should
10565 		 * be enabled or not.
10566 		 */
10567 		new_stream->triggered_crtc_reset.enabled =
10568 			dm->force_timing_sync;
10569 
10570 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10571 
10572 		ret = fill_hdr_info_packet(drm_new_conn_state,
10573 					   &new_stream->hdr_static_metadata);
10574 		if (ret)
10575 			goto fail;
10576 
10577 		/*
10578 		 * If we already removed the old stream from the context
10579 		 * (and set the new stream to NULL) then we can't reuse
10580 		 * the old stream even if the stream and scaling are unchanged.
10581 		 * We'll hit the BUG_ON and black screen.
10582 		 *
10583 		 * TODO: Refactor this function to allow this check to work
10584 		 * in all conditions.
10585 		 */
10586 		if (dm_new_crtc_state->stream &&
10587 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10588 			goto skip_modeset;
10589 
10590 		if (dm_new_crtc_state->stream &&
10591 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10592 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10593 			new_crtc_state->mode_changed = false;
10594 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10595 					 new_crtc_state->mode_changed);
10596 		}
10597 	}
10598 
10599 	/* mode_changed flag may get updated above, need to check again */
10600 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10601 		goto skip_modeset;
10602 
10603 	drm_dbg_state(state->dev,
10604 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10605 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10606 		"connectors_changed:%d\n",
10607 		acrtc->crtc_id,
10608 		new_crtc_state->enable,
10609 		new_crtc_state->active,
10610 		new_crtc_state->planes_changed,
10611 		new_crtc_state->mode_changed,
10612 		new_crtc_state->active_changed,
10613 		new_crtc_state->connectors_changed);
10614 
10615 	/* Remove stream for any changed/disabled CRTC */
10616 	if (!enable) {
10617 
10618 		if (!dm_old_crtc_state->stream)
10619 			goto skip_modeset;
10620 
10621 		if (dm_new_crtc_state->stream &&
10622 		    is_timing_unchanged_for_freesync(new_crtc_state,
10623 						     old_crtc_state)) {
10624 			new_crtc_state->mode_changed = false;
10625 			DRM_DEBUG_DRIVER(
10626 				"Mode change not required for front porch change, "
10627 				"setting mode_changed to %d",
10628 				new_crtc_state->mode_changed);
10629 
10630 			set_freesync_fixed_config(dm_new_crtc_state);
10631 
10632 			goto skip_modeset;
10633 		} else if (aconnector &&
10634 			   is_freesync_video_mode(&new_crtc_state->mode,
10635 						  aconnector)) {
10636 			struct drm_display_mode *high_mode;
10637 
10638 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10639 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10640 				set_freesync_fixed_config(dm_new_crtc_state);
10641 			}
10642 		}
10643 
10644 		ret = dm_atomic_get_state(state, &dm_state);
10645 		if (ret)
10646 			goto fail;
10647 
10648 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10649 				crtc->base.id);
10650 
10651 		/* i.e. reset mode */
10652 		if (dc_remove_stream_from_ctx(
10653 				dm->dc,
10654 				dm_state->context,
10655 				dm_old_crtc_state->stream) != DC_OK) {
10656 			ret = -EINVAL;
10657 			goto fail;
10658 		}
10659 
10660 		dc_stream_release(dm_old_crtc_state->stream);
10661 		dm_new_crtc_state->stream = NULL;
10662 
10663 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10664 
10665 		*lock_and_validation_needed = true;
10666 
10667 	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer dereference on new_stream
		 * when added MST connectors are not found in the existing
		 * crtc_state in chained mode.
		 * TODO: need to dig out the root cause of this.
		 */
10673 		if (!aconnector)
10674 			goto skip_modeset;
10675 
10676 		if (modereset_required(new_crtc_state))
10677 			goto skip_modeset;
10678 
10679 		if (modeset_required(new_crtc_state, new_stream,
10680 				     dm_old_crtc_state->stream)) {
10681 
10682 			WARN_ON(dm_new_crtc_state->stream);
10683 
10684 			ret = dm_atomic_get_state(state, &dm_state);
10685 			if (ret)
10686 				goto fail;
10687 
10688 			dm_new_crtc_state->stream = new_stream;
10689 
10690 			dc_stream_retain(new_stream);
10691 
10692 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10693 					 crtc->base.id);
10694 
10695 			if (dc_add_stream_to_ctx(
10696 					dm->dc,
10697 					dm_state->context,
10698 					dm_new_crtc_state->stream) != DC_OK) {
10699 				ret = -EINVAL;
10700 				goto fail;
10701 			}
10702 
10703 			*lock_and_validation_needed = true;
10704 		}
10705 	}
10706 
10707 skip_modeset:
10708 	/* Release extra reference */
10709 	if (new_stream)
		dc_stream_release(new_stream);
10711 
10712 	/*
10713 	 * We want to do dc stream updates that do not require a
10714 	 * full modeset below.
10715 	 */
10716 	if (!(enable && aconnector && new_crtc_state->active))
10717 		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (the stream has just been
	 *    added to the dc context, or is already in the context),
	 * 2. The CRTC has a valid connector attached, and
	 * 3. The CRTC is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
10726 	BUG_ON(dm_new_crtc_state->stream == NULL);
10727 
10728 	/* Scaling or underscan settings */
10729 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10730 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10731 		update_stream_scaling_settings(
10732 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10733 
10734 	/* ABM settings */
10735 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10736 
10737 	/*
10738 	 * Color management settings. We also update color properties
10739 	 * when a modeset is needed, to ensure it gets reprogrammed.
10740 	 */
10741 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10742 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10743 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10744 		if (ret)
10745 			goto fail;
10746 	}
10747 
10748 	/* Update Freesync settings. */
10749 	get_freesync_config_for_crtc(dm_new_crtc_state,
10750 				     dm_new_conn_state);
10751 
10752 	return ret;
10753 
10754 fail:
10755 	if (new_stream)
10756 		dc_stream_release(new_stream);
10757 	return ret;
10758 }
10759 
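/*
 * Decide whether a plane change requires removing and recreating all planes
 * on the stream (modesets, z-order, scaling, rotation, blending, pixel format,
 * tiling or DCC changes) rather than a simple surface update.
 */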
10760 static bool should_reset_plane(struct drm_atomic_state *state,
10761 			       struct drm_plane *plane,
10762 			       struct drm_plane_state *old_plane_state,
10763 			       struct drm_plane_state *new_plane_state)
10764 {
10765 	struct drm_plane *other;
10766 	struct drm_plane_state *old_other_state, *new_other_state;
10767 	struct drm_crtc_state *new_crtc_state;
10768 	int i;
10769 
	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
10775 	if (state->allow_modeset)
10776 		return true;
10777 
10778 	/* Exit early if we know that we're adding or removing the plane. */
10779 	if (old_plane_state->crtc != new_plane_state->crtc)
10780 		return true;
10781 
10782 	/* old crtc == new_crtc == NULL, plane not in context. */
10783 	if (!new_plane_state->crtc)
10784 		return false;
10785 
10786 	new_crtc_state =
10787 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10788 
10789 	if (!new_crtc_state)
10790 		return true;
10791 
10792 	/* CRTC Degamma changes currently require us to recreate planes. */
10793 	if (new_crtc_state->color_mgmt_changed)
10794 		return true;
10795 
10796 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10797 		return true;
10798 
10799 	/*
10800 	 * If there are any new primary or overlay planes being added or
10801 	 * removed then the z-order can potentially change. To ensure
10802 	 * correct z-order and pipe acquisition the current DC architecture
10803 	 * requires us to remove and recreate all existing planes.
10804 	 *
10805 	 * TODO: Come up with a more elegant solution for this.
10806 	 */
10807 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
10810 			continue;
10811 
10812 		if (old_other_state->crtc != new_plane_state->crtc &&
10813 		    new_other_state->crtc != new_plane_state->crtc)
10814 			continue;
10815 
10816 		if (old_other_state->crtc != new_other_state->crtc)
10817 			return true;
10818 
10819 		/* Src/dst size and scaling updates. */
10820 		if (old_other_state->src_w != new_other_state->src_w ||
10821 		    old_other_state->src_h != new_other_state->src_h ||
10822 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10823 		    old_other_state->crtc_h != new_other_state->crtc_h)
10824 			return true;
10825 
10826 		/* Rotation / mirroring updates. */
10827 		if (old_other_state->rotation != new_other_state->rotation)
10828 			return true;
10829 
10830 		/* Blending updates. */
10831 		if (old_other_state->pixel_blend_mode !=
10832 		    new_other_state->pixel_blend_mode)
10833 			return true;
10834 
10835 		/* Alpha updates. */
10836 		if (old_other_state->alpha != new_other_state->alpha)
10837 			return true;
10838 
10839 		/* Colorspace changes. */
10840 		if (old_other_state->color_range != new_other_state->color_range ||
10841 		    old_other_state->color_encoding != new_other_state->color_encoding)
10842 			return true;
10843 
10844 		/* Framebuffer checks fall at the end. */
10845 		if (!old_other_state->fb || !new_other_state->fb)
10846 			continue;
10847 
10848 		/* Pixel format changes can require bandwidth updates. */
10849 		if (old_other_state->fb->format != new_other_state->fb->format)
10850 			return true;
10851 
10852 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10853 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10854 
10855 		/* Tiling and DCC changes also require bandwidth updates. */
10856 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10857 		    old_afb->base.modifier != new_afb->base.modifier)
10858 			return true;
10859 	}
10860 
10861 	return false;
10862 }
10863 
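/*
 * Validate a cursor framebuffer: size within the hardware cursor limits, no
 * cropping, a pitch of 64/128/256 pixels matching the width, and a linear
 * layout when no modifier is supplied.
 */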
10864 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10865 			      struct drm_plane_state *new_plane_state,
10866 			      struct drm_framebuffer *fb)
10867 {
10868 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10869 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10870 	unsigned int pitch;
10871 	bool linear;
10872 
10873 	if (fb->width > new_acrtc->max_cursor_width ||
10874 	    fb->height > new_acrtc->max_cursor_height) {
10875 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10876 				 new_plane_state->fb->width,
10877 				 new_plane_state->fb->height);
10878 		return -EINVAL;
10879 	}
10880 	if (new_plane_state->src_w != fb->width << 16 ||
10881 	    new_plane_state->src_h != fb->height << 16) {
10882 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10883 		return -EINVAL;
10884 	}
10885 
10886 	/* Pitch in pixels */
10887 	pitch = fb->pitches[0] / fb->format->cpp[0];
10888 
10889 	if (fb->width != pitch) {
10890 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10891 				 fb->width, pitch);
10892 		return -EINVAL;
10893 	}
10894 
10895 	switch (pitch) {
10896 	case 64:
10897 	case 128:
10898 	case 256:
10899 		/* FB pitch is supported by cursor plane */
10900 		break;
10901 	default:
10902 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10903 		return -EINVAL;
10904 	}
10905 
10906 	/* Core DRM takes care of checking FB modifiers, so we only need to
10907 	 * check tiling flags when the FB doesn't have a modifier. */
10908 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10909 		if (adev->family < AMDGPU_FAMILY_AI) {
10910 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10911 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10912 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10913 		} else {
10914 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10915 		}
10916 		if (!linear) {
10917 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10918 			return -EINVAL;
10919 		}
10920 	}
10921 
10922 	return 0;
10923 }
10924 
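/*
 * Mirror a DRM plane change into the dc context: remove the old dc plane
 * state, or create, fill and attach a new one to the CRTC's stream. Cursor
 * planes are only sanity checked here.
 */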
10925 static int dm_update_plane_state(struct dc *dc,
10926 				 struct drm_atomic_state *state,
10927 				 struct drm_plane *plane,
10928 				 struct drm_plane_state *old_plane_state,
10929 				 struct drm_plane_state *new_plane_state,
10930 				 bool enable,
10931 				 bool *lock_and_validation_needed)
10932 {
10933 
10934 	struct dm_atomic_state *dm_state = NULL;
10935 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10936 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10937 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10938 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10939 	struct amdgpu_crtc *new_acrtc;
10940 	bool needs_reset;
10941 	int ret = 0;
10942 
10943 
10944 	new_plane_crtc = new_plane_state->crtc;
10945 	old_plane_crtc = old_plane_state->crtc;
10946 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10947 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10948 
10949 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10950 		if (!enable || !new_plane_crtc ||
10951 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10952 			return 0;
10953 
10954 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10955 
10956 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10957 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10958 			return -EINVAL;
10959 		}
10960 
10961 		if (new_plane_state->fb) {
10962 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10963 						 new_plane_state->fb);
10964 			if (ret)
10965 				return ret;
10966 		}
10967 
10968 		return 0;
10969 	}
10970 
10971 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10972 					 new_plane_state);
10973 
10974 	/* Remove any changed/removed planes */
10975 	if (!enable) {
10976 		if (!needs_reset)
10977 			return 0;
10978 
10979 		if (!old_plane_crtc)
10980 			return 0;
10981 
10982 		old_crtc_state = drm_atomic_get_old_crtc_state(
10983 				state, old_plane_crtc);
10984 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10985 
10986 		if (!dm_old_crtc_state->stream)
10987 			return 0;
10988 
10989 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10990 				plane->base.id, old_plane_crtc->base.id);
10991 
10992 		ret = dm_atomic_get_state(state, &dm_state);
10993 		if (ret)
10994 			return ret;
10995 
10996 		if (!dc_remove_plane_from_context(
10997 				dc,
10998 				dm_old_crtc_state->stream,
10999 				dm_old_plane_state->dc_state,
11000 				dm_state->context)) {
11001 
11002 			return -EINVAL;
11003 		}
11004 
11005 
11006 		dc_plane_state_release(dm_old_plane_state->dc_state);
11007 		dm_new_plane_state->dc_state = NULL;
11008 
11009 		*lock_and_validation_needed = true;
11010 
11011 	} else { /* Add new planes */
11012 		struct dc_plane_state *dc_new_plane_state;
11013 
11014 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
11015 			return 0;
11016 
11017 		if (!new_plane_crtc)
11018 			return 0;
11019 
11020 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
11021 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11022 
11023 		if (!dm_new_crtc_state->stream)
11024 			return 0;
11025 
11026 		if (!needs_reset)
11027 			return 0;
11028 
11029 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
11030 		if (ret)
11031 			return ret;
11032 
11033 		WARN_ON(dm_new_plane_state->dc_state);
11034 
11035 		dc_new_plane_state = dc_create_plane_state(dc);
11036 		if (!dc_new_plane_state)
11037 			return -ENOMEM;
11038 
11039 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11040 				 plane->base.id, new_plane_crtc->base.id);
11041 
11042 		ret = fill_dc_plane_attributes(
11043 			drm_to_adev(new_plane_crtc->dev),
11044 			dc_new_plane_state,
11045 			new_plane_state,
11046 			new_crtc_state);
11047 		if (ret) {
11048 			dc_plane_state_release(dc_new_plane_state);
11049 			return ret;
11050 		}
11051 
11052 		ret = dm_atomic_get_state(state, &dm_state);
11053 		if (ret) {
11054 			dc_plane_state_release(dc_new_plane_state);
11055 			return ret;
11056 		}
11057 
11058 		/*
11059 		 * Any atomic check errors that occur after this will
11060 		 * not need a release. The plane state will be attached
11061 		 * to the stream, and therefore part of the atomic
11062 		 * state. It'll be released when the atomic state is
11063 		 * cleaned.
11064 		 */
11065 		if (!dc_add_plane_to_context(
11066 				dc,
11067 				dm_new_crtc_state->stream,
11068 				dc_new_plane_state,
11069 				dm_state->context)) {
11070 
11071 			dc_plane_state_release(dc_new_plane_state);
11072 			return -EINVAL;
11073 		}
11074 
11075 		dm_new_plane_state->dc_state = dc_new_plane_state;
11076 
11077 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11078 
11079 		/* Tell DC to do a full surface update every time there
11080 		 * is a plane change. Inefficient, but works for now.
11081 		 */
11082 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11083 
11084 		*lock_and_validation_needed = true;
11085 	}
11086 
11087 
11088 	return ret;
11089 }
11090 
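/* Return the plane source size in pixels, swapping width and height for 90/270 degree rotations. */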
11091 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11092 				       int *src_w, int *src_h)
11093 {
11094 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11095 	case DRM_MODE_ROTATE_90:
11096 	case DRM_MODE_ROTATE_270:
11097 		*src_w = plane_state->src_h >> 16;
11098 		*src_h = plane_state->src_w >> 16;
11099 		break;
11100 	case DRM_MODE_ROTATE_0:
11101 	case DRM_MODE_ROTATE_180:
11102 	default:
11103 		*src_w = plane_state->src_w >> 16;
11104 		*src_h = plane_state->src_h >> 16;
11105 		break;
11106 	}
11107 }
11108 
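/*
 * Reject cursor updates whose effective scaling differs from the underlying
 * planes on the same CRTC, since the shared cursor inherits that scaling.
 */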
11109 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11110 				struct drm_crtc *crtc,
11111 				struct drm_crtc_state *new_crtc_state)
11112 {
11113 	struct drm_plane *cursor = crtc->cursor, *underlying;
11114 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
11115 	int i;
11116 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11117 	int cursor_src_w, cursor_src_h;
11118 	int underlying_src_w, underlying_src_h;
11119 
	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe but it's going to inherit the scaling and
	 * positioning from the underlying pipe. Check that the cursor plane's
	 * scaling matches the underlying planes'. */
11124 
11125 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11126 	if (!new_cursor_state || !new_cursor_state->fb) {
11127 		return 0;
11128 	}
11129 
11130 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11131 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11132 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
11133 
11134 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11135 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
11136 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11137 			continue;
11138 
11139 		/* Ignore disabled planes */
11140 		if (!new_underlying_state->fb)
11141 			continue;
11142 
11143 		dm_get_oriented_plane_size(new_underlying_state,
11144 					   &underlying_src_w, &underlying_src_h);
11145 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11146 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11147 
11148 		if (cursor_scale_w != underlying_scale_w ||
11149 		    cursor_scale_h != underlying_scale_h) {
11150 			drm_dbg_atomic(crtc->dev,
11151 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11152 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11153 			return -EINVAL;
11154 		}
11155 
11156 		/* If this plane covers the whole CRTC, no need to check planes underneath */
11157 		if (new_underlying_state->crtc_x <= 0 &&
11158 		    new_underlying_state->crtc_y <= 0 &&
11159 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11160 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11161 			break;
11162 	}
11163 
11164 	return 0;
11165 }
11166 
11167 #if defined(CONFIG_DRM_AMD_DC_DCN)
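/*
 * If an MST connector is assigned to this CRTC, add every CRTC that shares
 * its DSC-capable MST topology to the atomic state so they get revalidated.
 */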
11168 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11169 {
11170 	struct drm_connector *connector;
11171 	struct drm_connector_state *conn_state, *old_conn_state;
11172 	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11175 		if (!conn_state->crtc)
11176 			conn_state = old_conn_state;
11177 
11178 		if (conn_state->crtc != crtc)
11179 			continue;
11180 
11181 		aconnector = to_amdgpu_dm_connector(connector);
11182 		if (!aconnector->port || !aconnector->mst_port)
11183 			aconnector = NULL;
11184 		else
11185 			break;
11186 	}
11187 
11188 	if (!aconnector)
11189 		return 0;
11190 
11191 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11192 }
11193 #endif
11194 
11195 /**
11196  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11197  * @dev: The DRM device
11198  * @state: The atomic state to commit
11199  *
11200  * Validate that the given atomic state is programmable by DC into hardware.
11201  * This involves constructing a &struct dc_state reflecting the new hardware
11202  * state we wish to commit, then querying DC to see if it is programmable. It's
11203  * important not to modify the existing DC state. Otherwise, atomic_check
11204  * may unexpectedly commit hardware changes.
11205  *
11206  * When validating the DC state, it's important that the right locks are
11207  * acquired. For full updates case which removes/adds/updates streams on one
11208  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
11209  * that any such full update commit will wait for completion of any outstanding
11210  * flip using DRMs synchronization events.
11211  *
11212  * Note that DM adds the affected connectors for all CRTCs in state, when that
11213  * might not seem necessary. This is because DC stream creation requires the
11214  * DC sink, which is tied to the DRM connector state. Cleaning this up should
11215  * be possible but non-trivial - a possible TODO item.
11216  *
11217  * Return: -Error code if validation failed.
11218  */
11219 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11220 				  struct drm_atomic_state *state)
11221 {
11222 	struct amdgpu_device *adev = drm_to_adev(dev);
11223 	struct dm_atomic_state *dm_state = NULL;
11224 	struct dc *dc = adev->dm.dc;
11225 	struct drm_connector *connector;
11226 	struct drm_connector_state *old_con_state, *new_con_state;
11227 	struct drm_crtc *crtc;
11228 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11229 	struct drm_plane *plane;
11230 	struct drm_plane_state *old_plane_state, *new_plane_state;
11231 	enum dc_status status;
11232 	int ret, i;
11233 	bool lock_and_validation_needed = false;
11234 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11235 #if defined(CONFIG_DRM_AMD_DC_DCN)
11236 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
11237 	struct drm_dp_mst_topology_state *mst_state;
11238 	struct drm_dp_mst_topology_mgr *mgr;
11239 #endif
11240 
11241 	trace_amdgpu_dm_atomic_check_begin(state);
11242 
11243 	ret = drm_atomic_helper_check_modeset(dev, state);
11244 	if (ret) {
11245 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11246 		goto fail;
11247 	}
11248 
11249 	/* Check connector changes */
11250 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11251 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11252 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11253 
11254 		/* Skip connectors that are disabled or part of modeset already. */
11255 		if (!old_con_state->crtc && !new_con_state->crtc)
11256 			continue;
11257 
11258 		if (!new_con_state->crtc)
11259 			continue;
11260 
11261 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11262 		if (IS_ERR(new_crtc_state)) {
11263 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11264 			ret = PTR_ERR(new_crtc_state);
11265 			goto fail;
11266 		}
11267 
11268 		if (dm_old_con_state->abm_level !=
11269 		    dm_new_con_state->abm_level)
11270 			new_crtc_state->connectors_changed = true;
11271 	}
11272 
11273 #if defined(CONFIG_DRM_AMD_DC_DCN)
11274 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11275 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11276 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11277 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11278 				if (ret) {
11279 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11280 					goto fail;
11281 				}
11282 			}
11283 		}
11284 		if (!pre_validate_dsc(state, &dm_state, vars)) {
11285 			ret = -EINVAL;
11286 			goto fail;
11287 		}
11288 	}
11289 #endif
11290 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11291 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11292 
11293 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11294 		    !new_crtc_state->color_mgmt_changed &&
11295 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11296 			dm_old_crtc_state->dsc_force_changed == false)
11297 			continue;
11298 
11299 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11300 		if (ret) {
11301 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11302 			goto fail;
11303 		}
11304 
11305 		if (!new_crtc_state->enable)
11306 			continue;
11307 
11308 		ret = drm_atomic_add_affected_connectors(state, crtc);
11309 		if (ret) {
11310 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11311 			goto fail;
11312 		}
11313 
11314 		ret = drm_atomic_add_affected_planes(state, crtc);
11315 		if (ret) {
11316 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11317 			goto fail;
11318 		}
11319 
11320 		if (dm_old_crtc_state->dsc_force_changed)
11321 			new_crtc_state->mode_changed = true;
11322 	}
11323 
11324 	/*
11325 	 * Add all primary and overlay planes on the CRTC to the state
11326 	 * whenever a plane is enabled to maintain correct z-ordering
11327 	 * and to enable fast surface updates.
11328 	 */
11329 	drm_for_each_crtc(crtc, dev) {
11330 		bool modified = false;
11331 
11332 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11333 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11334 				continue;
11335 
11336 			if (new_plane_state->crtc == crtc ||
11337 			    old_plane_state->crtc == crtc) {
11338 				modified = true;
11339 				break;
11340 			}
11341 		}
11342 
11343 		if (!modified)
11344 			continue;
11345 
11346 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11347 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11348 				continue;
11349 
11350 			new_plane_state =
11351 				drm_atomic_get_plane_state(state, plane);
11352 
11353 			if (IS_ERR(new_plane_state)) {
11354 				ret = PTR_ERR(new_plane_state);
11355 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11356 				goto fail;
11357 			}
11358 		}
11359 	}
11360 
	/* Remove existing planes if they are modified */
11362 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11363 		ret = dm_update_plane_state(dc, state, plane,
11364 					    old_plane_state,
11365 					    new_plane_state,
11366 					    false,
11367 					    &lock_and_validation_needed);
11368 		if (ret) {
11369 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11370 			goto fail;
11371 		}
11372 	}
11373 
11374 	/* Disable all crtcs which require disable */
11375 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11376 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11377 					   old_crtc_state,
11378 					   new_crtc_state,
11379 					   false,
11380 					   &lock_and_validation_needed);
11381 		if (ret) {
11382 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11383 			goto fail;
11384 		}
11385 	}
11386 
11387 	/* Enable all crtcs which require enable */
11388 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11389 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11390 					   old_crtc_state,
11391 					   new_crtc_state,
11392 					   true,
11393 					   &lock_and_validation_needed);
11394 		if (ret) {
11395 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11396 			goto fail;
11397 		}
11398 	}
11399 
11400 	/* Add new/modified planes */
11401 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11402 		ret = dm_update_plane_state(dc, state, plane,
11403 					    old_plane_state,
11404 					    new_plane_state,
11405 					    true,
11406 					    &lock_and_validation_needed);
11407 		if (ret) {
11408 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11409 			goto fail;
11410 		}
11411 	}
11412 
11413 	/* Run this here since we want to validate the streams we created */
11414 	ret = drm_atomic_helper_check_planes(dev, state);
11415 	if (ret) {
11416 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11417 		goto fail;
11418 	}
11419 
11420 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11421 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11422 		if (dm_new_crtc_state->mpo_requested)
11423 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11424 	}
11425 
11426 	/* Check cursor planes scaling */
11427 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11428 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11429 		if (ret) {
11430 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11431 			goto fail;
11432 		}
11433 	}
11434 
11435 	if (state->legacy_cursor_update) {
11436 		/*
11437 		 * This is a fast cursor update coming from the plane update
11438 		 * helper, check if it can be done asynchronously for better
11439 		 * performance.
11440 		 */
11441 		state->async_update =
11442 			!drm_atomic_helper_async_check(dev, state);
11443 
11444 		/*
11445 		 * Skip the remaining global validation if this is an async
11446 		 * update. Cursor updates can be done without affecting
11447 		 * state or bandwidth calcs and this avoids the performance
11448 		 * penalty of locking the private state object and
11449 		 * allocating a new dc_state.
11450 		 */
11451 		if (state->async_update)
11452 			return 0;
11453 	}
11454 
	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling changes validation was removed due to the inability to
	 * commit a new stream into the context w/o causing a full reset. Need
	 * to decide how to handle this.
	 */
11460 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11461 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11462 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11463 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11464 
11465 		/* Skip any modesets/resets */
11466 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11467 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11468 			continue;
11469 
		/* Skip anything that is not a scale or underscan change */
11471 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11472 			continue;
11473 
11474 		lock_and_validation_needed = true;
11475 	}
11476 
11477 #if defined(CONFIG_DRM_AMD_DC_DCN)
11478 	/* set the slot info for each mst_state based on the link encoding format */
11479 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11480 		struct amdgpu_dm_connector *aconnector;
11481 		struct drm_connector *connector;
11482 		struct drm_connector_list_iter iter;
11483 		u8 link_coding_cap;
11484 
		if (!mgr->mst_state)
11486 			continue;
11487 
11488 		drm_connector_list_iter_begin(dev, &iter);
11489 		drm_for_each_connector_iter(connector, &iter) {
11490 			int id = connector->index;
11491 
11492 			if (id == mst_state->mgr->conn_base_id) {
11493 				aconnector = to_amdgpu_dm_connector(connector);
11494 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11495 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11496 
11497 				break;
11498 			}
11499 		}
11500 		drm_connector_list_iter_end(&iter);
	}
11503 #endif
	/*
11505 	 * Streams and planes are reset when there are changes that affect
11506 	 * bandwidth. Anything that affects bandwidth needs to go through
11507 	 * DC global validation to ensure that the configuration can be applied
11508 	 * to hardware.
11509 	 *
11510 	 * We have to currently stall out here in atomic_check for outstanding
11511 	 * commits to finish in this case because our IRQ handlers reference
11512 	 * DRM state directly - we can end up disabling interrupts too early
11513 	 * if we don't.
11514 	 *
11515 	 * TODO: Remove this stall and drop DM state private objects.
11516 	 */
11517 	if (lock_and_validation_needed) {
11518 		ret = dm_atomic_get_state(state, &dm_state);
11519 		if (ret) {
11520 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11521 			goto fail;
11522 		}
11523 
11524 		ret = do_aquire_global_lock(dev, state);
11525 		if (ret) {
11526 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11527 			goto fail;
11528 		}
11529 
11530 #if defined(CONFIG_DRM_AMD_DC_DCN)
11531 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11532 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11533 			ret = -EINVAL;
11534 			goto fail;
11535 		}
11536 
11537 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11538 		if (ret) {
11539 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11540 			goto fail;
11541 		}
11542 #endif
11543 
11544 		/*
11545 		 * Perform validation of MST topology in the state:
11546 		 * We need to perform MST atomic check before calling
11547 		 * dc_validate_global_state(), or there is a chance
11548 		 * to get stuck in an infinite loop and hang eventually.
11549 		 */
11550 		ret = drm_dp_mst_atomic_check(state);
11551 		if (ret) {
11552 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11553 			goto fail;
11554 		}
11555 		status = dc_validate_global_state(dc, dm_state->context, true);
11556 		if (status != DC_OK) {
11557 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11558 				       dc_status_to_str(status), status);
11559 			ret = -EINVAL;
11560 			goto fail;
11561 		}
11562 	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context or affect global validation, and their
		 * commit work can be done in parallel with other commits not
		 * touching the same resource. If we have a new DC context as
		 * part of the DM atomic state from validation we need to free
		 * it and retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free some
		 * memory and avoid a possible use-after-free later.
		 */
11576 
11577 		for (i = 0; i < state->num_private_objs; i++) {
11578 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11579 
11580 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11581 				int j = state->num_private_objs-1;
11582 
11583 				dm_atomic_destroy_state(obj,
11584 						state->private_objs[i].state);
11585 
11586 				/* If i is not at the end of the array then the
11587 				 * last element needs to be moved to where i was
11588 				 * before the array can safely be truncated.
11589 				 */
11590 				if (i != j)
11591 					state->private_objs[i] =
11592 						state->private_objs[j];
11593 
11594 				state->private_objs[j].ptr = NULL;
11595 				state->private_objs[j].state = NULL;
11596 				state->private_objs[j].old_state = NULL;
11597 				state->private_objs[j].new_state = NULL;
11598 
11599 				state->num_private_objs = j;
11600 				break;
11601 			}
11602 		}
11603 	}
11604 
11605 	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11607 		struct dm_crtc_state *dm_new_crtc_state =
11608 			to_dm_crtc_state(new_crtc_state);
11609 
11610 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11611 							 UPDATE_TYPE_FULL :
11612 							 UPDATE_TYPE_FAST;
11613 	}
11614 
	/* ret must be 0 (success) at this point */
11616 	WARN_ON(ret);
11617 
11618 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11619 
11620 	return ret;
11621 
11622 fail:
11623 	if (ret == -EDEADLK)
11624 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11625 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11626 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11627 	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11629 
11630 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11631 
11632 	return ret;
11633 }
11634 
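/*
 * Check whether the DP sink advertises MSA_TIMING_PAR_IGNORED in the
 * DP_DOWN_STREAM_PORT_COUNT DPCD register, i.e. whether it can operate
 * without the MSA timing parameters. The caller uses this as a
 * precondition for FreeSync over DP/eDP.
 */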
11635 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11636 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11637 {
11638 	uint8_t dpcd_data;
11639 	bool capable = false;
11640 
11641 	if (amdgpu_dm_connector->dc_link &&
11642 		dm_helpers_dp_read_dpcd(
11643 				NULL,
11644 				amdgpu_dm_connector->dc_link,
11645 				DP_DOWN_STREAM_PORT_COUNT,
11646 				&dpcd_data,
11647 				sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11649 	}
11650 
11651 	return capable;
11652 }
11653 
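/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to DMUB for parsing. The reply is either an ACK for
 * the chunk or, once the whole block has been consumed, the parsed AMD
 * VSDB FreeSync information, which is copied into the caller's vsdb.
 */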
11654 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11655 		unsigned int offset,
11656 		unsigned int total_length,
11657 		uint8_t *data,
11658 		unsigned int length,
11659 		struct amdgpu_hdmi_vsdb_info *vsdb)
11660 {
11661 	bool res;
11662 	union dmub_rb_cmd cmd;
11663 	struct dmub_cmd_send_edid_cea *input;
11664 	struct dmub_cmd_edid_cea_output *output;
11665 
11666 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11667 		return false;
11668 
11669 	memset(&cmd, 0, sizeof(cmd));
11670 
11671 	input = &cmd.edid_cea.data.input;
11672 
11673 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11674 	cmd.edid_cea.header.sub_type = 0;
11675 	cmd.edid_cea.header.payload_bytes =
11676 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11677 	input->offset = offset;
11678 	input->length = length;
11679 	input->cea_total_length = total_length;
11680 	memcpy(input->payload, data, length);
11681 
11682 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11683 	if (!res) {
11684 		DRM_ERROR("EDID CEA parser failed\n");
11685 		return false;
11686 	}
11687 
11688 	output = &cmd.edid_cea.data.output;
11689 
11690 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11691 		if (!output->ack.success) {
11692 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11693 					output->ack.offset);
11694 		}
11695 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11696 		if (!output->amd_vsdb.vsdb_found)
11697 			return false;
11698 
11699 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11700 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11701 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11702 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11703 	} else {
11704 		DRM_WARN("Unknown EDID CEA parser results\n");
11705 		return false;
11706 	}
11707 
11708 	return true;
11709 }
11710 
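/*
 * Stream a CEA extension block to the DMCU firmware parser in 8-byte
 * chunks, checking for an ACK after each chunk. Once the whole block
 * has been sent, retrieve the AMD VSDB refresh-rate range, if any.
 */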
11711 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11712 		uint8_t *edid_ext, int len,
11713 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11714 {
11715 	int i;
11716 
11717 	/* send extension block to DMCU for parsing */
11718 	for (i = 0; i < len; i += 8) {
11719 		bool res;
11720 		int offset;
11721 
		/* send 8 bytes at a time */
11723 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11724 			return false;
11725 
		if (i + 8 == len) {
			/* entire EDID block sent; expect the parse result */
11728 			int version, min_rate, max_rate;
11729 
11730 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11731 			if (res) {
11732 				/* amd vsdb found */
11733 				vsdb_info->freesync_supported = 1;
11734 				vsdb_info->amd_vsdb_version = version;
11735 				vsdb_info->min_refresh_rate_hz = min_rate;
11736 				vsdb_info->max_refresh_rate_hz = max_rate;
11737 				return true;
11738 			}
11739 			/* not amd vsdb */
11740 			return false;
11741 		}
11742 
		/* check for ack */
11744 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11745 		if (!res)
11746 			return false;
11747 	}
11748 
11749 	return false;
11750 }
11751 
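/*
 * DMUB variant of the CEA parser: stream the extension block to DMUB in
 * 8-byte chunks via dm_edid_parser_send_cea() and report whether an AMD
 * VSDB with FreeSync support was found.
 */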
11752 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11753 		uint8_t *edid_ext, int len,
11754 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11755 {
11756 	int i;
11757 
	/* send extension block to DMUB for parsing */
11759 	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
11761 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11762 			return false;
11763 	}
11764 
11765 	return vsdb_info->freesync_supported;
11766 }
11767 
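/* Parse a CEA extension block with DMUB when available, else fall back to DMCU. */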
11768 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11769 		uint8_t *edid_ext, int len,
11770 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11771 {
11772 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11773 
11774 	if (adev->dm.dmub_srv)
11775 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11776 	else
11777 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11778 }
11779 
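/*
 * Locate the CEA extension block in the EDID (mirroring
 * drm_find_cea_extension()) and hand it to the firmware parser to look
 * for an AMD VSDB. Returns the index of the CEA extension block on
 * success, -ENODEV otherwise.
 */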
11780 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11781 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11782 {
11783 	uint8_t *edid_ext = NULL;
11784 	int i;
11785 	bool valid_vsdb_found = false;
11786 
11787 	/*----- drm_find_cea_extension() -----*/
11788 	/* No EDID or EDID extensions */
11789 	if (edid == NULL || edid->extensions == 0)
11790 		return -ENODEV;
11791 
11792 	/* Find CEA extension */
11793 	for (i = 0; i < edid->extensions; i++) {
11794 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11795 		if (edid_ext[0] == CEA_EXT)
11796 			break;
11797 	}
11798 
11799 	if (i == edid->extensions)
11800 		return -ENODEV;
11801 
11802 	/*----- cea_db_offsets() -----*/
11803 	if (edid_ext[0] != CEA_EXT)
11804 		return -ENODEV;
11805 
11806 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11807 
11808 	return valid_vsdb_found ? i : -ENODEV;
11809 }
11810 
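/**
 * amdgpu_dm_update_freesync_caps - Update a connector's FreeSync/VRR capabilities
 * @connector: DRM connector to update
 * @edid: EDID to parse, or NULL to clear the cached capabilities
 *
 * For DP/eDP sinks that can ignore MSA timing parameters, the supported
 * refresh-rate range is taken from the EDID detailed timing descriptors;
 * for HDMI sinks it is taken from the AMD vendor-specific data block. The
 * result is cached on the connector and reflected in the "vrr_capable"
 * connector property.
 */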
11811 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11812 					struct edid *edid)
11813 {
11814 	int i = 0;
11815 	struct detailed_timing *timing;
11816 	struct detailed_non_pixel *data;
11817 	struct detailed_data_monitor_range *range;
11818 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11819 			to_amdgpu_dm_connector(connector);
11820 	struct dm_connector_state *dm_con_state = NULL;
11821 	struct dc_sink *sink;
11822 
11823 	struct drm_device *dev = connector->dev;
11824 	struct amdgpu_device *adev = drm_to_adev(dev);
11825 	bool freesync_capable = false;
11826 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11827 
11828 	if (!connector->state) {
11829 		DRM_ERROR("%s - Connector has no state", __func__);
11830 		goto update;
11831 	}
11832 
11833 	sink = amdgpu_dm_connector->dc_sink ?
11834 		amdgpu_dm_connector->dc_sink :
11835 		amdgpu_dm_connector->dc_em_sink;
11836 
11837 	if (!edid || !sink) {
11838 		dm_con_state = to_dm_connector_state(connector->state);
11839 
11840 		amdgpu_dm_connector->min_vfreq = 0;
11841 		amdgpu_dm_connector->max_vfreq = 0;
11842 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11843 		connector->display_info.monitor_range.min_vfreq = 0;
11844 		connector->display_info.monitor_range.max_vfreq = 0;
11845 		freesync_capable = false;
11846 
11847 		goto update;
11848 	}
11849 
11850 	dm_con_state = to_dm_connector_state(connector->state);
11851 
11852 	if (!adev->dm.freesync_module)
11853 		goto update;
11854 
11856 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11857 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11858 		bool edid_check_required = false;
11859 
11860 		if (edid) {
11861 			edid_check_required = is_dp_capable_without_timing_msa(
11862 						adev->dm.dc,
11863 						amdgpu_dm_connector);
11864 		}
11865 
		if (edid_check_required && (edid->version > 1 ||
		   (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
11870 				timing	= &edid->detailed_timings[i];
11871 				data	= &timing->data.other_data;
11872 				range	= &data->data.range;
11873 				/*
11874 				 * Check if monitor has continuous frequency mode
11875 				 */
11876 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11877 					continue;
11878 				/*
11879 				 * Check for flag range limits only. If flag == 1 then
11880 				 * no additional timing information provided.
11881 				 * Default GTF, GTF Secondary curve and CVT are not
11882 				 * supported
11883 				 */
11884 				if (range->flags != 1)
11885 					continue;
11886 
11887 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11888 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11889 				amdgpu_dm_connector->pixel_clock_mhz =
11890 					range->pixel_clock_mhz * 10;
11891 
11892 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11893 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11894 
11895 				break;
11896 			}
11897 
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11903 		}
11904 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11905 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11906 		if (i >= 0 && vsdb_info.freesync_supported) {
11907 			timing  = &edid->detailed_timings[i];
11908 			data    = &timing->data.other_data;
11909 
11910 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11911 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11912 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11913 				freesync_capable = true;
11914 
11915 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11916 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11917 		}
11918 	}
11919 
11920 update:
11921 	if (dm_con_state)
11922 		dm_con_state->freesync_capable = freesync_capable;
11923 
11924 	if (connector->vrr_capable_property)
11925 		drm_connector_set_vrr_capable_property(connector,
11926 						       freesync_capable);
11927 }
11928 
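/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and retrigger CRTC synchronization.
 */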
11929 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11930 {
11931 	struct amdgpu_device *adev = drm_to_adev(dev);
11932 	struct dc *dc = adev->dm.dc;
11933 	int i;
11934 
11935 	mutex_lock(&adev->dm.dc_lock);
11936 	if (dc->current_state) {
11937 		for (i = 0; i < dc->current_state->stream_count; ++i)
11938 			dc->current_state->streams[i]
11939 				->triggered_crtc_reset.enabled =
11940 				adev->dm.force_timing_sync;
11941 
11942 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11943 		dc_trigger_sync(dc, dc->current_state);
11944 	}
11945 	mutex_unlock(&adev->dm.dc_lock);
11946 }
11947 
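/*
 * Register write helper used by DC: writes go through CGS and are
 * recorded via the amdgpu_dc_wreg tracepoint.
 */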
11948 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11949 		       uint32_t value, const char *func_name)
11950 {
11951 #ifdef DM_CHECK_ADDR_0
11952 	if (address == 0) {
11953 		DC_ERR("invalid register write. address = 0");
11954 		return;
11955 	}
11956 #endif
11957 	cgs_write_register(ctx->cgs_device, address, value);
11958 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11959 }
11960 
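/*
 * Register read helper used by DC. Reads issued while a DMUB register
 * offload gather is in progress (and not in burst-write mode) cannot be
 * satisfied synchronously, so they are rejected with an assert.
 */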
11961 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11962 			  const char *func_name)
11963 {
11964 	uint32_t value;
11965 #ifdef DM_CHECK_ADDR_0
11966 	if (address == 0) {
11967 		DC_ERR("invalid register read; address = 0\n");
11968 		return 0;
11969 	}
11970 #endif
11971 
11972 	if (ctx->dmub_srv &&
11973 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11974 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11975 		ASSERT(false);
11976 		return 0;
11977 	}
11978 
11979 	value = cgs_read_register(ctx->cgs_device, address);
11980 
11981 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11982 
11983 	return value;
11984 }
11985 
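/*
 * Translate the DMUB async-to-sync access status into a return value and
 * an operation result for the caller: AUX requests report the reply
 * length on success, SET_CONFIG requests report 0.
 */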
11986 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11987 						struct dc_context *ctx,
11988 						uint8_t status_type,
11989 						uint32_t *operation_result)
11990 {
11991 	struct amdgpu_device *adev = ctx->driver_context;
11992 	int return_status = -1;
11993 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11994 
11995 	if (is_cmd_aux) {
11996 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11997 			return_status = p_notify->aux_reply.length;
11998 			*operation_result = p_notify->result;
11999 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
12000 			*operation_result = AUX_RET_ERROR_TIMEOUT;
12001 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
12002 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
12003 		} else {
12004 			*operation_result = AUX_RET_ERROR_UNKNOWN;
12005 		}
12006 	} else {
12007 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
12008 			return_status = 0;
12009 			*operation_result = p_notify->sc_status;
12010 		} else {
12011 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
12012 		}
12013 	}
12014 
12015 	return return_status;
12016 }
12017 
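/*
 * Issue an AUX or SET_CONFIG request through DC/DMUB, wait for the
 * completion notification (up to ten seconds), and translate the result
 * for the caller. For a successful AUX read the reply payload is copied
 * back into the caller's aux_payload.
 */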
12018 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
12019 	unsigned int link_index, void *cmd_payload, void *operation_result)
12020 {
12021 	struct amdgpu_device *adev = ctx->driver_context;
12022 	int ret = 0;
12023 
12024 	if (is_cmd_aux) {
12025 		dc_process_dmub_aux_transfer_async(ctx->dc,
12026 			link_index, (struct aux_payload *)cmd_payload);
12027 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
12028 					(struct set_config_cmd_payload *)cmd_payload,
12029 					adev->dm.dmub_notify)) {
12030 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12031 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12032 					(uint32_t *)operation_result);
12033 	}
12034 
12035 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
12036 	if (ret == 0) {
		DRM_ERROR("wait_for_completion_timeout() timed out!");
12038 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12039 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
12040 				(uint32_t *)operation_result);
12041 	}
12042 
12043 	if (is_cmd_aux) {
12044 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
12045 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
12046 
12047 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
12048 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
12049 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
12050 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
12051 				       adev->dm.dmub_notify->aux_reply.length);
12052 			}
12053 		}
12054 	}
12055 
12056 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12057 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12058 			(uint32_t *)operation_result);
12059 }
12060 
12061 /*
12062  * Check whether seamless boot is supported.
12063  *
12064  * So far we only support seamless boot on CHIP_VANGOGH.
12065  * If everything goes well, we may consider expanding
12066  * seamless boot to other ASICs.
12067  */
12068 bool check_seamless_boot_capability(struct amdgpu_device *adev)
12069 {
12070 	switch (adev->asic_type) {
12071 	case CHIP_VANGOGH:
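		/*
		 * Seamless boot is only reported when the stolen VGA memory
		 * does not need to be kept around.
		 */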
12072 		if (!adev->mman.keep_stolen_vga_memory)
12073 			return true;
12074 		break;
12075 	default:
12076 		break;
12077 	}
12078 
12079 	return false;
12080 }
12081