xref: /openbmc/linux/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision f69e98a91a01fd7c5755dd710e94a17d6e9f583f)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42 
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55 
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64 
65 #include "ivsrcid/ivsrcid_vislands30.h"
66 
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75 
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86 
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88 
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "soc15_common.h"
93 #include "vega10_ip_offset.h"
96 
97 #include "gc/gc_11_0_0_offset.h"
98 #include "gc/gc_11_0_0_sh_mask.h"
99 
100 #include "modules/inc/mod_freesync.h"
101 #include "modules/power/power_helpers.h"
102 #include "modules/inc/mod_info_packet.h"
103 
104 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
106 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
108 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
110 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
112 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
114 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
116 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
118 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
120 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
121 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
122 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
123 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
124 
125 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
126 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
127 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
128 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
129 
130 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
131 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
132 
133 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
134 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
135 
136 /* Number of bytes in PSP header for firmware. */
137 #define PSP_HEADER_BYTES 0x100
138 
139 /* Number of bytes in PSP footer for firmware. */
140 #define PSP_FOOTER_BYTES 0x100
141 
142 /**
143  * DOC: overview
144  *
145  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
146  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
147  * requests into DC requests, and DC responses into DRM responses.
148  *
149  * The root control structure is &struct amdgpu_display_manager.
150  */
151 
152 /* basic init/fini API */
153 static int amdgpu_dm_init(struct amdgpu_device *adev);
154 static void amdgpu_dm_fini(struct amdgpu_device *adev);
155 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
156 
157 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
158 {
159 	switch (link->dpcd_caps.dongle_type) {
160 	case DISPLAY_DONGLE_NONE:
161 		return DRM_MODE_SUBCONNECTOR_Native;
162 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
163 		return DRM_MODE_SUBCONNECTOR_VGA;
164 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
165 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
166 		return DRM_MODE_SUBCONNECTOR_DVID;
167 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
168 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
169 		return DRM_MODE_SUBCONNECTOR_HDMIA;
170 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
171 	default:
172 		return DRM_MODE_SUBCONNECTOR_Unknown;
173 	}
174 }
175 
176 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
177 {
178 	struct dc_link *link = aconnector->dc_link;
179 	struct drm_connector *connector = &aconnector->base;
180 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
181 
182 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
183 		return;
184 
185 	if (aconnector->dc_sink)
186 		subconnector = get_subconnector_type(link);
187 
188 	drm_object_property_set_value(&connector->base,
189 			connector->dev->mode_config.dp_subconnector_property,
190 			subconnector);
191 }
192 
193 /*
194  * Initializes drm_device display-related structures, based on the information
195  * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
196  * drm_encoder and drm_mode_config.
197  *
198  * Returns 0 on success
199  */
200 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
201 /* removes and deallocates the drm structures, created by the above function */
202 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
203 
204 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
205 				struct drm_plane *plane,
206 				unsigned long possible_crtcs,
207 				const struct dc_plane_cap *plane_cap);
208 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
209 			       struct drm_plane *plane,
210 			       uint32_t link_index);
211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
212 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
213 				    uint32_t link_index,
214 				    struct amdgpu_encoder *amdgpu_encoder);
215 static int amdgpu_dm_encoder_init(struct drm_device *dev,
216 				  struct amdgpu_encoder *aencoder,
217 				  uint32_t link_index);
218 
219 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
220 
221 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
222 
223 static int amdgpu_dm_atomic_check(struct drm_device *dev,
224 				  struct drm_atomic_state *state);
225 
226 static void handle_cursor_update(struct drm_plane *plane,
227 				 struct drm_plane_state *old_plane_state);
228 
229 static const struct drm_format_info *
230 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
231 
232 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
233 static void handle_hpd_rx_irq(void *param);
234 
235 static bool
236 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
237 				 struct drm_crtc_state *new_crtc_state);
238 /**
239  * dm_vblank_get_counter() - Get counter for number of vertical blanks
240  *
241  * @adev: [in] desired amdgpu device
242  * @crtc: [in] which CRTC to get the counter from
243  *
244  * Return:
245  * Counter for vertical blanks, or 0 if the CRTC index is invalid or the
246  * CRTC has no stream
247  */
251 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
252 {
253 	if (crtc >= adev->mode_info.num_crtc)
254 		return 0;
255 	else {
256 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
257 
258 		if (acrtc->dm_irq_params.stream == NULL) {
259 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
260 				  crtc);
261 			return 0;
262 		}
263 
264 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
265 	}
266 }
267 
268 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
269 				  u32 *vbl, u32 *position)
270 {
271 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
272 
273 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
274 		return -EINVAL;
275 	else {
276 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
277 
278 		if (acrtc->dm_irq_params.stream ==  NULL) {
279 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
280 				  crtc);
281 			return 0;
282 		}
283 
284 		/*
285 		 * TODO rework base driver to use values directly.
286 		 * for now parse it back into reg-format
287 		 */
288 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
289 					 &v_blank_start,
290 					 &v_blank_end,
291 					 &h_position,
292 					 &v_position);
293 
294 		*position = v_position | (h_position << 16);
295 		*vbl = v_blank_start | (v_blank_end << 16);
296 	}
297 
298 	return 0;
299 }
300 
301 static bool dm_is_idle(void *handle)
302 {
303 	/* XXX todo */
304 	return true;
305 }
306 
307 static int dm_wait_for_idle(void *handle)
308 {
309 	/* XXX todo */
310 	return 0;
311 }
312 
313 static bool dm_check_soft_reset(void *handle)
314 {
315 	return false;
316 }
317 
318 static int dm_soft_reset(void *handle)
319 {
320 	/* XXX todo */
321 	return 0;
322 }
323 
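/*
 * Look up the amdgpu_crtc whose OTG (output timing generator) instance
 * matches otg_inst. Falls back to the first CRTC (with a warning) if the
 * instance is -1; returns NULL if no CRTC matches.
 */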
324 static struct amdgpu_crtc *
325 get_crtc_by_otg_inst(struct amdgpu_device *adev,
326 		     int otg_inst)
327 {
328 	struct drm_device *dev = adev_to_drm(adev);
329 	struct drm_crtc *crtc;
330 	struct amdgpu_crtc *amdgpu_crtc;
331 
332 	if (WARN_ON(otg_inst == -1))
333 		return adev->mode_info.crtcs[0];
334 
335 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
336 		amdgpu_crtc = to_amdgpu_crtc(crtc);
337 
338 		if (amdgpu_crtc->otg_inst == otg_inst)
339 			return amdgpu_crtc;
340 	}
341 
342 	return NULL;
343 }
344 
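/*
 * VRR (freesync) is considered active when the freesync state is either
 * actively variable or actively fixed. The _irq variant reads the state
 * cached in the CRTC's IRQ parameters; the other reads the DM CRTC state.
 */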
345 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
346 {
347 	return acrtc->dm_irq_params.freesync_config.state ==
348 		       VRR_STATE_ACTIVE_VARIABLE ||
349 	       acrtc->dm_irq_params.freesync_config.state ==
350 		       VRR_STATE_ACTIVE_FIXED;
351 }
352 
353 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
354 {
355 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
356 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
357 }
358 
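/*
 * DC needs its vmin/vmax timing adjusted when the new state enters
 * fixed-rate VRR, or whenever VRR toggles between the old and new CRTC
 * state.
 */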
359 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
360 					      struct dm_crtc_state *new_state)
361 {
362 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
363 		return true;
364 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
365 		return true;
366 	else
367 		return false;
368 }
369 
370 /**
371  * dm_pflip_high_irq() - Handle pageflip interrupt
372  * @interrupt_params: ignored
373  *
374  * Handles the pageflip interrupt by notifying all interested parties
375  * that the pageflip has been completed.
376  */
377 static void dm_pflip_high_irq(void *interrupt_params)
378 {
379 	struct amdgpu_crtc *amdgpu_crtc;
380 	struct common_irq_params *irq_params = interrupt_params;
381 	struct amdgpu_device *adev = irq_params->adev;
382 	unsigned long flags;
383 	struct drm_pending_vblank_event *e;
384 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
385 	bool vrr_active;
386 
387 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
388 
389 	/* IRQ could occur when in initial stage */
390 	/* TODO work and BO cleanup */
391 	if (amdgpu_crtc == NULL) {
392 		DC_LOG_PFLIP("CRTC is null, returning.\n");
393 		return;
394 	}
395 
396 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
397 
398 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
399 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
400 						 amdgpu_crtc->pflip_status,
401 						 AMDGPU_FLIP_SUBMITTED,
402 						 amdgpu_crtc->crtc_id,
403 						 amdgpu_crtc);
404 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
405 		return;
406 	}
407 
408 	/* page flip completed. */
409 	e = amdgpu_crtc->event;
410 	amdgpu_crtc->event = NULL;
411 
412 	WARN_ON(!e);
413 
414 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
415 
416 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
417 	if (!vrr_active ||
418 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
419 				      &v_blank_end, &hpos, &vpos) ||
420 	    (vpos < v_blank_start)) {
421 		/* Update to correct count and vblank timestamp if racing with
422 		 * vblank irq. This also updates to the correct vblank timestamp
423 		 * even in VRR mode, as scanout is past the front-porch atm.
424 		 */
425 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
426 
427 		/* Wake up userspace by sending the pageflip event with proper
428 		 * count and timestamp of vblank of flip completion.
429 		 */
430 		if (e) {
431 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
432 
433 			/* Event sent, so done with vblank for this flip */
434 			drm_crtc_vblank_put(&amdgpu_crtc->base);
435 		}
436 	} else if (e) {
437 		/* VRR active and inside front-porch: vblank count and
438 		 * timestamp for pageflip event will only be up to date after
439 		 * drm_crtc_handle_vblank() has been executed from late vblank
440 		 * irq handler after start of back-porch (vline 0). We queue the
441 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
442 		 * updated timestamp and count, once it runs after us.
443 		 *
444 		 * We need to open-code this instead of using the helper
445 		 * drm_crtc_arm_vblank_event(), as that helper would
446 		 * call drm_crtc_accurate_vblank_count(), which we must
447 		 * not call in VRR mode while we are in front-porch!
448 		 */
449 
450 		/* sequence will be replaced by real count during send-out. */
451 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
452 		e->pipe = amdgpu_crtc->crtc_id;
453 
454 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
455 		e = NULL;
456 	}
457 
458 	/* Keep track of vblank of this flip for flip throttling. We use the
459 	 * cooked hw counter, as that one incremented at start of this vblank
460 	 * of pageflip completion, so last_flip_vblank is the forbidden count
461 	 * for queueing new pageflips if vsync + VRR is enabled.
462 	 */
463 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
464 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
465 
466 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
467 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
468 
469 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
470 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
471 		     vrr_active, (int) !e);
472 }
473 
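/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * Measures the frame duration for refresh-rate tracing and, when VRR is
 * active, performs core vblank handling (and BTR processing on pre-DCE12
 * ASICs) after the end of the front-porch.
 */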
474 static void dm_vupdate_high_irq(void *interrupt_params)
475 {
476 	struct common_irq_params *irq_params = interrupt_params;
477 	struct amdgpu_device *adev = irq_params->adev;
478 	struct amdgpu_crtc *acrtc;
479 	struct drm_device *drm_dev;
480 	struct drm_vblank_crtc *vblank;
481 	ktime_t frame_duration_ns, previous_timestamp;
482 	unsigned long flags;
483 	int vrr_active;
484 
485 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
486 
487 	if (acrtc) {
488 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
489 		drm_dev = acrtc->base.dev;
490 		vblank = &drm_dev->vblank[acrtc->base.index];
491 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
492 		frame_duration_ns = vblank->time - previous_timestamp;
493 
494 		if (frame_duration_ns > 0) {
495 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
496 						frame_duration_ns,
497 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
498 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
499 		}
500 
501 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
502 			      acrtc->crtc_id,
503 			      vrr_active);
504 
505 		/* Core vblank handling is done here after end of front-porch in
506 		 * vrr mode, as vblank timestamping will now give valid results
507 		 * since it is done after the front-porch. This will also deliver
508 		 * page-flip completion events that have been queued to us
509 		 * if a pageflip happened inside front-porch.
510 		 */
511 		if (vrr_active) {
512 			drm_crtc_handle_vblank(&acrtc->base);
513 
514 			/* BTR processing for pre-DCE12 ASICs */
515 			if (acrtc->dm_irq_params.stream &&
516 			    adev->family < AMDGPU_FAMILY_AI) {
517 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
518 				mod_freesync_handle_v_update(
519 				    adev->dm.freesync_module,
520 				    acrtc->dm_irq_params.stream,
521 				    &acrtc->dm_irq_params.vrr_params);
522 
523 				dc_stream_adjust_vmin_vmax(
524 				    adev->dm.dc,
525 				    acrtc->dm_irq_params.stream,
526 				    &acrtc->dm_irq_params.vrr_params.adjust);
527 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
528 			}
529 		}
530 	}
531 }
532 
533 /**
534  * dm_crtc_high_irq() - Handles CRTC interrupt
535  * @interrupt_params: used for determining the CRTC instance
536  *
537  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
538  * event handler.
539  */
540 static void dm_crtc_high_irq(void *interrupt_params)
541 {
542 	struct common_irq_params *irq_params = interrupt_params;
543 	struct amdgpu_device *adev = irq_params->adev;
544 	struct amdgpu_crtc *acrtc;
545 	unsigned long flags;
546 	int vrr_active;
547 
548 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
549 	if (!acrtc)
550 		return;
551 
552 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
553 
554 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
555 		      vrr_active, acrtc->dm_irq_params.active_planes);
556 
557 	/**
558 	 * Core vblank handling at start of front-porch is only possible
559 	 * in non-vrr mode, as only then will vblank timestamping give
560 	 * valid results while done in front-porch. Otherwise defer it
561 	 * to dm_vupdate_high_irq after end of front-porch.
562 	 */
563 	if (!vrr_active)
564 		drm_crtc_handle_vblank(&acrtc->base);
565 
566 	/**
567 	 * Following stuff must happen at start of vblank, for crc
568 	 * computation and below-the-range btr support in vrr mode.
569 	 */
570 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
571 
572 	/* BTR updates need to happen before VUPDATE on Vega and above. */
573 	if (adev->family < AMDGPU_FAMILY_AI)
574 		return;
575 
576 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
577 
578 	if (acrtc->dm_irq_params.stream &&
579 	    acrtc->dm_irq_params.vrr_params.supported &&
580 	    acrtc->dm_irq_params.freesync_config.state ==
581 		    VRR_STATE_ACTIVE_VARIABLE) {
582 		mod_freesync_handle_v_update(adev->dm.freesync_module,
583 					     acrtc->dm_irq_params.stream,
584 					     &acrtc->dm_irq_params.vrr_params);
585 
586 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
587 					   &acrtc->dm_irq_params.vrr_params.adjust);
588 	}
589 
590 	/*
591 	 * If there aren't any active_planes then the DCN HUBP may be clock-gated.
592 	 * In that case, pageflip completion interrupts won't fire and pageflip
593 	 * completion events won't get delivered. Prevent this by sending
594 	 * pending pageflip events from here if a flip is still pending.
595 	 *
596 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
597 	 * avoid race conditions between flip programming and completion,
598 	 * which could cause too early flip completion events.
599 	 */
600 	if (adev->family >= AMDGPU_FAMILY_RV &&
601 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
602 	    acrtc->dm_irq_params.active_planes == 0) {
603 		if (acrtc->event) {
604 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
605 			acrtc->event = NULL;
606 			drm_crtc_vblank_put(&acrtc->base);
607 		}
608 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
609 	}
610 
611 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
612 }
613 
614 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
615 /**
616  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
617  * DCN generation ASICs
618  * @interrupt_params: interrupt parameters
619  *
620  * Used to set crc window/read out crc value at vertical line 0 position
621  */
622 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
623 {
624 	struct common_irq_params *irq_params = interrupt_params;
625 	struct amdgpu_device *adev = irq_params->adev;
626 	struct amdgpu_crtc *acrtc;
627 
628 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
629 
630 	if (!acrtc)
631 		return;
632 
633 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
634 }
635 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
636 
637 /**
638  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
639  * @adev: amdgpu_device pointer
640  * @notify: dmub notification structure
641  *
642  * DMUB AUX or SET_CONFIG command completion processing callback.
643  * Copies the dmub notification to DM so it can be read by the AUX command
644  * issuing thread, and signals the event to wake up that thread.
645  */
646 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
647 					struct dmub_notification *notify)
648 {
649 	if (adev->dm.dmub_notify)
650 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
651 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
652 		complete(&adev->dm.dmub_aux_transfer_done);
653 }
654 
655 /**
656  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
657  * @adev: amdgpu_device pointer
658  * @notify: dmub notification structure
659  *
660  * DMUB HPD interrupt processing callback. Gets the display index through the
661  * link index and calls the helper to do the processing.
662  */
663 static void dmub_hpd_callback(struct amdgpu_device *adev,
664 			      struct dmub_notification *notify)
665 {
666 	struct amdgpu_dm_connector *aconnector;
667 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
668 	struct drm_connector *connector;
669 	struct drm_connector_list_iter iter;
670 	struct dc_link *link;
671 	uint8_t link_index = 0;
672 	struct drm_device *dev;
673 
674 	if (adev == NULL)
675 		return;
676 
677 	if (notify == NULL) {
678 		DRM_ERROR("DMUB HPD callback notification was NULL");
679 		return;
680 	}
681 
682 	if (notify->link_index >= adev->dm.dc->link_count) {
683 		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
684 		return;
685 	}
686 
687 	link_index = notify->link_index;
688 	link = adev->dm.dc->links[link_index];
689 	dev = adev->dm.ddev;
690 
691 	drm_connector_list_iter_begin(dev, &iter);
692 	drm_for_each_connector_iter(connector, &iter) {
693 		aconnector = to_amdgpu_dm_connector(connector);
694 		if (link && aconnector->dc_link == link) {
695 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
696 			hpd_aconnector = aconnector;
697 			break;
698 		}
699 	}
700 	drm_connector_list_iter_end(&iter);
701 
702 	if (hpd_aconnector) {
703 		if (notify->type == DMUB_NOTIFICATION_HPD)
704 			handle_hpd_irq_helper(hpd_aconnector);
705 		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
706 			handle_hpd_rx_irq(hpd_aconnector);
707 	}
708 }
709 
710 /**
711  * register_dmub_notify_callback - Sets callback for DMUB notify
712  * @adev: amdgpu_device pointer
713  * @type: Type of dmub notification
714  * @callback: Dmub interrupt callback function
715  * @dmub_int_thread_offload: offload indicator
716  *
717  * API to register a dmub callback handler for a dmub notification. Also sets
718  * an indicator for whether the callback processing is to be offloaded to the
719  * dmub interrupt handling thread.
720  * Return: true if successfully registered, false if the callback is NULL or the type is invalid
721  */
722 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
723 					  enum dmub_notification_type type,
724 					  dmub_notify_interrupt_callback_t callback,
725 					  bool dmub_int_thread_offload)
726 {
727 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
728 		adev->dm.dmub_callback[type] = callback;
729 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
730 	} else
731 		return false;
732 
733 	return true;
734 }
735 
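/*
 * Work handler for DMUB HPD notifications that were offloaded from the
 * outbox IRQ: invokes the registered callback for the notification type and
 * frees the work item.
 */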
736 static void dm_handle_hpd_work(struct work_struct *work)
737 {
738 	struct dmub_hpd_work *dmub_hpd_wrk;
739 
740 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
741 
742 	if (!dmub_hpd_wrk->dmub_notify) {
743 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
744 		return;
745 	}
746 
747 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
748 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
749 		dmub_hpd_wrk->dmub_notify);
750 	}
751 
752 	kfree(dmub_hpd_wrk->dmub_notify);
753 	kfree(dmub_hpd_wrk);
754 
755 }
756 
757 #define DMUB_TRACE_MAX_READ 64
758 /**
759  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
760  * @interrupt_params: used for determining the Outbox instance
761  *
762  * Handles the Outbox interrupt by processing pending DMUB notifications
763  * and DMCUB trace buffer entries.
764  */
765 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
766 {
767 	struct dmub_notification notify;
768 	struct common_irq_params *irq_params = interrupt_params;
769 	struct amdgpu_device *adev = irq_params->adev;
770 	struct amdgpu_display_manager *dm = &adev->dm;
771 	struct dmcub_trace_buf_entry entry = { 0 };
772 	uint32_t count = 0;
773 	struct dmub_hpd_work *dmub_hpd_wrk;
774 	struct dc_link *plink = NULL;
775 
776 	if (dc_enable_dmub_notifications(adev->dm.dc) &&
777 		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
778 
779 		do {
780 			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
781 			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
782 				DRM_ERROR("DM: notify type %d invalid!", notify.type);
783 				continue;
784 			}
785 			if (!dm->dmub_callback[notify.type]) {
786 				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
787 				continue;
788 			}
789 			if (dm->dmub_thread_offload[notify.type] == true) {
790 				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
791 				if (!dmub_hpd_wrk) {
792 					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
793 					return;
794 				}
795 				dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
796 				if (!dmub_hpd_wrk->dmub_notify) {
797 					kfree(dmub_hpd_wrk);
798 					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
799 					return;
800 				}
801 				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
802 				if (dmub_hpd_wrk->dmub_notify)
803 					memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
804 				dmub_hpd_wrk->adev = adev;
805 				if (notify.type == DMUB_NOTIFICATION_HPD) {
806 					plink = adev->dm.dc->links[notify.link_index];
807 					if (plink) {
808 						plink->hpd_status =
809 							notify.hpd_status == DP_HPD_PLUG;
810 					}
811 				}
812 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
813 			} else {
814 				dm->dmub_callback[notify.type](adev, &notify);
815 			}
816 		} while (notify.pending_notification);
817 	}
818 
819 
820 	do {
821 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
822 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
823 							entry.param0, entry.param1);
824 
825 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
826 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
827 		} else
828 			break;
829 
830 		count++;
831 
832 	} while (count <= DMUB_TRACE_MAX_READ);
833 
834 	if (count > DMUB_TRACE_MAX_READ)
835 		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
836 }
837 
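/*
 * DM does not perform any clock or power gating of its own; these IP-level
 * hooks are no-op stubs.
 */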
838 static int dm_set_clockgating_state(void *handle,
839 		  enum amd_clockgating_state state)
840 {
841 	return 0;
842 }
843 
844 static int dm_set_powergating_state(void *handle,
845 		  enum amd_powergating_state state)
846 {
847 	return 0;
848 }
849 
850 /* Prototypes of private functions */
851 static int dm_early_init(void *handle);
852 
853 /* Allocate memory for FBC compressed data  */
854 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
855 {
856 	struct drm_device *dev = connector->dev;
857 	struct amdgpu_device *adev = drm_to_adev(dev);
858 	struct dm_compressor_info *compressor = &adev->dm.compressor;
859 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
860 	struct drm_display_mode *mode;
861 	unsigned long max_size = 0;
862 
863 	if (adev->dm.dc->fbc_compressor == NULL)
864 		return;
865 
866 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
867 		return;
868 
869 	if (compressor->bo_ptr)
870 		return;
871 
872 
873 	list_for_each_entry(mode, &connector->modes, head) {
874 		if (max_size < mode->htotal * mode->vtotal)
875 			max_size = mode->htotal * mode->vtotal;
876 	}
877 
878 	if (max_size) {
879 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
880 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
881 			    &compressor->gpu_addr, &compressor->cpu_addr);
882 
883 		if (r)
884 			DRM_ERROR("DM: Failed to initialize FBC\n");
885 		else {
886 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
887 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
888 		}
889 
890 	}
891 
892 }
893 
894 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
895 					  int pipe, bool *enabled,
896 					  unsigned char *buf, int max_bytes)
897 {
898 	struct drm_device *dev = dev_get_drvdata(kdev);
899 	struct amdgpu_device *adev = drm_to_adev(dev);
900 	struct drm_connector *connector;
901 	struct drm_connector_list_iter conn_iter;
902 	struct amdgpu_dm_connector *aconnector;
903 	int ret = 0;
904 
905 	*enabled = false;
906 
907 	mutex_lock(&adev->dm.audio_lock);
908 
909 	drm_connector_list_iter_begin(dev, &conn_iter);
910 	drm_for_each_connector_iter(connector, &conn_iter) {
911 		aconnector = to_amdgpu_dm_connector(connector);
912 		if (aconnector->audio_inst != port)
913 			continue;
914 
915 		*enabled = true;
916 		ret = drm_eld_size(connector->eld);
917 		memcpy(buf, connector->eld, min(max_bytes, ret));
918 
919 		break;
920 	}
921 	drm_connector_list_iter_end(&conn_iter);
922 
923 	mutex_unlock(&adev->dm.audio_lock);
924 
925 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
926 
927 	return ret;
928 }
929 
930 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
931 	.get_eld = amdgpu_dm_audio_component_get_eld,
932 };
933 
934 static int amdgpu_dm_audio_component_bind(struct device *kdev,
935 				       struct device *hda_kdev, void *data)
936 {
937 	struct drm_device *dev = dev_get_drvdata(kdev);
938 	struct amdgpu_device *adev = drm_to_adev(dev);
939 	struct drm_audio_component *acomp = data;
940 
941 	acomp->ops = &amdgpu_dm_audio_component_ops;
942 	acomp->dev = kdev;
943 	adev->dm.audio_component = acomp;
944 
945 	return 0;
946 }
947 
948 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
949 					  struct device *hda_kdev, void *data)
950 {
951 	struct drm_device *dev = dev_get_drvdata(kdev);
952 	struct amdgpu_device *adev = drm_to_adev(dev);
953 	struct drm_audio_component *acomp = data;
954 
955 	acomp->ops = NULL;
956 	acomp->dev = NULL;
957 	adev->dm.audio_component = NULL;
958 }
959 
960 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
961 	.bind	= amdgpu_dm_audio_component_bind,
962 	.unbind	= amdgpu_dm_audio_component_unbind,
963 };
964 
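/*
 * Initialize audio pin bookkeeping from the DC resource pool and register
 * the DM audio component so the HDA driver can query ELDs.
 */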
965 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
966 {
967 	int i, ret;
968 
969 	if (!amdgpu_audio)
970 		return 0;
971 
972 	adev->mode_info.audio.enabled = true;
973 
974 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
975 
976 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
977 		adev->mode_info.audio.pin[i].channels = -1;
978 		adev->mode_info.audio.pin[i].rate = -1;
979 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
980 		adev->mode_info.audio.pin[i].status_bits = 0;
981 		adev->mode_info.audio.pin[i].category_code = 0;
982 		adev->mode_info.audio.pin[i].connected = false;
983 		adev->mode_info.audio.pin[i].id =
984 			adev->dm.dc->res_pool->audios[i]->inst;
985 		adev->mode_info.audio.pin[i].offset = 0;
986 	}
987 
988 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
989 	if (ret < 0)
990 		return ret;
991 
992 	adev->dm.audio_registered = true;
993 
994 	return 0;
995 }
996 
997 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
998 {
999 	if (!amdgpu_audio)
1000 		return;
1001 
1002 	if (!adev->mode_info.audio.enabled)
1003 		return;
1004 
1005 	if (adev->dm.audio_registered) {
1006 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1007 		adev->dm.audio_registered = false;
1008 	}
1009 
1010 	/* TODO: Disable audio? */
1011 
1012 	adev->mode_info.audio.enabled = false;
1013 }
1014 
1015 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1016 {
1017 	struct drm_audio_component *acomp = adev->dm.audio_component;
1018 
1019 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1020 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1021 
1022 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1023 						 pin, -1);
1024 	}
1025 }
1026 
1027 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1028 {
1029 	const struct dmcub_firmware_header_v1_0 *hdr;
1030 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1031 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1032 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1033 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1034 	struct abm *abm = adev->dm.dc->res_pool->abm;
1035 	struct dmub_srv_hw_params hw_params;
1036 	enum dmub_status status;
1037 	const unsigned char *fw_inst_const, *fw_bss_data;
1038 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1039 	bool has_hw_support;
1040 
1041 	if (!dmub_srv)
1042 		/* DMUB isn't supported on the ASIC. */
1043 		return 0;
1044 
1045 	if (!fb_info) {
1046 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1047 		return -EINVAL;
1048 	}
1049 
1050 	if (!dmub_fw) {
1051 		/* Firmware required for DMUB support. */
1052 		DRM_ERROR("No firmware provided for DMUB.\n");
1053 		return -EINVAL;
1054 	}
1055 
1056 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1057 	if (status != DMUB_STATUS_OK) {
1058 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1059 		return -EINVAL;
1060 	}
1061 
1062 	if (!has_hw_support) {
1063 		DRM_INFO("DMUB unsupported on ASIC\n");
1064 		return 0;
1065 	}
1066 
1067 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1068 	status = dmub_srv_hw_reset(dmub_srv);
1069 	if (status != DMUB_STATUS_OK)
1070 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1071 
1072 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1073 
1074 	fw_inst_const = dmub_fw->data +
1075 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1076 			PSP_HEADER_BYTES;
1077 
1078 	fw_bss_data = dmub_fw->data +
1079 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1080 		      le32_to_cpu(hdr->inst_const_bytes);
1081 
1082 	/* Copy firmware and bios info into FB memory. */
1083 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1084 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1085 
1086 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1087 
1088 	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1089 	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
1090 	 * fw_inst_const part to CW0; otherwise, the firmware backdoor load
1091 	 * is done here in dm_dmub_hw_init.
1092 	 */
1093 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1094 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1095 				fw_inst_const_size);
1096 	}
1097 
1098 	if (fw_bss_data_size)
1099 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1100 		       fw_bss_data, fw_bss_data_size);
1101 
1102 	/* Copy firmware bios info into FB memory. */
1103 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1104 	       adev->bios_size);
1105 
1106 	/* Reset regions that need to be reset. */
1107 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1108 	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1109 
1110 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1111 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1112 
1113 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1114 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1115 
1116 	/* Initialize hardware. */
1117 	memset(&hw_params, 0, sizeof(hw_params));
1118 	hw_params.fb_base = adev->gmc.fb_start;
1119 	hw_params.fb_offset = adev->gmc.aper_base;
1120 
1121 	/* backdoor load firmware and trigger dmub running */
1122 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1123 		hw_params.load_inst_const = true;
1124 
1125 	if (dmcu)
1126 		hw_params.psp_version = dmcu->psp_version;
1127 
1128 	for (i = 0; i < fb_info->num_fb; ++i)
1129 		hw_params.fb[i] = &fb_info->fb[i];
1130 
1131 	switch (adev->ip_versions[DCE_HWIP][0]) {
1132 	case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1133 		hw_params.dpia_supported = true;
1134 		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1135 		break;
1136 	default:
1137 		break;
1138 	}
1139 
1140 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1141 	if (status != DMUB_STATUS_OK) {
1142 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1143 		return -EINVAL;
1144 	}
1145 
1146 	/* Wait for firmware load to finish. */
1147 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1148 	if (status != DMUB_STATUS_OK)
1149 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1150 
1151 	/* Init DMCU and ABM if available. */
1152 	if (dmcu && abm) {
1153 		dmcu->funcs->dmcu_init(dmcu);
1154 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1155 	}
1156 
1157 	if (!adev->dm.dc->ctx->dmub_srv)
1158 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1159 	if (!adev->dm.dc->ctx->dmub_srv) {
1160 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1161 		return -ENOMEM;
1162 	}
1163 
1164 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1165 		 adev->dm.dmcub_fw_version);
1166 
1167 	return 0;
1168 }
1169 
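/*
 * Re-initialize DMUB hardware on resume: if the firmware is already loaded,
 * just wait for auto-load to complete; otherwise perform a full hw init.
 */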
1170 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1171 {
1172 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1173 	enum dmub_status status;
1174 	bool init;
1175 
1176 	if (!dmub_srv) {
1177 		/* DMUB isn't supported on the ASIC. */
1178 		return;
1179 	}
1180 
1181 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1182 	if (status != DMUB_STATUS_OK)
1183 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1184 
1185 	if (status == DMUB_STATUS_OK && init) {
1186 		/* Wait for firmware load to finish. */
1187 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1188 		if (status != DMUB_STATUS_OK)
1189 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1190 	} else {
1191 		/* Perform the full hardware initialization. */
1192 		dm_dmub_hw_init(adev);
1193 	}
1194 }
1195 
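/*
 * Build the DC physical address space configuration (system aperture, AGP
 * range and GART page table addresses) from the GMC settings; the result is
 * later passed to dc_setup_system_context().
 */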
1196 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1197 {
1198 	uint64_t pt_base;
1199 	uint32_t logical_addr_low;
1200 	uint32_t logical_addr_high;
1201 	uint32_t agp_base, agp_bot, agp_top;
1202 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1203 
1204 	memset(pa_config, 0, sizeof(*pa_config));
1205 
1206 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1207 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1208 
1209 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1210 		/*
1211 		 * Raven2 has a HW issue that prevents it from using VRAM beyond
1212 		 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
1213 		 * system aperture high address (add 1) to get rid of the VM
1214 		 * fault and hardware hang.
1215 		 */
1216 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1217 	else
1218 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1219 
1220 	agp_base = 0;
1221 	agp_bot = adev->gmc.agp_start >> 24;
1222 	agp_top = adev->gmc.agp_end >> 24;
1223 
1224 
1225 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1226 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1227 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1228 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1229 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1230 	page_table_base.low_part = lower_32_bits(pt_base);
1231 
1232 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1233 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1234 
1235 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1236 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1237 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1238 
1239 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1240 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1241 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1242 
1243 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1244 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1245 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1246 
1247 	pa_config->is_hvm_enabled = 0;
1248 
1249 }
1250 
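/*
 * Deferred vblank work: tracks how many CRTCs currently need vblank
 * interrupts, toggles DC idle optimizations (MALL) accordingly, and
 * enables/disables PSR based on the OS vblank requirements.
 */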
1251 static void vblank_control_worker(struct work_struct *work)
1252 {
1253 	struct vblank_control_work *vblank_work =
1254 		container_of(work, struct vblank_control_work, work);
1255 	struct amdgpu_display_manager *dm = vblank_work->dm;
1256 
1257 	mutex_lock(&dm->dc_lock);
1258 
1259 	if (vblank_work->enable)
1260 		dm->active_vblank_irq_count++;
1261 	else if (dm->active_vblank_irq_count)
1262 		dm->active_vblank_irq_count--;
1263 
1264 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1265 
1266 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1267 
1268 	/*
1269 	 * Control PSR based on vblank requirements from OS
1270 	 *
1271 	 * If panel supports PSR SU, there's no need to disable PSR when OS is
1272 	 * submitting fast atomic commits (we infer this by whether the OS
1273 	 * requests vblank events). Fast atomic commits will simply trigger a
1274 	 * full-frame-update (FFU); a specific case of selective-update (SU)
1275 	 * where the SU region is the full hactive*vactive region. See
1276 	 * fill_dc_dirty_rects().
1277 	 */
1278 	if (vblank_work->stream && vblank_work->stream->link) {
1279 		if (vblank_work->enable) {
1280 			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
1281 			    vblank_work->stream->link->psr_settings.psr_allow_active)
1282 				amdgpu_dm_psr_disable(vblank_work->stream);
1283 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1284 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1285 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1286 			amdgpu_dm_psr_enable(vblank_work->stream);
1287 		}
1288 	}
1289 
1290 	mutex_unlock(&dm->dc_lock);
1291 
1292 	dc_stream_release(vblank_work->stream);
1293 
1294 	kfree(vblank_work);
1295 }
1296 
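/*
 * Offloaded HPD RX interrupt handling: re-detects the sink, then runs DP
 * automated-test or link-loss handling outside interrupt context under the
 * DC lock.
 */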
1297 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1298 {
1299 	struct hpd_rx_irq_offload_work *offload_work;
1300 	struct amdgpu_dm_connector *aconnector;
1301 	struct dc_link *dc_link;
1302 	struct amdgpu_device *adev;
1303 	enum dc_connection_type new_connection_type = dc_connection_none;
1304 	unsigned long flags;
1305 
1306 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1307 	aconnector = offload_work->offload_wq->aconnector;
1308 
1309 	if (!aconnector) {
1310 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1311 		goto skip;
1312 	}
1313 
1314 	adev = drm_to_adev(aconnector->base.dev);
1315 	dc_link = aconnector->dc_link;
1316 
1317 	mutex_lock(&aconnector->hpd_lock);
1318 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1319 		DRM_ERROR("KMS: Failed to detect connector\n");
1320 	mutex_unlock(&aconnector->hpd_lock);
1321 
1322 	if (new_connection_type == dc_connection_none)
1323 		goto skip;
1324 
1325 	if (amdgpu_in_reset(adev))
1326 		goto skip;
1327 
1328 	mutex_lock(&adev->dm.dc_lock);
1329 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1330 		dc_link_dp_handle_automated_test(dc_link);
1331 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1332 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1333 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1334 		dc_link_dp_handle_link_loss(dc_link);
1335 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1336 		offload_work->offload_wq->is_handling_link_loss = false;
1337 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1338 	}
1339 	mutex_unlock(&adev->dm.dc_lock);
1340 
1341 skip:
1342 	kfree(offload_work);
1343 
1344 }
1345 
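/*
 * Allocate one single-threaded offload workqueue per link for deferred
 * HPD RX interrupt handling.
 */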
1346 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1347 {
1348 	int max_caps = dc->caps.max_links;
1349 	int i = 0;
1350 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1351 
1352 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1353 
1354 	if (!hpd_rx_offload_wq)
1355 		return NULL;
1356 
1357 
1358 	for (i = 0; i < max_caps; i++) {
1359 		hpd_rx_offload_wq[i].wq =
1360 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1361 
1362 		if (hpd_rx_offload_wq[i].wq == NULL) {
1363 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1364 			return NULL;
1365 		}
1366 
1367 		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1368 	}
1369 
1370 	return hpd_rx_offload_wq;
1371 }
1372 
1373 struct amdgpu_stutter_quirk {
1374 	u16 chip_vendor;
1375 	u16 chip_device;
1376 	u16 subsys_vendor;
1377 	u16 subsys_device;
1378 	u8 revision;
1379 };
1380 
1381 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1382 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1383 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1384 	{ 0, 0, 0, 0, 0 },
1385 };
1386 
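/*
 * Returns true if this board is on the stutter quirk list and should have
 * memory stutter mode disabled.
 */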
1387 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1388 {
1389 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1390 
1391 	while (p && p->chip_device != 0) {
1392 		if (pdev->vendor == p->chip_vendor &&
1393 		    pdev->device == p->chip_device &&
1394 		    pdev->subsystem_vendor == p->subsys_vendor &&
1395 		    pdev->subsystem_device == p->subsys_device &&
1396 		    pdev->revision == p->revision) {
1397 			return true;
1398 		}
1399 		++p;
1400 	}
1401 	return false;
1402 }
1403 
1404 static int amdgpu_dm_init(struct amdgpu_device *adev)
1405 {
1406 	struct dc_init_data init_data;
1407 #ifdef CONFIG_DRM_AMD_DC_HDCP
1408 	struct dc_callback_init init_params;
1409 #endif
1410 	int r;
1411 
1412 	adev->dm.ddev = adev_to_drm(adev);
1413 	adev->dm.adev = adev;
1414 
1415 	/* Zero all the fields */
1416 	memset(&init_data, 0, sizeof(init_data));
1417 #ifdef CONFIG_DRM_AMD_DC_HDCP
1418 	memset(&init_params, 0, sizeof(init_params));
1419 #endif
1420 
1421 	mutex_init(&adev->dm.dc_lock);
1422 	mutex_init(&adev->dm.audio_lock);
1423 	spin_lock_init(&adev->dm.vblank_lock);
1424 
1425 	if (amdgpu_dm_irq_init(adev)) {
1426 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1427 		goto error;
1428 	}
1429 
1430 	init_data.asic_id.chip_family = adev->family;
1431 
1432 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1433 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1434 	init_data.asic_id.chip_id = adev->pdev->device;
1435 
1436 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1437 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1438 	init_data.asic_id.atombios_base_address =
1439 		adev->mode_info.atom_context->bios;
1440 
1441 	init_data.driver = adev;
1442 
1443 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1444 
1445 	if (!adev->dm.cgs_device) {
1446 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1447 		goto error;
1448 	}
1449 
1450 	init_data.cgs_device = adev->dm.cgs_device;
1451 
1452 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1453 
1454 	switch (adev->ip_versions[DCE_HWIP][0]) {
1455 	case IP_VERSION(2, 1, 0):
1456 		switch (adev->dm.dmcub_fw_version) {
1457 		case 0: /* development */
1458 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1459 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1460 			init_data.flags.disable_dmcu = false;
1461 			break;
1462 		default:
1463 			init_data.flags.disable_dmcu = true;
1464 		}
1465 		break;
1466 	case IP_VERSION(2, 0, 3):
1467 		init_data.flags.disable_dmcu = true;
1468 		break;
1469 	default:
1470 		break;
1471 	}
1472 
1473 	switch (adev->asic_type) {
1474 	case CHIP_CARRIZO:
1475 	case CHIP_STONEY:
1476 		init_data.flags.gpu_vm_support = true;
1477 		break;
1478 	default:
1479 		switch (adev->ip_versions[DCE_HWIP][0]) {
1480 		case IP_VERSION(1, 0, 0):
1481 		case IP_VERSION(1, 0, 1):
1482 			/* enable S/G on PCO and RV2 */
1483 			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1484 			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1485 				init_data.flags.gpu_vm_support = true;
1486 			break;
1487 		case IP_VERSION(2, 1, 0):
1488 		case IP_VERSION(3, 0, 1):
1489 		case IP_VERSION(3, 1, 2):
1490 		case IP_VERSION(3, 1, 3):
1491 		case IP_VERSION(3, 1, 5):
1492 		case IP_VERSION(3, 1, 6):
1493 			init_data.flags.gpu_vm_support = true;
1494 			break;
1495 		default:
1496 			break;
1497 		}
1498 		break;
1499 	}
1500 
1501 	if (init_data.flags.gpu_vm_support)
1502 		adev->mode_info.gpu_vm_support = true;
1503 
1504 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1505 		init_data.flags.fbc_support = true;
1506 
1507 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1508 		init_data.flags.multi_mon_pp_mclk_switch = true;
1509 
1510 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1511 		init_data.flags.disable_fractional_pwm = true;
1512 
1513 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1514 		init_data.flags.edp_no_power_sequencing = true;
1515 
1516 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1517 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1518 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1519 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1520 
1521 	init_data.flags.seamless_boot_edp_requested = false;
1522 
1523 	if (check_seamless_boot_capability(adev)) {
1524 		init_data.flags.seamless_boot_edp_requested = true;
1525 		init_data.flags.allow_seamless_boot_optimization = true;
1526 		DRM_INFO("Seamless boot condition check passed\n");
1527 	}
1528 
1529 	init_data.flags.enable_mipi_converter_optimization = true;
1530 
1531 	INIT_LIST_HEAD(&adev->dm.da_list);
1532 	/* Display Core create. */
1533 	adev->dm.dc = dc_create(&init_data);
1534 
1535 	if (adev->dm.dc) {
1536 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1537 	} else {
1538 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1539 		goto error;
1540 	}
1541 
1542 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1543 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1544 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1545 	}
1546 
1547 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1548 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1549 	if (dm_should_disable_stutter(adev->pdev))
1550 		adev->dm.dc->debug.disable_stutter = true;
1551 
1552 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1553 		adev->dm.dc->debug.disable_stutter = true;
1554 
1555 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1556 		adev->dm.dc->debug.disable_dsc = true;
1557 		adev->dm.dc->debug.disable_dsc_edp = true;
1558 	}
1559 
1560 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1561 		adev->dm.dc->debug.disable_clock_gate = true;
1562 
1563 	r = dm_dmub_hw_init(adev);
1564 	if (r) {
1565 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1566 		goto error;
1567 	}
1568 
1569 	dc_hardware_init(adev->dm.dc);
1570 
1571 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1572 	if (!adev->dm.hpd_rx_offload_wq) {
1573 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1574 		goto error;
1575 	}
1576 
1577 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1578 		struct dc_phy_addr_space_config pa_config;
1579 
1580 		mmhub_read_system_context(adev, &pa_config);
1581 
1582 		// Call the DC init_memory func
1583 		dc_setup_system_context(adev->dm.dc, &pa_config);
1584 	}
1585 
1586 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1587 	if (!adev->dm.freesync_module) {
1588 		DRM_ERROR(
1589 		"amdgpu: failed to initialize freesync_module.\n");
1590 	} else
1591 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1592 				adev->dm.freesync_module);
1593 
1594 	amdgpu_dm_init_color_mod();
1595 
1596 	if (adev->dm.dc->caps.max_links > 0) {
1597 		adev->dm.vblank_control_workqueue =
1598 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1599 		if (!adev->dm.vblank_control_workqueue)
1600 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1601 	}
1602 
1603 #ifdef CONFIG_DRM_AMD_DC_HDCP
1604 	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1605 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1606 
1607 		if (!adev->dm.hdcp_workqueue)
1608 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1609 		else
1610 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1611 
1612 		dc_init_callbacks(adev->dm.dc, &init_params);
1613 	}
1614 #endif
1615 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1616 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1617 #endif
1618 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1619 		init_completion(&adev->dm.dmub_aux_transfer_done);
1620 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1621 		if (!adev->dm.dmub_notify) {
1622 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1623 			goto error;
1624 		}
1625 
1626 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1627 		if (!adev->dm.delayed_hpd_wq) {
1628 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1629 			goto error;
1630 		}
1631 
1632 		amdgpu_dm_outbox_init(adev);
1633 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1634 			dmub_aux_setconfig_callback, false)) {
1635 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1636 			goto error;
1637 		}
1638 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1639 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1640 			goto error;
1641 		}
1642 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1643 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1644 			goto error;
1645 		}
1646 	}
1647 
1648 	if (amdgpu_dm_initialize_drm_device(adev)) {
1649 		DRM_ERROR(
1650 		"amdgpu: failed to initialize sw for display support.\n");
1651 		goto error;
1652 	}
1653 
1654 	/* create fake encoders for MST */
1655 	dm_dp_create_fake_mst_encoders(adev);
1656 
1657 	/* TODO: Add_display_info? */
1658 
1659 	/* TODO use dynamic cursor width */
1660 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1661 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1662 
1663 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1664 		DRM_ERROR(
1665 		"amdgpu: failed to initialize sw for display support.\n");
1666 		goto error;
1667 	}
1668 
1669 
1670 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1671 
1672 	return 0;
1673 error:
1674 	amdgpu_dm_fini(adev);
1675 
1676 	return -EINVAL;
1677 }
1678 
1679 static int amdgpu_dm_early_fini(void *handle)
1680 {
1681 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1682 
1683 	amdgpu_dm_audio_fini(adev);
1684 
1685 	return 0;
1686 }
1687 
1688 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1689 {
1690 	int i;
1691 
1692 	if (adev->dm.vblank_control_workqueue) {
1693 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1694 		adev->dm.vblank_control_workqueue = NULL;
1695 	}
1696 
1697 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1698 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1699 	}
1700 
1701 	amdgpu_dm_destroy_drm_device(&adev->dm);
1702 
1703 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1704 	if (adev->dm.crc_rd_wrk) {
1705 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1706 		kfree(adev->dm.crc_rd_wrk);
1707 		adev->dm.crc_rd_wrk = NULL;
1708 	}
1709 #endif
1710 #ifdef CONFIG_DRM_AMD_DC_HDCP
1711 	if (adev->dm.hdcp_workqueue) {
1712 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1713 		adev->dm.hdcp_workqueue = NULL;
1714 	}
1715 
1716 	if (adev->dm.dc)
1717 		dc_deinit_callbacks(adev->dm.dc);
1718 #endif
1719 
1720 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1721 
1722 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1723 		kfree(adev->dm.dmub_notify);
1724 		adev->dm.dmub_notify = NULL;
1725 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1726 		adev->dm.delayed_hpd_wq = NULL;
1727 	}
1728 
1729 	if (adev->dm.dmub_bo)
1730 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1731 				      &adev->dm.dmub_bo_gpu_addr,
1732 				      &adev->dm.dmub_bo_cpu_addr);
1733 
1734 	if (adev->dm.hpd_rx_offload_wq) {
1735 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1736 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1737 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1738 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1739 			}
1740 		}
1741 
1742 		kfree(adev->dm.hpd_rx_offload_wq);
1743 		adev->dm.hpd_rx_offload_wq = NULL;
1744 	}
1745 
1746 	/* DC Destroy TODO: Replace destroy DAL */
1747 	if (adev->dm.dc)
1748 		dc_destroy(&adev->dm.dc);
1749 	/*
1750 	 * TODO: pageflip, vblank interrupt
1751 	 *
1752 	 * amdgpu_dm_irq_fini(adev);
1753 	 */
1754 
1755 	if (adev->dm.cgs_device) {
1756 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1757 		adev->dm.cgs_device = NULL;
1758 	}
1759 	if (adev->dm.freesync_module) {
1760 		mod_freesync_destroy(adev->dm.freesync_module);
1761 		adev->dm.freesync_module = NULL;
1762 	}
1763 
1764 	mutex_destroy(&adev->dm.audio_lock);
1765 	mutex_destroy(&adev->dm.dc_lock);
1766 
1767 	return;
1768 }
1769 
1770 static int load_dmcu_fw(struct amdgpu_device *adev)
1771 {
1772 	const char *fw_name_dmcu = NULL;
1773 	int r;
1774 	const struct dmcu_firmware_header_v1_0 *hdr;
1775 
1776 	switch (adev->asic_type) {
1777 #if defined(CONFIG_DRM_AMD_DC_SI)
1778 	case CHIP_TAHITI:
1779 	case CHIP_PITCAIRN:
1780 	case CHIP_VERDE:
1781 	case CHIP_OLAND:
1782 #endif
1783 	case CHIP_BONAIRE:
1784 	case CHIP_HAWAII:
1785 	case CHIP_KAVERI:
1786 	case CHIP_KABINI:
1787 	case CHIP_MULLINS:
1788 	case CHIP_TONGA:
1789 	case CHIP_FIJI:
1790 	case CHIP_CARRIZO:
1791 	case CHIP_STONEY:
1792 	case CHIP_POLARIS11:
1793 	case CHIP_POLARIS10:
1794 	case CHIP_POLARIS12:
1795 	case CHIP_VEGAM:
1796 	case CHIP_VEGA10:
1797 	case CHIP_VEGA12:
1798 	case CHIP_VEGA20:
1799 		return 0;
1800 	case CHIP_NAVI12:
1801 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1802 		break;
1803 	case CHIP_RAVEN:
1804 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1805 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1806 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1807 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1808 		else
1809 			return 0;
1810 		break;
1811 	default:
1812 		switch (adev->ip_versions[DCE_HWIP][0]) {
1813 		case IP_VERSION(2, 0, 2):
1814 		case IP_VERSION(2, 0, 3):
1815 		case IP_VERSION(2, 0, 0):
1816 		case IP_VERSION(2, 1, 0):
1817 		case IP_VERSION(3, 0, 0):
1818 		case IP_VERSION(3, 0, 2):
1819 		case IP_VERSION(3, 0, 3):
1820 		case IP_VERSION(3, 0, 1):
1821 		case IP_VERSION(3, 1, 2):
1822 		case IP_VERSION(3, 1, 3):
1823 		case IP_VERSION(3, 1, 5):
1824 		case IP_VERSION(3, 1, 6):
1825 		case IP_VERSION(3, 2, 0):
1826 		case IP_VERSION(3, 2, 1):
1827 			return 0;
1828 		default:
1829 			break;
1830 		}
1831 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1832 		return -EINVAL;
1833 	}
1834 
1835 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1836 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1837 		return 0;
1838 	}
1839 
1840 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1841 	if (r == -ENOENT) {
1842 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1843 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1844 		adev->dm.fw_dmcu = NULL;
1845 		return 0;
1846 	}
1847 	if (r) {
1848 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1849 			fw_name_dmcu);
1850 		return r;
1851 	}
1852 
1853 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1854 	if (r) {
1855 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1856 			fw_name_dmcu);
1857 		release_firmware(adev->dm.fw_dmcu);
1858 		adev->dm.fw_dmcu = NULL;
1859 		return r;
1860 	}
1861 
1862 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1863 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1864 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1865 	adev->firmware.fw_size +=
1866 		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1867 
1868 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1869 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1870 	adev->firmware.fw_size +=
1871 		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1872 
1873 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1874 
1875 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1876 
1877 	return 0;
1878 }
1879 
1880 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1881 {
1882 	struct amdgpu_device *adev = ctx;
1883 
1884 	return dm_read_reg(adev->dm.dc->ctx, address);
1885 }
1886 
1887 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1888 				     uint32_t value)
1889 {
1890 	struct amdgpu_device *adev = ctx;
1891 
1892 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1893 }
1894 
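/*
 * Pick, load and validate the DMCUB firmware for this ASIC, then create the
 * DMUB service and carve out a VRAM buffer for its regions. ASICs without a
 * DMCUB simply return early.
 */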
1895 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1896 {
1897 	struct dmub_srv_create_params create_params;
1898 	struct dmub_srv_region_params region_params;
1899 	struct dmub_srv_region_info region_info;
1900 	struct dmub_srv_fb_params fb_params;
1901 	struct dmub_srv_fb_info *fb_info;
1902 	struct dmub_srv *dmub_srv;
1903 	const struct dmcub_firmware_header_v1_0 *hdr;
1904 	const char *fw_name_dmub;
1905 	enum dmub_asic dmub_asic;
1906 	enum dmub_status status;
1907 	int r;
1908 
1909 	switch (adev->ip_versions[DCE_HWIP][0]) {
1910 	case IP_VERSION(2, 1, 0):
1911 		dmub_asic = DMUB_ASIC_DCN21;
1912 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1913 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1914 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1915 		break;
1916 	case IP_VERSION(3, 0, 0):
1917 		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1918 			dmub_asic = DMUB_ASIC_DCN30;
1919 			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1920 		} else {
1921 			dmub_asic = DMUB_ASIC_DCN30;
1922 			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1923 		}
1924 		break;
1925 	case IP_VERSION(3, 0, 1):
1926 		dmub_asic = DMUB_ASIC_DCN301;
1927 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1928 		break;
1929 	case IP_VERSION(3, 0, 2):
1930 		dmub_asic = DMUB_ASIC_DCN302;
1931 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1932 		break;
1933 	case IP_VERSION(3, 0, 3):
1934 		dmub_asic = DMUB_ASIC_DCN303;
1935 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1936 		break;
1937 	case IP_VERSION(3, 1, 2):
1938 	case IP_VERSION(3, 1, 3):
1939 		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1940 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1941 		break;
1942 	case IP_VERSION(3, 1, 5):
1943 		dmub_asic = DMUB_ASIC_DCN315;
1944 		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1945 		break;
1946 	case IP_VERSION(3, 1, 6):
1947 		dmub_asic = DMUB_ASIC_DCN316;
1948 		fw_name_dmub = FIRMWARE_DCN316_DMUB;
1949 		break;
1950 	case IP_VERSION(3, 2, 0):
1951 		dmub_asic = DMUB_ASIC_DCN32;
1952 		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
1953 		break;
1954 	case IP_VERSION(3, 2, 1):
1955 		dmub_asic = DMUB_ASIC_DCN321;
1956 		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
1957 		break;
1958 	default:
1959 		/* ASIC doesn't support DMUB. */
1960 		return 0;
1961 	}
1962 
1963 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1964 	if (r) {
1965 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1966 		return 0;
1967 	}
1968 
1969 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1970 	if (r) {
1971 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1972 		return 0;
1973 	}
1974 
1975 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1976 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1977 
1978 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1979 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1980 			AMDGPU_UCODE_ID_DMCUB;
1981 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1982 			adev->dm.dmub_fw;
1983 		adev->firmware.fw_size +=
1984 			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1985 
1986 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1987 			 adev->dm.dmcub_fw_version);
1988 	}
1989 
1990 
1991 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1992 	dmub_srv = adev->dm.dmub_srv;
1993 
1994 	if (!dmub_srv) {
1995 		DRM_ERROR("Failed to allocate DMUB service!\n");
1996 		return -ENOMEM;
1997 	}
1998 
1999 	memset(&create_params, 0, sizeof(create_params));
2000 	create_params.user_ctx = adev;
2001 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2002 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2003 	create_params.asic = dmub_asic;
2004 
2005 	/* Create the DMUB service. */
2006 	status = dmub_srv_create(dmub_srv, &create_params);
2007 	if (status != DMUB_STATUS_OK) {
2008 		DRM_ERROR("Error creating DMUB service: %d\n", status);
2009 		return -EINVAL;
2010 	}
2011 
2012 	/* Calculate the size of all the regions for the DMUB service. */
2013 	memset(&region_params, 0, sizeof(region_params));
2014 
2015 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2016 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2017 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2018 	region_params.vbios_size = adev->bios_size;
2019 	region_params.fw_bss_data = region_params.bss_data_size ?
2020 		adev->dm.dmub_fw->data +
2021 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2022 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2023 	region_params.fw_inst_const =
2024 		adev->dm.dmub_fw->data +
2025 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2026 		PSP_HEADER_BYTES;
2027 
2028 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2029 					   &region_info);
2030 
2031 	if (status != DMUB_STATUS_OK) {
2032 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2033 		return -EINVAL;
2034 	}
2035 
2036 	/*
2037 	 * Allocate a framebuffer based on the total size of all the regions.
2038 	 * TODO: Move this into GART.
2039 	 */
2040 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2041 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2042 				    &adev->dm.dmub_bo_gpu_addr,
2043 				    &adev->dm.dmub_bo_cpu_addr);
2044 	if (r)
2045 		return r;
2046 
2047 	/* Rebase the regions on the framebuffer address. */
2048 	memset(&fb_params, 0, sizeof(fb_params));
2049 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2050 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2051 	fb_params.region_info = &region_info;
2052 
2053 	adev->dm.dmub_fb_info =
2054 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2055 	fb_info = adev->dm.dmub_fb_info;
2056 
2057 	if (!fb_info) {
2058 		DRM_ERROR(
2059 			"Failed to allocate framebuffer info for DMUB service!\n");
2060 		return -ENOMEM;
2061 	}
2062 
2063 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2064 	if (status != DMUB_STATUS_OK) {
2065 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2066 		return -EINVAL;
2067 	}
2068 
2069 	return 0;
2070 }
2071 
2072 static int dm_sw_init(void *handle)
2073 {
2074 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2075 	int r;
2076 
2077 	r = dm_dmub_sw_init(adev);
2078 	if (r)
2079 		return r;
2080 
2081 	return load_dmcu_fw(adev);
2082 }
2083 
2084 static int dm_sw_fini(void *handle)
2085 {
2086 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2087 
2088 	kfree(adev->dm.dmub_fb_info);
2089 	adev->dm.dmub_fb_info = NULL;
2090 
2091 	if (adev->dm.dmub_srv) {
2092 		dmub_srv_destroy(adev->dm.dmub_srv);
2093 		adev->dm.dmub_srv = NULL;
2094 	}
2095 
2096 	release_firmware(adev->dm.dmub_fw);
2097 	adev->dm.dmub_fw = NULL;
2098 
2099 	release_firmware(adev->dm.fw_dmcu);
2100 	adev->dm.fw_dmcu = NULL;
2101 
2102 	return 0;
2103 }
2104 
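/*
 * Walk every connector and (re)start MST topology management on each link
 * that was detected as an MST branch device.
 */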
2105 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2106 {
2107 	struct amdgpu_dm_connector *aconnector;
2108 	struct drm_connector *connector;
2109 	struct drm_connector_list_iter iter;
2110 	int ret = 0;
2111 
2112 	drm_connector_list_iter_begin(dev, &iter);
2113 	drm_for_each_connector_iter(connector, &iter) {
2114 		aconnector = to_amdgpu_dm_connector(connector);
2115 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2116 		    aconnector->mst_mgr.aux) {
2117 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2118 					 aconnector,
2119 					 aconnector->base.base.id);
2120 
2121 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2122 			if (ret < 0) {
2123 				DRM_ERROR("DM_MST: Failed to start MST\n");
2124 				aconnector->dc_link->type =
2125 					dc_connection_single;
2126 				break;
2127 			}
2128 		}
2129 	}
2130 	drm_connector_list_iter_end(&iter);
2131 
2132 	return ret;
2133 }
2134 
2135 static int dm_late_init(void *handle)
2136 {
2137 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2138 
2139 	struct dmcu_iram_parameters params;
2140 	unsigned int linear_lut[16];
2141 	int i;
2142 	struct dmcu *dmcu = NULL;
2143 
2144 	dmcu = adev->dm.dc->res_pool->dmcu;
2145 
2146 	for (i = 0; i < 16; i++)
2147 		linear_lut[i] = 0xFFFF * i / 15;
2148 
2149 	params.set = 0;
2150 	params.backlight_ramping_override = false;
2151 	params.backlight_ramping_start = 0xCCCC;
2152 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2153 	params.backlight_lut_array_size = 16;
2154 	params.backlight_lut_array = linear_lut;
2155 
2156 	/* Min backlight level after ABM reduction; don't allow below 1%.
2157 	 * 0xFFFF * 0.01 = 0x28F
2158 	 */
2159 	params.min_abm_backlight = 0x28F;
2160 	/* In the case where abm is implemented on dmcub,
2161 	 * dmcu object will be null.
2162 	 * ABM 2.4 and up are implemented on dmcub.
2163 	 */
2164 	if (dmcu) {
2165 		if (!dmcu_load_iram(dmcu, params))
2166 			return -EINVAL;
2167 	} else if (adev->dm.dc->ctx->dmub_srv) {
2168 		struct dc_link *edp_links[MAX_NUM_EDP];
2169 		int edp_num;
2170 
2171 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2172 		for (i = 0; i < edp_num; i++) {
2173 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2174 				return -EINVAL;
2175 		}
2176 	}
2177 
2178 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2179 }
2180 
2181 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2182 {
2183 	struct amdgpu_dm_connector *aconnector;
2184 	struct drm_connector *connector;
2185 	struct drm_connector_list_iter iter;
2186 	struct drm_dp_mst_topology_mgr *mgr;
2187 	int ret;
2188 	bool need_hotplug = false;
2189 
2190 	drm_connector_list_iter_begin(dev, &iter);
2191 	drm_for_each_connector_iter(connector, &iter) {
2192 		aconnector = to_amdgpu_dm_connector(connector);
2193 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2194 		    aconnector->mst_port)
2195 			continue;
2196 
2197 		mgr = &aconnector->mst_mgr;
2198 
2199 		if (suspend) {
2200 			drm_dp_mst_topology_mgr_suspend(mgr);
2201 		} else {
2202 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2203 			if (ret < 0) {
2204 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2205 				need_hotplug = true;
2206 			}
2207 		}
2208 	}
2209 	drm_connector_list_iter_end(&iter);
2210 
2211 	if (need_hotplug)
2212 		drm_kms_helper_hotplug_event(dev);
2213 }
2214 
2215 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2216 {
2217 	int ret = 0;
2218 
2219 	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2220 	 * on the Windows driver dc implementation.
2221 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2222 	 * should be passed to smu during boot up and resume from s3.
2223 	 * boot up: dc calculates dcn watermark clock settings within dc_create,
2224 	 * dcn20_resource_construct
2225 	 * then call pplib functions below to pass the settings to smu:
2226 	 * smu_set_watermarks_for_clock_ranges
2227 	 * smu_set_watermarks_table
2228 	 * navi10_set_watermarks_table
2229 	 * smu_write_watermarks_table
2230 	 *
2231 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2232 	 * dc has implemented a different flow for the Windows driver:
2233 	 * dc_hardware_init / dc_set_power_state
2234 	 * dcn10_init_hw
2235 	 * notify_wm_ranges
2236 	 * set_wm_ranges
2237 	 * -- Linux
2238 	 * smu_set_watermarks_for_clock_ranges
2239 	 * renoir_set_watermarks_table
2240 	 * smu_write_watermarks_table
2241 	 *
2242 	 * For Linux,
2243 	 * dc_hardware_init -> amdgpu_dm_init
2244 	 * dc_set_power_state --> dm_resume
2245 	 *
2246 	 * therefore, this function applies to navi10/12/14 but not Renoir
2247 	 *
2248 	 */
2249 	switch (adev->ip_versions[DCE_HWIP][0]) {
2250 	case IP_VERSION(2, 0, 2):
2251 	case IP_VERSION(2, 0, 0):
2252 		break;
2253 	default:
2254 		return 0;
2255 	}
2256 
2257 	ret = amdgpu_dpm_write_watermarks_table(adev);
2258 	if (ret) {
2259 		DRM_ERROR("Failed to update WMTABLE!\n");
2260 		return ret;
2261 	}
2262 
2263 	return 0;
2264 }
2265 
2266 /**
2267  * dm_hw_init() - Initialize DC device
2268  * @handle: The base driver device containing the amdgpu_dm device.
2269  *
2270  * Initialize the &struct amdgpu_display_manager device. This involves calling
2271  * the initializers of each DM component, then populating the struct with them.
2272  *
2273  * Although the function implies hardware initialization, both hardware and
2274  * software are initialized here. Splitting them out to their relevant init
2275  * hooks is a future TODO item.
2276  *
2277  * Some notable things that are initialized here:
2278  *
2279  * - Display Core, both software and hardware
2280  * - DC modules that we need (freesync and color management)
2281  * - DRM software states
2282  * - Interrupt sources and handlers
2283  * - Vblank support
2284  * - Debug FS entries, if enabled
2285  */
2286 static int dm_hw_init(void *handle)
2287 {
2288 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2289 	/* Create DAL display manager */
2290 	amdgpu_dm_init(adev);
2291 	amdgpu_dm_hpd_init(adev);
2292 
2293 	return 0;
2294 }
2295 
2296 /**
2297  * dm_hw_fini() - Teardown DC device
2298  * @handle: The base driver device containing the amdgpu_dm device.
2299  *
2300  * Teardown components within &struct amdgpu_display_manager that require
2301  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2302  * were loaded. Also flush IRQ workqueues and disable them.
2303  */
2304 static int dm_hw_fini(void *handle)
2305 {
2306 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2307 
2308 	amdgpu_dm_hpd_fini(adev);
2309 
2310 	amdgpu_dm_irq_fini(adev);
2311 	amdgpu_dm_fini(adev);
2312 	return 0;
2313 }
2314 
2315 
2316 static int dm_enable_vblank(struct drm_crtc *crtc);
2317 static void dm_disable_vblank(struct drm_crtc *crtc);
2318 
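/*
 * Enable or disable the pageflip and vblank interrupts for every stream in
 * @state that still has planes. Used to quiesce display interrupts before a
 * GPU reset and to restore them afterwards.
 */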
2319 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2320 				 struct dc_state *state, bool enable)
2321 {
2322 	enum dc_irq_source irq_source;
2323 	struct amdgpu_crtc *acrtc;
2324 	int rc = -EBUSY;
2325 	int i = 0;
2326 
2327 	for (i = 0; i < state->stream_count; i++) {
2328 		acrtc = get_crtc_by_otg_inst(
2329 				adev, state->stream_status[i].primary_otg_inst);
2330 
2331 		if (acrtc && state->stream_status[i].plane_count != 0) {
2332 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2333 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2334 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2335 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2336 			if (rc)
2337 				DRM_WARN("Failed to %s pflip interrupts\n",
2338 					 enable ? "enable" : "disable");
2339 
2340 			if (enable) {
2341 				rc = dm_enable_vblank(&acrtc->base);
2342 				if (rc)
2343 					DRM_WARN("Failed to enable vblank interrupts\n");
2344 			} else {
2345 				dm_disable_vblank(&acrtc->base);
2346 			}
2347 
2348 		}
2349 	}
2350 
2351 }
2352 
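/*
 * Commit a copy of the current DC state with every plane and stream removed,
 * effectively blanking all displays. Called while suspending during a GPU
 * reset.
 */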
2353 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2354 {
2355 	struct dc_state *context = NULL;
2356 	enum dc_status res = DC_ERROR_UNEXPECTED;
2357 	int i;
2358 	struct dc_stream_state *del_streams[MAX_PIPES];
2359 	int del_streams_count = 0;
2360 
2361 	memset(del_streams, 0, sizeof(del_streams));
2362 
2363 	context = dc_create_state(dc);
2364 	if (context == NULL)
2365 		goto context_alloc_fail;
2366 
2367 	dc_resource_state_copy_construct_current(dc, context);
2368 
2369 	/* First remove from context all streams */
2370 	for (i = 0; i < context->stream_count; i++) {
2371 		struct dc_stream_state *stream = context->streams[i];
2372 
2373 		del_streams[del_streams_count++] = stream;
2374 	}
2375 
2376 	/* Remove all planes for removed streams and then remove the streams */
2377 	for (i = 0; i < del_streams_count; i++) {
2378 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2379 			res = DC_FAIL_DETACH_SURFACES;
2380 			goto fail;
2381 		}
2382 
2383 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2384 		if (res != DC_OK)
2385 			goto fail;
2386 	}
2387 
2388 	res = dc_commit_state(dc, context);
2389 
2390 fail:
2391 	dc_release_state(context);
2392 
2393 context_alloc_fail:
2394 	return res;
2395 }
2396 
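/* Flush any pending HPD RX offload work on every link before suspending. */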
2397 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2398 {
2399 	int i;
2400 
2401 	if (dm->hpd_rx_offload_wq) {
2402 		for (i = 0; i < dm->dc->caps.max_links; i++)
2403 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2404 	}
2405 }
2406 
2407 static int dm_suspend(void *handle)
2408 {
2409 	struct amdgpu_device *adev = handle;
2410 	struct amdgpu_display_manager *dm = &adev->dm;
2411 	int ret = 0;
2412 
2413 	if (amdgpu_in_reset(adev)) {
2414 		mutex_lock(&dm->dc_lock);
2415 
2416 		dc_allow_idle_optimizations(adev->dm.dc, false);
2417 
2418 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2419 
2420 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2421 
2422 		amdgpu_dm_commit_zero_streams(dm->dc);
2423 
2424 		amdgpu_dm_irq_suspend(adev);
2425 
2426 		hpd_rx_irq_work_suspend(dm);
2427 
2428 		return ret;
2429 	}
2430 
2431 	WARN_ON(adev->dm.cached_state);
2432 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2433 
2434 	s3_handle_mst(adev_to_drm(adev), true);
2435 
2436 	amdgpu_dm_irq_suspend(adev);
2437 
2438 	hpd_rx_irq_work_suspend(dm);
2439 
2440 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2441 
2442 	return 0;
2443 }
2444 
2445 struct amdgpu_dm_connector *
2446 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2447 					     struct drm_crtc *crtc)
2448 {
2449 	uint32_t i;
2450 	struct drm_connector_state *new_con_state;
2451 	struct drm_connector *connector;
2452 	struct drm_crtc *crtc_from_state;
2453 
2454 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2455 		crtc_from_state = new_con_state->crtc;
2456 
2457 		if (crtc_from_state == crtc)
2458 			return to_amdgpu_dm_connector(connector);
2459 	}
2460 
2461 	return NULL;
2462 }
2463 
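/*
 * Emulate link detection for a forced connector: mark the link as
 * disconnected, create a sink matching the connector signal type and try to
 * read a local EDID for it.
 */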
2464 static void emulated_link_detect(struct dc_link *link)
2465 {
2466 	struct dc_sink_init_data sink_init_data = { 0 };
2467 	struct display_sink_capability sink_caps = { 0 };
2468 	enum dc_edid_status edid_status;
2469 	struct dc_context *dc_ctx = link->ctx;
2470 	struct dc_sink *sink = NULL;
2471 	struct dc_sink *prev_sink = NULL;
2472 
2473 	link->type = dc_connection_none;
2474 	prev_sink = link->local_sink;
2475 
2476 	if (prev_sink)
2477 		dc_sink_release(prev_sink);
2478 
2479 	switch (link->connector_signal) {
2480 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2481 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2482 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2483 		break;
2484 	}
2485 
2486 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2487 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2488 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2489 		break;
2490 	}
2491 
2492 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2493 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2494 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2495 		break;
2496 	}
2497 
2498 	case SIGNAL_TYPE_LVDS: {
2499 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2500 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2501 		break;
2502 	}
2503 
2504 	case SIGNAL_TYPE_EDP: {
2505 		sink_caps.transaction_type =
2506 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2507 		sink_caps.signal = SIGNAL_TYPE_EDP;
2508 		break;
2509 	}
2510 
2511 	case SIGNAL_TYPE_DISPLAY_PORT: {
2512 		sink_caps.transaction_type =
2513 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2514 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2515 		break;
2516 	}
2517 
2518 	default:
2519 		DC_ERROR("Invalid connector type! signal:%d\n",
2520 			link->connector_signal);
2521 		return;
2522 	}
2523 
2524 	sink_init_data.link = link;
2525 	sink_init_data.sink_signal = sink_caps.signal;
2526 
2527 	sink = dc_sink_create(&sink_init_data);
2528 	if (!sink) {
2529 		DC_ERROR("Failed to create sink!\n");
2530 		return;
2531 	}
2532 
2533 	/* dc_sink_create returns a new reference */
2534 	link->local_sink = sink;
2535 
2536 	edid_status = dm_helpers_read_local_edid(
2537 			link->ctx,
2538 			link,
2539 			sink);
2540 
2541 	if (edid_status != EDID_OK)
2542 		DC_ERROR("Failed to read EDID");
2543 
2544 }
2545 
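/*
 * After a GPU reset, re-commit every stream in the cached DC state with all
 * of its planes flagged for a full update so DC reprograms the hardware.
 */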
2546 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2547 				     struct amdgpu_display_manager *dm)
2548 {
2549 	struct {
2550 		struct dc_surface_update surface_updates[MAX_SURFACES];
2551 		struct dc_plane_info plane_infos[MAX_SURFACES];
2552 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2553 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2554 		struct dc_stream_update stream_update;
2555 	} *bundle;
2556 	int k, m;
2557 
2558 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2559 
2560 	if (!bundle) {
2561 		dm_error("Failed to allocate update bundle\n");
2562 		goto cleanup;
2563 	}
2564 
2565 	for (k = 0; k < dc_state->stream_count; k++) {
2566 		bundle->stream_update.stream = dc_state->streams[k];
2567 
2568 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2569 			bundle->surface_updates[m].surface =
2570 				dc_state->stream_status->plane_states[m];
2571 			bundle->surface_updates[m].surface->force_full_update =
2572 				true;
2573 		}
2574 		dc_commit_updates_for_stream(
2575 			dm->dc, bundle->surface_updates,
2576 			dc_state->stream_status->plane_count,
2577 			dc_state->streams[k], &bundle->stream_update, dc_state);
2578 	}
2579 
2580 cleanup:
2581 	kfree(bundle);
2582 
2583 	return;
2584 }
2585 
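/*
 * Force DPMS off on the stream currently driving @link, typically when an
 * HPD interrupt reports that the sink has gone away.
 */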
2586 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2587 {
2588 	struct dc_stream_state *stream_state;
2589 	struct amdgpu_dm_connector *aconnector = link->priv;
2590 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2591 	struct dc_stream_update stream_update;
2592 	bool dpms_off = true;
2593 
2594 	memset(&stream_update, 0, sizeof(stream_update));
2595 	stream_update.dpms_off = &dpms_off;
2596 
2597 	mutex_lock(&adev->dm.dc_lock);
2598 	stream_state = dc_stream_find_from_link(link);
2599 
2600 	if (stream_state == NULL) {
2601 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2602 		mutex_unlock(&adev->dm.dc_lock);
2603 		return;
2604 	}
2605 
2606 	stream_update.stream = stream_state;
2607 	acrtc_state->force_dpms_off = true;
2608 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2609 				     stream_state, &stream_update,
2610 				     stream_state->ctx->dc->current_state);
2611 	mutex_unlock(&adev->dm.dc_lock);
2612 }
2613 
2614 static int dm_resume(void *handle)
2615 {
2616 	struct amdgpu_device *adev = handle;
2617 	struct drm_device *ddev = adev_to_drm(adev);
2618 	struct amdgpu_display_manager *dm = &adev->dm;
2619 	struct amdgpu_dm_connector *aconnector;
2620 	struct drm_connector *connector;
2621 	struct drm_connector_list_iter iter;
2622 	struct drm_crtc *crtc;
2623 	struct drm_crtc_state *new_crtc_state;
2624 	struct dm_crtc_state *dm_new_crtc_state;
2625 	struct drm_plane *plane;
2626 	struct drm_plane_state *new_plane_state;
2627 	struct dm_plane_state *dm_new_plane_state;
2628 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2629 	enum dc_connection_type new_connection_type = dc_connection_none;
2630 	struct dc_state *dc_state;
2631 	int i, r, j;
2632 
2633 	if (amdgpu_in_reset(adev)) {
2634 		dc_state = dm->cached_dc_state;
2635 
2636 		/*
2637 		 * The dc->current_state is backed up into dm->cached_dc_state
2638 		 * before we commit 0 streams.
2639 		 *
2640 		 * DC will clear link encoder assignments on the real state
2641 		 * but the changes won't propagate over to the copy we made
2642 		 * before the 0 streams commit.
2643 		 *
2644 		 * DC expects that link encoder assignments are *not* valid
2645 		 * when committing a state, so as a workaround we can copy
2646 		 * off of the current state.
2647 		 *
2648 		 * We lose the previous assignments, but we had already
2649 		 * committed 0 streams anyway.
2650 		 */
2651 		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2652 
2653 		if (dc_enable_dmub_notifications(adev->dm.dc))
2654 			amdgpu_dm_outbox_init(adev);
2655 
2656 		r = dm_dmub_hw_init(adev);
2657 		if (r)
2658 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2659 
2660 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2661 		dc_resume(dm->dc);
2662 
2663 		amdgpu_dm_irq_resume_early(adev);
2664 
2665 		for (i = 0; i < dc_state->stream_count; i++) {
2666 			dc_state->streams[i]->mode_changed = true;
2667 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2668 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2669 					= 0xffffffff;
2670 			}
2671 		}
2672 
2673 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2674 
2675 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2676 
2677 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2678 
2679 		dc_release_state(dm->cached_dc_state);
2680 		dm->cached_dc_state = NULL;
2681 
2682 		amdgpu_dm_irq_resume_late(adev);
2683 
2684 		mutex_unlock(&dm->dc_lock);
2685 
2686 		return 0;
2687 	}
2688 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2689 	dc_release_state(dm_state->context);
2690 	dm_state->context = dc_create_state(dm->dc);
2691 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2692 	dc_resource_state_construct(dm->dc, dm_state->context);
2693 
2694 	/* Re-enable outbox interrupts for DPIA. */
2695 	if (dc_enable_dmub_notifications(adev->dm.dc))
2696 		amdgpu_dm_outbox_init(adev);
2697 
2698 	/* Before powering on DC we need to re-initialize DMUB. */
2699 	dm_dmub_hw_resume(adev);
2700 
2701 	/* power on hardware */
2702 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2703 
2704 	/* program HPD filter */
2705 	dc_resume(dm->dc);
2706 
2707 	/*
2708 	 * early enable HPD Rx IRQ, should be done before set mode as short
2709 	 * pulse interrupts are used for MST
2710 	 */
2711 	amdgpu_dm_irq_resume_early(adev);
2712 
2713 	/* On resume we need to rewrite the MSTM control bits to enable MST */
2714 	s3_handle_mst(ddev, false);
2715 
2716 	/* Do detection */
2717 	drm_connector_list_iter_begin(ddev, &iter);
2718 	drm_for_each_connector_iter(connector, &iter) {
2719 		aconnector = to_amdgpu_dm_connector(connector);
2720 
2721 		/*
2722 		 * this is the case when traversing through already created
2723 		 * MST connectors, should be skipped
2724 		 */
2725 		if (aconnector->dc_link &&
2726 		    aconnector->dc_link->type == dc_connection_mst_branch)
2727 			continue;
2728 
2729 		mutex_lock(&aconnector->hpd_lock);
2730 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2731 			DRM_ERROR("KMS: Failed to detect connector\n");
2732 
2733 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2734 			emulated_link_detect(aconnector->dc_link);
2735 		else
2736 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2737 
2738 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2739 			aconnector->fake_enable = false;
2740 
2741 		if (aconnector->dc_sink)
2742 			dc_sink_release(aconnector->dc_sink);
2743 		aconnector->dc_sink = NULL;
2744 		amdgpu_dm_update_connector_after_detect(aconnector);
2745 		mutex_unlock(&aconnector->hpd_lock);
2746 	}
2747 	drm_connector_list_iter_end(&iter);
2748 
2749 	/* Force mode set in atomic commit */
2750 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2751 		new_crtc_state->active_changed = true;
2752 
2753 	/*
2754 	 * atomic_check is expected to create the dc states. We need to release
2755 	 * them here, since they were duplicated as part of the suspend
2756 	 * procedure.
2757 	 */
2758 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2759 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2760 		if (dm_new_crtc_state->stream) {
2761 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2762 			dc_stream_release(dm_new_crtc_state->stream);
2763 			dm_new_crtc_state->stream = NULL;
2764 		}
2765 	}
2766 
2767 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2768 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2769 		if (dm_new_plane_state->dc_state) {
2770 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2771 			dc_plane_state_release(dm_new_plane_state->dc_state);
2772 			dm_new_plane_state->dc_state = NULL;
2773 		}
2774 	}
2775 
2776 	drm_atomic_helper_resume(ddev, dm->cached_state);
2777 
2778 	dm->cached_state = NULL;
2779 
2780 	amdgpu_dm_irq_resume_late(adev);
2781 
2782 	amdgpu_dm_smu_write_watermarks_table(adev);
2783 
2784 	return 0;
2785 }
2786 
2787 /**
2788  * DOC: DM Lifecycle
2789  *
2790  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2791  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2792  * the base driver's device list to be initialized and torn down accordingly.
2793  *
2794  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2795  */
2796 
2797 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2798 	.name = "dm",
2799 	.early_init = dm_early_init,
2800 	.late_init = dm_late_init,
2801 	.sw_init = dm_sw_init,
2802 	.sw_fini = dm_sw_fini,
2803 	.early_fini = amdgpu_dm_early_fini,
2804 	.hw_init = dm_hw_init,
2805 	.hw_fini = dm_hw_fini,
2806 	.suspend = dm_suspend,
2807 	.resume = dm_resume,
2808 	.is_idle = dm_is_idle,
2809 	.wait_for_idle = dm_wait_for_idle,
2810 	.check_soft_reset = dm_check_soft_reset,
2811 	.soft_reset = dm_soft_reset,
2812 	.set_clockgating_state = dm_set_clockgating_state,
2813 	.set_powergating_state = dm_set_powergating_state,
2814 };
2815 
2816 const struct amdgpu_ip_block_version dm_ip_block =
2817 {
2818 	.type = AMD_IP_BLOCK_TYPE_DCE,
2819 	.major = 1,
2820 	.minor = 0,
2821 	.rev = 0,
2822 	.funcs = &amdgpu_dm_funcs,
2823 };
2824 
2825 
2826 /**
2827  * DOC: atomic
2828  *
2829  * *WIP*
2830  */
2831 
2832 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2833 	.fb_create = amdgpu_display_user_framebuffer_create,
2834 	.get_format_info = amd_get_format_info,
2835 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2836 	.atomic_check = amdgpu_dm_atomic_check,
2837 	.atomic_commit = drm_atomic_helper_commit,
2838 };
2839 
2840 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2841 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2842 };
2843 
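/*
 * Cache the eDP sink's extended (OLED/aux backlight) capabilities and derive
 * the AUX backlight range from the connector's HDR sink metadata.
 */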
2844 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2845 {
2846 	u32 max_avg, min_cll, max, min, q, r;
2847 	struct amdgpu_dm_backlight_caps *caps;
2848 	struct amdgpu_display_manager *dm;
2849 	struct drm_connector *conn_base;
2850 	struct amdgpu_device *adev;
2851 	struct dc_link *link = NULL;
2852 	static const u8 pre_computed_values[] = {
2853 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2854 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2855 	int i;
2856 
2857 	if (!aconnector || !aconnector->dc_link)
2858 		return;
2859 
2860 	link = aconnector->dc_link;
2861 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2862 		return;
2863 
2864 	conn_base = &aconnector->base;
2865 	adev = drm_to_adev(conn_base->dev);
2866 	dm = &adev->dm;
2867 	for (i = 0; i < dm->num_of_edps; i++) {
2868 		if (link == dm->backlight_link[i])
2869 			break;
2870 	}
2871 	if (i >= dm->num_of_edps)
2872 		return;
2873 	caps = &dm->backlight_caps[i];
2874 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2875 	caps->aux_support = false;
2876 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2877 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2878 
2879 	if (caps->ext_caps->bits.oled == 1 /*||
2880 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2881 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2882 		caps->aux_support = true;
2883 
2884 	if (amdgpu_backlight == 0)
2885 		caps->aux_support = false;
2886 	else if (amdgpu_backlight == 1)
2887 		caps->aux_support = true;
2888 
2889 	/* From the specification (CTA-861-G), for calculating the maximum
2890 	 * luminance we need to use:
2891 	 *	Luminance = 50*2**(CV/32)
2892 	 * Where CV is a one-byte value.
2893 	 * Evaluating this expression would require floating point precision;
2894 	 * to avoid that complexity, we take advantage of the fact that CV is
2895 	 * divided by a constant. From Euclid's division algorithm, we know that
2896 	 * CV can be written as CV = 32*q + r. Substituting CV in the Luminance
2897 	 * expression gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute
2898 	 * the values of 50*(2**(r/32)). The values were pre-computed with the
2899 	 * following Ruby line:
2900 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2901 	 * The results of the above expressions can be verified at
2902 	 * pre_computed_values.
2903 	 */
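	/*
	 * For example (illustrative values), max_avg = 69 (0x45) gives
	 * q = 69 >> 5 = 2 and r = 69 % 32 = 5, so
	 * max = (1 << 2) * pre_computed_values[5] = 4 * 56 = 224,
	 * close to round(50 * 2**(69/32)) ~= 223 (table rounding).
	 */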
2904 	q = max_avg >> 5;
2905 	r = max_avg % 32;
2906 	max = (1 << q) * pre_computed_values[r];
2907 
2908 	/* min luminance: maxLum * (CV/255)^2 / 100 */
2909 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2910 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2911 
2912 	caps->aux_max_input_signal = max;
2913 	caps->aux_min_input_signal = min;
2914 }
2915 
2916 void amdgpu_dm_update_connector_after_detect(
2917 		struct amdgpu_dm_connector *aconnector)
2918 {
2919 	struct drm_connector *connector = &aconnector->base;
2920 	struct drm_device *dev = connector->dev;
2921 	struct dc_sink *sink;
2922 
2923 	/* MST handled by drm_mst framework */
2924 	if (aconnector->mst_mgr.mst_state)
2925 		return;
2926 
2927 	sink = aconnector->dc_link->local_sink;
2928 	if (sink)
2929 		dc_sink_retain(sink);
2930 
2931 	/*
2932 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2933 	 * the connector sink is set to either a fake or a physical sink depending on link status.
2934 	 * Skip if already done during boot.
2935 	 */
2936 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2937 			&& aconnector->dc_em_sink) {
2938 
2939 		/*
2940 		 * For S3 resume with headless use em_sink to fake the stream
2941 		 * because on resume connector->sink is set to NULL
2942 		 */
2943 		mutex_lock(&dev->mode_config.mutex);
2944 
2945 		if (sink) {
2946 			if (aconnector->dc_sink) {
2947 				amdgpu_dm_update_freesync_caps(connector, NULL);
2948 				/*
2949 				 * retain and release below are used to bump up the
2950 				 * refcount for the sink because the link no longer points
2951 				 * to it after disconnect, so on the next crtc-to-connector
2952 				 * reshuffle by UMD we would get an unwanted dc_sink release
2953 				 */
2954 				dc_sink_release(aconnector->dc_sink);
2955 			}
2956 			aconnector->dc_sink = sink;
2957 			dc_sink_retain(aconnector->dc_sink);
2958 			amdgpu_dm_update_freesync_caps(connector,
2959 					aconnector->edid);
2960 		} else {
2961 			amdgpu_dm_update_freesync_caps(connector, NULL);
2962 			if (!aconnector->dc_sink) {
2963 				aconnector->dc_sink = aconnector->dc_em_sink;
2964 				dc_sink_retain(aconnector->dc_sink);
2965 			}
2966 		}
2967 
2968 		mutex_unlock(&dev->mode_config.mutex);
2969 
2970 		if (sink)
2971 			dc_sink_release(sink);
2972 		return;
2973 	}
2974 
2975 	/*
2976 	 * TODO: temporary guard to look for proper fix
2977 	 * if this sink is MST sink, we should not do anything
2978 	 */
2979 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2980 		dc_sink_release(sink);
2981 		return;
2982 	}
2983 
2984 	if (aconnector->dc_sink == sink) {
2985 		/*
2986 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2987 		 * Do nothing!!
2988 		 */
2989 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2990 				aconnector->connector_id);
2991 		if (sink)
2992 			dc_sink_release(sink);
2993 		return;
2994 	}
2995 
2996 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2997 		aconnector->connector_id, aconnector->dc_sink, sink);
2998 
2999 	mutex_lock(&dev->mode_config.mutex);
3000 
3001 	/*
3002 	 * 1. Update status of the drm connector
3003 	 * 2. Send an event and let userspace tell us what to do
3004 	 */
3005 	if (sink) {
3006 		/*
3007 		 * TODO: check if we still need the S3 mode update workaround.
3008 		 * If yes, put it here.
3009 		 */
3010 		if (aconnector->dc_sink) {
3011 			amdgpu_dm_update_freesync_caps(connector, NULL);
3012 			dc_sink_release(aconnector->dc_sink);
3013 		}
3014 
3015 		aconnector->dc_sink = sink;
3016 		dc_sink_retain(aconnector->dc_sink);
3017 		if (sink->dc_edid.length == 0) {
3018 			aconnector->edid = NULL;
3019 			if (aconnector->dc_link->aux_mode) {
3020 				drm_dp_cec_unset_edid(
3021 					&aconnector->dm_dp_aux.aux);
3022 			}
3023 		} else {
3024 			aconnector->edid =
3025 				(struct edid *)sink->dc_edid.raw_edid;
3026 
3027 			if (aconnector->dc_link->aux_mode)
3028 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3029 						    aconnector->edid);
3030 		}
3031 
3032 		drm_connector_update_edid_property(connector, aconnector->edid);
3033 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3034 		update_connector_ext_caps(aconnector);
3035 	} else {
3036 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3037 		amdgpu_dm_update_freesync_caps(connector, NULL);
3038 		drm_connector_update_edid_property(connector, NULL);
3039 		aconnector->num_modes = 0;
3040 		dc_sink_release(aconnector->dc_sink);
3041 		aconnector->dc_sink = NULL;
3042 		aconnector->edid = NULL;
3043 #ifdef CONFIG_DRM_AMD_DC_HDCP
3044 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3045 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3046 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3047 #endif
3048 	}
3049 
3050 	mutex_unlock(&dev->mode_config.mutex);
3051 
3052 	update_subconnector_property(aconnector);
3053 
3054 	if (sink)
3055 		dc_sink_release(sink);
3056 }
3057 
3058 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3059 {
3060 	struct drm_connector *connector = &aconnector->base;
3061 	struct drm_device *dev = connector->dev;
3062 	enum dc_connection_type new_connection_type = dc_connection_none;
3063 	struct amdgpu_device *adev = drm_to_adev(dev);
3064 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3065 	struct dm_crtc_state *dm_crtc_state = NULL;
3066 
3067 	if (adev->dm.disable_hpd_irq)
3068 		return;
3069 
3070 	if (dm_con_state->base.state && dm_con_state->base.crtc)
3071 		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3072 					dm_con_state->base.state,
3073 					dm_con_state->base.crtc));
3074 	/*
3075 	 * In case of failure or MST there is no need to update the connector status or
3076 	 * notify the OS since (in the MST case) MST does this in its own context.
3077 	 */
3078 	mutex_lock(&aconnector->hpd_lock);
3079 
3080 #ifdef CONFIG_DRM_AMD_DC_HDCP
3081 	if (adev->dm.hdcp_workqueue) {
3082 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3083 		dm_con_state->update_hdcp = true;
3084 	}
3085 #endif
3086 	if (aconnector->fake_enable)
3087 		aconnector->fake_enable = false;
3088 
3089 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3090 		DRM_ERROR("KMS: Failed to detect connector\n");
3091 
3092 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3093 		emulated_link_detect(aconnector->dc_link);
3094 
3095 		drm_modeset_lock_all(dev);
3096 		dm_restore_drm_connector_state(dev, connector);
3097 		drm_modeset_unlock_all(dev);
3098 
3099 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3100 			drm_kms_helper_connector_hotplug_event(connector);
3101 
3102 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3103 		if (new_connection_type == dc_connection_none &&
3104 		    aconnector->dc_link->type == dc_connection_none &&
3105 		    dm_crtc_state)
3106 			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3107 
3108 		amdgpu_dm_update_connector_after_detect(aconnector);
3109 
3110 		drm_modeset_lock_all(dev);
3111 		dm_restore_drm_connector_state(dev, connector);
3112 		drm_modeset_unlock_all(dev);
3113 
3114 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3115 			drm_kms_helper_connector_hotplug_event(connector);
3116 	}
3117 	mutex_unlock(&aconnector->hpd_lock);
3118 
3119 }
3120 
3121 static void handle_hpd_irq(void *param)
3122 {
3123 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3124 
3125 	handle_hpd_irq_helper(aconnector);
3126 
3127 }
3128 
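/*
 * Poll the sink-count/ESI DPCD registers and hand pending MST sideband
 * messages to the DRM MST topology manager, ACKing each serviced IRQ, until
 * no new IRQ is reported or the retry limit is reached.
 */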
3129 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3130 {
3131 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3132 	uint8_t dret;
3133 	bool new_irq_handled = false;
3134 	int dpcd_addr;
3135 	int dpcd_bytes_to_read;
3136 
3137 	const int max_process_count = 30;
3138 	int process_count = 0;
3139 
3140 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3141 
3142 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3143 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3144 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3145 		dpcd_addr = DP_SINK_COUNT;
3146 	} else {
3147 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3148 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3149 		dpcd_addr = DP_SINK_COUNT_ESI;
3150 	}
3151 
3152 	dret = drm_dp_dpcd_read(
3153 		&aconnector->dm_dp_aux.aux,
3154 		dpcd_addr,
3155 		esi,
3156 		dpcd_bytes_to_read);
3157 
3158 	while (dret == dpcd_bytes_to_read &&
3159 		process_count < max_process_count) {
3160 		uint8_t retry;
3161 		dret = 0;
3162 
3163 		process_count++;
3164 
3165 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3166 		/* handle HPD short pulse irq */
3167 		if (aconnector->mst_mgr.mst_state)
3168 			drm_dp_mst_hpd_irq(
3169 				&aconnector->mst_mgr,
3170 				esi,
3171 				&new_irq_handled);
3172 
3173 		if (new_irq_handled) {
3174 			/* ACK at DPCD to notify down stream */
3175 			const int ack_dpcd_bytes_to_write =
3176 				dpcd_bytes_to_read - 1;
3177 
3178 			for (retry = 0; retry < 3; retry++) {
3179 				uint8_t wret;
3180 
3181 				wret = drm_dp_dpcd_write(
3182 					&aconnector->dm_dp_aux.aux,
3183 					dpcd_addr + 1,
3184 					&esi[1],
3185 					ack_dpcd_bytes_to_write);
3186 				if (wret == ack_dpcd_bytes_to_write)
3187 					break;
3188 			}
3189 
3190 			/* check if there is new irq to be handled */
3191 			dret = drm_dp_dpcd_read(
3192 				&aconnector->dm_dp_aux.aux,
3193 				dpcd_addr,
3194 				esi,
3195 				dpcd_bytes_to_read);
3196 
3197 			new_irq_handled = false;
3198 		} else {
3199 			break;
3200 		}
3201 	}
3202 
3203 	if (process_count == max_process_count)
3204 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3205 }
3206 
3207 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3208 							union hpd_irq_data hpd_irq_data)
3209 {
3210 	struct hpd_rx_irq_offload_work *offload_work =
3211 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3212 
3213 	if (!offload_work) {
3214 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3215 		return;
3216 	}
3217 
3218 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3219 	offload_work->data = hpd_irq_data;
3220 	offload_work->offload_wq = offload_wq;
3221 
3222 	queue_work(offload_wq->wq, &offload_work->work);
3223 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3224 }
3225 
3226 static void handle_hpd_rx_irq(void *param)
3227 {
3228 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3229 	struct drm_connector *connector = &aconnector->base;
3230 	struct drm_device *dev = connector->dev;
3231 	struct dc_link *dc_link = aconnector->dc_link;
3232 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3233 	bool result = false;
3234 	enum dc_connection_type new_connection_type = dc_connection_none;
3235 	struct amdgpu_device *adev = drm_to_adev(dev);
3236 	union hpd_irq_data hpd_irq_data;
3237 	bool link_loss = false;
3238 	bool has_left_work = false;
3239 	int idx = aconnector->base.index;
3240 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3241 
3242 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3243 
3244 	if (adev->dm.disable_hpd_irq)
3245 		return;
3246 
3247 	/*
3248 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
3249 	 * conflict; after an i2c helper is implemented, this mutex should be
3250 	 * retired.
3251 	 */
3252 	mutex_lock(&aconnector->hpd_lock);
3253 
3254 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3255 						&link_loss, true, &has_left_work);
3256 
3257 	if (!has_left_work)
3258 		goto out;
3259 
3260 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3261 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3262 		goto out;
3263 	}
3264 
3265 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3266 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3267 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3268 			dm_handle_mst_sideband_msg(aconnector);
3269 			goto out;
3270 		}
3271 
3272 		if (link_loss) {
3273 			bool skip = false;
3274 
3275 			spin_lock(&offload_wq->offload_lock);
3276 			skip = offload_wq->is_handling_link_loss;
3277 
3278 			if (!skip)
3279 				offload_wq->is_handling_link_loss = true;
3280 
3281 			spin_unlock(&offload_wq->offload_lock);
3282 
3283 			if (!skip)
3284 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3285 
3286 			goto out;
3287 		}
3288 	}
3289 
3290 out:
3291 	if (result && !is_mst_root_connector) {
3292 		/* Downstream Port status changed. */
3293 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3294 			DRM_ERROR("KMS: Failed to detect connector\n");
3295 
3296 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3297 			emulated_link_detect(dc_link);
3298 
3299 			if (aconnector->fake_enable)
3300 				aconnector->fake_enable = false;
3301 
3302 			amdgpu_dm_update_connector_after_detect(aconnector);
3303 
3304 
3305 			drm_modeset_lock_all(dev);
3306 			dm_restore_drm_connector_state(dev, connector);
3307 			drm_modeset_unlock_all(dev);
3308 
3309 			drm_kms_helper_connector_hotplug_event(connector);
3310 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3311 
3312 			if (aconnector->fake_enable)
3313 				aconnector->fake_enable = false;
3314 
3315 			amdgpu_dm_update_connector_after_detect(aconnector);
3316 
3317 
3318 			drm_modeset_lock_all(dev);
3319 			dm_restore_drm_connector_state(dev, connector);
3320 			drm_modeset_unlock_all(dev);
3321 
3322 			drm_kms_helper_connector_hotplug_event(connector);
3323 		}
3324 	}
3325 #ifdef CONFIG_DRM_AMD_DC_HDCP
3326 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3327 		if (adev->dm.hdcp_workqueue)
3328 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3329 	}
3330 #endif
3331 
3332 	if (dc_link->type != dc_connection_mst_branch)
3333 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3334 
3335 	mutex_unlock(&aconnector->hpd_lock);
3336 }
3337 
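/*
 * Register HPD and HPD RX (DP short pulse) interrupt handlers for every
 * connector whose DC link exposes a valid IRQ source.
 */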
3338 static void register_hpd_handlers(struct amdgpu_device *adev)
3339 {
3340 	struct drm_device *dev = adev_to_drm(adev);
3341 	struct drm_connector *connector;
3342 	struct amdgpu_dm_connector *aconnector;
3343 	const struct dc_link *dc_link;
3344 	struct dc_interrupt_params int_params = {0};
3345 
3346 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3347 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3348 
3349 	list_for_each_entry(connector,
3350 			&dev->mode_config.connector_list, head)	{
3351 
3352 		aconnector = to_amdgpu_dm_connector(connector);
3353 		dc_link = aconnector->dc_link;
3354 
3355 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3356 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3357 			int_params.irq_source = dc_link->irq_source_hpd;
3358 
3359 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3360 					handle_hpd_irq,
3361 					(void *) aconnector);
3362 		}
3363 
3364 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3365 
3366 			/* Also register for DP short pulse (hpd_rx). */
3367 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3368 			int_params.irq_source = dc_link->irq_source_hpd_rx;
3369 
3370 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3371 					handle_hpd_rx_irq,
3372 					(void *) aconnector);
3373 
3374 			if (adev->dm.hpd_rx_offload_wq)
3375 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3376 					aconnector;
3377 		}
3378 	}
3379 }
3380 
3381 #if defined(CONFIG_DRM_AMD_DC_SI)
3382 /* Register IRQ sources and initialize IRQ callbacks */
3383 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3384 {
3385 	struct dc *dc = adev->dm.dc;
3386 	struct common_irq_params *c_irq_params;
3387 	struct dc_interrupt_params int_params = {0};
3388 	int r;
3389 	int i;
3390 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3391 
3392 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3393 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3394 
3395 	/*
3396 	 * Actions of amdgpu_irq_add_id():
3397 	 * 1. Register a set() function with base driver.
3398 	 *    Base driver will call set() function to enable/disable an
3399 	 *    interrupt in DC hardware.
3400 	 * 2. Register amdgpu_dm_irq_handler().
3401 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3402 	 *    coming from DC hardware.
3403 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3404 	 *    for acknowledging and handling. */
3405 
3406 	/* Use VBLANK interrupt */
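	/* The per-CRTC vblank IRQ source IDs on DCE6 start at 1, hence i + 1. */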
3407 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3408 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3409 		if (r) {
3410 			DRM_ERROR("Failed to add crtc irq id!\n");
3411 			return r;
3412 		}
3413 
3414 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3415 		int_params.irq_source =
3416 			dc_interrupt_to_irq_source(dc, i + 1, 0);
3417 
3418 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3419 
3420 		c_irq_params->adev = adev;
3421 		c_irq_params->irq_src = int_params.irq_source;
3422 
3423 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3424 				dm_crtc_high_irq, c_irq_params);
3425 	}
3426 
3427 	/* Use GRPH_PFLIP interrupt */
3428 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3429 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3430 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3431 		if (r) {
3432 			DRM_ERROR("Failed to add page flip irq id!\n");
3433 			return r;
3434 		}
3435 
3436 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3437 		int_params.irq_source =
3438 			dc_interrupt_to_irq_source(dc, i, 0);
3439 
3440 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3441 
3442 		c_irq_params->adev = adev;
3443 		c_irq_params->irq_src = int_params.irq_source;
3444 
3445 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3446 				dm_pflip_high_irq, c_irq_params);
3447 
3448 	}
3449 
3450 	/* HPD */
3451 	r = amdgpu_irq_add_id(adev, client_id,
3452 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3453 	if (r) {
3454 		DRM_ERROR("Failed to add hpd irq id!\n");
3455 		return r;
3456 	}
3457 
3458 	register_hpd_handlers(adev);
3459 
3460 	return 0;
3461 }
3462 #endif
3463 
3464 /* Register IRQ sources and initialize IRQ callbacks */
3465 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3466 {
3467 	struct dc *dc = adev->dm.dc;
3468 	struct common_irq_params *c_irq_params;
3469 	struct dc_interrupt_params int_params = {0};
3470 	int r;
3471 	int i;
3472 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3473 
3474 	if (adev->family >= AMDGPU_FAMILY_AI)
3475 		client_id = SOC15_IH_CLIENTID_DCE;
3476 
3477 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3478 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3479 
3480 	/*
3481 	 * Actions of amdgpu_irq_add_id():
3482 	 * 1. Register a set() function with base driver.
3483 	 *    Base driver will call set() function to enable/disable an
3484 	 *    interrupt in DC hardware.
3485 	 * 2. Register amdgpu_dm_irq_handler().
3486 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3487 	 *    coming from DC hardware.
3488 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3489 	 *    for acknowledging and handling. */
3490 
3491 	/* Use VBLANK interrupt */
3492 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3493 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3494 		if (r) {
3495 			DRM_ERROR("Failed to add crtc irq id!\n");
3496 			return r;
3497 		}
3498 
3499 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3500 		int_params.irq_source =
3501 			dc_interrupt_to_irq_source(dc, i, 0);
3502 
3503 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3504 
3505 		c_irq_params->adev = adev;
3506 		c_irq_params->irq_src = int_params.irq_source;
3507 
3508 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3509 				dm_crtc_high_irq, c_irq_params);
3510 	}
3511 
3512 	/* Use VUPDATE interrupt */
3513 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3514 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3515 		if (r) {
3516 			DRM_ERROR("Failed to add vupdate irq id!\n");
3517 			return r;
3518 		}
3519 
3520 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3521 		int_params.irq_source =
3522 			dc_interrupt_to_irq_source(dc, i, 0);
3523 
3524 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3525 
3526 		c_irq_params->adev = adev;
3527 		c_irq_params->irq_src = int_params.irq_source;
3528 
3529 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3530 				dm_vupdate_high_irq, c_irq_params);
3531 	}
3532 
3533 	/* Use GRPH_PFLIP interrupt */
3534 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3535 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3536 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3537 		if (r) {
3538 			DRM_ERROR("Failed to add page flip irq id!\n");
3539 			return r;
3540 		}
3541 
3542 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3543 		int_params.irq_source =
3544 			dc_interrupt_to_irq_source(dc, i, 0);
3545 
3546 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3547 
3548 		c_irq_params->adev = adev;
3549 		c_irq_params->irq_src = int_params.irq_source;
3550 
3551 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3552 				dm_pflip_high_irq, c_irq_params);
3553 
3554 	}
3555 
3556 	/* HPD */
3557 	r = amdgpu_irq_add_id(adev, client_id,
3558 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3559 	if (r) {
3560 		DRM_ERROR("Failed to add hpd irq id!\n");
3561 		return r;
3562 	}
3563 
3564 	register_hpd_handlers(adev);
3565 
3566 	return 0;
3567 }
3568 
3569 /* Register IRQ sources and initialize IRQ callbacks */
3570 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3571 {
3572 	struct dc *dc = adev->dm.dc;
3573 	struct common_irq_params *c_irq_params;
3574 	struct dc_interrupt_params int_params = {0};
3575 	int r;
3576 	int i;
3577 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3578 	static const unsigned int vrtl_int_srcid[] = {
3579 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3580 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3581 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3582 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3583 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3584 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3585 	};
3586 #endif
3587 
3588 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3589 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3590 
3591 	/*
3592 	 * Actions of amdgpu_irq_add_id():
3593 	 * 1. Register a set() function with base driver.
3594 	 *    Base driver will call set() function to enable/disable an
3595 	 *    interrupt in DC hardware.
3596 	 * 2. Register amdgpu_dm_irq_handler().
3597 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3598 	 *    coming from DC hardware.
3599 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3600 	 *    for acknowledging and handling.
3601 	 */
3602 
3603 	/* Use VSTARTUP interrupt */
3604 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3605 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3606 			i++) {
3607 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3608 
3609 		if (r) {
3610 			DRM_ERROR("Failed to add crtc irq id!\n");
3611 			return r;
3612 		}
3613 
3614 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3615 		int_params.irq_source =
3616 			dc_interrupt_to_irq_source(dc, i, 0);
3617 
3618 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3619 
3620 		c_irq_params->adev = adev;
3621 		c_irq_params->irq_src = int_params.irq_source;
3622 
3623 		amdgpu_dm_irq_register_interrupt(
3624 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3625 	}
3626 
3627 	/* Use otg vertical line interrupt */
3628 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3629 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3630 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3631 				vrtl_int_srcid[i], &adev->vline0_irq);
3632 
3633 		if (r) {
3634 			DRM_ERROR("Failed to add vline0 irq id!\n");
3635 			return r;
3636 		}
3637 
3638 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3639 		int_params.irq_source =
3640 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3641 
3642 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3643 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3644 			break;
3645 		}
3646 
3647 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3648 					- DC_IRQ_SOURCE_DC1_VLINE0];
3649 
3650 		c_irq_params->adev = adev;
3651 		c_irq_params->irq_src = int_params.irq_source;
3652 
3653 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3654 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3655 	}
3656 #endif
3657 
3658 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3659 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3660 	 * to trigger at end of each vblank, regardless of state of the lock,
3661 	 * matching DCE behaviour.
3662 	 */
3663 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3664 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3665 	     i++) {
3666 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3667 
3668 		if (r) {
3669 			DRM_ERROR("Failed to add vupdate irq id!\n");
3670 			return r;
3671 		}
3672 
3673 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3674 		int_params.irq_source =
3675 			dc_interrupt_to_irq_source(dc, i, 0);
3676 
3677 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3678 
3679 		c_irq_params->adev = adev;
3680 		c_irq_params->irq_src = int_params.irq_source;
3681 
3682 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3683 				dm_vupdate_high_irq, c_irq_params);
3684 	}
3685 
3686 	/* Use GRPH_PFLIP interrupt */
3687 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3688 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3689 			i++) {
3690 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3691 		if (r) {
3692 			DRM_ERROR("Failed to add page flip irq id!\n");
3693 			return r;
3694 		}
3695 
3696 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3697 		int_params.irq_source =
3698 			dc_interrupt_to_irq_source(dc, i, 0);
3699 
3700 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3701 
3702 		c_irq_params->adev = adev;
3703 		c_irq_params->irq_src = int_params.irq_source;
3704 
3705 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3706 				dm_pflip_high_irq, c_irq_params);
3707 
3708 	}
3709 
3710 	/* HPD */
3711 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3712 			&adev->hpd_irq);
3713 	if (r) {
3714 		DRM_ERROR("Failed to add hpd irq id!\n");
3715 		return r;
3716 	}
3717 
3718 	register_hpd_handlers(adev);
3719 
3720 	return 0;
3721 }
3722 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3723 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3724 {
3725 	struct dc *dc = adev->dm.dc;
3726 	struct common_irq_params *c_irq_params;
3727 	struct dc_interrupt_params int_params = {0};
3728 	int r, i;
3729 
3730 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3731 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3732 
3733 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3734 			&adev->dmub_outbox_irq);
3735 	if (r) {
3736 		DRM_ERROR("Failed to add outbox irq id!\n");
3737 		return r;
3738 	}
3739 
3740 	if (dc->ctx->dmub_srv) {
3741 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3742 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3743 		int_params.irq_source =
3744 		dc_interrupt_to_irq_source(dc, i, 0);
3745 
3746 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3747 
3748 		c_irq_params->adev = adev;
3749 		c_irq_params->irq_src = int_params.irq_source;
3750 
3751 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3752 				dm_dmub_outbox1_low_irq, c_irq_params);
3753 	}
3754 
3755 	return 0;
3756 }
3757 
3758 /*
3759  * Acquires the lock for the atomic state object and returns
3760  * the new atomic state.
3761  *
3762  * This should only be called during atomic check.
3763  */
3764 int dm_atomic_get_state(struct drm_atomic_state *state,
3765 			struct dm_atomic_state **dm_state)
3766 {
3767 	struct drm_device *dev = state->dev;
3768 	struct amdgpu_device *adev = drm_to_adev(dev);
3769 	struct amdgpu_display_manager *dm = &adev->dm;
3770 	struct drm_private_state *priv_state;
3771 
3772 	if (*dm_state)
3773 		return 0;
3774 
3775 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3776 	if (IS_ERR(priv_state))
3777 		return PTR_ERR(priv_state);
3778 
3779 	*dm_state = to_dm_atomic_state(priv_state);
3780 
3781 	return 0;
3782 }
3783 
3784 static struct dm_atomic_state *
3785 dm_atomic_get_new_state(struct drm_atomic_state *state)
3786 {
3787 	struct drm_device *dev = state->dev;
3788 	struct amdgpu_device *adev = drm_to_adev(dev);
3789 	struct amdgpu_display_manager *dm = &adev->dm;
3790 	struct drm_private_obj *obj;
3791 	struct drm_private_state *new_obj_state;
3792 	int i;
3793 
3794 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3795 		if (obj->funcs == dm->atomic_obj.funcs)
3796 			return to_dm_atomic_state(new_obj_state);
3797 	}
3798 
3799 	return NULL;
3800 }
3801 
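/*
 * Duplicate the DM private atomic state: the DRM base state is copied by
 * the helper, while the DC state context is deep-copied so atomic check
 * can modify it without touching the currently committed context.
 */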
3802 static struct drm_private_state *
3803 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3804 {
3805 	struct dm_atomic_state *old_state, *new_state;
3806 
3807 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3808 	if (!new_state)
3809 		return NULL;
3810 
3811 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3812 
3813 	old_state = to_dm_atomic_state(obj->state);
3814 
3815 	if (old_state && old_state->context)
3816 		new_state->context = dc_copy_state(old_state->context);
3817 
3818 	if (!new_state->context) {
3819 		kfree(new_state);
3820 		return NULL;
3821 	}
3822 
3823 	return &new_state->base;
3824 }
3825 
3826 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3827 				    struct drm_private_state *state)
3828 {
3829 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3830 
3831 	if (dm_state && dm_state->context)
3832 		dc_release_state(dm_state->context);
3833 
3834 	kfree(dm_state);
3835 }
3836 
3837 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3838 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3839 	.atomic_destroy_state = dm_atomic_destroy_state,
3840 };
3841 
3842 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3843 {
3844 	struct dm_atomic_state *state;
3845 	int r;
3846 
3847 	adev->mode_info.mode_config_initialized = true;
3848 
3849 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3850 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3851 
3852 	adev_to_drm(adev)->mode_config.max_width = 16384;
3853 	adev_to_drm(adev)->mode_config.max_height = 16384;
3854 
3855 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3856 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3857 	/* indicates support for immediate flip */
3858 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3859 
3860 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3861 
3862 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3863 	if (!state)
3864 		return -ENOMEM;
3865 
3866 	state->context = dc_create_state(adev->dm.dc);
3867 	if (!state->context) {
3868 		kfree(state);
3869 		return -ENOMEM;
3870 	}
3871 
3872 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3873 
3874 	drm_atomic_private_obj_init(adev_to_drm(adev),
3875 				    &adev->dm.atomic_obj,
3876 				    &state->base,
3877 				    &dm_atomic_state_funcs);
3878 
3879 	r = amdgpu_display_modeset_create_props(adev);
3880 	if (r) {
3881 		dc_release_state(state->context);
3882 		kfree(state);
3883 		return r;
3884 	}
3885 
3886 	r = amdgpu_dm_audio_init(adev);
3887 	if (r) {
3888 		dc_release_state(state->context);
3889 		kfree(state);
3890 		return r;
3891 	}
3892 
3893 	return 0;
3894 }
3895 
3896 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3897 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3898 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3899 
3900 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3901 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3902 
3903 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3904 					    int bl_idx)
3905 {
3906 #if defined(CONFIG_ACPI)
3907 	struct amdgpu_dm_backlight_caps caps;
3908 
3909 	memset(&caps, 0, sizeof(caps));
3910 
3911 	if (dm->backlight_caps[bl_idx].caps_valid)
3912 		return;
3913 
3914 	amdgpu_acpi_get_backlight_caps(&caps);
3915 	if (caps.caps_valid) {
3916 		dm->backlight_caps[bl_idx].caps_valid = true;
3917 		if (caps.aux_support)
3918 			return;
3919 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3920 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3921 	} else {
3922 		dm->backlight_caps[bl_idx].min_input_signal =
3923 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3924 		dm->backlight_caps[bl_idx].max_input_signal =
3925 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3926 	}
3927 #else
3928 	if (dm->backlight_caps[bl_idx].aux_support)
3929 		return;
3930 
3931 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3932 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3933 #endif
3934 }
3935 
3936 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3937 				unsigned *min, unsigned *max)
3938 {
3939 	if (!caps)
3940 		return 0;
3941 
3942 	if (caps->aux_support) {
3943 		// Firmware limits are in nits, DC API wants millinits.
3944 		*max = 1000 * caps->aux_max_input_signal;
3945 		*min = 1000 * caps->aux_min_input_signal;
3946 	} else {
3947 		// Firmware limits are 8-bit, PWM control is 16-bit.
3948 		*max = 0x101 * caps->max_input_signal;
3949 		*min = 0x101 * caps->min_input_signal;
3950 	}
3951 	return 1;
3952 }
3953 
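/*
 * Illustrative example, assuming the default (non-AUX) caps above:
 * min_input_signal = 12 and max_input_signal = 255 give a PWM range of
 * min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so a user
 * brightness of 128 rescales to 3084 + ((65535 - 3084) * 128) / 255,
 * which is roughly 34432.
 */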
3954 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3955 					uint32_t brightness)
3956 {
3957 	unsigned min, max;
3958 
3959 	if (!get_brightness_range(caps, &min, &max))
3960 		return brightness;
3961 
3962 	// Rescale 0..255 to min..max
3963 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3964 				       AMDGPU_MAX_BL_LEVEL);
3965 }
3966 
3967 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3968 				      uint32_t brightness)
3969 {
3970 	unsigned min, max;
3971 
3972 	if (!get_brightness_range(caps, &min, &max))
3973 		return brightness;
3974 
3975 	if (brightness < min)
3976 		return 0;
3977 	// Rescale min..max to 0..255
3978 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3979 				 max - min);
3980 }
3981 
3982 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3983 					 int bl_idx,
3984 					 u32 user_brightness)
3985 {
3986 	struct amdgpu_dm_backlight_caps caps;
3987 	struct dc_link *link;
3988 	u32 brightness;
3989 	bool rc;
3990 
3991 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3992 	caps = dm->backlight_caps[bl_idx];
3993 
3994 	dm->brightness[bl_idx] = user_brightness;
3995 	/* update scratch register */
3996 	if (bl_idx == 0)
3997 		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3998 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3999 	link = (struct dc_link *)dm->backlight_link[bl_idx];
4000 
4001 	/* Change brightness based on AUX property */
4002 	if (caps.aux_support) {
4003 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
4004 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4005 		if (!rc)
4006 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4007 	} else {
4008 		rc = dc_link_set_backlight_level(link, brightness, 0);
4009 		if (!rc)
4010 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4011 	}
4012 
4013 	if (rc)
4014 		dm->actual_brightness[bl_idx] = user_brightness;
4015 }
4016 
4017 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4018 {
4019 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4020 	int i;
4021 
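	/* Map the backlight device back to its eDP index; fall back to 0 if
	 * the index is out of range. */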
4022 	for (i = 0; i < dm->num_of_edps; i++) {
4023 		if (bd == dm->backlight_dev[i])
4024 			break;
4025 	}
4026 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4027 		i = 0;
4028 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4029 
4030 	return 0;
4031 }
4032 
4033 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4034 					 int bl_idx)
4035 {
4036 	struct amdgpu_dm_backlight_caps caps;
4037 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4038 
4039 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4040 	caps = dm->backlight_caps[bl_idx];
4041 
4042 	if (caps.aux_support) {
4043 		u32 avg, peak;
4044 		bool rc;
4045 
4046 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4047 		if (!rc)
4048 			return dm->brightness[bl_idx];
4049 		return convert_brightness_to_user(&caps, avg);
4050 	} else {
4051 		int ret = dc_link_get_backlight_level(link);
4052 
4053 		if (ret == DC_ERROR_UNEXPECTED)
4054 			return dm->brightness[bl_idx];
4055 		return convert_brightness_to_user(&caps, ret);
4056 	}
4057 }
4058 
4059 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4060 {
4061 	struct amdgpu_display_manager *dm = bl_get_data(bd);
4062 	int i;
4063 
4064 	for (i = 0; i < dm->num_of_edps; i++) {
4065 		if (bd == dm->backlight_dev[i])
4066 			break;
4067 	}
4068 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4069 		i = 0;
4070 	return amdgpu_dm_backlight_get_level(dm, i);
4071 }
4072 
4073 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4074 	.options = BL_CORE_SUSPENDRESUME,
4075 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4076 	.update_status	= amdgpu_dm_backlight_update_status,
4077 };
4078 
4079 static void
4080 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4081 {
4082 	char bl_name[16];
4083 	struct backlight_properties props = { 0 };
4084 
4085 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4086 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4087 
4088 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4089 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4090 	props.type = BACKLIGHT_RAW;
4091 
4092 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4093 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4094 
4095 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4096 								       adev_to_drm(dm->adev)->dev,
4097 								       dm,
4098 								       &amdgpu_dm_backlight_ops,
4099 								       &props);
4100 
4101 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4102 		DRM_ERROR("DM: Backlight registration failed!\n");
4103 	else
4104 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4105 }
4106 #endif
4107 
4108 static int initialize_plane(struct amdgpu_display_manager *dm,
4109 			    struct amdgpu_mode_info *mode_info, int plane_id,
4110 			    enum drm_plane_type plane_type,
4111 			    const struct dc_plane_cap *plane_cap)
4112 {
4113 	struct drm_plane *plane;
4114 	unsigned long possible_crtcs;
4115 	int ret = 0;
4116 
4117 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4118 	if (!plane) {
4119 		DRM_ERROR("KMS: Failed to allocate plane\n");
4120 		return -ENOMEM;
4121 	}
4122 	plane->type = plane_type;
4123 
4124 	/*
4125 	 * HACK: IGT tests expect that the primary plane for a CRTC
4126 	 * can only have one possible CRTC. Only expose support for
4127 	 * any CRTC if the plane is not going to be used as a primary
4128 	 * plane for a CRTC - like overlay or underlay planes.
4129 	 */
4130 	possible_crtcs = 1 << plane_id;
4131 	if (plane_id >= dm->dc->caps.max_streams)
4132 		possible_crtcs = 0xff;
4133 
4134 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4135 
4136 	if (ret) {
4137 		DRM_ERROR("KMS: Failed to initialize plane\n");
4138 		kfree(plane);
4139 		return ret;
4140 	}
4141 
4142 	if (mode_info)
4143 		mode_info->planes[plane_id] = plane;
4144 
4145 	return ret;
4146 }
4147 
4148 
4149 static void register_backlight_device(struct amdgpu_display_manager *dm,
4150 				      struct dc_link *link)
4151 {
4152 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4153 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4154 
4155 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4156 	    link->type != dc_connection_none) {
4157 		/*
4158 		 * Even if registration failed, we should continue with
4159 		 * DM initialization because not having a backlight control
4160 		 * is better than a black screen.
4161 		 */
4162 		if (!dm->backlight_dev[dm->num_of_edps])
4163 			amdgpu_dm_register_backlight_device(dm);
4164 
4165 		if (dm->backlight_dev[dm->num_of_edps]) {
4166 			dm->backlight_link[dm->num_of_edps] = link;
4167 			dm->num_of_edps++;
4168 		}
4169 	}
4170 #endif
4171 }
4172 
4173 
4174 /*
4175  * In this architecture, the association
4176  * connector -> encoder -> crtc
4177  * is not really required. The crtc and connector will hold the
4178  * display_index as an abstraction to use with the DAL component.
4179  *
4180  * Returns 0 on success
4181  */
4182 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4183 {
4184 	struct amdgpu_display_manager *dm = &adev->dm;
4185 	int32_t i;
4186 	struct amdgpu_dm_connector *aconnector = NULL;
4187 	struct amdgpu_encoder *aencoder = NULL;
4188 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4189 	uint32_t link_cnt;
4190 	int32_t primary_planes;
4191 	enum dc_connection_type new_connection_type = dc_connection_none;
4192 	const struct dc_plane_cap *plane;
4193 	bool psr_feature_enabled = false;
4194 
4195 	dm->display_indexes_num = dm->dc->caps.max_streams;
4196 	/* Update the actual used number of crtc */
4197 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4198 
4199 	link_cnt = dm->dc->caps.max_links;
4200 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4201 		DRM_ERROR("DM: Failed to initialize mode config\n");
4202 		return -EINVAL;
4203 	}
4204 
4205 	/* There is one primary plane per CRTC */
4206 	primary_planes = dm->dc->caps.max_streams;
4207 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4208 
4209 	/*
4210 	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4211 	 * Order is reversed to match iteration order in atomic check.
4212 	 */
4213 	for (i = (primary_planes - 1); i >= 0; i--) {
4214 		plane = &dm->dc->caps.planes[i];
4215 
4216 		if (initialize_plane(dm, mode_info, i,
4217 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4218 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4219 			goto fail;
4220 		}
4221 	}
4222 
4223 	/*
4224 	 * Initialize overlay planes, index starting after primary planes.
4225 	 * These planes have a higher DRM index than the primary planes since
4226 	 * they should be considered as having a higher z-order.
4227 	 * Order is reversed to match iteration order in atomic check.
4228 	 *
4229 	 * Only support DCN for now, and only expose one so we don't encourage
4230 	 * userspace to use up all the pipes.
4231 	 */
4232 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4233 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4234 
4235 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4236 			continue;
4237 
4238 		if (!plane->blends_with_above || !plane->blends_with_below)
4239 			continue;
4240 
4241 		if (!plane->pixel_format_support.argb8888)
4242 			continue;
4243 
4244 		if (initialize_plane(dm, NULL, primary_planes + i,
4245 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4246 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4247 			goto fail;
4248 		}
4249 
4250 		/* Only create one overlay plane. */
4251 		break;
4252 	}
4253 
4254 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4255 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4256 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4257 			goto fail;
4258 		}
4259 
4260 	/* Use Outbox interrupt */
4261 	switch (adev->ip_versions[DCE_HWIP][0]) {
4262 	case IP_VERSION(3, 0, 0):
4263 	case IP_VERSION(3, 1, 2):
4264 	case IP_VERSION(3, 1, 3):
4265 	case IP_VERSION(3, 1, 5):
4266 	case IP_VERSION(3, 1, 6):
4267 	case IP_VERSION(3, 2, 0):
4268 	case IP_VERSION(3, 2, 1):
4269 	case IP_VERSION(2, 1, 0):
4270 		if (register_outbox_irq_handlers(dm->adev)) {
4271 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4272 			goto fail;
4273 		}
4274 		break;
4275 	default:
4276 		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4277 			      adev->ip_versions[DCE_HWIP][0]);
4278 	}
4279 
4280 	/* Determine whether to enable PSR support by default. */
4281 	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4282 		switch (adev->ip_versions[DCE_HWIP][0]) {
4283 		case IP_VERSION(3, 1, 2):
4284 		case IP_VERSION(3, 1, 3):
4285 		case IP_VERSION(3, 1, 5):
4286 		case IP_VERSION(3, 1, 6):
4287 		case IP_VERSION(3, 2, 0):
4288 		case IP_VERSION(3, 2, 1):
4289 			psr_feature_enabled = true;
4290 			break;
4291 		default:
4292 			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4293 			break;
4294 		}
4295 	}
4296 
4297 	/* Disable vblank IRQs aggressively for power-saving. */
4298 	adev_to_drm(adev)->vblank_disable_immediate = true;
4299 
4300 	/* loops over all connectors on the board */
4301 	for (i = 0; i < link_cnt; i++) {
4302 		struct dc_link *link = NULL;
4303 
4304 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4305 			DRM_ERROR(
4306 				"KMS: Cannot support more than %d display indexes\n",
4307 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4308 			continue;
4309 		}
4310 
4311 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4312 		if (!aconnector)
4313 			goto fail;
4314 
4315 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4316 		if (!aencoder)
4317 			goto fail;
4318 
4319 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4320 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4321 			goto fail;
4322 		}
4323 
4324 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4325 			DRM_ERROR("KMS: Failed to initialize connector\n");
4326 			goto fail;
4327 		}
4328 
4329 		link = dc_get_link_at_index(dm->dc, i);
4330 
4331 		if (!dc_link_detect_sink(link, &new_connection_type))
4332 			DRM_ERROR("KMS: Failed to detect connector\n");
4333 
4334 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4335 			emulated_link_detect(link);
4336 			amdgpu_dm_update_connector_after_detect(aconnector);
4337 
4338 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4339 			amdgpu_dm_update_connector_after_detect(aconnector);
4340 			register_backlight_device(dm, link);
4341 			if (dm->num_of_edps)
4342 				update_connector_ext_caps(aconnector);
4343 			if (psr_feature_enabled)
4344 				amdgpu_dm_set_psr_caps(link);
4345 
4346 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4347 			 * PSR is also supported.
4348 			 */
4349 			if (link->psr_settings.psr_feature_enabled)
4350 				adev_to_drm(adev)->vblank_disable_immediate = false;
4351 		}
4352 
4353 
4354 	}
4355 
4356 	/* Software is initialized. Now we can register interrupt handlers. */
4357 	switch (adev->asic_type) {
4358 #if defined(CONFIG_DRM_AMD_DC_SI)
4359 	case CHIP_TAHITI:
4360 	case CHIP_PITCAIRN:
4361 	case CHIP_VERDE:
4362 	case CHIP_OLAND:
4363 		if (dce60_register_irq_handlers(dm->adev)) {
4364 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4365 			goto fail;
4366 		}
4367 		break;
4368 #endif
4369 	case CHIP_BONAIRE:
4370 	case CHIP_HAWAII:
4371 	case CHIP_KAVERI:
4372 	case CHIP_KABINI:
4373 	case CHIP_MULLINS:
4374 	case CHIP_TONGA:
4375 	case CHIP_FIJI:
4376 	case CHIP_CARRIZO:
4377 	case CHIP_STONEY:
4378 	case CHIP_POLARIS11:
4379 	case CHIP_POLARIS10:
4380 	case CHIP_POLARIS12:
4381 	case CHIP_VEGAM:
4382 	case CHIP_VEGA10:
4383 	case CHIP_VEGA12:
4384 	case CHIP_VEGA20:
4385 		if (dce110_register_irq_handlers(dm->adev)) {
4386 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4387 			goto fail;
4388 		}
4389 		break;
4390 	default:
4391 		switch (adev->ip_versions[DCE_HWIP][0]) {
4392 		case IP_VERSION(1, 0, 0):
4393 		case IP_VERSION(1, 0, 1):
4394 		case IP_VERSION(2, 0, 2):
4395 		case IP_VERSION(2, 0, 3):
4396 		case IP_VERSION(2, 0, 0):
4397 		case IP_VERSION(2, 1, 0):
4398 		case IP_VERSION(3, 0, 0):
4399 		case IP_VERSION(3, 0, 2):
4400 		case IP_VERSION(3, 0, 3):
4401 		case IP_VERSION(3, 0, 1):
4402 		case IP_VERSION(3, 1, 2):
4403 		case IP_VERSION(3, 1, 3):
4404 		case IP_VERSION(3, 1, 5):
4405 		case IP_VERSION(3, 1, 6):
4406 		case IP_VERSION(3, 2, 0):
4407 		case IP_VERSION(3, 2, 1):
4408 			if (dcn10_register_irq_handlers(dm->adev)) {
4409 				DRM_ERROR("DM: Failed to initialize IRQ\n");
4410 				goto fail;
4411 			}
4412 			break;
4413 		default:
4414 			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4415 					adev->ip_versions[DCE_HWIP][0]);
4416 			goto fail;
4417 		}
4418 		break;
4419 	}
4420 
4421 	return 0;
4422 fail:
4423 	kfree(aencoder);
4424 	kfree(aconnector);
4425 
4426 	return -EINVAL;
4427 }
4428 
4429 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4430 {
4431 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4432 	return;
4433 }
4434 
4435 /******************************************************************************
4436  * amdgpu_display_funcs functions
4437  *****************************************************************************/
4438 
4439 /*
4440  * dm_bandwidth_update - program display watermarks
4441  *
4442  * @adev: amdgpu_device pointer
4443  *
4444  * Calculate and program the display watermarks and line buffer allocation.
4445  */
4446 static void dm_bandwidth_update(struct amdgpu_device *adev)
4447 {
4448 	/* TODO: implement later */
4449 }
4450 
4451 static const struct amdgpu_display_funcs dm_display_funcs = {
4452 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4453 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4454 	.backlight_set_level = NULL, /* never called for DC */
4455 	.backlight_get_level = NULL, /* never called for DC */
4456 	.hpd_sense = NULL,/* called unconditionally */
4457 	.hpd_set_polarity = NULL, /* called unconditionally */
4458 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4459 	.page_flip_get_scanoutpos =
4460 		dm_crtc_get_scanoutpos,/* called unconditionally */
4461 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4462 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4463 };
4464 
4465 #if defined(CONFIG_DEBUG_KERNEL_DC)
4466 
4467 static ssize_t s3_debug_store(struct device *device,
4468 			      struct device_attribute *attr,
4469 			      const char *buf,
4470 			      size_t count)
4471 {
4472 	int ret;
4473 	int s3_state;
4474 	struct drm_device *drm_dev = dev_get_drvdata(device);
4475 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4476 
4477 	ret = kstrtoint(buf, 0, &s3_state);
4478 
4479 	if (ret == 0) {
4480 		if (s3_state) {
4481 			dm_resume(adev);
4482 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4483 		} else
4484 			dm_suspend(adev);
4485 	}
4486 
4487 	return ret == 0 ? count : 0;
4488 }
4489 
4490 DEVICE_ATTR_WO(s3_debug);
4491 
4492 #endif
4493 
4494 static int dm_early_init(void *handle)
4495 {
4496 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4497 
4498 	switch (adev->asic_type) {
4499 #if defined(CONFIG_DRM_AMD_DC_SI)
4500 	case CHIP_TAHITI:
4501 	case CHIP_PITCAIRN:
4502 	case CHIP_VERDE:
4503 		adev->mode_info.num_crtc = 6;
4504 		adev->mode_info.num_hpd = 6;
4505 		adev->mode_info.num_dig = 6;
4506 		break;
4507 	case CHIP_OLAND:
4508 		adev->mode_info.num_crtc = 2;
4509 		adev->mode_info.num_hpd = 2;
4510 		adev->mode_info.num_dig = 2;
4511 		break;
4512 #endif
4513 	case CHIP_BONAIRE:
4514 	case CHIP_HAWAII:
4515 		adev->mode_info.num_crtc = 6;
4516 		adev->mode_info.num_hpd = 6;
4517 		adev->mode_info.num_dig = 6;
4518 		break;
4519 	case CHIP_KAVERI:
4520 		adev->mode_info.num_crtc = 4;
4521 		adev->mode_info.num_hpd = 6;
4522 		adev->mode_info.num_dig = 7;
4523 		break;
4524 	case CHIP_KABINI:
4525 	case CHIP_MULLINS:
4526 		adev->mode_info.num_crtc = 2;
4527 		adev->mode_info.num_hpd = 6;
4528 		adev->mode_info.num_dig = 6;
4529 		break;
4530 	case CHIP_FIJI:
4531 	case CHIP_TONGA:
4532 		adev->mode_info.num_crtc = 6;
4533 		adev->mode_info.num_hpd = 6;
4534 		adev->mode_info.num_dig = 7;
4535 		break;
4536 	case CHIP_CARRIZO:
4537 		adev->mode_info.num_crtc = 3;
4538 		adev->mode_info.num_hpd = 6;
4539 		adev->mode_info.num_dig = 9;
4540 		break;
4541 	case CHIP_STONEY:
4542 		adev->mode_info.num_crtc = 2;
4543 		adev->mode_info.num_hpd = 6;
4544 		adev->mode_info.num_dig = 9;
4545 		break;
4546 	case CHIP_POLARIS11:
4547 	case CHIP_POLARIS12:
4548 		adev->mode_info.num_crtc = 5;
4549 		adev->mode_info.num_hpd = 5;
4550 		adev->mode_info.num_dig = 5;
4551 		break;
4552 	case CHIP_POLARIS10:
4553 	case CHIP_VEGAM:
4554 		adev->mode_info.num_crtc = 6;
4555 		adev->mode_info.num_hpd = 6;
4556 		adev->mode_info.num_dig = 6;
4557 		break;
4558 	case CHIP_VEGA10:
4559 	case CHIP_VEGA12:
4560 	case CHIP_VEGA20:
4561 		adev->mode_info.num_crtc = 6;
4562 		adev->mode_info.num_hpd = 6;
4563 		adev->mode_info.num_dig = 6;
4564 		break;
4565 	default:
4566 
4567 		switch (adev->ip_versions[DCE_HWIP][0]) {
4568 		case IP_VERSION(2, 0, 2):
4569 		case IP_VERSION(3, 0, 0):
4570 			adev->mode_info.num_crtc = 6;
4571 			adev->mode_info.num_hpd = 6;
4572 			adev->mode_info.num_dig = 6;
4573 			break;
4574 		case IP_VERSION(2, 0, 0):
4575 		case IP_VERSION(3, 0, 2):
4576 			adev->mode_info.num_crtc = 5;
4577 			adev->mode_info.num_hpd = 5;
4578 			adev->mode_info.num_dig = 5;
4579 			break;
4580 		case IP_VERSION(2, 0, 3):
4581 		case IP_VERSION(3, 0, 3):
4582 			adev->mode_info.num_crtc = 2;
4583 			adev->mode_info.num_hpd = 2;
4584 			adev->mode_info.num_dig = 2;
4585 			break;
4586 		case IP_VERSION(1, 0, 0):
4587 		case IP_VERSION(1, 0, 1):
4588 		case IP_VERSION(3, 0, 1):
4589 		case IP_VERSION(2, 1, 0):
4590 		case IP_VERSION(3, 1, 2):
4591 		case IP_VERSION(3, 1, 3):
4592 		case IP_VERSION(3, 1, 5):
4593 		case IP_VERSION(3, 1, 6):
4594 		case IP_VERSION(3, 2, 0):
4595 		case IP_VERSION(3, 2, 1):
4596 			adev->mode_info.num_crtc = 4;
4597 			adev->mode_info.num_hpd = 4;
4598 			adev->mode_info.num_dig = 4;
4599 			break;
4600 		default:
4601 			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4602 					adev->ip_versions[DCE_HWIP][0]);
4603 			return -EINVAL;
4604 		}
4605 		break;
4606 	}
4607 
4608 	amdgpu_dm_set_irq_funcs(adev);
4609 
4610 	if (adev->mode_info.funcs == NULL)
4611 		adev->mode_info.funcs = &dm_display_funcs;
4612 
4613 	/*
4614 	 * Note: Do NOT change adev->audio_endpt_rreg and
4615 	 * adev->audio_endpt_wreg because they are initialised in
4616 	 * amdgpu_device_init()
4617 	 */
4618 #if defined(CONFIG_DEBUG_KERNEL_DC)
4619 	device_create_file(
4620 		adev_to_drm(adev)->dev,
4621 		&dev_attr_s3_debug);
4622 #endif
4623 
4624 	return 0;
4625 }
4626 
4627 static bool modeset_required(struct drm_crtc_state *crtc_state,
4628 			     struct dc_stream_state *new_stream,
4629 			     struct dc_stream_state *old_stream)
4630 {
4631 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4632 }
4633 
4634 static bool modereset_required(struct drm_crtc_state *crtc_state)
4635 {
4636 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4637 }
4638 
4639 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4640 {
4641 	drm_encoder_cleanup(encoder);
4642 	kfree(encoder);
4643 }
4644 
4645 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4646 	.destroy = amdgpu_dm_encoder_destroy,
4647 };
4648 
4649 
4650 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4651 					 struct drm_framebuffer *fb,
4652 					 int *min_downscale, int *max_upscale)
4653 {
4654 	struct amdgpu_device *adev = drm_to_adev(dev);
4655 	struct dc *dc = adev->dm.dc;
4656 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4657 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4658 
4659 	switch (fb->format->format) {
4660 	case DRM_FORMAT_P010:
4661 	case DRM_FORMAT_NV12:
4662 	case DRM_FORMAT_NV21:
4663 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4664 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4665 		break;
4666 
4667 	case DRM_FORMAT_XRGB16161616F:
4668 	case DRM_FORMAT_ARGB16161616F:
4669 	case DRM_FORMAT_XBGR16161616F:
4670 	case DRM_FORMAT_ABGR16161616F:
4671 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4672 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4673 		break;
4674 
4675 	default:
4676 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4677 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4678 		break;
4679 	}
4680 
4681 	/*
4682 	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4683 	 * scaling factor of 1.0 == 1000 units.
4684 	 */
4685 	if (*max_upscale == 1)
4686 		*max_upscale = 1000;
4687 
4688 	if (*min_downscale == 1)
4689 		*min_downscale = 1000;
4690 }
4691 
4692 
4693 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4694 				const struct drm_plane_state *state,
4695 				struct dc_scaling_info *scaling_info)
4696 {
4697 	int scale_w, scale_h, min_downscale, max_upscale;
4698 
4699 	memset(scaling_info, 0, sizeof(*scaling_info));
4700 
4701 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4702 	scaling_info->src_rect.x = state->src_x >> 16;
4703 	scaling_info->src_rect.y = state->src_y >> 16;
4704 
4705 	/*
4706 	 * For reasons we don't (yet) fully understand, a non-zero
4707 	 * src_y coordinate into an NV12 buffer can cause a
4708 	 * system hang on DCN1x.
4709 	 * To avoid hangs (and maybe be overly cautious)
4710 	 * let's reject both non-zero src_x and src_y.
4711 	 *
4712 	 * We currently know of only one use-case to reproduce a
4713 	 * scenario with non-zero src_x and src_y for NV12, which
4714 	 * is to gesture the YouTube Android app into full screen
4715 	 * on ChromeOS.
4716 	 */
4717 	if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4718 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4719 	    (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4720 	    (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4721 		return -EINVAL;
4722 
4723 	scaling_info->src_rect.width = state->src_w >> 16;
4724 	if (scaling_info->src_rect.width == 0)
4725 		return -EINVAL;
4726 
4727 	scaling_info->src_rect.height = state->src_h >> 16;
4728 	if (scaling_info->src_rect.height == 0)
4729 		return -EINVAL;
4730 
4731 	scaling_info->dst_rect.x = state->crtc_x;
4732 	scaling_info->dst_rect.y = state->crtc_y;
4733 
4734 	if (state->crtc_w == 0)
4735 		return -EINVAL;
4736 
4737 	scaling_info->dst_rect.width = state->crtc_w;
4738 
4739 	if (state->crtc_h == 0)
4740 		return -EINVAL;
4741 
4742 	scaling_info->dst_rect.height = state->crtc_h;
4743 
4744 	/* DRM doesn't specify clipping on destination output. */
4745 	scaling_info->clip_rect = scaling_info->dst_rect;
4746 
4747 	/* Validate scaling per-format with DC plane caps */
4748 	if (state->plane && state->plane->dev && state->fb) {
4749 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4750 					     &min_downscale, &max_upscale);
4751 	} else {
4752 		min_downscale = 250;
4753 		max_upscale = 16000;
4754 	}
4755 
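	/* Scaling ratios are expressed in 1/1000 units (1000 == 1:1), the
	 * same scale as the min_downscale/max_upscale caps above. */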
4756 	scale_w = scaling_info->dst_rect.width * 1000 /
4757 		  scaling_info->src_rect.width;
4758 
4759 	if (scale_w < min_downscale || scale_w > max_upscale)
4760 		return -EINVAL;
4761 
4762 	scale_h = scaling_info->dst_rect.height * 1000 /
4763 		  scaling_info->src_rect.height;
4764 
4765 	if (scale_h < min_downscale || scale_h > max_upscale)
4766 		return -EINVAL;
4767 
4768 	/*
4769 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4770 	 * assume reasonable defaults based on the format.
4771 	 */
4772 
4773 	return 0;
4774 }
4775 
4776 static void
4777 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4778 				 uint64_t tiling_flags)
4779 {
4780 	/* Fill GFX8 params */
4781 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4782 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4783 
4784 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4785 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4786 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4787 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4788 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4789 
4790 		/* XXX fix me for VI */
4791 		tiling_info->gfx8.num_banks = num_banks;
4792 		tiling_info->gfx8.array_mode =
4793 				DC_ARRAY_2D_TILED_THIN1;
4794 		tiling_info->gfx8.tile_split = tile_split;
4795 		tiling_info->gfx8.bank_width = bankw;
4796 		tiling_info->gfx8.bank_height = bankh;
4797 		tiling_info->gfx8.tile_aspect = mtaspect;
4798 		tiling_info->gfx8.tile_mode =
4799 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4800 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4801 			== DC_ARRAY_1D_TILED_THIN1) {
4802 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4803 	}
4804 
4805 	tiling_info->gfx8.pipe_config =
4806 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4807 }
4808 
4809 static void
4810 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4811 				  union dc_tiling_info *tiling_info)
4812 {
4813 	tiling_info->gfx9.num_pipes =
4814 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4815 	tiling_info->gfx9.num_banks =
4816 		adev->gfx.config.gb_addr_config_fields.num_banks;
4817 	tiling_info->gfx9.pipe_interleave =
4818 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4819 	tiling_info->gfx9.num_shader_engines =
4820 		adev->gfx.config.gb_addr_config_fields.num_se;
4821 	tiling_info->gfx9.max_compressed_frags =
4822 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4823 	tiling_info->gfx9.num_rb_per_se =
4824 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4825 	tiling_info->gfx9.shaderEnable = 1;
4826 	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4827 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4828 }
4829 
4830 static int
4831 validate_dcc(struct amdgpu_device *adev,
4832 	     const enum surface_pixel_format format,
4833 	     const enum dc_rotation_angle rotation,
4834 	     const union dc_tiling_info *tiling_info,
4835 	     const struct dc_plane_dcc_param *dcc,
4836 	     const struct dc_plane_address *address,
4837 	     const struct plane_size *plane_size)
4838 {
4839 	struct dc *dc = adev->dm.dc;
4840 	struct dc_dcc_surface_param input;
4841 	struct dc_surface_dcc_cap output;
4842 
4843 	memset(&input, 0, sizeof(input));
4844 	memset(&output, 0, sizeof(output));
4845 
4846 	if (!dcc->enable)
4847 		return 0;
4848 
4849 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4850 	    !dc->cap_funcs.get_dcc_compression_cap)
4851 		return -EINVAL;
4852 
4853 	input.format = format;
4854 	input.surface_size.width = plane_size->surface_size.width;
4855 	input.surface_size.height = plane_size->surface_size.height;
4856 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4857 
4858 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4859 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4860 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4861 		input.scan = SCAN_DIRECTION_VERTICAL;
4862 
4863 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4864 		return -EINVAL;
4865 
4866 	if (!output.capable)
4867 		return -EINVAL;
4868 
4869 	if (dcc->independent_64b_blks == 0 &&
4870 	    output.grph.rgb.independent_64b_blks != 0)
4871 		return -EINVAL;
4872 
4873 	return 0;
4874 }
4875 
4876 static bool
4877 modifier_has_dcc(uint64_t modifier)
4878 {
4879 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4880 }
4881 
4882 static unsigned
4883 modifier_gfx9_swizzle_mode(uint64_t modifier)
4884 {
4885 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4886 		return 0;
4887 
4888 	return AMD_FMT_MOD_GET(TILE, modifier);
4889 }
4890 
4891 static const struct drm_format_info *
4892 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4893 {
4894 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4895 }
4896 
4897 static void
4898 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4899 				    union dc_tiling_info *tiling_info,
4900 				    uint64_t modifier)
4901 {
4902 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4903 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4904 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4905 	unsigned int pipes_log2;
4906 
4907 	pipes_log2 = min(5u, mod_pipe_xor_bits);
4908 
4909 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4910 
4911 	if (!IS_AMD_FMT_MOD(modifier))
4912 		return;
4913 
4914 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4915 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4916 
4917 	if (adev->family >= AMDGPU_FAMILY_NV) {
4918 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4919 	} else {
4920 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4921 
4922 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4923 	}
4924 }
4925 
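/*
 * The low two bits of a GFX9+ swizzle mode encode the micro-tile class
 * (Z, S, D or R); dm_plane_format_mod_supported() uses them below to
 * reject layouts the display hardware cannot scan out.
 */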
4926 enum dm_micro_swizzle {
4927 	MICRO_SWIZZLE_Z = 0,
4928 	MICRO_SWIZZLE_S = 1,
4929 	MICRO_SWIZZLE_D = 2,
4930 	MICRO_SWIZZLE_R = 3
4931 };
4932 
4933 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4934 					  uint32_t format,
4935 					  uint64_t modifier)
4936 {
4937 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4938 	const struct drm_format_info *info = drm_format_info(format);
4939 	struct hw_asic_id asic_id = adev->dm.dc->ctx->asic_id;
4940 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4941 
4942 	if (!info)
4943 		return false;
4944 
4945 	/*
4946 	 * We always have to allow these modifiers:
4947 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4948 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4949 	 */
4950 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4951 	    modifier == DRM_FORMAT_MOD_INVALID) {
4952 		return true;
4953 	}
4954 
4955 	/* check if swizzle mode is supported by this version of DCN */
4956 	switch (asic_id.chip_family) {
4957 		case FAMILY_SI:
4958 		case FAMILY_CI:
4959 		case FAMILY_KV:
4960 		case FAMILY_CZ:
4961 		case FAMILY_VI:
4962 			/* ASICs before AI do not have modifier support */
4963 			return false;
4964 			break;
4965 		case FAMILY_AI:
4966 		case FAMILY_RV:
4967 		case FAMILY_NV:
4968 		case FAMILY_VGH:
4969 		case FAMILY_YELLOW_CARP:
4970 		case AMDGPU_FAMILY_GC_10_3_6:
4971 		case AMDGPU_FAMILY_GC_10_3_7:
4972 			switch (AMD_FMT_MOD_GET(TILE, modifier)) {
4973 				case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
4974 				case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
4975 				case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
4976 				case AMD_FMT_MOD_TILE_GFX9_64K_D:
4977 					return true;
4978 					break;
4979 				default:
4980 					return false;
4981 					break;
4982 			}
4983 			break;
4984 		case AMDGPU_FAMILY_GC_11_0_0:
4985 			switch (AMD_FMT_MOD_GET(TILE, modifier)) {
4986 				case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
4987 				case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
4988 				case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
4989 				case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
4990 				case AMD_FMT_MOD_TILE_GFX9_64K_D:
4991 					return true;
4992 					break;
4993 				default:
4994 					return false;
4995 					break;
4996 			}
4997 			break;
4998 		default:
4999 			ASSERT(0); /* Unknown asic */
5000 			break;
5001 	}
5002 
5003 	/*
5004 	 * For D swizzle the canonical modifier depends on the bpp, so check
5005 	 * it here.
5006 	 */
5007 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
5008 	    adev->family >= AMDGPU_FAMILY_NV) {
5009 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
5010 			return false;
5011 	}
5012 
5013 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
5014 	    info->cpp[0] < 8)
5015 		return false;
5016 
5017 	if (modifier_has_dcc(modifier)) {
5018 		/* Per radeonsi comments 16/64 bpp are more complicated. */
5019 		if (info->cpp[0] != 4)
5020 			return false;
5021 		/* We support multi-planar formats, but not when combined with
5022 		 * additional DCC metadata planes. */
5023 		if (info->num_planes > 1)
5024 			return false;
5025 	}
5026 
5027 	return true;
5028 }
5029 
5030 static void
5031 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
5032 {
5033 	if (!*mods)
5034 		return;
5035 
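	/* Double the capacity when the modifier list is full; if the
	 * reallocation fails, free the list and set *mods to NULL so the
	 * caller can detect the failure. */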
5036 	if (*cap - *size < 1) {
5037 		uint64_t new_cap = *cap * 2;
5038 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
5039 
5040 		if (!new_mods) {
5041 			kfree(*mods);
5042 			*mods = NULL;
5043 			return;
5044 		}
5045 
5046 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
5047 		kfree(*mods);
5048 		*mods = new_mods;
5049 		*cap = new_cap;
5050 	}
5051 
5052 	(*mods)[*size] = mod;
5053 	*size += 1;
5054 }
5055 
5056 static void
5057 add_gfx9_modifiers(const struct amdgpu_device *adev,
5058 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
5059 {
5060 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5061 	int pipe_xor_bits = min(8, pipes +
5062 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
5063 	int bank_xor_bits = min(8 - pipe_xor_bits,
5064 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
5065 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
5066 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
5067 
5068 
5069 	if (adev->family == AMDGPU_FAMILY_RV) {
5070 		/* Raven2 and later */
5071 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
5072 
5073 		/*
5074 		 * No _D DCC swizzles yet because we only allow 32bpp, which
5075 		 * doesn't support _D on DCN
5076 		 */
5077 
5078 		if (has_constant_encode) {
5079 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5080 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5081 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5082 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5083 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5084 				    AMD_FMT_MOD_SET(DCC, 1) |
5085 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5086 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5087 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5088 		}
5089 
5090 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5091 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5092 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5093 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5094 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5095 			    AMD_FMT_MOD_SET(DCC, 1) |
5096 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5097 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5098 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5099 
5100 		if (has_constant_encode) {
5101 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
5102 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5103 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5104 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5105 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5106 				    AMD_FMT_MOD_SET(DCC, 1) |
5107 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5108 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5109 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5111 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5112 				    AMD_FMT_MOD_SET(RB, rb) |
5113 				    AMD_FMT_MOD_SET(PIPE, pipes));
5114 		}
5115 
5116 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5117 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5118 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5119 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5120 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5121 			    AMD_FMT_MOD_SET(DCC, 1) |
5122 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5123 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5124 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5125 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5126 			    AMD_FMT_MOD_SET(RB, rb) |
5127 			    AMD_FMT_MOD_SET(PIPE, pipes));
5128 	}
5129 
5130 	/*
5131 	 * Only supported for 64bpp on Raven, will be filtered on format in
5132 	 * dm_plane_format_mod_supported.
5133 	 */
5134 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5135 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5136 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5137 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5138 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5139 
5140 	if (adev->family == AMDGPU_FAMILY_RV) {
5141 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5143 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5144 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5145 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5146 	}
5147 
5148 	/*
5149 	 * Only supported for 64bpp on Raven, will be filtered on format in
5150 	 * dm_plane_format_mod_supported.
5151 	 */
5152 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5153 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5154 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5155 
5156 	if (adev->family == AMDGPU_FAMILY_RV) {
5157 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5159 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5160 	}
5161 }
5162 
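/*
 * Advertise the GFX10.1 swizzle modes: R_X with DCC, with DCC retile and
 * without DCC, an S_X mode, plus the displayable GFX9 64K_D/64K_S
 * fallbacks.
 */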
5163 static void
5164 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5165 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5166 {
5167 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5168 
5169 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5170 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5171 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5172 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5173 		    AMD_FMT_MOD_SET(DCC, 1) |
5174 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5175 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5176 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5177 
5178 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5179 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5180 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5181 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5182 		    AMD_FMT_MOD_SET(DCC, 1) |
5183 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5184 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5185 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5186 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5187 
5188 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5189 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5190 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5191 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5192 
5193 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5194 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5195 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5196 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5197 
5199 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5200 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5201 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5202 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5203 
5204 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5205 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5206 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5207 }
5208 
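/*
 * Advertise the GFX10.3 (RB+) swizzle modes. These add the PACKERS field
 * and 128B-independent DCC block settings on top of the GFX10.1 set.
 */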
5209 static void
5210 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5211 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5212 {
5213 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5214 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5215 
5216 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5217 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5218 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5219 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5220 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5221 		    AMD_FMT_MOD_SET(DCC, 1) |
5222 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5223 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5224 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5225 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5226 
5227 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5228 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5229 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5230 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5231 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5232 		    AMD_FMT_MOD_SET(DCC, 1) |
5233 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5234 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5235 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5236 
5237 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5238 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5239 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5240 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5241 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5242 		    AMD_FMT_MOD_SET(DCC, 1) |
5243 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5244 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5245 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5246 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5247 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5248 
5249 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5250 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5251 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5252 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5253 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5254 		    AMD_FMT_MOD_SET(DCC, 1) |
5255 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5256 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5257 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5258 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5259 
5260 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5261 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5262 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5263 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5264 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5265 
5266 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5267 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5268 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5269 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5270 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5271 
5272 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5273 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5274 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5275 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5276 
5277 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5278 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5279 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5280 }
5281 
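/*
 * Advertise the GFX11 swizzle modes. The base R_X mode (256K on parts
 * with more than 16 pipes) is combined with the "best" DCC layout, the
 * 64B-independent DCC layout required by display hardware for 4K and
 * larger surfaces, their retiled variants, the undecorated R_X mode and
 * finally a 64K_D mode.
 */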
5282 static void
5283 add_gfx11_modifiers(struct amdgpu_device *adev,
5284 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5285 {
5286 	int num_pipes = 0;
5287 	int pipe_xor_bits = 0;
5288 	int num_pkrs = 0;
5289 	int pkrs = 0;
5290 	u32 gb_addr_config;
5291 	unsigned swizzle_r_x;
5292 	uint64_t modifier_r_x;
5293 	uint64_t modifier_dcc_best;
5294 	uint64_t modifier_dcc_4k;
5295 
5296 	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
5297 	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
5298 	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
5299 	ASSERT(gb_addr_config != 0);
5300 
5301 	num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
5302 	pkrs = ilog2(num_pkrs);
5303 	num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
5304 	pipe_xor_bits = ilog2(num_pipes);
5305 
5306 	/* R_X swizzle modes are the best for rendering and DCC requires them. */
5307 	swizzle_r_x = num_pipes > 16 ? AMD_FMT_MOD_TILE_GFX11_256K_R_X :
5308 				       AMD_FMT_MOD_TILE_GFX9_64K_R_X;
5309 
5310 	modifier_r_x = AMD_FMT_MOD |
5311 		AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5312 		AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
5313 		AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5314 		AMD_FMT_MOD_SET(PACKERS, pkrs);
5315 
5316 	/* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
5317 	modifier_dcc_best = modifier_r_x |
5318 		AMD_FMT_MOD_SET(DCC, 1) |
5319 		AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
5320 		AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5321 		AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
5322 
5323 	/* DCC settings for 4K and greater resolutions. (required by display hw) */
5324 	modifier_dcc_4k = modifier_r_x |
5325 			AMD_FMT_MOD_SET(DCC, 1) |
5326 			AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5327 			AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5328 			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
5329 
5330 	add_modifier(mods, size, capacity, modifier_dcc_best);
5331 	add_modifier(mods, size, capacity, modifier_dcc_4k);
5332 
5333 	add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5334 	add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
5335 
5336 	add_modifier(mods, size, capacity, modifier_r_x);
5337 
5338 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5339 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
5340 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
5341 }
5342 
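/*
 * Build the format modifier list advertised for a plane. Cursor planes
 * only get LINEAR; other planes get the per-family lists above followed
 * by LINEAR. The list is terminated with DRM_FORMAT_MOD_INVALID and the
 * function returns -ENOMEM if the array could not be allocated.
 */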
5343 static int
5344 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5345 {
5346 	uint64_t size = 0, capacity = 128;
5347 	*mods = NULL;
5348 
5349 	/* We have not hooked up any pre-GFX9 modifiers. */
5350 	if (adev->family < AMDGPU_FAMILY_AI)
5351 		return 0;
5352 
5353 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5354 
5355 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5356 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5357 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5358 		return *mods ? 0 : -ENOMEM;
5359 	}
5360 
5361 	switch (adev->family) {
5362 	case AMDGPU_FAMILY_AI:
5363 	case AMDGPU_FAMILY_RV:
5364 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5365 		break;
5366 	case AMDGPU_FAMILY_NV:
5367 	case AMDGPU_FAMILY_VGH:
5368 	case AMDGPU_FAMILY_YC:
5369 	case AMDGPU_FAMILY_GC_10_3_6:
5370 	case AMDGPU_FAMILY_GC_10_3_7:
5371 		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5372 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5373 		else
5374 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5375 		break;
5376 	case AMDGPU_FAMILY_GC_11_0_0:
5377 		add_gfx11_modifiers(adev, mods, &size, &capacity);
5378 		break;
5379 	}
5380 
5381 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5382 
5383 	/* INVALID marks the end of the list. */
5384 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5385 
5386 	if (!*mods)
5387 		return -ENOMEM;
5388 
5389 	return 0;
5390 }
5391 
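/*
 * Translate the framebuffer's format modifier into DC tiling and DCC
 * parameters. When the modifier carries DCC, the metadata address and
 * pitch come from plane 1 of the framebuffer and the independent
 * 64B/128B block flags are mapped onto the HUBP indirect-block enum.
 * The result is checked with validate_dcc().
 */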
5392 static int
5393 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5394 					  const struct amdgpu_framebuffer *afb,
5395 					  const enum surface_pixel_format format,
5396 					  const enum dc_rotation_angle rotation,
5397 					  const struct plane_size *plane_size,
5398 					  union dc_tiling_info *tiling_info,
5399 					  struct dc_plane_dcc_param *dcc,
5400 					  struct dc_plane_address *address,
5401 					  const bool force_disable_dcc)
5402 {
5403 	const uint64_t modifier = afb->base.modifier;
5404 	int ret = 0;
5405 
5406 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5407 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5408 
5409 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5410 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5411 		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5412 		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5413 
5414 		dcc->enable = 1;
5415 		dcc->meta_pitch = afb->base.pitches[1];
5416 		dcc->independent_64b_blks = independent_64b_blks;
5417 		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5418 			if (independent_64b_blks && independent_128b_blks)
5419 				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5420 			else if (independent_128b_blks)
5421 				dcc->dcc_ind_blk = hubp_ind_block_128b;
5422 			else if (independent_64b_blks && !independent_128b_blks)
5423 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5424 			else
5425 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5426 		} else {
5427 			if (independent_64b_blks)
5428 				dcc->dcc_ind_blk = hubp_ind_block_64b;
5429 			else
5430 				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5431 		}
5432 
5433 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5434 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5435 	}
5436 
5437 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5438 	if (ret)
5439 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5440 
5441 	return ret;
5442 }
5443 
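/*
 * Fill DC plane size, tiling, DCC and address info from the framebuffer.
 * RGB surfaces use a single graphics address; YUV surfaces get separate
 * luma/chroma addresses and (currently hardcoded) 2x2 subsampled chroma
 * sizing. GFX9 and newer derive tiling from the modifier, older ASICs
 * from the legacy tiling flags.
 */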
5444 static int
5445 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5446 			     const struct amdgpu_framebuffer *afb,
5447 			     const enum surface_pixel_format format,
5448 			     const enum dc_rotation_angle rotation,
5449 			     const uint64_t tiling_flags,
5450 			     union dc_tiling_info *tiling_info,
5451 			     struct plane_size *plane_size,
5452 			     struct dc_plane_dcc_param *dcc,
5453 			     struct dc_plane_address *address,
5454 			     bool tmz_surface,
5455 			     bool force_disable_dcc)
5456 {
5457 	const struct drm_framebuffer *fb = &afb->base;
5458 	int ret;
5459 
5460 	memset(tiling_info, 0, sizeof(*tiling_info));
5461 	memset(plane_size, 0, sizeof(*plane_size));
5462 	memset(dcc, 0, sizeof(*dcc));
5463 	memset(address, 0, sizeof(*address));
5464 
5465 	address->tmz_surface = tmz_surface;
5466 
5467 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5468 		uint64_t addr = afb->address + fb->offsets[0];
5469 
5470 		plane_size->surface_size.x = 0;
5471 		plane_size->surface_size.y = 0;
5472 		plane_size->surface_size.width = fb->width;
5473 		plane_size->surface_size.height = fb->height;
5474 		plane_size->surface_pitch =
5475 			fb->pitches[0] / fb->format->cpp[0];
5476 
5477 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5478 		address->grph.addr.low_part = lower_32_bits(addr);
5479 		address->grph.addr.high_part = upper_32_bits(addr);
5480 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5481 		uint64_t luma_addr = afb->address + fb->offsets[0];
5482 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5483 
5484 		plane_size->surface_size.x = 0;
5485 		plane_size->surface_size.y = 0;
5486 		plane_size->surface_size.width = fb->width;
5487 		plane_size->surface_size.height = fb->height;
5488 		plane_size->surface_pitch =
5489 			fb->pitches[0] / fb->format->cpp[0];
5490 
5491 		plane_size->chroma_size.x = 0;
5492 		plane_size->chroma_size.y = 0;
5493 		/* TODO: set these based on surface format */
5494 		plane_size->chroma_size.width = fb->width / 2;
5495 		plane_size->chroma_size.height = fb->height / 2;
5496 
5497 		plane_size->chroma_pitch =
5498 			fb->pitches[1] / fb->format->cpp[1];
5499 
5500 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5501 		address->video_progressive.luma_addr.low_part =
5502 			lower_32_bits(luma_addr);
5503 		address->video_progressive.luma_addr.high_part =
5504 			upper_32_bits(luma_addr);
5505 		address->video_progressive.chroma_addr.low_part =
5506 			lower_32_bits(chroma_addr);
5507 		address->video_progressive.chroma_addr.high_part =
5508 			upper_32_bits(chroma_addr);
5509 	}
5510 
5511 	if (adev->family >= AMDGPU_FAMILY_AI) {
5512 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5513 								rotation, plane_size,
5514 								tiling_info, dcc,
5515 								address,
5516 								force_disable_dcc);
5517 		if (ret)
5518 			return ret;
5519 	} else {
5520 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5521 	}
5522 
5523 	return 0;
5524 }
5525 
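/*
 * Derive per-pixel/global alpha blending settings from the DRM plane
 * state. Only overlay planes are considered; per-pixel alpha requires an
 * alpha-capable format, coverage blending clears the pre-multiplied
 * flag, and a plane alpha below 0xffff enables global alpha.
 */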
5526 static void
5527 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5528 			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5529 			       bool *global_alpha, int *global_alpha_value)
5530 {
5531 	*per_pixel_alpha = false;
5532 	*pre_multiplied_alpha = true;
5533 	*global_alpha = false;
5534 	*global_alpha_value = 0xff;
5535 
5536 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5537 		return;
5538 
5539 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5540 		plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5541 		static const uint32_t alpha_formats[] = {
5542 			DRM_FORMAT_ARGB8888,
5543 			DRM_FORMAT_RGBA8888,
5544 			DRM_FORMAT_ABGR8888,
5545 		};
5546 		uint32_t format = plane_state->fb->format->format;
5547 		unsigned int i;
5548 
5549 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5550 			if (format == alpha_formats[i]) {
5551 				*per_pixel_alpha = true;
5552 				break;
5553 			}
5554 		}
5555 
5556 		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5557 			*pre_multiplied_alpha = false;
5558 	}
5559 
5560 	if (plane_state->alpha < 0xffff) {
5561 		*global_alpha = true;
5562 		*global_alpha_value = plane_state->alpha >> 8;
5563 	}
5564 }
5565 
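/*
 * Map the DRM color encoding/range properties of a YUV plane onto the DC
 * color space. RGB formats always use sRGB; unsupported combinations
 * (e.g. limited-range BT.2020) return -EINVAL.
 */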
5566 static int
5567 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5568 			    const enum surface_pixel_format format,
5569 			    enum dc_color_space *color_space)
5570 {
5571 	bool full_range;
5572 
5573 	*color_space = COLOR_SPACE_SRGB;
5574 
5575 	/* DRM color properties only affect non-RGB formats. */
5576 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5577 		return 0;
5578 
5579 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5580 
5581 	switch (plane_state->color_encoding) {
5582 	case DRM_COLOR_YCBCR_BT601:
5583 		if (full_range)
5584 			*color_space = COLOR_SPACE_YCBCR601;
5585 		else
5586 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5587 		break;
5588 
5589 	case DRM_COLOR_YCBCR_BT709:
5590 		if (full_range)
5591 			*color_space = COLOR_SPACE_YCBCR709;
5592 		else
5593 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5594 		break;
5595 
5596 	case DRM_COLOR_YCBCR_BT2020:
5597 		if (full_range)
5598 			*color_space = COLOR_SPACE_2020_YCBCR;
5599 		else
5600 			return -EINVAL;
5601 		break;
5602 
5603 	default:
5604 		return -EINVAL;
5605 	}
5606 
5607 	return 0;
5608 }
5609 
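/*
 * Convert a DRM plane state into a dc_plane_info: pixel format, rotation,
 * buffer/DCC attributes (via fill_plane_buffer_attributes()), color space
 * and blending settings. Unsupported fourcc codes are rejected with
 * -EINVAL.
 */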
5610 static int
5611 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5612 			    const struct drm_plane_state *plane_state,
5613 			    const uint64_t tiling_flags,
5614 			    struct dc_plane_info *plane_info,
5615 			    struct dc_plane_address *address,
5616 			    bool tmz_surface,
5617 			    bool force_disable_dcc)
5618 {
5619 	const struct drm_framebuffer *fb = plane_state->fb;
5620 	const struct amdgpu_framebuffer *afb =
5621 		to_amdgpu_framebuffer(plane_state->fb);
5622 	int ret;
5623 
5624 	memset(plane_info, 0, sizeof(*plane_info));
5625 
5626 	switch (fb->format->format) {
5627 	case DRM_FORMAT_C8:
5628 		plane_info->format =
5629 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5630 		break;
5631 	case DRM_FORMAT_RGB565:
5632 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5633 		break;
5634 	case DRM_FORMAT_XRGB8888:
5635 	case DRM_FORMAT_ARGB8888:
5636 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5637 		break;
5638 	case DRM_FORMAT_XRGB2101010:
5639 	case DRM_FORMAT_ARGB2101010:
5640 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5641 		break;
5642 	case DRM_FORMAT_XBGR2101010:
5643 	case DRM_FORMAT_ABGR2101010:
5644 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5645 		break;
5646 	case DRM_FORMAT_XBGR8888:
5647 	case DRM_FORMAT_ABGR8888:
5648 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5649 		break;
5650 	case DRM_FORMAT_NV21:
5651 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5652 		break;
5653 	case DRM_FORMAT_NV12:
5654 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5655 		break;
5656 	case DRM_FORMAT_P010:
5657 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5658 		break;
5659 	case DRM_FORMAT_XRGB16161616F:
5660 	case DRM_FORMAT_ARGB16161616F:
5661 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5662 		break;
5663 	case DRM_FORMAT_XBGR16161616F:
5664 	case DRM_FORMAT_ABGR16161616F:
5665 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5666 		break;
5667 	case DRM_FORMAT_XRGB16161616:
5668 	case DRM_FORMAT_ARGB16161616:
5669 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5670 		break;
5671 	case DRM_FORMAT_XBGR16161616:
5672 	case DRM_FORMAT_ABGR16161616:
5673 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5674 		break;
5675 	default:
5676 		DRM_ERROR(
5677 			"Unsupported screen format %p4cc\n",
5678 			&fb->format->format);
5679 		return -EINVAL;
5680 	}
5681 
5682 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5683 	case DRM_MODE_ROTATE_0:
5684 		plane_info->rotation = ROTATION_ANGLE_0;
5685 		break;
5686 	case DRM_MODE_ROTATE_90:
5687 		plane_info->rotation = ROTATION_ANGLE_90;
5688 		break;
5689 	case DRM_MODE_ROTATE_180:
5690 		plane_info->rotation = ROTATION_ANGLE_180;
5691 		break;
5692 	case DRM_MODE_ROTATE_270:
5693 		plane_info->rotation = ROTATION_ANGLE_270;
5694 		break;
5695 	default:
5696 		plane_info->rotation = ROTATION_ANGLE_0;
5697 		break;
5698 	}
5699 
5700 	plane_info->visible = true;
5701 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5702 
5703 	plane_info->layer_index = 0;
5704 
5705 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5706 					  &plane_info->color_space);
5707 	if (ret)
5708 		return ret;
5709 
5710 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5711 					   plane_info->rotation, tiling_flags,
5712 					   &plane_info->tiling_info,
5713 					   &plane_info->plane_size,
5714 					   &plane_info->dcc, address, tmz_surface,
5715 					   force_disable_dcc);
5716 	if (ret)
5717 		return ret;
5718 
5719 	fill_blending_from_plane_state(
5720 		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5721 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5722 
5723 	return 0;
5724 }
5725 
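/*
 * Populate a dc_plane_state from DRM plane and CRTC state: scaling
 * rectangles, buffer/DCC attributes (with DCC force-disabled on Raven
 * during suspend) and the input transfer function for color management.
 */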
5726 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5727 				    struct dc_plane_state *dc_plane_state,
5728 				    struct drm_plane_state *plane_state,
5729 				    struct drm_crtc_state *crtc_state)
5730 {
5731 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5732 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5733 	struct dc_scaling_info scaling_info;
5734 	struct dc_plane_info plane_info;
5735 	int ret;
5736 	bool force_disable_dcc = false;
5737 
5738 	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5739 	if (ret)
5740 		return ret;
5741 
5742 	dc_plane_state->src_rect = scaling_info.src_rect;
5743 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5744 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5745 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5746 
5747 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5748 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5749 					  afb->tiling_flags,
5750 					  &plane_info,
5751 					  &dc_plane_state->address,
5752 					  afb->tmz_surface,
5753 					  force_disable_dcc);
5754 	if (ret)
5755 		return ret;
5756 
5757 	dc_plane_state->format = plane_info.format;
5758 	dc_plane_state->color_space = plane_info.color_space;
5760 	dc_plane_state->plane_size = plane_info.plane_size;
5761 	dc_plane_state->rotation = plane_info.rotation;
5762 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5763 	dc_plane_state->stereo_format = plane_info.stereo_format;
5764 	dc_plane_state->tiling_info = plane_info.tiling_info;
5765 	dc_plane_state->visible = plane_info.visible;
5766 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5767 	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5768 	dc_plane_state->global_alpha = plane_info.global_alpha;
5769 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5770 	dc_plane_state->dcc = plane_info.dcc;
5771 	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5772 	dc_plane_state->flip_int_enabled = true;
5773 
5774 	/*
5775 	 * Always set input transfer function, since plane state is refreshed
5776 	 * every time.
5777 	 */
5778 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5779 	if (ret)
5780 		return ret;
5781 
5782 	return 0;
5783 }
5784 
5785 /**
5786  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5787  *
5788  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5789  *         remote fb
5790  * @old_plane_state: Old state of @plane
5791  * @new_plane_state: New state of @plane
5792  * @crtc_state: New state of CRTC connected to the @plane
5793  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5794  *
5795  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5796  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5797  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5798  * amdgpu_dm's.
5799  *
5800  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5801  * plane with regions that require flushing to the eDP remote buffer. In
5802  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5803  * implicitly provide damage clips without any client support via the plane
5804  * bounds.
5805  *
5806  * Today, amdgpu_dm only supports the MPO and cursor use cases.
5807  *
5808  * TODO: Also enable for FB_DAMAGE_CLIPS
5809  */
5810 static void fill_dc_dirty_rects(struct drm_plane *plane,
5811 				struct drm_plane_state *old_plane_state,
5812 				struct drm_plane_state *new_plane_state,
5813 				struct drm_crtc_state *crtc_state,
5814 				struct dc_flip_addrs *flip_addrs)
5815 {
5816 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5817 	struct rect *dirty_rects = flip_addrs->dirty_rects;
5818 	uint32_t num_clips;
5819 	bool bb_changed;
5820 	bool fb_changed;
5821 	uint32_t i = 0;
5822 
5823 	flip_addrs->dirty_rect_count = 0;
5824 
5825 	/*
5826 	 * Cursor plane has its own dirty rect update interface. See
5827 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5828 	 */
5829 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
5830 		return;
5831 
5832 	/*
5833 	 * Today, we only consider MPO use-case for PSR SU. If MPO not
5834 	 * requested, and there is a plane update, do FFU.
5835 	 */
5836 	if (!dm_crtc_state->mpo_requested) {
5837 		dirty_rects[0].x = 0;
5838 		dirty_rects[0].y = 0;
5839 		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
5840 		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
5841 		flip_addrs->dirty_rect_count = 1;
5842 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5843 				 new_plane_state->plane->base.id,
5844 				 dm_crtc_state->base.mode.crtc_hdisplay,
5845 				 dm_crtc_state->base.mode.crtc_vdisplay);
5846 		return;
5847 	}
5848 
5849 	/*
5850 	 * MPO is requested. Add entire plane bounding box to dirty rects if
5851 	 * flipped to or damaged.
5852 	 *
5853 	 * If plane is moved or resized, also add old bounding box to dirty
5854 	 * rects.
5855 	 */
5856 	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5857 	fb_changed = old_plane_state->fb->base.id !=
5858 		     new_plane_state->fb->base.id;
5859 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5860 		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
5861 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
5862 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
5863 
5864 	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5865 			 new_plane_state->plane->base.id,
5866 			 bb_changed, fb_changed, num_clips);
5867 
5868 	if (num_clips || fb_changed || bb_changed) {
5869 		dirty_rects[i].x = new_plane_state->crtc_x;
5870 		dirty_rects[i].y = new_plane_state->crtc_y;
5871 		dirty_rects[i].width = new_plane_state->crtc_w;
5872 		dirty_rects[i].height = new_plane_state->crtc_h;
5873 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5874 				 new_plane_state->plane->base.id,
5875 				 dirty_rects[i].x, dirty_rects[i].y,
5876 				 dirty_rects[i].width, dirty_rects[i].height);
5877 		i += 1;
5878 	}
5879 
5880 	/* Add old plane bounding-box if plane is moved or resized */
5881 	if (bb_changed) {
5882 		dirty_rects[i].x = old_plane_state->crtc_x;
5883 		dirty_rects[i].y = old_plane_state->crtc_y;
5884 		dirty_rects[i].width = old_plane_state->crtc_w;
5885 		dirty_rects[i].height = old_plane_state->crtc_h;
5886 		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
5887 				old_plane_state->plane->base.id,
5888 				dirty_rects[i].x, dirty_rects[i].y,
5889 				dirty_rects[i].width, dirty_rects[i].height);
5890 		i += 1;
5891 	}
5892 
5893 	flip_addrs->dirty_rect_count = i;
5894 }
5895 
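/*
 * Compute the stream source/destination rectangles from the requested
 * mode and the connector's scaling (RMX) and underscan properties. The
 * destination is centered in the addressable area and shrunk by the
 * underscan borders when underscan is enabled.
 */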
5896 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5897 					   const struct dm_connector_state *dm_state,
5898 					   struct dc_stream_state *stream)
5899 {
5900 	enum amdgpu_rmx_type rmx_type;
5901 
5902 	struct rect src = { 0 }; /* viewport in composition space */
5903 	struct rect dst = { 0 }; /* stream addressable area */
5904 
5905 	/* no mode. nothing to be done */
5906 	if (!mode)
5907 		return;
5908 
5909 	/* Full screen scaling by default */
5910 	src.width = mode->hdisplay;
5911 	src.height = mode->vdisplay;
5912 	dst.width = stream->timing.h_addressable;
5913 	dst.height = stream->timing.v_addressable;
5914 
5915 	if (dm_state) {
5916 		rmx_type = dm_state->scaling;
5917 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5918 			if (src.width * dst.height <
5919 					src.height * dst.width) {
5920 				/* height needs less upscaling/more downscaling */
5921 				dst.width = src.width *
5922 						dst.height / src.height;
5923 			} else {
5924 				/* width needs less upscaling/more downscaling */
5925 				dst.height = src.height *
5926 						dst.width / src.width;
5927 			}
5928 		} else if (rmx_type == RMX_CENTER) {
5929 			dst = src;
5930 		}
5931 
5932 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5933 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5934 
5935 		if (dm_state->underscan_enable) {
5936 			dst.x += dm_state->underscan_hborder / 2;
5937 			dst.y += dm_state->underscan_vborder / 2;
5938 			dst.width -= dm_state->underscan_hborder;
5939 			dst.height -= dm_state->underscan_vborder;
5940 		}
5941 	}
5942 
5943 	stream->src = src;
5944 	stream->dst = dst;
5945 
5946 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5947 		      dst.x, dst.y, dst.width, dst.height);
5948 
5949 }
5950 
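/*
 * Pick a DC color depth from the connector's EDID-reported bpc, capping
 * YCbCr 4:2:0 depth by the HDMI HF-VSDB deep-color bits and optionally by
 * the user-requested max bpc (rounded down to an even value).
 */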
5951 static enum dc_color_depth
5952 convert_color_depth_from_display_info(const struct drm_connector *connector,
5953 				      bool is_y420, int requested_bpc)
5954 {
5955 	uint8_t bpc;
5956 
5957 	if (is_y420) {
5958 		bpc = 8;
5959 
5960 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5961 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5962 			bpc = 16;
5963 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5964 			bpc = 12;
5965 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5966 			bpc = 10;
5967 	} else {
5968 		bpc = (uint8_t)connector->display_info.bpc;
5969 		/* Assume 8 bpc by default if no bpc is specified. */
5970 		bpc = bpc ? bpc : 8;
5971 	}
5972 
5973 	if (requested_bpc > 0) {
5974 		/*
5975 		 * Cap display bpc based on the user requested value.
5976 		 *
5977 		 * The value for state->max_bpc may not be correctly updated
5978 		 * depending on when the connector gets added to the state
5979 		 * or if this was called outside of atomic check, so it
5980 		 * can't be used directly.
5981 		 */
5982 		bpc = min_t(u8, bpc, requested_bpc);
5983 
5984 		/* Round down to the nearest even number. */
5985 		bpc = bpc - (bpc & 1);
5986 	}
5987 
5988 	switch (bpc) {
5989 	case 0:
5990 		/*
5991 		 * Temporary workaround: DRM doesn't parse color depth for
5992 		 * EDID revisions before 1.4
5993 		 * TODO: Fix edid parsing
5994 		 */
5995 		return COLOR_DEPTH_888;
5996 	case 6:
5997 		return COLOR_DEPTH_666;
5998 	case 8:
5999 		return COLOR_DEPTH_888;
6000 	case 10:
6001 		return COLOR_DEPTH_101010;
6002 	case 12:
6003 		return COLOR_DEPTH_121212;
6004 	case 14:
6005 		return COLOR_DEPTH_141414;
6006 	case 16:
6007 		return COLOR_DEPTH_161616;
6008 	default:
6009 		return COLOR_DEPTH_UNDEFINED;
6010 	}
6011 }
6012 
6013 static enum dc_aspect_ratio
6014 get_aspect_ratio(const struct drm_display_mode *mode_in)
6015 {
6016 	/* 1-1 mapping, since both enums follow the HDMI spec. */
6017 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
6018 }
6019 
6020 static enum dc_color_space
6021 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
6022 {
6023 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
6024 
6025 	switch (dc_crtc_timing->pixel_encoding)	{
6026 	case PIXEL_ENCODING_YCBCR422:
6027 	case PIXEL_ENCODING_YCBCR444:
6028 	case PIXEL_ENCODING_YCBCR420:
6029 	{
6030 		/*
6031 		 * According to the HDMI spec, 27.03 MHz is the separation point
6032 		 * between HDTV and SDTV, so use YCbCr709 above it and YCbCr601
6033 		 * below it.
6034 		 */
6035 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
6036 			if (dc_crtc_timing->flags.Y_ONLY)
6037 				color_space =
6038 					COLOR_SPACE_YCBCR709_LIMITED;
6039 			else
6040 				color_space = COLOR_SPACE_YCBCR709;
6041 		} else {
6042 			if (dc_crtc_timing->flags.Y_ONLY)
6043 				color_space =
6044 					COLOR_SPACE_YCBCR601_LIMITED;
6045 			else
6046 				color_space = COLOR_SPACE_YCBCR601;
6047 		}
6048 
6049 	}
6050 	break;
6051 	case PIXEL_ENCODING_RGB:
6052 		color_space = COLOR_SPACE_SRGB;
6053 		break;
6054 
6055 	default:
6056 		WARN_ON(1);
6057 		break;
6058 	}
6059 
6060 	return color_space;
6061 }
6062 
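/*
 * Step the colour depth down until the normalized pixel clock (halved for
 * YCbCr 4:2:0 and scaled by the depth) fits within the sink's max TMDS
 * clock. A depth that is not valid for HDMI bails out immediately, and
 * false is returned if nothing down to 8 bpc fits.
 */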
6063 static bool adjust_colour_depth_from_display_info(
6064 	struct dc_crtc_timing *timing_out,
6065 	const struct drm_display_info *info)
6066 {
6067 	enum dc_color_depth depth = timing_out->display_color_depth;
6068 	int normalized_clk;
6069 	do {
6070 		normalized_clk = timing_out->pix_clk_100hz / 10;
6071 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
6072 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
6073 			normalized_clk /= 2;
6074 		/* Adjusting pix clock following on HDMI spec based on colour depth */
6075 		switch (depth) {
6076 		case COLOR_DEPTH_888:
6077 			break;
6078 		case COLOR_DEPTH_101010:
6079 			normalized_clk = (normalized_clk * 30) / 24;
6080 			break;
6081 		case COLOR_DEPTH_121212:
6082 			normalized_clk = (normalized_clk * 36) / 24;
6083 			break;
6084 		case COLOR_DEPTH_161616:
6085 			normalized_clk = (normalized_clk * 48) / 24;
6086 			break;
6087 		default:
6088 			/* The above depths are the only ones valid for HDMI. */
6089 			return false;
6090 		}
6091 		if (normalized_clk <= info->max_tmds_clock) {
6092 			timing_out->display_color_depth = depth;
6093 			return true;
6094 		}
6095 	} while (--depth > COLOR_DEPTH_666);
6096 	return false;
6097 }
6098 
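/*
 * Fill the DC stream timing from a DRM display mode: pixel encoding
 * (YCbCr 4:2:0/4:4:4 where required or forced), color depth, CEA/HDMI
 * VICs and sync polarities. FreeSync video modes use the base timings
 * directly, other modes use the crtc_* timings. For HDMI the encoding may
 * be downgraded to 4:2:0 if the sink's TMDS clock limit is exceeded.
 */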
6099 static void fill_stream_properties_from_drm_display_mode(
6100 	struct dc_stream_state *stream,
6101 	const struct drm_display_mode *mode_in,
6102 	const struct drm_connector *connector,
6103 	const struct drm_connector_state *connector_state,
6104 	const struct dc_stream_state *old_stream,
6105 	int requested_bpc)
6106 {
6107 	struct dc_crtc_timing *timing_out = &stream->timing;
6108 	const struct drm_display_info *info = &connector->display_info;
6109 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6110 	struct hdmi_vendor_infoframe hv_frame;
6111 	struct hdmi_avi_infoframe avi_frame;
6112 
6113 	memset(&hv_frame, 0, sizeof(hv_frame));
6114 	memset(&avi_frame, 0, sizeof(avi_frame));
6115 
6116 	timing_out->h_border_left = 0;
6117 	timing_out->h_border_right = 0;
6118 	timing_out->v_border_top = 0;
6119 	timing_out->v_border_bottom = 0;
6120 	/* TODO: un-hardcode */
6121 	if (drm_mode_is_420_only(info, mode_in)
6122 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6123 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6124 	else if (drm_mode_is_420_also(info, mode_in)
6125 			&& aconnector->force_yuv420_output)
6126 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6127 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
6128 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6129 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
6130 	else
6131 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
6132 
6133 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
6134 	timing_out->display_color_depth = convert_color_depth_from_display_info(
6135 		connector,
6136 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
6137 		requested_bpc);
6138 	timing_out->scan_type = SCANNING_TYPE_NODATA;
6139 	timing_out->hdmi_vic = 0;
6140 
6141 	if (old_stream) {
6142 		timing_out->vic = old_stream->timing.vic;
6143 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
6144 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
6145 	} else {
6146 		timing_out->vic = drm_match_cea_mode(mode_in);
6147 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
6148 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
6149 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
6150 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
6151 	}
6152 
6153 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6154 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
6155 		timing_out->vic = avi_frame.video_code;
6156 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
6157 		timing_out->hdmi_vic = hv_frame.vic;
6158 	}
6159 
6160 	if (is_freesync_video_mode(mode_in, aconnector)) {
6161 		timing_out->h_addressable = mode_in->hdisplay;
6162 		timing_out->h_total = mode_in->htotal;
6163 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
6164 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
6165 		timing_out->v_total = mode_in->vtotal;
6166 		timing_out->v_addressable = mode_in->vdisplay;
6167 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
6168 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
6169 		timing_out->pix_clk_100hz = mode_in->clock * 10;
6170 	} else {
6171 		timing_out->h_addressable = mode_in->crtc_hdisplay;
6172 		timing_out->h_total = mode_in->crtc_htotal;
6173 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
6174 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
6175 		timing_out->v_total = mode_in->crtc_vtotal;
6176 		timing_out->v_addressable = mode_in->crtc_vdisplay;
6177 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
6178 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
6179 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
6180 	}
6181 
6182 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
6183 
6184 	stream->output_color_space = get_output_color_space(timing_out);
6185 
6186 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
6187 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
6188 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
6189 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
6190 		    drm_mode_is_420_also(info, mode_in) &&
6191 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
6192 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
6193 			adjust_colour_depth_from_display_info(timing_out, info);
6194 		}
6195 	}
6196 }
6197 
6198 static void fill_audio_info(struct audio_info *audio_info,
6199 			    const struct drm_connector *drm_connector,
6200 			    const struct dc_sink *dc_sink)
6201 {
6202 	int i = 0;
6203 	int cea_revision = 0;
6204 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
6205 
6206 	audio_info->manufacture_id = edid_caps->manufacturer_id;
6207 	audio_info->product_id = edid_caps->product_id;
6208 
6209 	cea_revision = drm_connector->display_info.cea_rev;
6210 
6211 	strscpy(audio_info->display_name,
6212 		edid_caps->display_name,
6213 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
6214 
6215 	if (cea_revision >= 3) {
6216 		audio_info->mode_count = edid_caps->audio_mode_count;
6217 
6218 		for (i = 0; i < audio_info->mode_count; ++i) {
6219 			audio_info->modes[i].format_code =
6220 					(enum audio_format_code)
6221 					(edid_caps->audio_modes[i].format_code);
6222 			audio_info->modes[i].channel_count =
6223 					edid_caps->audio_modes[i].channel_count;
6224 			audio_info->modes[i].sample_rates.all =
6225 					edid_caps->audio_modes[i].sample_rate;
6226 			audio_info->modes[i].sample_size =
6227 					edid_caps->audio_modes[i].sample_size;
6228 		}
6229 	}
6230 
6231 	audio_info->flags.all = edid_caps->speaker_flags;
6232 
6233 	/* TODO: We only check for the progressive mode, check for interlace mode too */
6234 	if (drm_connector->latency_present[0]) {
6235 		audio_info->video_latency = drm_connector->video_latency[0];
6236 		audio_info->audio_latency = drm_connector->audio_latency[0];
6237 	}
6238 
6239 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
6240 
6241 }
6242 
6243 static void
6244 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
6245 				      struct drm_display_mode *dst_mode)
6246 {
6247 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
6248 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
6249 	dst_mode->crtc_clock = src_mode->crtc_clock;
6250 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
6251 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
6252 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
6253 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
6254 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
6255 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
6256 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6257 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6258 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6259 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6260 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6261 }
6262 
6263 static void
6264 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6265 					const struct drm_display_mode *native_mode,
6266 					bool scale_enabled)
6267 {
6268 	if (scale_enabled) {
6269 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6270 	} else if (native_mode->clock == drm_mode->clock &&
6271 			native_mode->htotal == drm_mode->htotal &&
6272 			native_mode->vtotal == drm_mode->vtotal) {
6273 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6274 	} else {
6275 		/* no scaling nor amdgpu inserted, no need to patch */
6276 	}
6277 }
6278 
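/*
 * Create a virtual sink on the connector's link so a stream can still be
 * constructed when no physical sink has been detected.
 */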
6279 static struct dc_sink *
6280 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6281 {
6282 	struct dc_sink_init_data sink_init_data = { 0 };
6283 	struct dc_sink *sink = NULL;
6284 	sink_init_data.link = aconnector->dc_link;
6285 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6286 
6287 	sink = dc_sink_create(&sink_init_data);
6288 	if (!sink) {
6289 		DRM_ERROR("Failed to create sink!\n");
6290 		return NULL;
6291 	}
6292 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6293 
6294 	return sink;
6295 }
6296 
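/*
 * Program the CRTC-reset trigger of a multisync slave stream from its
 * master's VSYNC polarity, firing on the next pixel after the event.
 */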
6297 static void set_multisync_trigger_params(
6298 		struct dc_stream_state *stream)
6299 {
6300 	struct dc_stream_state *master = NULL;
6301 
6302 	if (stream->triggered_crtc_reset.enabled) {
6303 		master = stream->triggered_crtc_reset.event_source;
6304 		stream->triggered_crtc_reset.event =
6305 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6306 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6307 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6308 	}
6309 }
6310 
6311 static void set_master_stream(struct dc_stream_state *stream_set[],
6312 			      int stream_count)
6313 {
6314 	int j, highest_rfr = 0, master_stream = 0;
6315 
6316 	for (j = 0;  j < stream_count; j++) {
6317 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6318 			int refresh_rate = 0;
6319 
6320 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6321 				(stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6322 			if (refresh_rate > highest_rfr) {
6323 				highest_rfr = refresh_rate;
6324 				master_stream = j;
6325 			}
6326 		}
6327 	}
6328 	for (j = 0;  j < stream_count; j++) {
6329 		if (stream_set[j])
6330 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6331 	}
6332 }
6333 
6334 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6335 {
6336 	int i = 0;
6337 	struct dc_stream_state *stream;
6338 
6339 	if (context->stream_count < 2)
6340 		return;
6341 	for (i = 0; i < context->stream_count ; i++) {
6342 		if (!context->streams[i])
6343 			continue;
6344 		/*
6345 		 * TODO: add a function to read AMD VSDB bits and set
6346 		 * crtc_sync_master.multi_sync_enabled flag
6347 		 * For now it's set to false
6348 		 */
6349 	}
6350 
6351 	set_master_stream(context->streams, context->stream_count);
6352 
6353 	for (i = 0; i < context->stream_count ; i++) {
6354 		stream = context->streams[i];
6355 
6356 		if (!stream)
6357 			continue;
6358 
6359 		set_multisync_trigger_params(stream);
6360 	}
6361 }
6362 
6363 #if defined(CONFIG_DRM_AMD_DC_DCN)
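/*
 * Parse the sink's DSC decoder capabilities from DPCD for DP/eDP sinks
 * that are either native or behind a DP-to-HDMI converter; DSC is left
 * disabled in the stream timing by default.
 */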
6364 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6365 							struct dc_sink *sink, struct dc_stream_state *stream,
6366 							struct dsc_dec_dpcd_caps *dsc_caps)
6367 {
6368 	stream->timing.flags.DSC = 0;
6369 	dsc_caps->is_dsc_supported = false;
6370 
6371 	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6372 		sink->sink_signal == SIGNAL_TYPE_EDP)) {
6373 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6374 			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6375 			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6376 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6377 				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6378 				dsc_caps);
6379 	}
6380 }
6381 
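/*
 * Enable DSC on eDP when possible. If the stream compressed at the
 * (panel-capped) 8 bpp target already fits within the link bandwidth, use
 * that fixed bits_per_pixel; otherwise fall back to a DSC config
 * constrained by the available link bandwidth.
 */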
6382 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6383 				    struct dc_sink *sink, struct dc_stream_state *stream,
6384 				    struct dsc_dec_dpcd_caps *dsc_caps,
6385 				    uint32_t max_dsc_target_bpp_limit_override)
6386 {
6387 	const struct dc_link_settings *verified_link_cap = NULL;
6388 	uint32_t link_bw_in_kbps;
6389 	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6390 	struct dc *dc = sink->ctx->dc;
6391 	struct dc_dsc_bw_range bw_range = {0};
6392 	struct dc_dsc_config dsc_cfg = {0};
6393 
6394 	verified_link_cap = dc_link_get_link_cap(stream->link);
6395 	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6396 	edp_min_bpp_x16 = 8 * 16;
6397 	edp_max_bpp_x16 = 8 * 16;
6398 
6399 	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6400 		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6401 
6402 	if (edp_max_bpp_x16 < edp_min_bpp_x16)
6403 		edp_min_bpp_x16 = edp_max_bpp_x16;
6404 
6405 	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6406 				dc->debug.dsc_min_slice_height_override,
6407 				edp_min_bpp_x16, edp_max_bpp_x16,
6408 				dsc_caps,
6409 				&stream->timing,
6410 				&bw_range)) {
6411 
6412 		if (bw_range.max_kbps < link_bw_in_kbps) {
6413 			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6414 					dsc_caps,
6415 					dc->debug.dsc_min_slice_height_override,
6416 					max_dsc_target_bpp_limit_override,
6417 					0,
6418 					&stream->timing,
6419 					&dsc_cfg)) {
6420 				stream->timing.dsc_cfg = dsc_cfg;
6421 				stream->timing.flags.DSC = 1;
6422 				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6423 			}
6424 			return;
6425 		}
6426 	}
6427 
6428 	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6429 				dsc_caps,
6430 				dc->debug.dsc_min_slice_height_override,
6431 				max_dsc_target_bpp_limit_override,
6432 				link_bw_in_kbps,
6433 				&stream->timing,
6434 				&dsc_cfg)) {
6435 		stream->timing.dsc_cfg = dsc_cfg;
6436 		stream->timing.flags.DSC = 1;
6437 	}
6438 }
6439 
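/*
 * Decide whether the stream should use DSC, honouring the debugfs force
 * enable/disable settings. eDP is handled by apply_dsc_policy_for_edp();
 * DP SST sinks get DSC when a valid config fits the link bandwidth, and
 * DP-to-HDMI PCONs only when the uncompressed timing exceeds it. Any
 * debugfs slice count or bits-per-pixel overrides are applied at the end.
 */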
6440 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6441 										struct dc_sink *sink, struct dc_stream_state *stream,
6442 										struct dsc_dec_dpcd_caps *dsc_caps)
6443 {
6444 	struct drm_connector *drm_connector = &aconnector->base;
6445 	uint32_t link_bandwidth_kbps;
6446 	uint32_t max_dsc_target_bpp_limit_override = 0;
6447 	struct dc *dc = sink->ctx->dc;
6448 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6449 	uint32_t dsc_max_supported_bw_in_kbps;
6450 
6451 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6452 							dc_link_get_link_cap(aconnector->dc_link));
6453 
6454 	if (stream->link && stream->link->local_sink)
6455 		max_dsc_target_bpp_limit_override =
6456 			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6457 
6458 	/* Set DSC policy according to dsc_clock_en */
6459 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6460 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6461 
6462 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6463 	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6464 
6465 		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6466 
6467 	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6468 		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6469 			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6470 						dsc_caps,
6471 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6472 						max_dsc_target_bpp_limit_override,
6473 						link_bandwidth_kbps,
6474 						&stream->timing,
6475 						&stream->timing.dsc_cfg)) {
6476 				stream->timing.flags.DSC = 1;
6477 				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6478 								 __func__, drm_connector->name);
6479 			}
6480 		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6481 			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6482 			max_supported_bw_in_kbps = link_bandwidth_kbps;
6483 			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6484 
6485 			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6486 					max_supported_bw_in_kbps > 0 &&
6487 					dsc_max_supported_bw_in_kbps > 0)
6488 				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6489 						dsc_caps,
6490 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6491 						max_dsc_target_bpp_limit_override,
6492 						dsc_max_supported_bw_in_kbps,
6493 						&stream->timing,
6494 						&stream->timing.dsc_cfg)) {
6495 					stream->timing.flags.DSC = 1;
6496 					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6497 									 __func__, drm_connector->name);
6498 				}
6499 		}
6500 	}
6501 
6502 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6503 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6504 		stream->timing.flags.DSC = 1;
6505 
6506 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6507 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6508 
6509 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6510 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6511 
6512 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6513 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6514 }
6515 #endif /* CONFIG_DRM_AMD_DC_DCN */
6516 
6517 /**
6518  * DOC: FreeSync Video
6519  *
6520  * When a userspace application wants to play a video, the content follows a
6521  * standard format definition that usually specifies the FPS for that format.
6522  * The below list illustrates some video formats and the expected FPS,
6523  * respectively:
6524  *
6525  * - TV/NTSC (23.976 FPS)
6526  * - Cinema (24 FPS)
6527  * - TV/PAL (25 FPS)
6528  * - TV/NTSC (29.97 FPS)
6529  * - TV/NTSC (30 FPS)
6530  * - Cinema HFR (48 FPS)
6531  * - TV/PAL (50 FPS)
6532  * - Commonly used (60 FPS)
6533  * - Multiples of 24 (48,72,96,120 FPS)
6534  *
6535  * The list of standard video formats is not huge and can be added to the
6536  * connector's mode list beforehand. With that, userspace can leverage
6537  * FreeSync to extend the front porch in order to attain the target refresh
6538  * rate. Such a switch will happen seamlessly, without screen blanking or
6539  * reprogramming of the output in any other way. If the userspace requests a
6540  * modesetting change compatible with FreeSync modes that only differ in the
6541  * refresh rate, DC will skip the full update and avoid blink during the
6542  * transition. For example, the video player can change the modesetting from
6543  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6544  * causing any display blink. This same concept can be applied to a mode
6545  * setting change.
6546  */
6547 static struct drm_display_mode *
6548 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6549 			  bool use_probed_modes)
6550 {
6551 	struct drm_display_mode *m, *m_pref = NULL;
6552 	u16 current_refresh, highest_refresh;
6553 	struct list_head *list_head = use_probed_modes ?
6554 						    &aconnector->base.probed_modes :
6555 						    &aconnector->base.modes;
6556 
6557 	if (aconnector->freesync_vid_base.clock != 0)
6558 		return &aconnector->freesync_vid_base;
6559 
6560 	/* Find the preferred mode */
6561 	list_for_each_entry(m, list_head, head) {
6562 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6563 			m_pref = m;
6564 			break;
6565 		}
6566 	}
6567 
6568 	if (!m_pref) {
6569 		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
6570 		m_pref = list_first_entry_or_null(
6571 			&aconnector->base.modes, struct drm_display_mode, head);
6572 		if (!m_pref) {
6573 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6574 			return NULL;
6575 		}
6576 	}
6577 
6578 	highest_refresh = drm_mode_vrefresh(m_pref);
6579 
6580 	/*
6581 	 * Find the mode with the highest refresh rate at the same resolution.
6582 	 * For some monitors, the preferred mode is not the mode with the
6583 	 * highest supported refresh rate.
6584 	 */
6585 	list_for_each_entry(m, list_head, head) {
6586 		current_refresh  = drm_mode_vrefresh(m);
6587 
6588 		if (m->hdisplay == m_pref->hdisplay &&
6589 		    m->vdisplay == m_pref->vdisplay &&
6590 		    highest_refresh < current_refresh) {
6591 			highest_refresh = current_refresh;
6592 			m_pref = m;
6593 		}
6594 	}
6595 
6596 	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6597 	return m_pref;
6598 }
6599 
6600 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6601 				   struct amdgpu_dm_connector *aconnector)
6602 {
6603 	struct drm_display_mode *high_mode;
6604 	int timing_diff;
6605 
6606 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6607 	if (!high_mode || !mode)
6608 		return false;
6609 
6610 	timing_diff = high_mode->vtotal - mode->vtotal;
6611 
6612 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6613 	    high_mode->hdisplay != mode->hdisplay ||
6614 	    high_mode->vdisplay != mode->vdisplay ||
6615 	    high_mode->hsync_start != mode->hsync_start ||
6616 	    high_mode->hsync_end != mode->hsync_end ||
6617 	    high_mode->htotal != mode->htotal ||
6618 	    high_mode->hskew != mode->hskew ||
6619 	    high_mode->vscan != mode->vscan ||
6620 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6621 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6622 		return false;
6623 	else
6624 		return true;
6625 }
6626 
6627 static struct dc_stream_state *
6628 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6629 		       const struct drm_display_mode *drm_mode,
6630 		       const struct dm_connector_state *dm_state,
6631 		       const struct dc_stream_state *old_stream,
6632 		       int requested_bpc)
6633 {
6634 	struct drm_display_mode *preferred_mode = NULL;
6635 	struct drm_connector *drm_connector;
6636 	const struct drm_connector_state *con_state =
6637 		dm_state ? &dm_state->base : NULL;
6638 	struct dc_stream_state *stream = NULL;
6639 	struct drm_display_mode mode = *drm_mode;
6640 	struct drm_display_mode saved_mode;
6641 	struct drm_display_mode *freesync_mode = NULL;
6642 	bool native_mode_found = false;
6643 	bool recalculate_timing = false;
6644 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6645 	int mode_refresh;
6646 	int preferred_refresh = 0;
6647 #if defined(CONFIG_DRM_AMD_DC_DCN)
6648 	struct dsc_dec_dpcd_caps dsc_caps;
6649 #endif
6650 	struct dc_sink *sink = NULL;
6651 
6652 	memset(&saved_mode, 0, sizeof(saved_mode));
6653 
6654 	if (aconnector == NULL) {
6655 		DRM_ERROR("aconnector is NULL!\n");
6656 		return stream;
6657 	}
6658 
6659 	drm_connector = &aconnector->base;
6660 
6661 	if (!aconnector->dc_sink) {
6662 		sink = create_fake_sink(aconnector);
6663 		if (!sink)
6664 			return stream;
6665 	} else {
6666 		sink = aconnector->dc_sink;
6667 		dc_sink_retain(sink);
6668 	}
6669 
6670 	stream = dc_create_stream_for_sink(sink);
6671 
6672 	if (stream == NULL) {
6673 		DRM_ERROR("Failed to create stream for sink!\n");
6674 		goto finish;
6675 	}
6676 
6677 	stream->dm_stream_context = aconnector;
6678 
6679 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6680 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6681 
6682 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6683 		/* Search for preferred mode */
6684 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6685 			native_mode_found = true;
6686 			break;
6687 		}
6688 	}
6689 	if (!native_mode_found)
6690 		preferred_mode = list_first_entry_or_null(
6691 				&aconnector->base.modes,
6692 				struct drm_display_mode,
6693 				head);
6694 
6695 	mode_refresh = drm_mode_vrefresh(&mode);
6696 
6697 	if (preferred_mode == NULL) {
6698 		/*
6699 		 * This may not be an error: the use case is when we have no
6700 		 * usermode calls to reset and set the mode upon hotplug. In this
6701 		 * case, we call set mode ourselves to restore the previous mode,
6702 		 * and the mode list may not be filled in yet.
6703 		 */
6704 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6705 	} else {
6706 		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6707 		if (recalculate_timing) {
6708 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6709 			drm_mode_copy(&saved_mode, &mode);
6710 			drm_mode_copy(&mode, freesync_mode);
6711 		} else {
6712 			decide_crtc_timing_for_drm_display_mode(
6713 				&mode, preferred_mode, scale);
6714 
6715 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6716 		}
6717 	}
6718 
6719 	if (recalculate_timing)
6720 		drm_mode_set_crtcinfo(&saved_mode, 0);
6721 	else if (!dm_state)
6722 		drm_mode_set_crtcinfo(&mode, 0);
6723 
6724 	/*
6725 	 * If scaling is enabled and the refresh rate didn't change,
6726 	 * we copy the vic and polarities of the old timings.
6727 	 */
6728 	if (!scale || mode_refresh != preferred_refresh)
6729 		fill_stream_properties_from_drm_display_mode(
6730 			stream, &mode, &aconnector->base, con_state, NULL,
6731 			requested_bpc);
6732 	else
6733 		fill_stream_properties_from_drm_display_mode(
6734 			stream, &mode, &aconnector->base, con_state, old_stream,
6735 			requested_bpc);
6736 
6737 #if defined(CONFIG_DRM_AMD_DC_DCN)
6738 	/* SST DSC determination policy */
6739 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6740 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6741 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6742 #endif
6743 
6744 	update_stream_scaling_settings(&mode, dm_state, stream);
6745 
6746 	fill_audio_info(
6747 		&stream->audio_info,
6748 		drm_connector,
6749 		sink);
6750 
6751 	update_stream_signal(stream, sink);
6752 
6753 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6754 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6755 
6756 	if (stream->link->psr_settings.psr_feature_enabled) {
6757 		/*
6758 		 * Decide whether the stream supports the VSC SDP colorimetry
6759 		 * capability before building the VSC info packet.
6760 		 */
6761 		stream->use_vsc_sdp_for_colorimetry = false;
6762 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6763 			stream->use_vsc_sdp_for_colorimetry =
6764 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6765 		} else {
6766 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6767 				stream->use_vsc_sdp_for_colorimetry = true;
6768 		}
6769 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6770 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6771 
6772 	}
6773 finish:
6774 	dc_sink_release(sink);
6775 
6776 	return stream;
6777 }
6778 
6779 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6780 {
6781 	drm_crtc_cleanup(crtc);
6782 	kfree(crtc);
6783 }
6784 
6785 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6786 				  struct drm_crtc_state *state)
6787 {
6788 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6789 
6790 	/* TODO Destroy dc_stream objects once the stream object is flattened */
6791 	if (cur->stream)
6792 		dc_stream_release(cur->stream);
6793 
6794 
6795 	__drm_atomic_helper_crtc_destroy_state(state);
6796 
6797 
6798 	kfree(state);
6799 }
6800 
6801 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6802 {
6803 	struct dm_crtc_state *state;
6804 
6805 	if (crtc->state)
6806 		dm_crtc_destroy_state(crtc, crtc->state);
6807 
6808 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6809 	if (WARN_ON(!state))
6810 		return;
6811 
6812 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6813 }
6814 
6815 static struct drm_crtc_state *
6816 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6817 {
6818 	struct dm_crtc_state *state, *cur;
6819 
6820 	cur = to_dm_crtc_state(crtc->state);
6821 
6822 	if (WARN_ON(!crtc->state))
6823 		return NULL;
6824 
6825 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6826 	if (!state)
6827 		return NULL;
6828 
6829 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6830 
6831 	if (cur->stream) {
6832 		state->stream = cur->stream;
6833 		dc_stream_retain(state->stream);
6834 	}
6835 
6836 	state->active_planes = cur->active_planes;
6837 	state->vrr_infopacket = cur->vrr_infopacket;
6838 	state->abm_level = cur->abm_level;
6839 	state->vrr_supported = cur->vrr_supported;
6840 	state->freesync_config = cur->freesync_config;
6841 	state->cm_has_degamma = cur->cm_has_degamma;
6842 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6843 	state->force_dpms_off = cur->force_dpms_off;
6844 	state->mpo_requested = cur->mpo_requested;
6845 	/* TODO Duplicate dc_stream once the stream object is flattened */
6846 
6847 	return &state->base;
6848 }
6849 
6850 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6851 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6852 {
6853 	crtc_debugfs_init(crtc);
6854 
6855 	return 0;
6856 }
6857 #endif
6858 
6859 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6860 {
6861 	enum dc_irq_source irq_source;
6862 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6863 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6864 	int rc;
6865 
6866 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6867 
6868 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6869 
6870 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6871 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6872 	return rc;
6873 }
6874 
6875 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6876 {
6877 	enum dc_irq_source irq_source;
6878 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6879 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6880 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6881 	struct amdgpu_display_manager *dm = &adev->dm;
6882 	struct vblank_control_work *work;
6883 	int rc = 0;
6884 
6885 	if (enable) {
6886 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6887 		if (amdgpu_dm_vrr_active(acrtc_state))
6888 			rc = dm_set_vupdate_irq(crtc, true);
6889 	} else {
6890 		/* vblank irq off -> vupdate irq off */
6891 		rc = dm_set_vupdate_irq(crtc, false);
6892 	}
6893 
6894 	if (rc)
6895 		return rc;
6896 
6897 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6898 
6899 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6900 		return -EBUSY;
6901 
6902 	if (amdgpu_in_reset(adev))
6903 		return 0;
6904 
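	/*
	 * Defer the stream-level part of the vblank transition to a worker on
	 * dm->vblank_control_workqueue; the allocation below uses GFP_ATOMIC
	 * because this path may be called from atomic context.
	 */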
6905 	if (dm->vblank_control_workqueue) {
6906 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6907 		if (!work)
6908 			return -ENOMEM;
6909 
6910 		INIT_WORK(&work->work, vblank_control_worker);
6911 		work->dm = dm;
6912 		work->acrtc = acrtc;
6913 		work->enable = enable;
6914 
6915 		if (acrtc_state->stream) {
6916 			dc_stream_retain(acrtc_state->stream);
6917 			work->stream = acrtc_state->stream;
6918 		}
6919 
6920 		queue_work(dm->vblank_control_workqueue, &work->work);
6921 	}
6922 
6923 	return 0;
6924 }
6925 
6926 static int dm_enable_vblank(struct drm_crtc *crtc)
6927 {
6928 	return dm_set_vblank(crtc, true);
6929 }
6930 
6931 static void dm_disable_vblank(struct drm_crtc *crtc)
6932 {
6933 	dm_set_vblank(crtc, false);
6934 }
6935 
6936 /* Only the options currently available for the driver are implemented */
6937 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6938 	.reset = dm_crtc_reset_state,
6939 	.destroy = amdgpu_dm_crtc_destroy,
6940 	.set_config = drm_atomic_helper_set_config,
6941 	.page_flip = drm_atomic_helper_page_flip,
6942 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6943 	.atomic_destroy_state = dm_crtc_destroy_state,
6944 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6945 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6946 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6947 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6948 	.enable_vblank = dm_enable_vblank,
6949 	.disable_vblank = dm_disable_vblank,
6950 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6951 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6952 	.late_register = amdgpu_dm_crtc_late_register,
6953 #endif
6954 };
6955 
6956 static enum drm_connector_status
6957 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6958 {
6959 	bool connected;
6960 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6961 
6962 	/*
6963 	 * Notes:
6964 	 * 1. This interface is NOT called in the context of the HPD irq.
6965 	 * 2. This interface *is called* in the context of a user-mode ioctl,
6966 	 * which makes it a bad place for *any* MST-related activity.
6967 	 */
6968 
6969 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6970 	    !aconnector->fake_enable)
6971 		connected = (aconnector->dc_sink != NULL);
6972 	else
6973 		connected = (aconnector->base.force == DRM_FORCE_ON);
6974 
6975 	update_subconnector_property(aconnector);
6976 
6977 	return (connected ? connector_status_connected :
6978 			connector_status_disconnected);
6979 }
6980 
6981 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6982 					    struct drm_connector_state *connector_state,
6983 					    struct drm_property *property,
6984 					    uint64_t val)
6985 {
6986 	struct drm_device *dev = connector->dev;
6987 	struct amdgpu_device *adev = drm_to_adev(dev);
6988 	struct dm_connector_state *dm_old_state =
6989 		to_dm_connector_state(connector->state);
6990 	struct dm_connector_state *dm_new_state =
6991 		to_dm_connector_state(connector_state);
6992 
6993 	int ret = -EINVAL;
6994 
6995 	if (property == dev->mode_config.scaling_mode_property) {
6996 		enum amdgpu_rmx_type rmx_type;
6997 
6998 		switch (val) {
6999 		case DRM_MODE_SCALE_CENTER:
7000 			rmx_type = RMX_CENTER;
7001 			break;
7002 		case DRM_MODE_SCALE_ASPECT:
7003 			rmx_type = RMX_ASPECT;
7004 			break;
7005 		case DRM_MODE_SCALE_FULLSCREEN:
7006 			rmx_type = RMX_FULL;
7007 			break;
7008 		case DRM_MODE_SCALE_NONE:
7009 		default:
7010 			rmx_type = RMX_OFF;
7011 			break;
7012 		}
7013 
7014 		if (dm_old_state->scaling == rmx_type)
7015 			return 0;
7016 
7017 		dm_new_state->scaling = rmx_type;
7018 		ret = 0;
7019 	} else if (property == adev->mode_info.underscan_hborder_property) {
7020 		dm_new_state->underscan_hborder = val;
7021 		ret = 0;
7022 	} else if (property == adev->mode_info.underscan_vborder_property) {
7023 		dm_new_state->underscan_vborder = val;
7024 		ret = 0;
7025 	} else if (property == adev->mode_info.underscan_property) {
7026 		dm_new_state->underscan_enable = val;
7027 		ret = 0;
7028 	} else if (property == adev->mode_info.abm_level_property) {
7029 		dm_new_state->abm_level = val;
7030 		ret = 0;
7031 	}
7032 
7033 	return ret;
7034 }
7035 
7036 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
7037 					    const struct drm_connector_state *state,
7038 					    struct drm_property *property,
7039 					    uint64_t *val)
7040 {
7041 	struct drm_device *dev = connector->dev;
7042 	struct amdgpu_device *adev = drm_to_adev(dev);
7043 	struct dm_connector_state *dm_state =
7044 		to_dm_connector_state(state);
7045 	int ret = -EINVAL;
7046 
7047 	if (property == dev->mode_config.scaling_mode_property) {
7048 		switch (dm_state->scaling) {
7049 		case RMX_CENTER:
7050 			*val = DRM_MODE_SCALE_CENTER;
7051 			break;
7052 		case RMX_ASPECT:
7053 			*val = DRM_MODE_SCALE_ASPECT;
7054 			break;
7055 		case RMX_FULL:
7056 			*val = DRM_MODE_SCALE_FULLSCREEN;
7057 			break;
7058 		case RMX_OFF:
7059 		default:
7060 			*val = DRM_MODE_SCALE_NONE;
7061 			break;
7062 		}
7063 		ret = 0;
7064 	} else if (property == adev->mode_info.underscan_hborder_property) {
7065 		*val = dm_state->underscan_hborder;
7066 		ret = 0;
7067 	} else if (property == adev->mode_info.underscan_vborder_property) {
7068 		*val = dm_state->underscan_vborder;
7069 		ret = 0;
7070 	} else if (property == adev->mode_info.underscan_property) {
7071 		*val = dm_state->underscan_enable;
7072 		ret = 0;
7073 	} else if (property == adev->mode_info.abm_level_property) {
7074 		*val = dm_state->abm_level;
7075 		ret = 0;
7076 	}
7077 
7078 	return ret;
7079 }
7080 
7081 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
7082 {
7083 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
7084 
7085 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
7086 }
7087 
7088 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
7089 {
7090 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7091 	const struct dc_link *link = aconnector->dc_link;
7092 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7093 	struct amdgpu_display_manager *dm = &adev->dm;
7094 	int i;
7095 
7096 	/*
7097 	 * Call only if mst_mgr was initialized before, since it's not done
7098 	 * for all connector types.
7099 	 */
7100 	if (aconnector->mst_mgr.dev)
7101 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
7102 
7103 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
7104 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
7105 	for (i = 0; i < dm->num_of_edps; i++) {
7106 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
7107 			backlight_device_unregister(dm->backlight_dev[i]);
7108 			dm->backlight_dev[i] = NULL;
7109 		}
7110 	}
7111 #endif
7112 
7113 	if (aconnector->dc_em_sink)
7114 		dc_sink_release(aconnector->dc_em_sink);
7115 	aconnector->dc_em_sink = NULL;
7116 	if (aconnector->dc_sink)
7117 		dc_sink_release(aconnector->dc_sink);
7118 	aconnector->dc_sink = NULL;
7119 
7120 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
7121 	drm_connector_unregister(connector);
7122 	drm_connector_cleanup(connector);
7123 	if (aconnector->i2c) {
7124 		i2c_del_adapter(&aconnector->i2c->base);
7125 		kfree(aconnector->i2c);
7126 	}
7127 	kfree(aconnector->dm_dp_aux.aux.name);
7128 
7129 	kfree(connector);
7130 }
7131 
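/*
 * Reset the connector to its default software state: scaling off, underscan
 * disabled, a max requested bpc of 8 and, for eDP connectors, the
 * module-parameter ABM level.
 */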
7132 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
7133 {
7134 	struct dm_connector_state *state =
7135 		to_dm_connector_state(connector->state);
7136 
7137 	if (connector->state)
7138 		__drm_atomic_helper_connector_destroy_state(connector->state);
7139 
7140 	kfree(state);
7141 
7142 	state = kzalloc(sizeof(*state), GFP_KERNEL);
7143 
7144 	if (state) {
7145 		state->scaling = RMX_OFF;
7146 		state->underscan_enable = false;
7147 		state->underscan_hborder = 0;
7148 		state->underscan_vborder = 0;
7149 		state->base.max_requested_bpc = 8;
7150 		state->vcpi_slots = 0;
7151 		state->pbn = 0;
7152 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
7153 			state->abm_level = amdgpu_dm_abm_level;
7154 
7155 		__drm_atomic_helper_connector_reset(connector, &state->base);
7156 	}
7157 }
7158 
7159 struct drm_connector_state *
7160 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
7161 {
7162 	struct dm_connector_state *state =
7163 		to_dm_connector_state(connector->state);
7164 
7165 	struct dm_connector_state *new_state =
7166 			kmemdup(state, sizeof(*state), GFP_KERNEL);
7167 
7168 	if (!new_state)
7169 		return NULL;
7170 
7171 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
7172 
7173 	new_state->freesync_capable = state->freesync_capable;
7174 	new_state->abm_level = state->abm_level;
7175 	new_state->scaling = state->scaling;
7176 	new_state->underscan_enable = state->underscan_enable;
7177 	new_state->underscan_hborder = state->underscan_hborder;
7178 	new_state->underscan_vborder = state->underscan_vborder;
7179 	new_state->vcpi_slots = state->vcpi_slots;
7180 	new_state->pbn = state->pbn;
7181 	return &new_state->base;
7182 }
7183 
7184 static int
7185 amdgpu_dm_connector_late_register(struct drm_connector *connector)
7186 {
7187 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7188 		to_amdgpu_dm_connector(connector);
7189 	int r;
7190 
7191 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
7192 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
7193 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
7194 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
7195 		if (r)
7196 			return r;
7197 	}
7198 
7199 #if defined(CONFIG_DEBUG_FS)
7200 	connector_debugfs_init(amdgpu_dm_connector);
7201 #endif
7202 
7203 	return 0;
7204 }
7205 
7206 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
7207 	.reset = amdgpu_dm_connector_funcs_reset,
7208 	.detect = amdgpu_dm_connector_detect,
7209 	.fill_modes = drm_helper_probe_single_connector_modes,
7210 	.destroy = amdgpu_dm_connector_destroy,
7211 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
7212 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
7213 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
7214 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
7215 	.late_register = amdgpu_dm_connector_late_register,
7216 	.early_unregister = amdgpu_dm_connector_unregister
7217 };
7218 
7219 static int get_modes(struct drm_connector *connector)
7220 {
7221 	return amdgpu_dm_connector_get_modes(connector);
7222 }
7223 
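/*
 * Build an emulated sink from the connector's EDID property blob so that a
 * forced connector behaves as if a panel were attached; if no EDID blob is
 * available (e.g. none loaded via drm.edid_firmware) the connector is forced
 * OFF instead.
 */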
7224 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
7225 {
7226 	struct dc_sink_init_data init_params = {
7227 			.link = aconnector->dc_link,
7228 			.sink_signal = SIGNAL_TYPE_VIRTUAL
7229 	};
7230 	struct edid *edid;
7231 
7232 	if (!aconnector->base.edid_blob_ptr) {
7233 		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
7234 				aconnector->base.name);
7235 
7236 		aconnector->base.force = DRM_FORCE_OFF;
7237 		aconnector->base.override_edid = false;
7238 		return;
7239 	}
7240 
7241 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
7242 
7243 	aconnector->edid = edid;
7244 
7245 	aconnector->dc_em_sink = dc_link_add_remote_sink(
7246 		aconnector->dc_link,
7247 		(uint8_t *)edid,
7248 		(edid->extensions + 1) * EDID_LENGTH,
7249 		&init_params);
7250 
7251 	if (aconnector->base.force == DRM_FORCE_ON) {
7252 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
7253 		aconnector->dc_link->local_sink :
7254 		aconnector->dc_em_sink;
7255 		dc_sink_retain(aconnector->dc_sink);
7256 	}
7257 }
7258 
7259 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7260 {
7261 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7262 
7263 	/*
7264 	 * In case of a headless boot with force on for a DP managed connector,
7265 	 * those settings have to be != 0 to get an initial modeset.
7266 	 */
7267 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7268 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7269 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7270 	}
7271 
7272 
7273 	aconnector->base.override_edid = true;
7274 	create_eml_sink(aconnector);
7275 }
7276 
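/*
 * Create a stream for the sink and run it through DC validation, retrying
 * with progressively lower bpc (down to 6) when validation fails, and
 * retrying once more with YCbCr 4:2:0 forced if encoder validation was the
 * failing step.
 */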
7277 struct dc_stream_state *
7278 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7279 				const struct drm_display_mode *drm_mode,
7280 				const struct dm_connector_state *dm_state,
7281 				const struct dc_stream_state *old_stream)
7282 {
7283 	struct drm_connector *connector = &aconnector->base;
7284 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
7285 	struct dc_stream_state *stream;
7286 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7287 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7288 	enum dc_status dc_result = DC_OK;
7289 
7290 	do {
7291 		stream = create_stream_for_sink(aconnector, drm_mode,
7292 						dm_state, old_stream,
7293 						requested_bpc);
7294 		if (stream == NULL) {
7295 			DRM_ERROR("Failed to create stream for sink!\n");
7296 			break;
7297 		}
7298 
7299 		dc_result = dc_validate_stream(adev->dm.dc, stream);
7300 
7301 		if (dc_result != DC_OK) {
7302 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7303 				      drm_mode->hdisplay,
7304 				      drm_mode->vdisplay,
7305 				      drm_mode->clock,
7306 				      dc_result,
7307 				      dc_status_to_str(dc_result));
7308 
7309 			dc_stream_release(stream);
7310 			stream = NULL;
7311 			requested_bpc -= 2; /* lower bpc to retry validation */
7312 		}
7313 
7314 	} while (stream == NULL && requested_bpc >= 6);
7315 
7316 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7317 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7318 
7319 		aconnector->force_yuv420_output = true;
7320 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
7321 						dm_state, old_stream);
7322 		aconnector->force_yuv420_output = false;
7323 	}
7324 
7325 	return stream;
7326 }
7327 
7328 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7329 				   struct drm_display_mode *mode)
7330 {
7331 	int result = MODE_ERROR;
7332 	struct dc_sink *dc_sink;
7333 	/* TODO: Unhardcode stream count */
7334 	struct dc_stream_state *stream;
7335 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7336 
7337 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7338 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
7339 		return result;
7340 
7341 	/*
7342 	 * Only run this the first time mode_valid is called, to initialize
7343 	 * EDID management.
7344 	 */
7345 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7346 		!aconnector->dc_em_sink)
7347 		handle_edid_mgmt(aconnector);
7348 
7349 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7350 
7351 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7352 				aconnector->base.force != DRM_FORCE_ON) {
7353 		DRM_ERROR("dc_sink is NULL!\n");
7354 		goto fail;
7355 	}
7356 
7357 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7358 	if (stream) {
7359 		dc_stream_release(stream);
7360 		result = MODE_OK;
7361 	}
7362 
7363 fail:
7364 	/* TODO: error handling */
7365 	return result;
7366 }
7367 
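/*
 * Pack the connector's HDR output metadata into an HDMI Dynamic Range and
 * Mastering (DRM) infoframe and translate it into a DC info packet, using the
 * raw infoframe layout for HDMI sinks and wrapping it in an SDP header for
 * DP/eDP sinks.
 */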
7368 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7369 				struct dc_info_packet *out)
7370 {
7371 	struct hdmi_drm_infoframe frame;
7372 	unsigned char buf[30]; /* 26 + 4 */
7373 	ssize_t len;
7374 	int ret, i;
7375 
7376 	memset(out, 0, sizeof(*out));
7377 
7378 	if (!state->hdr_output_metadata)
7379 		return 0;
7380 
7381 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7382 	if (ret)
7383 		return ret;
7384 
7385 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7386 	if (len < 0)
7387 		return (int)len;
7388 
7389 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
7390 	if (len != 30)
7391 		return -EINVAL;
7392 
7393 	/* Prepare the infopacket for DC. */
7394 	switch (state->connector->connector_type) {
7395 	case DRM_MODE_CONNECTOR_HDMIA:
7396 		out->hb0 = 0x87; /* type */
7397 		out->hb1 = 0x01; /* version */
7398 		out->hb2 = 0x1A; /* length */
7399 		out->sb[0] = buf[3]; /* checksum */
7400 		i = 1;
7401 		break;
7402 
7403 	case DRM_MODE_CONNECTOR_DisplayPort:
7404 	case DRM_MODE_CONNECTOR_eDP:
7405 		out->hb0 = 0x00; /* sdp id, zero */
7406 		out->hb1 = 0x87; /* type */
7407 		out->hb2 = 0x1D; /* payload len - 1 */
7408 		out->hb3 = (0x13 << 2); /* sdp version */
7409 		out->sb[0] = 0x01; /* version */
7410 		out->sb[1] = 0x1A; /* length */
7411 		i = 2;
7412 		break;
7413 
7414 	default:
7415 		return -EINVAL;
7416 	}
7417 
7418 	memcpy(&out->sb[i], &buf[4], 26);
7419 	out->valid = true;
7420 
7421 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7422 		       sizeof(out->sb), false);
7423 
7424 	return 0;
7425 }
7426 
7427 static int
7428 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7429 				 struct drm_atomic_state *state)
7430 {
7431 	struct drm_connector_state *new_con_state =
7432 		drm_atomic_get_new_connector_state(state, conn);
7433 	struct drm_connector_state *old_con_state =
7434 		drm_atomic_get_old_connector_state(state, conn);
7435 	struct drm_crtc *crtc = new_con_state->crtc;
7436 	struct drm_crtc_state *new_crtc_state;
7437 	int ret;
7438 
7439 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
7440 
7441 	if (!crtc)
7442 		return 0;
7443 
7444 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7445 		struct dc_info_packet hdr_infopacket;
7446 
7447 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7448 		if (ret)
7449 			return ret;
7450 
7451 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7452 		if (IS_ERR(new_crtc_state))
7453 			return PTR_ERR(new_crtc_state);
7454 
7455 		/*
7456 		 * DC considers the stream backends changed if the
7457 		 * static metadata changes. Forcing the modeset also
7458 		 * gives a simple way for userspace to switch from
7459 		 * 8bpc to 10bpc when setting the metadata to enter
7460 		 * or exit HDR.
7461 		 *
7462 		 * Changing the static metadata after it's been
7463 		 * set is permissible, however. So only force a
7464 		 * modeset if we're entering or exiting HDR.
7465 		 */
7466 		new_crtc_state->mode_changed =
7467 			!old_con_state->hdr_output_metadata ||
7468 			!new_con_state->hdr_output_metadata;
7469 	}
7470 
7471 	return 0;
7472 }
7473 
7474 static const struct drm_connector_helper_funcs
7475 amdgpu_dm_connector_helper_funcs = {
7476 	/*
7477 	 * If hotplugging a second, bigger display in FB Con mode, bigger resolution
7478 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7479 	 * are missing after the user starts lightdm. So we need to renew the modes
7480 	 * list in the get_modes callback, not just return the modes count.
7481 	 */
7482 	.get_modes = get_modes,
7483 	.mode_valid = amdgpu_dm_connector_mode_valid,
7484 	.atomic_check = amdgpu_dm_connector_atomic_check,
7485 };
7486 
7487 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7488 {
7489 }
7490 
7491 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7492 {
7493 	struct drm_atomic_state *state = new_crtc_state->state;
7494 	struct drm_plane *plane;
7495 	int num_active = 0;
7496 
7497 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7498 		struct drm_plane_state *new_plane_state;
7499 
7500 		/* Cursor planes are "fake". */
7501 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7502 			continue;
7503 
7504 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7505 
7506 		if (!new_plane_state) {
7507 			/*
7508 			 * The plane is enabled on the CRTC and hasn't changed
7509 			 * state. This means that it previously passed
7510 			 * validation and is therefore enabled.
7511 			 */
7512 			num_active += 1;
7513 			continue;
7514 		}
7515 
7516 		/* We need a framebuffer to be considered enabled. */
7517 		num_active += (new_plane_state->fb != NULL);
7518 	}
7519 
7520 	return num_active;
7521 }
7522 
7523 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7524 					 struct drm_crtc_state *new_crtc_state)
7525 {
7526 	struct dm_crtc_state *dm_new_crtc_state =
7527 		to_dm_crtc_state(new_crtc_state);
7528 
7529 	dm_new_crtc_state->active_planes = 0;
7530 
7531 	if (!dm_new_crtc_state->stream)
7532 		return;
7533 
7534 	dm_new_crtc_state->active_planes =
7535 		count_crtc_active_planes(new_crtc_state);
7536 }
7537 
7538 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7539 				       struct drm_atomic_state *state)
7540 {
7541 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7542 									  crtc);
7543 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7544 	struct dc *dc = adev->dm.dc;
7545 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7546 	int ret = -EINVAL;
7547 
7548 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7549 
7550 	dm_update_crtc_active_planes(crtc, crtc_state);
7551 
7552 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7553 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7554 		return ret;
7555 	}
7556 
7557 	/*
7558 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7559 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7560 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7561 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7562 	 */
7563 	if (crtc_state->enable &&
7564 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7565 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7566 		return -EINVAL;
7567 	}
7568 
7569 	/* In some use cases, like reset, no stream is attached */
7570 	if (!dm_crtc_state->stream)
7571 		return 0;
7572 
7573 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7574 		return 0;
7575 
7576 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7577 	return ret;
7578 }
7579 
7580 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7581 				      const struct drm_display_mode *mode,
7582 				      struct drm_display_mode *adjusted_mode)
7583 {
7584 	return true;
7585 }
7586 
7587 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7588 	.disable = dm_crtc_helper_disable,
7589 	.atomic_check = dm_crtc_helper_atomic_check,
7590 	.mode_fixup = dm_crtc_helper_mode_fixup,
7591 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7592 };
7593 
7594 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7595 {
7596 
7597 }
7598 
7599 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7600 {
7601 	switch (display_color_depth) {
7602 	case COLOR_DEPTH_666:
7603 		return 6;
7604 	case COLOR_DEPTH_888:
7605 		return 8;
7606 	case COLOR_DEPTH_101010:
7607 		return 10;
7608 	case COLOR_DEPTH_121212:
7609 		return 12;
7610 	case COLOR_DEPTH_141414:
7611 		return 14;
7612 	case COLOR_DEPTH_161616:
7613 		return 16;
7614 	default:
7615 		break;
7616 	}
7617 	return 0;
7618 }
7619 
7620 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7621 					  struct drm_crtc_state *crtc_state,
7622 					  struct drm_connector_state *conn_state)
7623 {
7624 	struct drm_atomic_state *state = crtc_state->state;
7625 	struct drm_connector *connector = conn_state->connector;
7626 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7627 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7628 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7629 	struct drm_dp_mst_topology_mgr *mst_mgr;
7630 	struct drm_dp_mst_port *mst_port;
7631 	enum dc_color_depth color_depth;
7632 	int clock, bpp = 0;
7633 	bool is_y420 = false;
7634 
7635 	if (!aconnector->port || !aconnector->dc_sink)
7636 		return 0;
7637 
7638 	mst_port = aconnector->port;
7639 	mst_mgr = &aconnector->mst_port->mst_mgr;
7640 
7641 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7642 		return 0;
7643 
7644 	if (!state->duplicated) {
7645 		int max_bpc = conn_state->max_requested_bpc;
7646 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7647 				aconnector->force_yuv420_output;
7648 		color_depth = convert_color_depth_from_display_info(connector,
7649 								    is_y420,
7650 								    max_bpc);
7651 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7652 		clock = adjusted_mode->clock;
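		/*
		 * Convert the mode's pixel clock and effective bits per pixel
		 * into the DP MST Payload Bandwidth Number (PBN) used below to
		 * size the VCPI allocation for this connector.
		 */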
7653 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7654 	}
7655 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7656 									   mst_mgr,
7657 									   mst_port,
7658 									   dm_new_connector_state->pbn,
7659 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7660 	if (dm_new_connector_state->vcpi_slots < 0) {
7661 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7662 		return dm_new_connector_state->vcpi_slots;
7663 	}
7664 	return 0;
7665 }
7666 
7667 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7668 	.disable = dm_encoder_helper_disable,
7669 	.atomic_check = dm_encoder_helper_atomic_check
7670 };
7671 
7672 #if defined(CONFIG_DRM_AMD_DC_DCN)
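/*
 * For each MST connector in the atomic state, look up the matching DC stream
 * and the PBN computed by compute_mst_dsc_configs_for_state(), store the PBN
 * and VCPI slot count in the connector state, and enable or disable DSC on the
 * MST port depending on whether the stream timing has DSC set.
 */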
7673 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7674 					    struct dc_state *dc_state,
7675 					    struct dsc_mst_fairness_vars *vars)
7676 {
7677 	struct dc_stream_state *stream = NULL;
7678 	struct drm_connector *connector;
7679 	struct drm_connector_state *new_con_state;
7680 	struct amdgpu_dm_connector *aconnector;
7681 	struct dm_connector_state *dm_conn_state;
7682 	int i, j;
7683 	int vcpi, pbn_div, pbn, slot_num = 0;
7684 
7685 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7686 
7687 		aconnector = to_amdgpu_dm_connector(connector);
7688 
7689 		if (!aconnector->port)
7690 			continue;
7691 
7692 		if (!new_con_state || !new_con_state->crtc)
7693 			continue;
7694 
7695 		dm_conn_state = to_dm_connector_state(new_con_state);
7696 
7697 		for (j = 0; j < dc_state->stream_count; j++) {
7698 			stream = dc_state->streams[j];
7699 			if (!stream)
7700 				continue;
7701 
7702 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7703 				break;
7704 
7705 			stream = NULL;
7706 		}
7707 
7708 		if (!stream)
7709 			continue;
7710 
7711 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7712 		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7713 		for (j = 0; j < dc_state->stream_count; j++) {
7714 			if (vars[j].aconnector == aconnector) {
7715 				pbn = vars[j].pbn;
7716 				break;
7717 			}
7718 		}
7719 
7720 		if (j == dc_state->stream_count)
7721 			continue;
7722 
7723 		slot_num = DIV_ROUND_UP(pbn, pbn_div);
7724 
7725 		if (stream->timing.flags.DSC != 1) {
7726 			dm_conn_state->pbn = pbn;
7727 			dm_conn_state->vcpi_slots = slot_num;
7728 
7729 			drm_dp_mst_atomic_enable_dsc(state,
7730 						     aconnector->port,
7731 						     dm_conn_state->pbn,
7732 						     0,
7733 						     false);
7734 			continue;
7735 		}
7736 
7737 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7738 						    aconnector->port,
7739 						    pbn, pbn_div,
7740 						    true);
7741 		if (vcpi < 0)
7742 			return vcpi;
7743 
7744 		dm_conn_state->pbn = pbn;
7745 		dm_conn_state->vcpi_slots = vcpi;
7746 	}
7747 	return 0;
7748 }
7749 #endif
7750 
7751 static void dm_drm_plane_reset(struct drm_plane *plane)
7752 {
7753 	struct dm_plane_state *amdgpu_state = NULL;
7754 
7755 	if (plane->state)
7756 		plane->funcs->atomic_destroy_state(plane, plane->state);
7757 
7758 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7759 	WARN_ON(amdgpu_state == NULL);
7760 
7761 	if (amdgpu_state)
7762 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7763 }
7764 
7765 static struct drm_plane_state *
7766 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7767 {
7768 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7769 
7770 	old_dm_plane_state = to_dm_plane_state(plane->state);
7771 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7772 	if (!dm_plane_state)
7773 		return NULL;
7774 
7775 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7776 
7777 	if (old_dm_plane_state->dc_state) {
7778 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7779 		dc_plane_state_retain(dm_plane_state->dc_state);
7780 	}
7781 
7782 	return &dm_plane_state->base;
7783 }
7784 
7785 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7786 				struct drm_plane_state *state)
7787 {
7788 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7789 
7790 	if (dm_plane_state->dc_state)
7791 		dc_plane_state_release(dm_plane_state->dc_state);
7792 
7793 	drm_atomic_helper_plane_destroy_state(plane, state);
7794 }
7795 
7796 static const struct drm_plane_funcs dm_plane_funcs = {
7797 	.update_plane	= drm_atomic_helper_update_plane,
7798 	.disable_plane	= drm_atomic_helper_disable_plane,
7799 	.destroy	= drm_primary_helper_destroy,
7800 	.reset = dm_drm_plane_reset,
7801 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7802 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7803 	.format_mod_supported = dm_plane_format_mod_supported,
7804 };
7805 
7806 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
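/*
 * Pin the new framebuffer's BO into a scanout-capable domain, make sure it is
 * bound in the GART, and record its GPU address in the amdgpu_framebuffer so
 * the plane can be programmed at commit time.
 */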
7807 				      struct drm_plane_state *new_state)
7808 {
7809 	struct amdgpu_framebuffer *afb;
7810 	struct drm_gem_object *obj;
7811 	struct amdgpu_device *adev;
7812 	struct amdgpu_bo *rbo;
7813 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7814 	uint32_t domain;
7815 	int r;
7816 
7817 	if (!new_state->fb) {
7818 		DRM_DEBUG_KMS("No FB bound\n");
7819 		return 0;
7820 	}
7821 
7822 	afb = to_amdgpu_framebuffer(new_state->fb);
7823 	obj = new_state->fb->obj[0];
7824 	rbo = gem_to_amdgpu_bo(obj);
7825 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7826 
7827 	r = amdgpu_bo_reserve(rbo, true);
7828 	if (r) {
7829 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7830 		return r;
7831 	}
7832 
7833 	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7834 	if (r) {
7835 		dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7836 		goto error_unlock;
7837 	}
7838 
7839 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7840 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7841 	else
7842 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7843 
7844 	r = amdgpu_bo_pin(rbo, domain);
7845 	if (unlikely(r != 0)) {
7846 		if (r != -ERESTARTSYS)
7847 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7848 		goto error_unlock;
7849 	}
7850 
7851 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7852 	if (unlikely(r != 0)) {
7853 		DRM_ERROR("%p bind failed\n", rbo);
7854 		goto error_unpin;
7855 	}
7856 
7857 	amdgpu_bo_unreserve(rbo);
7858 
7859 	afb->address = amdgpu_bo_gpu_offset(rbo);
7860 
7861 	amdgpu_bo_ref(rbo);
7862 
7863 	/*
7864 	 * We don't do surface updates on planes that have been newly created,
7865 	 * but we also don't have the afb->address during atomic check.
7866 	 *
7867 	 * Fill in buffer attributes depending on the address here, but only on
7868 	 * newly created planes since they're not being used by DC yet and this
7869 	 * won't modify global state.
7870 	 */
7871 	dm_plane_state_old = to_dm_plane_state(plane->state);
7872 	dm_plane_state_new = to_dm_plane_state(new_state);
7873 
7874 	if (dm_plane_state_new->dc_state &&
7875 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7876 		struct dc_plane_state *plane_state =
7877 			dm_plane_state_new->dc_state;
7878 		bool force_disable_dcc = !plane_state->dcc.enable;
7879 
7880 		fill_plane_buffer_attributes(
7881 			adev, afb, plane_state->format, plane_state->rotation,
7882 			afb->tiling_flags,
7883 			&plane_state->tiling_info, &plane_state->plane_size,
7884 			&plane_state->dcc, &plane_state->address,
7885 			afb->tmz_surface, force_disable_dcc);
7886 	}
7887 
7888 	return 0;
7889 
7890 error_unpin:
7891 	amdgpu_bo_unpin(rbo);
7892 
7893 error_unlock:
7894 	amdgpu_bo_unreserve(rbo);
7895 	return r;
7896 }
7897 
7898 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7899 				       struct drm_plane_state *old_state)
7900 {
7901 	struct amdgpu_bo *rbo;
7902 	int r;
7903 
7904 	if (!old_state->fb)
7905 		return;
7906 
7907 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7908 	r = amdgpu_bo_reserve(rbo, false);
7909 	if (unlikely(r)) {
7910 		DRM_ERROR("failed to reserve rbo before unpin\n");
7911 		return;
7912 	}
7913 
7914 	amdgpu_bo_unpin(rbo);
7915 	amdgpu_bo_unreserve(rbo);
7916 	amdgpu_bo_unref(&rbo);
7917 }
7918 
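/*
 * Validate the plane's viewport against the CRTC mode (rejecting viewports
 * that fall completely off screen or below the minimum DC viewport size) and
 * translate the DC plane scaling caps into the 16.16 min/max scale factors
 * expected by drm_atomic_helper_check_plane_state().
 */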
7919 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7920 				       struct drm_crtc_state *new_crtc_state)
7921 {
7922 	struct drm_framebuffer *fb = state->fb;
7923 	int min_downscale, max_upscale;
7924 	int min_scale = 0;
7925 	int max_scale = INT_MAX;
7926 
7927 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7928 	if (fb && state->crtc) {
7929 		/* Validate viewport to cover the case when only the position changes */
7930 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7931 			int viewport_width = state->crtc_w;
7932 			int viewport_height = state->crtc_h;
7933 
7934 			if (state->crtc_x < 0)
7935 				viewport_width += state->crtc_x;
7936 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7937 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7938 
7939 			if (state->crtc_y < 0)
7940 				viewport_height += state->crtc_y;
7941 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7942 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7943 
7944 			if (viewport_width < 0 || viewport_height < 0) {
7945 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7946 				return -EINVAL;
7947 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7948 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7949 				return -EINVAL;
7950 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7951 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7952 				return -EINVAL;
7953 			}
7954 
7955 		}
7956 
7957 		/* Get min/max allowed scaling factors from plane caps. */
7958 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7959 					     &min_downscale, &max_upscale);
7960 		/*
7961 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7962 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7963 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7964 		 */
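		/*
		 * For example, a hypothetical 16x upscale cap (max_upscale ==
		 * 16000 in dc units) gives min_scale = (1000 << 16) / 16000 =
		 * 4096, i.e. 1/16 in 16.16 fixed point.
		 */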
7965 		min_scale = (1000 << 16) / max_upscale;
7966 		max_scale = (1000 << 16) / min_downscale;
7967 	}
7968 
7969 	return drm_atomic_helper_check_plane_state(
7970 		state, new_crtc_state, min_scale, max_scale, true, true);
7971 }
7972 
7973 static int dm_plane_atomic_check(struct drm_plane *plane,
7974 				 struct drm_atomic_state *state)
7975 {
7976 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7977 										 plane);
7978 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7979 	struct dc *dc = adev->dm.dc;
7980 	struct dm_plane_state *dm_plane_state;
7981 	struct dc_scaling_info scaling_info;
7982 	struct drm_crtc_state *new_crtc_state;
7983 	int ret;
7984 
7985 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7986 
7987 	dm_plane_state = to_dm_plane_state(new_plane_state);
7988 
7989 	if (!dm_plane_state->dc_state)
7990 		return 0;
7991 
7992 	new_crtc_state =
7993 		drm_atomic_get_new_crtc_state(state,
7994 					      new_plane_state->crtc);
7995 	if (!new_crtc_state)
7996 		return -EINVAL;
7997 
7998 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7999 	if (ret)
8000 		return ret;
8001 
8002 	ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
8003 	if (ret)
8004 		return ret;
8005 
8006 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
8007 		return 0;
8008 
8009 	return -EINVAL;
8010 }
8011 
8012 static int dm_plane_atomic_async_check(struct drm_plane *plane,
8013 				       struct drm_atomic_state *state)
8014 {
8015 	/* Only support async updates on cursor planes. */
8016 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
8017 		return -EINVAL;
8018 
8019 	return 0;
8020 }
8021 
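/*
 * Async updates are only allowed for the cursor plane (see
 * dm_plane_atomic_async_check() above), so copy the new framebuffer and
 * position into the current plane state and program the cursor immediately,
 * without a full atomic commit.
 */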
8022 static void dm_plane_atomic_async_update(struct drm_plane *plane,
8023 					 struct drm_atomic_state *state)
8024 {
8025 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
8026 									   plane);
8027 	struct drm_plane_state *old_state =
8028 		drm_atomic_get_old_plane_state(state, plane);
8029 
8030 	trace_amdgpu_dm_atomic_update_cursor(new_state);
8031 
8032 	swap(plane->state->fb, new_state->fb);
8033 
8034 	plane->state->src_x = new_state->src_x;
8035 	plane->state->src_y = new_state->src_y;
8036 	plane->state->src_w = new_state->src_w;
8037 	plane->state->src_h = new_state->src_h;
8038 	plane->state->crtc_x = new_state->crtc_x;
8039 	plane->state->crtc_y = new_state->crtc_y;
8040 	plane->state->crtc_w = new_state->crtc_w;
8041 	plane->state->crtc_h = new_state->crtc_h;
8042 
8043 	handle_cursor_update(plane, old_state);
8044 }
8045 
8046 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
8047 	.prepare_fb = dm_plane_helper_prepare_fb,
8048 	.cleanup_fb = dm_plane_helper_cleanup_fb,
8049 	.atomic_check = dm_plane_atomic_check,
8050 	.atomic_async_check = dm_plane_atomic_async_check,
8051 	.atomic_async_update = dm_plane_atomic_async_update
8052 };
8053 
8054 /*
8055  * TODO: these are currently initialized to rgb formats only.
8056  * For future use cases we should either initialize them dynamically based on
8057  * plane capabilities, or initialize this array to all formats, so the internal
8058  * drm check will succeed, and let DC implement the proper check.
8059  */
8060 static const uint32_t rgb_formats[] = {
8061 	DRM_FORMAT_XRGB8888,
8062 	DRM_FORMAT_ARGB8888,
8063 	DRM_FORMAT_RGBA8888,
8064 	DRM_FORMAT_XRGB2101010,
8065 	DRM_FORMAT_XBGR2101010,
8066 	DRM_FORMAT_ARGB2101010,
8067 	DRM_FORMAT_ABGR2101010,
8068 	DRM_FORMAT_XRGB16161616,
8069 	DRM_FORMAT_XBGR16161616,
8070 	DRM_FORMAT_ARGB16161616,
8071 	DRM_FORMAT_ABGR16161616,
8072 	DRM_FORMAT_XBGR8888,
8073 	DRM_FORMAT_ABGR8888,
8074 	DRM_FORMAT_RGB565,
8075 };
8076 
8077 static const uint32_t overlay_formats[] = {
8078 	DRM_FORMAT_XRGB8888,
8079 	DRM_FORMAT_ARGB8888,
8080 	DRM_FORMAT_RGBA8888,
8081 	DRM_FORMAT_XBGR8888,
8082 	DRM_FORMAT_ABGR8888,
8083 	DRM_FORMAT_RGB565
8084 };
8085 
8086 static const u32 cursor_formats[] = {
8087 	DRM_FORMAT_ARGB8888
8088 };
8089 
8090 static int get_plane_formats(const struct drm_plane *plane,
8091 			     const struct dc_plane_cap *plane_cap,
8092 			     uint32_t *formats, int max_formats)
8093 {
8094 	int i, num_formats = 0;
8095 
8096 	/*
8097 	 * TODO: Query support for each group of formats directly from
8098 	 * DC plane caps. This will require adding more formats to the
8099 	 * caps list.
8100 	 */
8101 
8102 	switch (plane->type) {
8103 	case DRM_PLANE_TYPE_PRIMARY:
8104 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
8105 			if (num_formats >= max_formats)
8106 				break;
8107 
8108 			formats[num_formats++] = rgb_formats[i];
8109 		}
8110 
8111 		if (plane_cap && plane_cap->pixel_format_support.nv12)
8112 			formats[num_formats++] = DRM_FORMAT_NV12;
8113 		if (plane_cap && plane_cap->pixel_format_support.p010)
8114 			formats[num_formats++] = DRM_FORMAT_P010;
8115 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
8116 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
8117 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
8118 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
8119 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
8120 		}
8121 		break;
8122 
8123 	case DRM_PLANE_TYPE_OVERLAY:
8124 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
8125 			if (num_formats >= max_formats)
8126 				break;
8127 
8128 			formats[num_formats++] = overlay_formats[i];
8129 		}
8130 		break;
8131 
8132 	case DRM_PLANE_TYPE_CURSOR:
8133 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
8134 			if (num_formats >= max_formats)
8135 				break;
8136 
8137 			formats[num_formats++] = cursor_formats[i];
8138 		}
8139 		break;
8140 	}
8141 
8142 	return num_formats;
8143 }
8144 
8145 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
8146 				struct drm_plane *plane,
8147 				unsigned long possible_crtcs,
8148 				const struct dc_plane_cap *plane_cap)
8149 {
8150 	uint32_t formats[32];
8151 	int num_formats;
8152 	int res = -EPERM;
8153 	unsigned int supported_rotations;
8154 	uint64_t *modifiers = NULL;
8155 
8156 	num_formats = get_plane_formats(plane, plane_cap, formats,
8157 					ARRAY_SIZE(formats));
8158 
8159 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
8160 	if (res)
8161 		return res;
8162 
8163 	if (modifiers == NULL)
8164 		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
8165 
8166 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
8167 				       &dm_plane_funcs, formats, num_formats,
8168 				       modifiers, plane->type, NULL);
8169 	kfree(modifiers);
8170 	if (res)
8171 		return res;
8172 
8173 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
8174 	    plane_cap && plane_cap->per_pixel_alpha) {
8175 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
8176 					  BIT(DRM_MODE_BLEND_PREMULTI) |
8177 					  BIT(DRM_MODE_BLEND_COVERAGE);
8178 
8179 		drm_plane_create_alpha_property(plane);
8180 		drm_plane_create_blend_mode_property(plane, blend_caps);
8181 	}
8182 
8183 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
8184 	    plane_cap &&
8185 	    (plane_cap->pixel_format_support.nv12 ||
8186 	     plane_cap->pixel_format_support.p010)) {
8187 		/* This only affects YUV formats. */
8188 		drm_plane_create_color_properties(
8189 			plane,
8190 			BIT(DRM_COLOR_YCBCR_BT601) |
8191 			BIT(DRM_COLOR_YCBCR_BT709) |
8192 			BIT(DRM_COLOR_YCBCR_BT2020),
8193 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
8194 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
8195 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
8196 	}
8197 
8198 	supported_rotations =
8199 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
8200 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
8201 
8202 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
8203 	    plane->type != DRM_PLANE_TYPE_CURSOR)
8204 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
8205 						   supported_rotations);
8206 
8207 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
8208 
8209 	/* Create (reset) the plane state */
8210 	if (plane->funcs->reset)
8211 		plane->funcs->reset(plane);
8212 
8213 	return 0;
8214 }
8215 
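/*
 * Create a CRTC together with a dedicated cursor plane, attach the DM CRTC
 * funcs and helpers, and register color management (degamma/CTM/gamma) with
 * MAX_COLOR_LUT_ENTRIES entries plus a legacy gamma size of
 * MAX_COLOR_LEGACY_LUT_ENTRIES.
 */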
8216 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
8217 			       struct drm_plane *plane,
8218 			       uint32_t crtc_index)
8219 {
8220 	struct amdgpu_crtc *acrtc = NULL;
8221 	struct drm_plane *cursor_plane;
8222 
8223 	int res = -ENOMEM;
8224 
8225 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
8226 	if (!cursor_plane)
8227 		goto fail;
8228 
8229 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
8230 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
8231 
8232 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
8233 	if (!acrtc)
8234 		goto fail;
8235 
8236 	res = drm_crtc_init_with_planes(
8237 			dm->ddev,
8238 			&acrtc->base,
8239 			plane,
8240 			cursor_plane,
8241 			&amdgpu_dm_crtc_funcs, NULL);
8242 
8243 	if (res)
8244 		goto fail;
8245 
8246 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
8247 
8248 	/* Create (reset) the crtc state */
8249 	if (acrtc->base.funcs->reset)
8250 		acrtc->base.funcs->reset(&acrtc->base);
8251 
8252 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
8253 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
8254 
8255 	acrtc->crtc_id = crtc_index;
8256 	acrtc->base.enabled = false;
8257 	acrtc->otg_inst = -1;
8258 
8259 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8260 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8261 				   true, MAX_COLOR_LUT_ENTRIES);
8262 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8263 
8264 	return 0;
8265 
8266 fail:
8267 	kfree(acrtc);
8268 	kfree(cursor_plane);
8269 	return res;
8270 }
8271 
8272 
8273 static int to_drm_connector_type(enum signal_type st)
8274 {
8275 	switch (st) {
8276 	case SIGNAL_TYPE_HDMI_TYPE_A:
8277 		return DRM_MODE_CONNECTOR_HDMIA;
8278 	case SIGNAL_TYPE_EDP:
8279 		return DRM_MODE_CONNECTOR_eDP;
8280 	case SIGNAL_TYPE_LVDS:
8281 		return DRM_MODE_CONNECTOR_LVDS;
8282 	case SIGNAL_TYPE_RGB:
8283 		return DRM_MODE_CONNECTOR_VGA;
8284 	case SIGNAL_TYPE_DISPLAY_PORT:
8285 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
8286 		return DRM_MODE_CONNECTOR_DisplayPort;
8287 	case SIGNAL_TYPE_DVI_DUAL_LINK:
8288 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
8289 		return DRM_MODE_CONNECTOR_DVID;
8290 	case SIGNAL_TYPE_VIRTUAL:
8291 		return DRM_MODE_CONNECTOR_VIRTUAL;
8292 
8293 	default:
8294 		return DRM_MODE_CONNECTOR_Unknown;
8295 	}
8296 }
8297 
8298 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8299 {
8300 	struct drm_encoder *encoder;
8301 
8302 	/* There is only one encoder per connector */
8303 	drm_connector_for_each_possible_encoder(connector, encoder)
8304 		return encoder;
8305 
8306 	return NULL;
8307 }
8308 
8309 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8310 {
8311 	struct drm_encoder *encoder;
8312 	struct amdgpu_encoder *amdgpu_encoder;
8313 
8314 	encoder = amdgpu_dm_connector_to_encoder(connector);
8315 
8316 	if (encoder == NULL)
8317 		return;
8318 
8319 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8320 
8321 	amdgpu_encoder->native_mode.clock = 0;
8322 
8323 	if (!list_empty(&connector->probed_modes)) {
8324 		struct drm_display_mode *preferred_mode = NULL;
8325 
8326 		list_for_each_entry(preferred_mode,
8327 				    &connector->probed_modes,
8328 				    head) {
8329 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8330 				amdgpu_encoder->native_mode = *preferred_mode;
8331 
8332 			break;
8333 		}
8334 
8335 	}
8336 }
8337 
8338 static struct drm_display_mode *
8339 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8340 			     char *name,
8341 			     int hdisplay, int vdisplay)
8342 {
8343 	struct drm_device *dev = encoder->dev;
8344 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8345 	struct drm_display_mode *mode = NULL;
8346 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8347 
8348 	mode = drm_mode_duplicate(dev, native_mode);
8349 
8350 	if (mode == NULL)
8351 		return NULL;
8352 
8353 	mode->hdisplay = hdisplay;
8354 	mode->vdisplay = vdisplay;
8355 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8356 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8357 
8358 	return mode;
8359 
8360 }
8361 
8362 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8363 						 struct drm_connector *connector)
8364 {
8365 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8366 	struct drm_display_mode *mode = NULL;
8367 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8368 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8369 				to_amdgpu_dm_connector(connector);
8370 	int i;
8371 	int n;
8372 	struct mode_size {
8373 		char name[DRM_DISPLAY_MODE_LEN];
8374 		int w;
8375 		int h;
8376 	} common_modes[] = {
8377 		{  "640x480",  640,  480},
8378 		{  "800x600",  800,  600},
8379 		{ "1024x768", 1024,  768},
8380 		{ "1280x720", 1280,  720},
8381 		{ "1280x800", 1280,  800},
8382 		{"1280x1024", 1280, 1024},
8383 		{ "1440x900", 1440,  900},
8384 		{"1680x1050", 1680, 1050},
8385 		{"1600x1200", 1600, 1200},
8386 		{"1920x1080", 1920, 1080},
8387 		{"1920x1200", 1920, 1200}
8388 	};
8389 
8390 	n = ARRAY_SIZE(common_modes);
8391 
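	/*
	 * Only add common modes that fit within the native mode, are not
	 * identical to it, and are not already in the probed mode list.
	 */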
8392 	for (i = 0; i < n; i++) {
8393 		struct drm_display_mode *curmode = NULL;
8394 		bool mode_existed = false;
8395 
8396 		if (common_modes[i].w > native_mode->hdisplay ||
8397 		    common_modes[i].h > native_mode->vdisplay ||
8398 		   (common_modes[i].w == native_mode->hdisplay &&
8399 		    common_modes[i].h == native_mode->vdisplay))
8400 			continue;
8401 
8402 		list_for_each_entry(curmode, &connector->probed_modes, head) {
8403 			if (common_modes[i].w == curmode->hdisplay &&
8404 			    common_modes[i].h == curmode->vdisplay) {
8405 				mode_existed = true;
8406 				break;
8407 			}
8408 		}
8409 
8410 		if (mode_existed)
8411 			continue;
8412 
8413 		mode = amdgpu_dm_create_common_mode(encoder,
8414 				common_modes[i].name, common_modes[i].w,
8415 				common_modes[i].h);
8416 		if (!mode)
8417 			continue;
8418 
8419 		drm_mode_probed_add(connector, mode);
8420 		amdgpu_dm_connector->num_modes++;
8421 	}
8422 }
8423 
8424 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8425 {
8426 	struct drm_encoder *encoder;
8427 	struct amdgpu_encoder *amdgpu_encoder;
8428 	const struct drm_display_mode *native_mode;
8429 
8430 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8431 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8432 		return;
8433 
8434 	encoder = amdgpu_dm_connector_to_encoder(connector);
8435 	if (!encoder)
8436 		return;
8437 
8438 	amdgpu_encoder = to_amdgpu_encoder(encoder);
8439 
8440 	native_mode = &amdgpu_encoder->native_mode;
8441 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8442 		return;
8443 
8444 	drm_connector_set_panel_orientation_with_quirk(connector,
8445 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8446 						       native_mode->hdisplay,
8447 						       native_mode->vdisplay);
8448 }
8449 
8450 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8451 					      struct edid *edid)
8452 {
8453 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8454 			to_amdgpu_dm_connector(connector);
8455 
8456 	if (edid) {
8457 		/* empty probed_modes */
8458 		INIT_LIST_HEAD(&connector->probed_modes);
8459 		amdgpu_dm_connector->num_modes =
8460 				drm_add_edid_modes(connector, edid);
8461 
8462 		/* Sort the probed modes before calling
8463 		 * amdgpu_dm_get_native_mode(), since an EDID can have
8464 		 * more than one preferred mode. Modes later in the probed
8465 		 * list may be preferred at a higher resolution, e.g. a
8466 		 * 3840x2160 preferred timing in the base EDID and a
8467 		 * 4096x2160 preferred resolution in a later DisplayID
8468 		 * extension block.
8469 		 */
8470 		drm_mode_sort(&connector->probed_modes);
8471 		amdgpu_dm_get_native_mode(connector);
8472 
8473 		/* Freesync capabilities are reset by calling
8474 		 * drm_add_edid_modes() and need to be
8475 		 * restored here.
8476 		 */
8477 		amdgpu_dm_update_freesync_caps(connector, edid);
8478 
8479 		amdgpu_set_panel_orientation(connector);
8480 	} else {
8481 		amdgpu_dm_connector->num_modes = 0;
8482 	}
8483 }
8484 
8485 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8486 			      struct drm_display_mode *mode)
8487 {
8488 	struct drm_display_mode *m;
8489 
8490 	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8491 		if (drm_mode_equal(m, mode))
8492 			return true;
8493 	}
8494 
8495 	return false;
8496 }
8497 
8498 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8499 {
8500 	const struct drm_display_mode *m;
8501 	struct drm_display_mode *new_mode;
8502 	uint i;
8503 	uint32_t new_modes_count = 0;
8504 
8505 	/* Standard FPS values
8506 	 *
8507 	 * 23.976       - TV/NTSC
8508 	 * 24           - Cinema
8509 	 * 25           - TV/PAL
8510 	 * 29.97        - TV/NTSC
8511 	 * 30           - TV/NTSC
8512 	 * 48           - Cinema HFR
8513 	 * 50           - TV/PAL
8514 	 * 60           - Commonly used
8515 	 * 48,72,96,120 - Multiples of 24
8516 	 */
8517 	static const uint32_t common_rates[] = {
8518 		23976, 24000, 25000, 29970, 30000,
8519 		48000, 50000, 60000, 72000, 96000, 120000
8520 	};
8521 
8522 	/*
8523 	 * Find mode with highest refresh rate with the same resolution
8524 	 * as the preferred mode. Some monitors report a preferred mode
8525 	 * with lower resolution than the highest refresh rate supported.
8526 	 */
8527 
8528 	m = get_highest_refresh_rate_mode(aconnector, true);
8529 	if (!m)
8530 		return 0;
8531 
8532 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8533 		uint64_t target_vtotal, target_vtotal_diff;
8534 		uint64_t num, den;
8535 
8536 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8537 			continue;
8538 
8539 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8540 		    common_rates[i] > aconnector->max_vfreq * 1000)
8541 			continue;
8542 
8543 		num = (unsigned long long)m->clock * 1000 * 1000;
8544 		den = common_rates[i] * (unsigned long long)m->htotal;
8545 		target_vtotal = div_u64(num, den);
8546 		target_vtotal_diff = target_vtotal - m->vtotal;
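		/*
		 * Example: a CEA 1920x1080@60 mode (148.5 MHz pixel clock,
		 * htotal 2200, vtotal 1125) retargeted to 48 Hz needs
		 * target_vtotal = 148500000 / (48 * 2200) = 1406, i.e. the
		 * vertical blanking is stretched by 281 lines.
		 */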
8547 
8548 		/* Check for illegal modes */
8549 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8550 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8551 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8552 			continue;
8553 
8554 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8555 		if (!new_mode)
8556 			goto out;
8557 
8558 		new_mode->vtotal += (u16)target_vtotal_diff;
8559 		new_mode->vsync_start += (u16)target_vtotal_diff;
8560 		new_mode->vsync_end += (u16)target_vtotal_diff;
8561 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8562 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8563 
8564 		if (!is_duplicate_mode(aconnector, new_mode)) {
8565 			drm_mode_probed_add(&aconnector->base, new_mode);
8566 			new_modes_count += 1;
8567 		} else
8568 			drm_mode_destroy(aconnector->base.dev, new_mode);
8569 	}
8570  out:
8571 	return new_modes_count;
8572 }
8573 
8574 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8575 						   struct edid *edid)
8576 {
8577 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8578 		to_amdgpu_dm_connector(connector);
8579 
8580 	if (!edid)
8581 		return;
8582 
8583 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8584 		amdgpu_dm_connector->num_modes +=
8585 			add_fs_modes(amdgpu_dm_connector);
8586 }
8587 
8588 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8589 {
8590 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8591 			to_amdgpu_dm_connector(connector);
8592 	struct drm_encoder *encoder;
8593 	struct edid *edid = amdgpu_dm_connector->edid;
8594 
8595 	encoder = amdgpu_dm_connector_to_encoder(connector);
8596 
8597 	if (!drm_edid_is_valid(edid)) {
8598 		amdgpu_dm_connector->num_modes =
8599 				drm_add_modes_noedid(connector, 640, 480);
8600 	} else {
8601 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8602 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8603 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8604 	}
8605 	amdgpu_dm_fbc_init(connector);
8606 
8607 	return amdgpu_dm_connector->num_modes;
8608 }
8609 
8610 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8611 				     struct amdgpu_dm_connector *aconnector,
8612 				     int connector_type,
8613 				     struct dc_link *link,
8614 				     int link_index)
8615 {
8616 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8617 
8618 	/*
8619 	 * Some of the properties below require access to state, like bpc.
8620 	 * Allocate some default initial connector state with our reset helper.
8621 	 */
8622 	if (aconnector->base.funcs->reset)
8623 		aconnector->base.funcs->reset(&aconnector->base);
8624 
8625 	aconnector->connector_id = link_index;
8626 	aconnector->dc_link = link;
8627 	aconnector->base.interlace_allowed = false;
8628 	aconnector->base.doublescan_allowed = false;
8629 	aconnector->base.stereo_allowed = false;
8630 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8631 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8632 	aconnector->audio_inst = -1;
8633 	mutex_init(&aconnector->hpd_lock);
8634 
8635 	/*
8636 	 * Configure HPD hot plug support: connector->polled defaults to 0,
8637 	 * which means HPD hot plug is not supported.
8638 	 */
8639 	switch (connector_type) {
8640 	case DRM_MODE_CONNECTOR_HDMIA:
8641 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8642 		aconnector->base.ycbcr_420_allowed =
8643 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8644 		break;
8645 	case DRM_MODE_CONNECTOR_DisplayPort:
8646 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8647 		link->link_enc = link_enc_cfg_get_link_enc(link);
8648 		ASSERT(link->link_enc);
8649 		if (link->link_enc)
8650 			aconnector->base.ycbcr_420_allowed =
8651 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8652 		break;
8653 	case DRM_MODE_CONNECTOR_DVID:
8654 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8655 		break;
8656 	default:
8657 		break;
8658 	}
8659 
8660 	drm_object_attach_property(&aconnector->base.base,
8661 				dm->ddev->mode_config.scaling_mode_property,
8662 				DRM_MODE_SCALE_NONE);
8663 
8664 	drm_object_attach_property(&aconnector->base.base,
8665 				adev->mode_info.underscan_property,
8666 				UNDERSCAN_OFF);
8667 	drm_object_attach_property(&aconnector->base.base,
8668 				adev->mode_info.underscan_hborder_property,
8669 				0);
8670 	drm_object_attach_property(&aconnector->base.base,
8671 				adev->mode_info.underscan_vborder_property,
8672 				0);
8673 
8674 	if (!aconnector->mst_port)
8675 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8676 
8677 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8678 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8679 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8680 
8681 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8682 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8683 		drm_object_attach_property(&aconnector->base.base,
8684 				adev->mode_info.abm_level_property, 0);
8685 	}
8686 
8687 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8688 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8689 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8690 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8691 
8692 		if (!aconnector->mst_port)
8693 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8694 
8695 #ifdef CONFIG_DRM_AMD_DC_HDCP
8696 		if (adev->dm.hdcp_workqueue)
8697 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8698 #endif
8699 	}
8700 }
8701 
8702 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8703 			      struct i2c_msg *msgs, int num)
8704 {
8705 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8706 	struct ddc_service *ddc_service = i2c->ddc_service;
8707 	struct i2c_command cmd;
8708 	int i;
8709 	int result = -EIO;
8710 
8711 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8712 
8713 	if (!cmd.payloads)
8714 		return result;
8715 
8716 	cmd.number_of_payloads = num;
8717 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8718 	cmd.speed = 100;
8719 
8720 	for (i = 0; i < num; i++) {
8721 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8722 		cmd.payloads[i].address = msgs[i].addr;
8723 		cmd.payloads[i].length = msgs[i].len;
8724 		cmd.payloads[i].data = msgs[i].buf;
8725 	}
8726 
8727 	if (dc_submit_i2c(
8728 			ddc_service->ctx->dc,
8729 			ddc_service->ddc_pin->hw_info.ddc_channel,
8730 			&cmd))
8731 		result = num;
8732 
8733 	kfree(cmd.payloads);
8734 	return result;
8735 }
8736 
8737 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8738 {
8739 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8740 }
8741 
8742 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8743 	.master_xfer = amdgpu_dm_i2c_xfer,
8744 	.functionality = amdgpu_dm_i2c_func,
8745 };
8746 
8747 static struct amdgpu_i2c_adapter *
8748 create_i2c(struct ddc_service *ddc_service,
8749 	   int link_index,
8750 	   int *res)
8751 {
8752 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8753 	struct amdgpu_i2c_adapter *i2c;
8754 
8755 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8756 	if (!i2c)
8757 		return NULL;
8758 	i2c->base.owner = THIS_MODULE;
8759 	i2c->base.class = I2C_CLASS_DDC;
8760 	i2c->base.dev.parent = &adev->pdev->dev;
8761 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8762 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8763 	i2c_set_adapdata(&i2c->base, i2c);
8764 	i2c->ddc_service = ddc_service;
8765 	if (i2c->ddc_service->ddc_pin)
8766 		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8767 
8768 	return i2c;
8769 }
8770 
8771 
8772 /*
8773  * Note: this function assumes that dc_link_detect() was called for the
8774  * dc_link which will be represented by this aconnector.
8775  */
8776 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8777 				    struct amdgpu_dm_connector *aconnector,
8778 				    uint32_t link_index,
8779 				    struct amdgpu_encoder *aencoder)
8780 {
8781 	int res = 0;
8782 	int connector_type;
8783 	struct dc *dc = dm->dc;
8784 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8785 	struct amdgpu_i2c_adapter *i2c;
8786 
8787 	link->priv = aconnector;
8788 
8789 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8790 
8791 	i2c = create_i2c(link->ddc, link->link_index, &res);
8792 	if (!i2c) {
8793 		DRM_ERROR("Failed to create i2c adapter data\n");
8794 		return -ENOMEM;
8795 	}
8796 
8797 	aconnector->i2c = i2c;
8798 	res = i2c_add_adapter(&i2c->base);
8799 
8800 	if (res) {
8801 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8802 		goto out_free;
8803 	}
8804 
8805 	connector_type = to_drm_connector_type(link->connector_signal);
8806 
8807 	res = drm_connector_init_with_ddc(
8808 			dm->ddev,
8809 			&aconnector->base,
8810 			&amdgpu_dm_connector_funcs,
8811 			connector_type,
8812 			&i2c->base);
8813 
8814 	if (res) {
8815 		DRM_ERROR("connector_init failed\n");
8816 		aconnector->connector_id = -1;
8817 		goto out_free;
8818 	}
8819 
8820 	drm_connector_helper_add(
8821 			&aconnector->base,
8822 			&amdgpu_dm_connector_helper_funcs);
8823 
8824 	amdgpu_dm_connector_init_helper(
8825 		dm,
8826 		aconnector,
8827 		connector_type,
8828 		link,
8829 		link_index);
8830 
8831 	drm_connector_attach_encoder(
8832 		&aconnector->base, &aencoder->base);
8833 
8834 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8835 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8836 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8837 
8838 out_free:
8839 	if (res) {
8840 		kfree(i2c);
8841 		aconnector->i2c = NULL;
8842 	}
8843 	return res;
8844 }
8845 
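/*
 * Build the possible_crtcs bitmask for an encoder: one bit per CRTC,
 * i.e. (1 << num_crtc) - 1, capped at six CRTCs.
 */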
8846 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8847 {
8848 	switch (adev->mode_info.num_crtc) {
8849 	case 1:
8850 		return 0x1;
8851 	case 2:
8852 		return 0x3;
8853 	case 3:
8854 		return 0x7;
8855 	case 4:
8856 		return 0xf;
8857 	case 5:
8858 		return 0x1f;
8859 	case 6:
8860 	default:
8861 		return 0x3f;
8862 	}
8863 }
8864 
8865 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8866 				  struct amdgpu_encoder *aencoder,
8867 				  uint32_t link_index)
8868 {
8869 	struct amdgpu_device *adev = drm_to_adev(dev);
8870 
8871 	int res = drm_encoder_init(dev,
8872 				   &aencoder->base,
8873 				   &amdgpu_dm_encoder_funcs,
8874 				   DRM_MODE_ENCODER_TMDS,
8875 				   NULL);
8876 
8877 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8878 
8879 	if (!res)
8880 		aencoder->encoder_id = link_index;
8881 	else
8882 		aencoder->encoder_id = -1;
8883 
8884 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8885 
8886 	return res;
8887 }
8888 
8889 static void manage_dm_interrupts(struct amdgpu_device *adev,
8890 				 struct amdgpu_crtc *acrtc,
8891 				 bool enable)
8892 {
8893 	/*
8894 	 * We have no guarantee that the frontend index maps to the same
8895 	 * backend index - some even map to more than one.
8896 	 *
8897 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8898 	 */
8899 	int irq_type =
8900 		amdgpu_display_crtc_idx_to_irq_type(
8901 			adev,
8902 			acrtc->crtc_id);
8903 
8904 	if (enable) {
8905 		drm_crtc_vblank_on(&acrtc->base);
8906 		amdgpu_irq_get(
8907 			adev,
8908 			&adev->pageflip_irq,
8909 			irq_type);
8910 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8911 		amdgpu_irq_get(
8912 			adev,
8913 			&adev->vline0_irq,
8914 			irq_type);
8915 #endif
8916 	} else {
8917 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8918 		amdgpu_irq_put(
8919 			adev,
8920 			&adev->vline0_irq,
8921 			irq_type);
8922 #endif
8923 		amdgpu_irq_put(
8924 			adev,
8925 			&adev->pageflip_irq,
8926 			irq_type);
8927 		drm_crtc_vblank_off(&acrtc->base);
8928 	}
8929 }
8930 
8931 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8932 				      struct amdgpu_crtc *acrtc)
8933 {
8934 	int irq_type =
8935 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8936 
8937 	/*
8938 	 * This reads the current state for the IRQ and forcibly reapplies
8939 	 * the setting to hardware.
8940 	 */
8941 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8942 }
8943 
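/*
 * Return true if the scaling mode or the underscan borders changed
 * between the old and the new connector state.
 */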
8944 static bool
8945 is_scaling_state_different(const struct dm_connector_state *dm_state,
8946 			   const struct dm_connector_state *old_dm_state)
8947 {
8948 	if (dm_state->scaling != old_dm_state->scaling)
8949 		return true;
8950 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8951 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8952 			return true;
8953 	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8954 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8955 			return true;
8956 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8957 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8958 		return true;
8959 	return false;
8960 }
8961 
8962 #ifdef CONFIG_DRM_AMD_DC_HDCP
8963 static bool is_content_protection_different(struct drm_connector_state *state,
8964 					    const struct drm_connector_state *old_state,
8965 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8966 {
8967 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8968 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8969 
8970 	/* Handle: Type0/1 change */
8971 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8972 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8973 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8974 		return true;
8975 	}
8976 
8977 	/* CP is being re-enabled, ignore this
8978 	 *
8979 	 * Handles:	ENABLED -> DESIRED
8980 	 */
8981 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8982 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8983 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8984 		return false;
8985 	}
8986 
8987 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8988 	 *
8989 	 * Handles:	UNDESIRED -> ENABLED
8990 	 */
8991 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8992 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8993 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8994 
8995 	/* Stream removed and re-enabled
8996 	 *
8997 	 * Can sometimes overlap with the HPD case,
8998 	 * thus set update_hdcp to false to avoid
8999 	 * setting HDCP multiple times.
9000 	 *
9001 	 * Handles:	DESIRED -> DESIRED (Special case)
9002 	 */
9003 	if (!(old_state->crtc && old_state->crtc->enabled) &&
9004 		state->crtc && state->crtc->enabled &&
9005 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
9006 		dm_con_state->update_hdcp = false;
9007 		return true;
9008 	}
9009 
9010 	/* Hot-plug, headless s3, dpms
9011 	 *
9012 	 * Only start HDCP if the display is connected/enabled.
9013 	 * update_hdcp flag will be set to false until the next
9014 	 * HPD comes in.
9015 	 *
9016 	 * Handles:	DESIRED -> DESIRED (Special case)
9017 	 */
9018 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
9019 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
9020 		dm_con_state->update_hdcp = false;
9021 		return true;
9022 	}
9023 
9024 	/*
9025 	 * Handles:	UNDESIRED -> UNDESIRED
9026 	 *		DESIRED -> DESIRED
9027 	 *		ENABLED -> ENABLED
9028 	 */
9029 	if (old_state->content_protection == state->content_protection)
9030 		return false;
9031 
9032 	/*
9033 	 * Handles:	UNDESIRED -> DESIRED
9034 	 *		DESIRED -> UNDESIRED
9035 	 *		ENABLED -> UNDESIRED
9036 	 */
9037 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
9038 		return true;
9039 
9040 	/*
9041 	 * Handles:	DESIRED -> ENABLED
9042 	 */
9043 	return false;
9044 }
9045 
9046 #endif
9047 static void remove_stream(struct amdgpu_device *adev,
9048 			  struct amdgpu_crtc *acrtc,
9049 			  struct dc_stream_state *stream)
9050 {
9051 	/* this is the update mode case */
9052 
9053 	acrtc->otg_inst = -1;
9054 	acrtc->enabled = false;
9055 }
9056 
9057 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
9058 			       struct dc_cursor_position *position)
9059 {
9060 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9061 	int x, y;
9062 	int xorigin = 0, yorigin = 0;
9063 
9064 	if (!crtc || !plane->state->fb)
9065 		return 0;
9066 
9067 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
9068 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
9069 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
9070 			  __func__,
9071 			  plane->state->crtc_w,
9072 			  plane->state->crtc_h);
9073 		return -EINVAL;
9074 	}
9075 
9076 	x = plane->state->crtc_x;
9077 	y = plane->state->crtc_y;
9078 
9079 	if (x <= -amdgpu_crtc->max_cursor_width ||
9080 	    y <= -amdgpu_crtc->max_cursor_height)
9081 		return 0;
9082 
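	/*
	 * If the cursor hangs off the top/left edge, clamp the position to 0
	 * and move the hotspot instead so the visible part stays in place.
	 */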
9083 	if (x < 0) {
9084 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
9085 		x = 0;
9086 	}
9087 	if (y < 0) {
9088 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
9089 		y = 0;
9090 	}
9091 	position->enable = true;
9092 	position->translate_by_source = true;
9093 	position->x = x;
9094 	position->y = y;
9095 	position->x_hotspot = xorigin;
9096 	position->y_hotspot = yorigin;
9097 
9098 	return 0;
9099 }
9100 
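/*
 * Program the DC cursor attributes and position for a cursor plane update.
 * A position that is not enabled turns the cursor off.
 */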
9101 static void handle_cursor_update(struct drm_plane *plane,
9102 				 struct drm_plane_state *old_plane_state)
9103 {
9104 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
9105 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
9106 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
9107 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
9108 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
9109 	uint64_t address = afb ? afb->address : 0;
9110 	struct dc_cursor_position position = {0};
9111 	struct dc_cursor_attributes attributes;
9112 	int ret;
9113 
9114 	if (!plane->state->fb && !old_plane_state->fb)
9115 		return;
9116 
9117 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
9118 		      __func__,
9119 		      amdgpu_crtc->crtc_id,
9120 		      plane->state->crtc_w,
9121 		      plane->state->crtc_h);
9122 
9123 	ret = get_cursor_position(plane, crtc, &position);
9124 	if (ret)
9125 		return;
9126 
9127 	if (!position.enable) {
9128 		/* turn off cursor */
9129 		if (crtc_state && crtc_state->stream) {
9130 			mutex_lock(&adev->dm.dc_lock);
9131 			dc_stream_set_cursor_position(crtc_state->stream,
9132 						      &position);
9133 			mutex_unlock(&adev->dm.dc_lock);
9134 		}
9135 		return;
9136 	}
9137 
9138 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
9139 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
9140 
9141 	memset(&attributes, 0, sizeof(attributes));
9142 	attributes.address.high_part = upper_32_bits(address);
9143 	attributes.address.low_part  = lower_32_bits(address);
9144 	attributes.width             = plane->state->crtc_w;
9145 	attributes.height            = plane->state->crtc_h;
9146 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
9147 	attributes.rotation_angle    = 0;
9148 	attributes.attribute_flags.value = 0;
9149 
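	/* DC expects the cursor pitch in pixels, so convert from the byte pitch. */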
9150 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
9151 
9152 	if (crtc_state->stream) {
9153 		mutex_lock(&adev->dm.dc_lock);
9154 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
9155 							 &attributes))
9156 			DRM_ERROR("DC failed to set cursor attributes\n");
9157 
9158 		if (!dc_stream_set_cursor_position(crtc_state->stream,
9159 						   &position))
9160 			DRM_ERROR("DC failed to set cursor position\n");
9161 		mutex_unlock(&adev->dm.dc_lock);
9162 	}
9163 }
9164 
9165 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
9166 {
9167 
9168 	assert_spin_locked(&acrtc->base.dev->event_lock);
9169 	WARN_ON(acrtc->event);
9170 
9171 	acrtc->event = acrtc->base.state->event;
9172 
9173 	/* Set the flip status */
9174 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
9175 
9176 	/* Mark this event as consumed */
9177 	acrtc->base.state->event = NULL;
9178 
9179 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
9180 		     acrtc->crtc_id);
9181 }
9182 
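/*
 * Recompute the VRR parameters and infopacket for a flip on this stream and
 * stash them in the CRTC's IRQ parameters under the event lock.
 */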
9183 static void update_freesync_state_on_stream(
9184 	struct amdgpu_display_manager *dm,
9185 	struct dm_crtc_state *new_crtc_state,
9186 	struct dc_stream_state *new_stream,
9187 	struct dc_plane_state *surface,
9188 	u32 flip_timestamp_in_us)
9189 {
9190 	struct mod_vrr_params vrr_params;
9191 	struct dc_info_packet vrr_infopacket = {0};
9192 	struct amdgpu_device *adev = dm->adev;
9193 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9194 	unsigned long flags;
9195 	bool pack_sdp_v1_3 = false;
9196 
9197 	if (!new_stream)
9198 		return;
9199 
9200 	/*
9201 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9202 	 * For now it's sufficient to just guard against these conditions.
9203 	 */
9204 
9205 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9206 		return;
9207 
9208 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9209 	vrr_params = acrtc->dm_irq_params.vrr_params;
9210 
9211 	if (surface) {
9212 		mod_freesync_handle_preflip(
9213 			dm->freesync_module,
9214 			surface,
9215 			new_stream,
9216 			flip_timestamp_in_us,
9217 			&vrr_params);
9218 
9219 		if (adev->family < AMDGPU_FAMILY_AI &&
9220 		    amdgpu_dm_vrr_active(new_crtc_state)) {
9221 			mod_freesync_handle_v_update(dm->freesync_module,
9222 						     new_stream, &vrr_params);
9223 
9224 			/* Need to call this before the frame ends. */
9225 			dc_stream_adjust_vmin_vmax(dm->dc,
9226 						   new_crtc_state->stream,
9227 						   &vrr_params.adjust);
9228 		}
9229 	}
9230 
9231 	mod_freesync_build_vrr_infopacket(
9232 		dm->freesync_module,
9233 		new_stream,
9234 		&vrr_params,
9235 		PACKET_TYPE_VRR,
9236 		TRANSFER_FUNC_UNKNOWN,
9237 		&vrr_infopacket,
9238 		pack_sdp_v1_3);
9239 
9240 	new_crtc_state->freesync_timing_changed |=
9241 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9242 			&vrr_params.adjust,
9243 			sizeof(vrr_params.adjust)) != 0);
9244 
9245 	new_crtc_state->freesync_vrr_info_changed |=
9246 		(memcmp(&new_crtc_state->vrr_infopacket,
9247 			&vrr_infopacket,
9248 			sizeof(vrr_infopacket)) != 0);
9249 
9250 	acrtc->dm_irq_params.vrr_params = vrr_params;
9251 	new_crtc_state->vrr_infopacket = vrr_infopacket;
9252 
9253 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
9254 	new_stream->vrr_infopacket = vrr_infopacket;
9255 
9256 	if (new_crtc_state->freesync_vrr_info_changed)
9257 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
9258 			      new_crtc_state->base.crtc->base.id,
9259 			      (int)new_crtc_state->base.vrr_enabled,
9260 			      (int)vrr_params.state);
9261 
9262 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9263 }
9264 
9265 static void update_stream_irq_parameters(
9266 	struct amdgpu_display_manager *dm,
9267 	struct dm_crtc_state *new_crtc_state)
9268 {
9269 	struct dc_stream_state *new_stream = new_crtc_state->stream;
9270 	struct mod_vrr_params vrr_params;
9271 	struct mod_freesync_config config = new_crtc_state->freesync_config;
9272 	struct amdgpu_device *adev = dm->adev;
9273 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9274 	unsigned long flags;
9275 
9276 	if (!new_stream)
9277 		return;
9278 
9279 	/*
9280 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
9281 	 * For now it's sufficient to just guard against these conditions.
9282 	 */
9283 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9284 		return;
9285 
9286 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9287 	vrr_params = acrtc->dm_irq_params.vrr_params;
9288 
9289 	if (new_crtc_state->vrr_supported &&
9290 	    config.min_refresh_in_uhz &&
9291 	    config.max_refresh_in_uhz) {
9292 		/*
9293 		 * if freesync compatible mode was set, config.state will be set
9294 		 * in atomic check
9295 		 */
9296 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9297 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9298 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9299 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9300 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9301 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9302 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9303 		} else {
9304 			config.state = new_crtc_state->base.vrr_enabled ?
9305 						     VRR_STATE_ACTIVE_VARIABLE :
9306 						     VRR_STATE_INACTIVE;
9307 		}
9308 	} else {
9309 		config.state = VRR_STATE_UNSUPPORTED;
9310 	}
9311 
9312 	mod_freesync_build_vrr_params(dm->freesync_module,
9313 				      new_stream,
9314 				      &config, &vrr_params);
9315 
9316 	new_crtc_state->freesync_timing_changed |=
9317 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9318 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9319 
9320 	new_crtc_state->freesync_config = config;
9321 	/* Copy state for access from DM IRQ handler */
9322 	acrtc->dm_irq_params.freesync_config = config;
9323 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9324 	acrtc->dm_irq_params.vrr_params = vrr_params;
9325 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9326 }
9327 
9328 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9329 					    struct dm_crtc_state *new_state)
9330 {
9331 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9332 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9333 
9334 	if (!old_vrr_active && new_vrr_active) {
9335 		/* Transition VRR inactive -> active:
9336 		 * While VRR is active, we must not disable the vblank irq, as a
9337 		 * reenable after a disable would compute bogus vblank/pflip
9338 		 * timestamps if the reenable happens inside the display front porch.
9339 		 *
9340 		 * We also need the vupdate irq for the actual core vblank handling
9341 		 * at the end of vblank.
9342 		 */
9343 		dm_set_vupdate_irq(new_state->base.crtc, true);
9344 		drm_crtc_vblank_get(new_state->base.crtc);
9345 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9346 				 __func__, new_state->base.crtc->base.id);
9347 	} else if (old_vrr_active && !new_vrr_active) {
9348 		/* Transition VRR active -> inactive:
9349 		 * Allow vblank irq disable again for fixed refresh rate.
9350 		 */
9351 		dm_set_vupdate_irq(new_state->base.crtc, false);
9352 		drm_crtc_vblank_put(new_state->base.crtc);
9353 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9354 				 __func__, new_state->base.crtc->base.id);
9355 	}
9356 }
9357 
9358 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9359 {
9360 	struct drm_plane *plane;
9361 	struct drm_plane_state *old_plane_state;
9362 	int i;
9363 
9364 	/*
9365 	 * TODO: Make this per-stream so we don't issue redundant updates for
9366 	 * commits with multiple streams.
9367 	 */
9368 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
9369 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9370 			handle_cursor_update(plane, old_plane_state);
9371 }
9372 
9373 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9374 				    struct dc_state *dc_state,
9375 				    struct drm_device *dev,
9376 				    struct amdgpu_display_manager *dm,
9377 				    struct drm_crtc *pcrtc,
9378 				    bool wait_for_vblank)
9379 {
9380 	uint32_t i;
9381 	uint64_t timestamp_ns;
9382 	struct drm_plane *plane;
9383 	struct drm_plane_state *old_plane_state, *new_plane_state;
9384 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9385 	struct drm_crtc_state *new_pcrtc_state =
9386 			drm_atomic_get_new_crtc_state(state, pcrtc);
9387 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9388 	struct dm_crtc_state *dm_old_crtc_state =
9389 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9390 	int planes_count = 0, vpos, hpos;
9391 	long r;
9392 	unsigned long flags;
9393 	struct amdgpu_bo *abo;
9394 	uint32_t target_vblank, last_flip_vblank;
9395 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9396 	bool pflip_present = false;
9397 	struct {
9398 		struct dc_surface_update surface_updates[MAX_SURFACES];
9399 		struct dc_plane_info plane_infos[MAX_SURFACES];
9400 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
9401 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9402 		struct dc_stream_update stream_update;
9403 	} *bundle;
9404 
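	/* Collect all per-plane updates into a single heap-allocated bundle. */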
9405 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9406 
9407 	if (!bundle) {
9408 		dm_error("Failed to allocate update bundle\n");
9409 		goto cleanup;
9410 	}
9411 
9412 	/*
9413 	 * Disable the cursor first if we're disabling all the planes.
9414 	 * It'll remain on the screen after the planes are re-enabled
9415 	 * if we don't.
9416 	 */
9417 	if (acrtc_state->active_planes == 0)
9418 		amdgpu_dm_commit_cursors(state);
9419 
9420 	/* update planes when needed */
9421 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9422 		struct drm_crtc *crtc = new_plane_state->crtc;
9423 		struct drm_crtc_state *new_crtc_state;
9424 		struct drm_framebuffer *fb = new_plane_state->fb;
9425 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9426 		bool plane_needs_flip;
9427 		struct dc_plane_state *dc_plane;
9428 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9429 
9430 		/* Cursor plane is handled after stream updates */
9431 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
9432 			continue;
9433 
9434 		if (!fb || !crtc || pcrtc != crtc)
9435 			continue;
9436 
9437 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9438 		if (!new_crtc_state->active)
9439 			continue;
9440 
9441 		dc_plane = dm_new_plane_state->dc_state;
9442 
9443 		bundle->surface_updates[planes_count].surface = dc_plane;
9444 		if (new_pcrtc_state->color_mgmt_changed) {
9445 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9446 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9447 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9448 		}
9449 
9450 		fill_dc_scaling_info(dm->adev, new_plane_state,
9451 				     &bundle->scaling_infos[planes_count]);
9452 
9453 		bundle->surface_updates[planes_count].scaling_info =
9454 			&bundle->scaling_infos[planes_count];
9455 
9456 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9457 
9458 		pflip_present = pflip_present || plane_needs_flip;
9459 
9460 		if (!plane_needs_flip) {
9461 			planes_count += 1;
9462 			continue;
9463 		}
9464 
9465 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9466 
9467 		/*
9468 		 * Wait for all fences on this FB. Do limited wait to avoid
9469 		 * deadlock during GPU reset when this fence will not signal
9470 		 * but we hold reservation lock for the BO.
9471 		 */
9472 		r = dma_resv_wait_timeout(abo->tbo.base.resv,
9473 					  DMA_RESV_USAGE_WRITE, false,
9474 					  msecs_to_jiffies(5000));
9475 		if (unlikely(r <= 0))
9476 			DRM_ERROR("Waiting for fences timed out!");
9477 
9478 		fill_dc_plane_info_and_addr(
9479 			dm->adev, new_plane_state,
9480 			afb->tiling_flags,
9481 			&bundle->plane_infos[planes_count],
9482 			&bundle->flip_addrs[planes_count].address,
9483 			afb->tmz_surface, false);
9484 
9485 		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9486 				 new_plane_state->plane->index,
9487 				 bundle->plane_infos[planes_count].dcc.enable);
9488 
9489 		bundle->surface_updates[planes_count].plane_info =
9490 			&bundle->plane_infos[planes_count];
9491 
9492 		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
9493 				    new_crtc_state,
9494 				    &bundle->flip_addrs[planes_count]);
9495 
9496 		/*
9497 		 * Only allow immediate flips for fast updates that don't
9498 		 * change FB pitch, DCC state, rotation or mirroring.
9499 		 */
9500 		bundle->flip_addrs[planes_count].flip_immediate =
9501 			crtc->state->async_flip &&
9502 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9503 
9504 		timestamp_ns = ktime_get_ns();
9505 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9506 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9507 		bundle->surface_updates[planes_count].surface = dc_plane;
9508 
9509 		if (!bundle->surface_updates[planes_count].surface) {
9510 			DRM_ERROR("No surface for CRTC: id=%d\n",
9511 					acrtc_attach->crtc_id);
9512 			continue;
9513 		}
9514 
9515 		if (plane == pcrtc->primary)
9516 			update_freesync_state_on_stream(
9517 				dm,
9518 				acrtc_state,
9519 				acrtc_state->stream,
9520 				dc_plane,
9521 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9522 
9523 		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9524 				 __func__,
9525 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9526 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9527 
9528 		planes_count += 1;
9529 
9530 	}
9531 
9532 	if (pflip_present) {
9533 		if (!vrr_active) {
9534 			/* Use old throttling in non-vrr fixed refresh rate mode
9535 			 * to keep flip scheduling based on target vblank counts
9536 			 * working in a backwards compatible way, e.g., for
9537 			 * clients using the GLX_OML_sync_control extension or
9538 			 * DRI3/Present extension with defined target_msc.
9539 			 */
9540 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9541 		} else {
9543 			/* For variable refresh rate mode only:
9544 			 * Get vblank of last completed flip to avoid > 1 vrr
9545 			 * flips per video frame by use of throttling, but allow
9546 			 * flip programming anywhere in the possibly large
9547 			 * variable vrr vblank interval for fine-grained flip
9548 			 * timing control and more opportunity to avoid stutter
9549 			 * on late submission of flips.
9550 			 */
9551 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9552 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9553 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9554 		}
9555 
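		/*
		 * wait_for_vblank is a bool, so this targets either the vblank
		 * of the last completed flip or the one after it.
		 */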
9556 		target_vblank = last_flip_vblank + wait_for_vblank;
9557 
9558 		/*
9559 		 * Wait until we're out of the vertical blank period before the one
9560 		 * targeted by the flip
9561 		 */
9562 		while ((acrtc_attach->enabled &&
9563 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9564 							    0, &vpos, &hpos, NULL,
9565 							    NULL, &pcrtc->hwmode)
9566 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9567 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9568 			(int)(target_vblank -
9569 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9570 			usleep_range(1000, 1100);
9571 		}
9572 
9573 		/*
9574 		 * Prepare the flip event for the pageflip interrupt to handle.
9575 		 *
9576 		 * This only works in the case where we've already turned on the
9577 		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
9578 		 * from 0 -> n planes we have to skip a hardware-generated event
9579 		 * and rely on sending it from software.
9580 		 */
9581 		if (acrtc_attach->base.state->event &&
9582 		    acrtc_state->active_planes > 0 &&
9583 		    !acrtc_state->force_dpms_off) {
9584 			drm_crtc_vblank_get(pcrtc);
9585 
9586 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9587 
9588 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9589 			prepare_flip_isr(acrtc_attach);
9590 
9591 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9592 		}
9593 
9594 		if (acrtc_state->stream) {
9595 			if (acrtc_state->freesync_vrr_info_changed)
9596 				bundle->stream_update.vrr_infopacket =
9597 					&acrtc_state->stream->vrr_infopacket;
9598 		}
9599 	}
9600 
9601 	/* Update the planes if changed or disable if we don't have any. */
9602 	if ((planes_count || acrtc_state->active_planes == 0) &&
9603 		acrtc_state->stream) {
9604 		/*
9605 		 * If PSR or idle optimizations are enabled then flush out
9606 		 * any pending work before hardware programming.
9607 		 */
9608 		if (dm->vblank_control_workqueue)
9609 			flush_workqueue(dm->vblank_control_workqueue);
9610 
9611 		bundle->stream_update.stream = acrtc_state->stream;
9612 		if (new_pcrtc_state->mode_changed) {
9613 			bundle->stream_update.src = acrtc_state->stream->src;
9614 			bundle->stream_update.dst = acrtc_state->stream->dst;
9615 		}
9616 
9617 		if (new_pcrtc_state->color_mgmt_changed) {
9618 			/*
9619 			 * TODO: This isn't fully correct since we've actually
9620 			 * already modified the stream in place.
9621 			 */
9622 			bundle->stream_update.gamut_remap =
9623 				&acrtc_state->stream->gamut_remap_matrix;
9624 			bundle->stream_update.output_csc_transform =
9625 				&acrtc_state->stream->csc_color_matrix;
9626 			bundle->stream_update.out_transfer_func =
9627 				acrtc_state->stream->out_transfer_func;
9628 		}
9629 
9630 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9631 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9632 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9633 
9634 		/*
9635 		 * If FreeSync state on the stream has changed then we need to
9636 		 * re-adjust the min/max bounds now that DC doesn't handle this
9637 		 * as part of commit.
9638 		 */
9639 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9640 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9641 			dc_stream_adjust_vmin_vmax(
9642 				dm->dc, acrtc_state->stream,
9643 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9644 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9645 		}
9646 		mutex_lock(&dm->dc_lock);
9647 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9648 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9649 			amdgpu_dm_psr_disable(acrtc_state->stream);
9650 
9651 		dc_commit_updates_for_stream(dm->dc,
9652 						     bundle->surface_updates,
9653 						     planes_count,
9654 						     acrtc_state->stream,
9655 						     &bundle->stream_update,
9656 						     dc_state);
9657 
9658 		/**
9659 		 * Enable or disable the interrupts on the backend.
9660 		 *
9661 		 * Most pipes are put into power gating when unused.
9662 		 *
9663 		 * When power gating is enabled on a pipe we lose the
9664 		 * interrupt enablement state when power gating is disabled.
9665 		 *
9666 		 * So we need to update the IRQ control state in hardware
9667 		 * whenever the pipe turns on (since it could be previously
9668 		 * power gated) or off (since some pipes can't be power gated
9669 		 * on some ASICs).
9670 		 */
9671 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9672 			dm_update_pflip_irq_state(drm_to_adev(dev),
9673 						  acrtc_attach);
9674 
9675 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9676 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9677 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9678 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9679 
9680 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9681 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9682 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9683 			struct amdgpu_dm_connector *aconn =
9684 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9685 
9686 			if (aconn->psr_skip_count > 0)
9687 				aconn->psr_skip_count--;
9688 
9689 			/* Allow PSR when skip count is 0. */
9690 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9691 
9692 			/*
9693 			 * If sink supports PSR SU, there is no need to rely on
9694 			 * a vblank event disable request to enable PSR. PSR SU
9695 			 * can be enabled immediately once OS demonstrates an
9696 			 * adequate number of fast atomic commits to notify KMD
9697 			 * of update events. See `vblank_control_worker()`.
9698 			 */
9699 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
9700 			    acrtc_attach->dm_irq_params.allow_psr_entry &&
9701 			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
9702 				amdgpu_dm_psr_enable(acrtc_state->stream);
9703 		} else {
9704 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9705 		}
9706 
9707 		mutex_unlock(&dm->dc_lock);
9708 	}
9709 
9710 	/*
9711 	 * Update cursor state *after* programming all the planes.
9712 	 * This avoids redundant programming in the case where we're going
9713 	 * to be disabling a single plane - those pipes are being disabled.
9714 	 */
9715 	if (acrtc_state->active_planes)
9716 		amdgpu_dm_commit_cursors(state);
9717 
9718 cleanup:
9719 	kfree(bundle);
9720 }
9721 
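/*
 * Notify the audio component about ELD changes: first handle removals for
 * connectors whose CRTC changed, then additions for connectors whose new
 * CRTC undergoes a modeset and has an active stream.
 */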
9722 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9723 				   struct drm_atomic_state *state)
9724 {
9725 	struct amdgpu_device *adev = drm_to_adev(dev);
9726 	struct amdgpu_dm_connector *aconnector;
9727 	struct drm_connector *connector;
9728 	struct drm_connector_state *old_con_state, *new_con_state;
9729 	struct drm_crtc_state *new_crtc_state;
9730 	struct dm_crtc_state *new_dm_crtc_state;
9731 	const struct dc_stream_status *status;
9732 	int i, inst;
9733 
9734 	/* Notify device removals. */
9735 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9736 		if (old_con_state->crtc != new_con_state->crtc) {
9737 			/* CRTC changes require notification. */
9738 			goto notify;
9739 		}
9740 
9741 		if (!new_con_state->crtc)
9742 			continue;
9743 
9744 		new_crtc_state = drm_atomic_get_new_crtc_state(
9745 			state, new_con_state->crtc);
9746 
9747 		if (!new_crtc_state)
9748 			continue;
9749 
9750 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9751 			continue;
9752 
9753 	notify:
9754 		aconnector = to_amdgpu_dm_connector(connector);
9755 
9756 		mutex_lock(&adev->dm.audio_lock);
9757 		inst = aconnector->audio_inst;
9758 		aconnector->audio_inst = -1;
9759 		mutex_unlock(&adev->dm.audio_lock);
9760 
9761 		amdgpu_dm_audio_eld_notify(adev, inst);
9762 	}
9763 
9764 	/* Notify audio device additions. */
9765 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9766 		if (!new_con_state->crtc)
9767 			continue;
9768 
9769 		new_crtc_state = drm_atomic_get_new_crtc_state(
9770 			state, new_con_state->crtc);
9771 
9772 		if (!new_crtc_state)
9773 			continue;
9774 
9775 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9776 			continue;
9777 
9778 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9779 		if (!new_dm_crtc_state->stream)
9780 			continue;
9781 
9782 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9783 		if (!status)
9784 			continue;
9785 
9786 		aconnector = to_amdgpu_dm_connector(connector);
9787 
9788 		mutex_lock(&adev->dm.audio_lock);
9789 		inst = status->audio_inst;
9790 		aconnector->audio_inst = inst;
9791 		mutex_unlock(&adev->dm.audio_lock);
9792 
9793 		amdgpu_dm_audio_eld_notify(adev, inst);
9794 	}
9795 }
9796 
9797 /*
9798  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9799  * @crtc_state: the DRM CRTC state
9800  * @stream_state: the DC stream state.
9801  *
9802  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9803  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9804  */
9805 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9806 						struct dc_stream_state *stream_state)
9807 {
9808 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9809 }
9810 
9811 /**
9812  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9813  * @state: The atomic state to commit
9814  *
9815  * This will tell DC to commit the constructed DC state from atomic_check,
9816  * programming the hardware. Any failure here implies a hardware failure, since
9817  * atomic check should have filtered anything non-kosher.
9818  */
9819 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9820 {
9821 	struct drm_device *dev = state->dev;
9822 	struct amdgpu_device *adev = drm_to_adev(dev);
9823 	struct amdgpu_display_manager *dm = &adev->dm;
9824 	struct dm_atomic_state *dm_state;
9825 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9826 	uint32_t i, j;
9827 	struct drm_crtc *crtc;
9828 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9829 	unsigned long flags;
9830 	bool wait_for_vblank = true;
9831 	struct drm_connector *connector;
9832 	struct drm_connector_state *old_con_state, *new_con_state;
9833 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9834 	int crtc_disable_count = 0;
9835 	bool mode_set_reset_required = false;
9836 
9837 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9838 
9839 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9840 
9841 	dm_state = dm_atomic_get_new_state(state);
9842 	if (dm_state && dm_state->context) {
9843 		dc_state = dm_state->context;
9844 	} else {
9845 		/* No state changes, retain current state. */
9846 		dc_state_temp = dc_create_state(dm->dc);
9847 		ASSERT(dc_state_temp);
9848 		dc_state = dc_state_temp;
9849 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9850 	}
9851 
9852 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9853 				       new_crtc_state, i) {
9854 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9855 
9856 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9857 
9858 		if (old_crtc_state->active &&
9859 		    (!new_crtc_state->active ||
9860 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9861 			manage_dm_interrupts(adev, acrtc, false);
9862 			dc_stream_release(dm_old_crtc_state->stream);
9863 		}
9864 	}
9865 
9866 	drm_atomic_helper_calc_timestamping_constants(state);
9867 
9868 	/* update changed items */
9869 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9870 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9871 
9872 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9873 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9874 
9875 		drm_dbg_state(state->dev,
9876 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9877 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
9878 			"connectors_changed:%d\n",
9879 			acrtc->crtc_id,
9880 			new_crtc_state->enable,
9881 			new_crtc_state->active,
9882 			new_crtc_state->planes_changed,
9883 			new_crtc_state->mode_changed,
9884 			new_crtc_state->active_changed,
9885 			new_crtc_state->connectors_changed);
9886 
9887 		/* Disable cursor if disabling crtc */
9888 		if (old_crtc_state->active && !new_crtc_state->active) {
9889 			struct dc_cursor_position position;
9890 
9891 			memset(&position, 0, sizeof(position));
9892 			mutex_lock(&dm->dc_lock);
9893 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9894 			mutex_unlock(&dm->dc_lock);
9895 		}
9896 
9897 		/* Copy all transient state flags into dc state */
9898 		if (dm_new_crtc_state->stream) {
9899 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9900 							    dm_new_crtc_state->stream);
9901 		}
9902 
9903 		/* handles headless hotplug case, updating new_state and
9904 		 * aconnector as needed
9905 		 */
9906 
9907 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9908 
9909 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9910 
9911 			if (!dm_new_crtc_state->stream) {
9912 				/*
9913 				 * This could happen because of issues with
9914 				 * delivery of userspace notifications.
9915 				 * In that case userspace tries to set a mode on
9916 				 * a display which is in fact disconnected, and
9917 				 * dc_sink is NULL on the aconnector.
9918 				 * We expect a mode reset to come soon.
9919 				 *
9920 				 * This can also happen when an unplug happens
9921 				 * during the resume sequence.
9922 				 *
9923 				 * In this case, we want to pretend we still
9924 				 * have a sink to keep the pipe running so that
9925 				 * hw state is consistent with the sw state.
9926 				 */
9927 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9928 						__func__, acrtc->base.base.id);
9929 				continue;
9930 			}
9931 
9932 			if (dm_old_crtc_state->stream)
9933 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9934 
9935 			pm_runtime_get_noresume(dev->dev);
9936 
9937 			acrtc->enabled = true;
9938 			acrtc->hw_mode = new_crtc_state->mode;
9939 			crtc->hwmode = new_crtc_state->mode;
9940 			mode_set_reset_required = true;
9941 		} else if (modereset_required(new_crtc_state)) {
9942 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9943 			/* i.e. reset mode */
9944 			if (dm_old_crtc_state->stream)
9945 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9946 
9947 			mode_set_reset_required = true;
9948 		}
9949 	} /* for_each_crtc_in_state() */
9950 
9951 	if (dc_state) {
9952 		/* If there is a mode set or reset, disable eDP PSR. */
9953 		if (mode_set_reset_required) {
9954 			if (dm->vblank_control_workqueue)
9955 				flush_workqueue(dm->vblank_control_workqueue);
9956 
9957 			amdgpu_dm_psr_disable_all(dm);
9958 		}
9959 
9960 		dm_enable_per_frame_crtc_master_sync(dc_state);
9961 		mutex_lock(&dm->dc_lock);
9962 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9963 
9964 		/* Allow idle optimization when vblank count is 0 for display off */
9965 		if (dm->active_vblank_irq_count == 0)
9966 			dc_allow_idle_optimizations(dm->dc, true);
9967 		mutex_unlock(&dm->dc_lock);
9968 	}
9969 
9970 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9971 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9972 
9973 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9974 
9975 		if (dm_new_crtc_state->stream != NULL) {
9976 			const struct dc_stream_status *status =
9977 					dc_stream_get_status(dm_new_crtc_state->stream);
9978 
9979 			if (!status)
9980 				status = dc_stream_get_status_from_state(dc_state,
9981 									 dm_new_crtc_state->stream);
9982 			if (!status)
9983 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9984 			else
9985 				acrtc->otg_inst = status->primary_otg_inst;
9986 		}
9987 	}
9988 #ifdef CONFIG_DRM_AMD_DC_HDCP
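	/*
	 * If a connector loses its stream while content protection is still
	 * ENABLED, reset HDCP on that link and downgrade the property to
	 * DESIRED so it can be re-enabled once a stream exists again;
	 * otherwise propagate any content protection change to the HDCP
	 * workqueue.
	 */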
9989 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9990 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9991 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9992 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9993 
9994 		new_crtc_state = NULL;
9995 
9996 		if (acrtc)
9997 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9998 
9999 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10000 
10001 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
10002 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
10003 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
10004 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
10005 			dm_new_con_state->update_hdcp = true;
10006 			continue;
10007 		}
10008 
10009 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
10010 			hdcp_update_display(
10011 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
10012 				new_con_state->hdcp_content_type,
10013 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
10014 	}
10015 #endif
10016 
10017 	/* Handle connector state changes */
10018 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10019 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10020 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10021 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10022 		struct dc_surface_update dummy_updates[MAX_SURFACES];
10023 		struct dc_stream_update stream_update;
10024 		struct dc_info_packet hdr_packet;
10025 		struct dc_stream_status *status = NULL;
10026 		bool abm_changed, hdr_changed, scaling_changed;
10027 
10028 		memset(&dummy_updates, 0, sizeof(dummy_updates));
10029 		memset(&stream_update, 0, sizeof(stream_update));
10030 
10031 		if (acrtc) {
10032 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
10033 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
10034 		}
10035 
10036 		/* Skip any modesets/resets */
10037 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
10038 			continue;
10039 
10040 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10041 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10042 
10043 		scaling_changed = is_scaling_state_different(dm_new_con_state,
10044 							     dm_old_con_state);
10045 
10046 		abm_changed = dm_new_crtc_state->abm_level !=
10047 			      dm_old_crtc_state->abm_level;
10048 
10049 		hdr_changed =
10050 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
10051 
10052 		if (!scaling_changed && !abm_changed && !hdr_changed)
10053 			continue;
10054 
10055 		stream_update.stream = dm_new_crtc_state->stream;
10056 		if (scaling_changed) {
10057 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
10058 					dm_new_con_state, dm_new_crtc_state->stream);
10059 
10060 			stream_update.src = dm_new_crtc_state->stream->src;
10061 			stream_update.dst = dm_new_crtc_state->stream->dst;
10062 		}
10063 
10064 		if (abm_changed) {
10065 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
10066 
10067 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
10068 		}
10069 
10070 		if (hdr_changed) {
10071 			fill_hdr_info_packet(new_con_state, &hdr_packet);
10072 			stream_update.hdr_static_metadata = &hdr_packet;
10073 		}
10074 
10075 		status = dc_stream_get_status(dm_new_crtc_state->stream);
10076 
10077 		if (WARN_ON(!status))
10078 			continue;
10079 
10080 		WARN_ON(!status->plane_count);
10081 
10082 		/*
10083 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
10084 		 * Here we create an empty update on each plane.
10085 		 * To fix this, DC should permit updating only stream properties.
10086 		 */
10087 		for (j = 0; j < status->plane_count; j++)
10088 			dummy_updates[j].surface = status->plane_states[0];
10089 
10090 
10091 		mutex_lock(&dm->dc_lock);
10092 		dc_commit_updates_for_stream(dm->dc,
10093 						     dummy_updates,
10094 						     status->plane_count,
10095 						     dm_new_crtc_state->stream,
10096 						     &stream_update,
10097 						     dc_state);
10098 		mutex_unlock(&dm->dc_lock);
10099 	}
10100 
10101 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
10102 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
10103 				      new_crtc_state, i) {
10104 		if (old_crtc_state->active && !new_crtc_state->active)
10105 			crtc_disable_count++;
10106 
10107 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10108 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10109 
10110 		/* For freesync config update on crtc state and params for irq */
10111 		update_stream_irq_parameters(dm, dm_new_crtc_state);
10112 
10113 		/* Handle vrr on->off / off->on transitions */
10114 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
10115 						dm_new_crtc_state);
10116 	}
10117 
10118 	/**
10119 	 * Enable interrupts for CRTCs that are newly enabled or went through
10120 	 * a modeset. It was intentionally deferred until after the front end
10121 	 * state was modified to wait until the OTG was on and so the IRQ
10122 	 * handlers didn't access stale or invalid state.
10123 	 */
10124 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10125 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
10126 #ifdef CONFIG_DEBUG_FS
10127 		bool configure_crc = false;
10128 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
10129 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10130 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
10131 #endif
10132 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10133 		cur_crc_src = acrtc->dm_irq_params.crc_src;
10134 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10135 #endif
10136 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10137 
10138 		if (new_crtc_state->active &&
10139 		    (!old_crtc_state->active ||
10140 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
10141 			dc_stream_retain(dm_new_crtc_state->stream);
10142 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
10143 			manage_dm_interrupts(adev, acrtc, true);
10144 
10145 #ifdef CONFIG_DEBUG_FS
10146 			/**
10147 			 * Frontend may have changed so reapply the CRC capture
10148 			 * settings for the stream.
10149 			 */
10150 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10151 
10152 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
10153 				configure_crc = true;
10154 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
10155 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
10156 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10157 					acrtc->dm_irq_params.crc_window.update_win = true;
10158 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
10159 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
10160 					crc_rd_wrk->crtc = crtc;
10161 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
10162 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10163 				}
10164 #endif
10165 			}
10166 
10167 			if (configure_crc)
10168 				if (amdgpu_dm_crtc_configure_crc_source(
10169 					crtc, dm_new_crtc_state, cur_crc_src))
10170 					DRM_DEBUG_DRIVER("Failed to configure crc source");
10171 #endif
10172 		}
10173 	}
10174 
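	/*
	 * If any CRTC requested an async flip, do not stall this commit on
	 * vblank; the flips complete asynchronously.
	 */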
10175 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
10176 		if (new_crtc_state->async_flip)
10177 			wait_for_vblank = false;
10178 
10179 	/* update planes when needed per crtc*/
10180 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
10181 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10182 
10183 		if (dm_new_crtc_state->stream)
10184 			amdgpu_dm_commit_planes(state, dc_state, dev,
10185 						dm, crtc, wait_for_vblank);
10186 	}
10187 
10188 	/* Update audio instances for each connector. */
10189 	amdgpu_dm_commit_audio(dev, state);
10190 
10191 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
10192 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
10193 	/* restore the backlight level */
10194 	for (i = 0; i < dm->num_of_edps; i++) {
10195 		if (dm->backlight_dev[i] &&
10196 		    (dm->actual_brightness[i] != dm->brightness[i]))
10197 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
10198 	}
10199 #endif
10200 	/*
10201 	 * Send vblank events for any CRTC events not handled in the flip path
10202 	 * and mark them consumed for drm_atomic_helper_commit_hw_done().
10203 	 */
10204 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
10205 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10206 
10207 		if (new_crtc_state->event)
10208 			drm_send_event_locked(dev, &new_crtc_state->event->base);
10209 
10210 		new_crtc_state->event = NULL;
10211 	}
10212 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
10213 
10214 	/* Signal HW programming completion */
10215 	drm_atomic_helper_commit_hw_done(state);
10216 
10217 	if (wait_for_vblank)
10218 		drm_atomic_helper_wait_for_flip_done(dev, state);
10219 
10220 	drm_atomic_helper_cleanup_planes(dev, state);
10221 
10222 	/* return the stolen vga memory back to VRAM */
10223 	if (!adev->mman.keep_stolen_vga_memory)
10224 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
10225 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
10226 
10227 	/*
10228 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
10229 	 * so we can put the GPU into runtime suspend if we're not driving any
10230 	 * displays anymore
10231 	 */
10232 	for (i = 0; i < crtc_disable_count; i++)
10233 		pm_runtime_put_autosuspend(dev->dev);
10234 	pm_runtime_mark_last_busy(dev->dev);
10235 
10236 	if (dc_state_temp)
10237 		dc_release_state(dc_state_temp);
10238 }
10239 
10240 
10241 static int dm_force_atomic_commit(struct drm_connector *connector)
10242 {
10243 	int ret = 0;
10244 	struct drm_device *ddev = connector->dev;
10245 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
10246 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10247 	struct drm_plane *plane = disconnected_acrtc->base.primary;
10248 	struct drm_connector_state *conn_state;
10249 	struct drm_crtc_state *crtc_state;
10250 	struct drm_plane_state *plane_state;
10251 
10252 	if (!state)
10253 		return -ENOMEM;
10254 
10255 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
10256 
10257 	/* Construct an atomic state to restore previous display setting */
10258 	/* Construct an atomic state to restore the previous display settings */
10259 	/*
10260 	 * Attach connectors to drm_atomic_state
10261 	 */
10262 	conn_state = drm_atomic_get_connector_state(state, connector);
10263 
10264 	ret = PTR_ERR_OR_ZERO(conn_state);
10265 	if (ret)
10266 		goto out;
10267 
10268 	/* Attach crtc to drm_atomic_state*/
10269 	/* Attach crtc to drm_atomic_state */
10270 
10271 	ret = PTR_ERR_OR_ZERO(crtc_state);
10272 	if (ret)
10273 		goto out;
10274 
10275 	/* force a restore */
10276 	crtc_state->mode_changed = true;
10277 
10278 	/* Attach plane to drm_atomic_state */
10279 	plane_state = drm_atomic_get_plane_state(state, plane);
10280 
10281 	ret = PTR_ERR_OR_ZERO(plane_state);
10282 	if (ret)
10283 		goto out;
10284 
10285 	/* Call commit internally with the state we just constructed */
10286 	ret = drm_atomic_commit(state);
10287 
10288 out:
10289 	drm_atomic_state_put(state);
10290 	if (ret)
10291 		DRM_ERROR("Restoring old state failed with %i\n", ret);
10292 
10293 	return ret;
10294 }
10295 
10296 /*
10297  * This function handles all cases in which no set mode request comes upon
10298  * hotplug. This includes when a display is unplugged and then plugged back
10299  * into the same port, and when running without usermode desktop manager support.
10300  */
10301 void dm_restore_drm_connector_state(struct drm_device *dev,
10302 				    struct drm_connector *connector)
10303 {
10304 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10305 	struct amdgpu_crtc *disconnected_acrtc;
10306 	struct dm_crtc_state *acrtc_state;
10307 
10308 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10309 		return;
10310 
10311 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10312 	if (!disconnected_acrtc)
10313 		return;
10314 
10315 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10316 	if (!acrtc_state->stream)
10317 		return;
10318 
10319 	/*
10320 	 * If the previous sink has not been released and differs from the
10321 	 * current one, we deduce that we cannot rely on a usermode call to
10322 	 * turn on the display, so we do it here.
10323 	 */
10324 	if (acrtc_state->stream->sink != aconnector->dc_sink)
10325 		dm_force_atomic_commit(&aconnector->base);
10326 }
10327 
10328 /*
10329  * Grabs all modesetting locks to serialize against any blocking commits and
10330  * waits for completion of all non-blocking commits.
10331  */
10332 static int do_aquire_global_lock(struct drm_device *dev,
10333 				 struct drm_atomic_state *state)
10334 {
10335 	struct drm_crtc *crtc;
10336 	struct drm_crtc_commit *commit;
10337 	long ret;
10338 
10339 	/*
10340 	 * Adding all modeset locks to acquire_ctx ensures that when the
10341 	 * framework releases it, the extra locks we take here will be
10342 	 * released too.
10343 	 */
10344 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10345 	if (ret)
10346 		return ret;
10347 
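	/*
	 * For each CRTC, grab a reference to its most recent commit (if any)
	 * under commit_lock so it cannot be freed while we wait on it below.
	 */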
10348 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10349 		spin_lock(&crtc->commit_lock);
10350 		commit = list_first_entry_or_null(&crtc->commit_list,
10351 				struct drm_crtc_commit, commit_entry);
10352 		if (commit)
10353 			drm_crtc_commit_get(commit);
10354 		spin_unlock(&crtc->commit_lock);
10355 
10356 		if (!commit)
10357 			continue;
10358 
10359 		/*
10360 		 * Make sure all pending HW programming has completed and all
10361 		 * page flips are done.
10362 		 */
10363 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10364 
10365 		if (ret > 0)
10366 			ret = wait_for_completion_interruptible_timeout(
10367 					&commit->flip_done, 10*HZ);
10368 
10369 		if (ret == 0)
10370 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10371 				  "timed out\n", crtc->base.id, crtc->name);
10372 
10373 		drm_crtc_commit_put(commit);
10374 	}
10375 
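	/* A positive return just means the waits completed in time; report 0. */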
10376 	return ret < 0 ? ret : 0;
10377 }
10378 
10379 static void get_freesync_config_for_crtc(
10380 	struct dm_crtc_state *new_crtc_state,
10381 	struct dm_connector_state *new_con_state)
10382 {
10383 	struct mod_freesync_config config = {0};
10384 	struct amdgpu_dm_connector *aconnector =
10385 			to_amdgpu_dm_connector(new_con_state->base.connector);
10386 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
10387 	int vrefresh = drm_mode_vrefresh(mode);
10388 	bool fs_vid_mode = false;
10389 
10390 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
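	/*
	 * VRR is only supported when the sink reports FreeSync capability and
	 * the mode's nominal refresh rate lies within the sink's VRR range.
	 */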
10391 					vrefresh >= aconnector->min_vfreq &&
10392 					vrefresh <= aconnector->max_vfreq;
10393 
10394 	if (new_crtc_state->vrr_supported) {
10395 		new_crtc_state->stream->ignore_msa_timing_param = true;
10396 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10397 
10398 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10399 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10400 		config.vsif_supported = true;
10401 		config.btr = true;
10402 
10403 		if (fs_vid_mode) {
10404 			config.state = VRR_STATE_ACTIVE_FIXED;
10405 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10406 			goto out;
10407 		} else if (new_crtc_state->base.vrr_enabled) {
10408 			config.state = VRR_STATE_ACTIVE_VARIABLE;
10409 		} else {
10410 			config.state = VRR_STATE_INACTIVE;
10411 		}
10412 	}
10413 out:
10414 	new_crtc_state->freesync_config = config;
10415 }
10416 
10417 static void reset_freesync_config_for_crtc(
10418 	struct dm_crtc_state *new_crtc_state)
10419 {
10420 	new_crtc_state->vrr_supported = false;
10421 
10422 	memset(&new_crtc_state->vrr_infopacket, 0,
10423 	       sizeof(new_crtc_state->vrr_infopacket));
10424 }
10425 
10426 static bool
10427 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10428 				 struct drm_crtc_state *new_crtc_state)
10429 {
10430 	const struct drm_display_mode *old_mode, *new_mode;
10431 
10432 	if (!old_crtc_state || !new_crtc_state)
10433 		return false;
10434 
10435 	old_mode = &old_crtc_state->mode;
10436 	new_mode = &new_crtc_state->mode;
10437 
10438 	if (old_mode->clock       == new_mode->clock &&
10439 	    old_mode->hdisplay    == new_mode->hdisplay &&
10440 	    old_mode->vdisplay    == new_mode->vdisplay &&
10441 	    old_mode->htotal      == new_mode->htotal &&
10442 	    old_mode->vtotal      != new_mode->vtotal &&
10443 	    old_mode->hsync_start == new_mode->hsync_start &&
10444 	    old_mode->vsync_start != new_mode->vsync_start &&
10445 	    old_mode->hsync_end   == new_mode->hsync_end &&
10446 	    old_mode->vsync_end   != new_mode->vsync_end &&
10447 	    old_mode->hskew       == new_mode->hskew &&
10448 	    old_mode->vscan       == new_mode->vscan &&
10449 	    (old_mode->vsync_end - old_mode->vsync_start) ==
10450 	    (new_mode->vsync_end - new_mode->vsync_start))
10451 		return true;
10452 
10453 	return false;
10454 }
10455 
10456 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10457 	uint64_t num, den, res;
10458 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10459 
10460 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10461 
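	/*
	 * mode.clock is in kHz, so clock * 1000 is the pixel clock in Hz and
	 * the extra * 1000000 scales the refresh rate to uHz:
	 * refresh_uhz = pixel_clock_hz * 1000000 / (htotal * vtotal)
	 */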
10462 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10463 	den = (unsigned long long)new_crtc_state->mode.htotal *
10464 	      (unsigned long long)new_crtc_state->mode.vtotal;
10465 
10466 	res = div_u64(num, den);
10467 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10468 }
10469 
10470 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10471 			 struct drm_atomic_state *state,
10472 			 struct drm_crtc *crtc,
10473 			 struct drm_crtc_state *old_crtc_state,
10474 			 struct drm_crtc_state *new_crtc_state,
10475 			 bool enable,
10476 			 bool *lock_and_validation_needed)
10477 {
10478 	struct dm_atomic_state *dm_state = NULL;
10479 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10480 	struct dc_stream_state *new_stream;
10481 	int ret = 0;
10482 
10483 	/*
10484 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10485 	 * update changed items
10486 	 */
10487 	struct amdgpu_crtc *acrtc = NULL;
10488 	struct amdgpu_dm_connector *aconnector = NULL;
10489 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10490 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10491 
10492 	new_stream = NULL;
10493 
10494 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10495 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10496 	acrtc = to_amdgpu_crtc(crtc);
10497 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10498 
10499 	/* TODO This hack should go away */
10500 	if (aconnector && enable) {
10501 		/* Make sure fake sink is created in plug-in scenario */
10502 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10503 							    &aconnector->base);
10504 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10505 							    &aconnector->base);
10506 
10507 		if (IS_ERR(drm_new_conn_state)) {
10508 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10509 			goto fail;
10510 		}
10511 
10512 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10513 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10514 
10515 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10516 			goto skip_modeset;
10517 
10518 		new_stream = create_validate_stream_for_sink(aconnector,
10519 							     &new_crtc_state->mode,
10520 							     dm_new_conn_state,
10521 							     dm_old_crtc_state->stream);
10522 
10523 		/*
10524 		 * We can have no stream on ACTION_SET if a display
10525 		 * was disconnected during S3; in that case it is not an
10526 		 * error. The OS will be updated after detection and
10527 		 * will do the right thing on the next atomic commit.
10528 		 */
10529 
10530 		if (!new_stream) {
10531 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10532 					__func__, acrtc->base.base.id);
10533 			ret = -ENOMEM;
10534 			goto fail;
10535 		}
10536 
10537 		/*
10538 		 * TODO: Check VSDB bits to decide whether this should
10539 		 * be enabled or not.
10540 		 */
10541 		new_stream->triggered_crtc_reset.enabled =
10542 			dm->force_timing_sync;
10543 
10544 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10545 
10546 		ret = fill_hdr_info_packet(drm_new_conn_state,
10547 					   &new_stream->hdr_static_metadata);
10548 		if (ret)
10549 			goto fail;
10550 
10551 		/*
10552 		 * If we already removed the old stream from the context
10553 		 * (and set the new stream to NULL) then we can't reuse
10554 		 * the old stream even if the stream and scaling are unchanged.
10555 		 * We would hit the BUG_ON and end up with a black screen.
10556 		 *
10557 		 * TODO: Refactor this function to allow this check to work
10558 		 * in all conditions.
10559 		 */
10560 		if (dm_new_crtc_state->stream &&
10561 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10562 			goto skip_modeset;
10563 
10564 		if (dm_new_crtc_state->stream &&
10565 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10566 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10567 			new_crtc_state->mode_changed = false;
10568 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10569 					 new_crtc_state->mode_changed);
10570 		}
10571 	}
10572 
10573 	/* mode_changed flag may get updated above, need to check again */
10574 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10575 		goto skip_modeset;
10576 
10577 	drm_dbg_state(state->dev,
10578 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10579 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10580 		"connectors_changed:%d\n",
10581 		acrtc->crtc_id,
10582 		new_crtc_state->enable,
10583 		new_crtc_state->active,
10584 		new_crtc_state->planes_changed,
10585 		new_crtc_state->mode_changed,
10586 		new_crtc_state->active_changed,
10587 		new_crtc_state->connectors_changed);
10588 
10589 	/* Remove stream for any changed/disabled CRTC */
10590 	if (!enable) {
10591 
10592 		if (!dm_old_crtc_state->stream)
10593 			goto skip_modeset;
10594 
10595 		if (dm_new_crtc_state->stream &&
10596 		    is_timing_unchanged_for_freesync(new_crtc_state,
10597 						     old_crtc_state)) {
10598 			new_crtc_state->mode_changed = false;
10599 			DRM_DEBUG_DRIVER(
10600 				"Mode change not required for front porch change, "
10601 				"setting mode_changed to %d",
10602 				new_crtc_state->mode_changed);
10603 
10604 			set_freesync_fixed_config(dm_new_crtc_state);
10605 
10606 			goto skip_modeset;
10607 		} else if (aconnector &&
10608 			   is_freesync_video_mode(&new_crtc_state->mode,
10609 						  aconnector)) {
10610 			struct drm_display_mode *high_mode;
10611 
10612 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10613 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10614 				set_freesync_fixed_config(dm_new_crtc_state);
10615 			}
10616 		}
10617 
10618 		ret = dm_atomic_get_state(state, &dm_state);
10619 		if (ret)
10620 			goto fail;
10621 
10622 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10623 				crtc->base.id);
10624 
10625 		/* i.e. reset mode */
10626 		if (dc_remove_stream_from_ctx(
10627 				dm->dc,
10628 				dm_state->context,
10629 				dm_old_crtc_state->stream) != DC_OK) {
10630 			ret = -EINVAL;
10631 			goto fail;
10632 		}
10633 
10634 		dc_stream_release(dm_old_crtc_state->stream);
10635 		dm_new_crtc_state->stream = NULL;
10636 
10637 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10638 
10639 		*lock_and_validation_needed = true;
10640 
10641 	} else {/* Add stream for any updated/enabled CRTC */
10642 		/*
10643 		 * Quick fix to prevent a NULL pointer on new_stream when newly
10644 		 * added MST connectors are not found in the existing crtc_state in chained mode.
10645 		 * TODO: dig out the root cause of this.
10646 		 */
10647 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10648 			goto skip_modeset;
10649 
10650 		if (modereset_required(new_crtc_state))
10651 			goto skip_modeset;
10652 
10653 		if (modeset_required(new_crtc_state, new_stream,
10654 				     dm_old_crtc_state->stream)) {
10655 
10656 			WARN_ON(dm_new_crtc_state->stream);
10657 
10658 			ret = dm_atomic_get_state(state, &dm_state);
10659 			if (ret)
10660 				goto fail;
10661 
10662 			dm_new_crtc_state->stream = new_stream;
10663 
10664 			dc_stream_retain(new_stream);
10665 
10666 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10667 					 crtc->base.id);
10668 
10669 			if (dc_add_stream_to_ctx(
10670 					dm->dc,
10671 					dm_state->context,
10672 					dm_new_crtc_state->stream) != DC_OK) {
10673 				ret = -EINVAL;
10674 				goto fail;
10675 			}
10676 
10677 			*lock_and_validation_needed = true;
10678 		}
10679 	}
10680 
10681 skip_modeset:
10682 	/* Release extra reference */
10683 	if (new_stream)
10684 		 dc_stream_release(new_stream);
10685 
10686 	/*
10687 	 * We want to do dc stream updates that do not require a
10688 	 * full modeset below.
10689 	 */
10690 	if (!(enable && aconnector && new_crtc_state->active))
10691 		return 0;
10692 	/*
10693 	 * Given the above conditions, the dc stream state cannot be NULL because:
10694 	 * 1. the CRTC is being enabled (its stream was just added to the dc
10695 	 *    context, or is already on the context),
10696 	 * 2. it has a valid connector attached, and
10697 	 * 3. it is currently active and enabled.
10698 	 * => The dc stream state currently exists.
10699 	 */
10700 	BUG_ON(dm_new_crtc_state->stream == NULL);
10701 
10702 	/* Scaling or underscan settings */
10703 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10704 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10705 		update_stream_scaling_settings(
10706 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10707 
10708 	/* ABM settings */
10709 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10710 
10711 	/*
10712 	 * Color management settings. We also update color properties
10713 	 * when a modeset is needed, to ensure it gets reprogrammed.
10714 	 */
10715 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10716 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10717 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10718 		if (ret)
10719 			goto fail;
10720 	}
10721 
10722 	/* Update Freesync settings. */
10723 	get_freesync_config_for_crtc(dm_new_crtc_state,
10724 				     dm_new_conn_state);
10725 
10726 	return ret;
10727 
10728 fail:
10729 	if (new_stream)
10730 		dc_stream_release(new_stream);
10731 	return ret;
10732 }
10733 
10734 static bool should_reset_plane(struct drm_atomic_state *state,
10735 			       struct drm_plane *plane,
10736 			       struct drm_plane_state *old_plane_state,
10737 			       struct drm_plane_state *new_plane_state)
10738 {
10739 	struct drm_plane *other;
10740 	struct drm_plane_state *old_other_state, *new_other_state;
10741 	struct drm_crtc_state *new_crtc_state;
10742 	int i;
10743 
10744 	/*
10745 	 * TODO: Remove this hack once the checks below are sufficient
10746 	 * to determine when we need to reset all the planes on
10747 	 * the stream.
10748 	 */
10749 	if (state->allow_modeset)
10750 		return true;
10751 
10752 	/* Exit early if we know that we're adding or removing the plane. */
10753 	if (old_plane_state->crtc != new_plane_state->crtc)
10754 		return true;
10755 
10756 	/* old crtc == new_crtc == NULL, plane not in context. */
10757 	if (!new_plane_state->crtc)
10758 		return false;
10759 
10760 	new_crtc_state =
10761 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10762 
10763 	if (!new_crtc_state)
10764 		return true;
10765 
10766 	/* CRTC Degamma changes currently require us to recreate planes. */
10767 	if (new_crtc_state->color_mgmt_changed)
10768 		return true;
10769 
10770 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10771 		return true;
10772 
10773 	/*
10774 	 * If there are any new primary or overlay planes being added or
10775 	 * removed then the z-order can potentially change. To ensure
10776 	 * correct z-order and pipe acquisition the current DC architecture
10777 	 * requires us to remove and recreate all existing planes.
10778 	 *
10779 	 * TODO: Come up with a more elegant solution for this.
10780 	 */
10781 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10782 		struct amdgpu_framebuffer *old_afb, *new_afb;
10783 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10784 			continue;
10785 
10786 		if (old_other_state->crtc != new_plane_state->crtc &&
10787 		    new_other_state->crtc != new_plane_state->crtc)
10788 			continue;
10789 
10790 		if (old_other_state->crtc != new_other_state->crtc)
10791 			return true;
10792 
10793 		/* Src/dst size and scaling updates. */
10794 		if (old_other_state->src_w != new_other_state->src_w ||
10795 		    old_other_state->src_h != new_other_state->src_h ||
10796 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10797 		    old_other_state->crtc_h != new_other_state->crtc_h)
10798 			return true;
10799 
10800 		/* Rotation / mirroring updates. */
10801 		if (old_other_state->rotation != new_other_state->rotation)
10802 			return true;
10803 
10804 		/* Blending updates. */
10805 		if (old_other_state->pixel_blend_mode !=
10806 		    new_other_state->pixel_blend_mode)
10807 			return true;
10808 
10809 		/* Alpha updates. */
10810 		if (old_other_state->alpha != new_other_state->alpha)
10811 			return true;
10812 
10813 		/* Colorspace changes. */
10814 		if (old_other_state->color_range != new_other_state->color_range ||
10815 		    old_other_state->color_encoding != new_other_state->color_encoding)
10816 			return true;
10817 
10818 		/* Framebuffer checks fall at the end. */
10819 		if (!old_other_state->fb || !new_other_state->fb)
10820 			continue;
10821 
10822 		/* Pixel format changes can require bandwidth updates. */
10823 		if (old_other_state->fb->format != new_other_state->fb->format)
10824 			return true;
10825 
10826 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10827 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10828 
10829 		/* Tiling and DCC changes also require bandwidth updates. */
10830 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10831 		    old_afb->base.modifier != new_afb->base.modifier)
10832 			return true;
10833 	}
10834 
10835 	return false;
10836 }
10837 
10838 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10839 			      struct drm_plane_state *new_plane_state,
10840 			      struct drm_framebuffer *fb)
10841 {
10842 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10843 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10844 	unsigned int pitch;
10845 	bool linear;
10846 
10847 	if (fb->width > new_acrtc->max_cursor_width ||
10848 	    fb->height > new_acrtc->max_cursor_height) {
10849 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10850 				 new_plane_state->fb->width,
10851 				 new_plane_state->fb->height);
10852 		return -EINVAL;
10853 	}
10854 	if (new_plane_state->src_w != fb->width << 16 ||
10855 	    new_plane_state->src_h != fb->height << 16) {
10856 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10857 		return -EINVAL;
10858 	}
10859 
10860 	/* Pitch in pixels */
10861 	pitch = fb->pitches[0] / fb->format->cpp[0];
10862 
10863 	if (fb->width != pitch) {
10864 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10865 				 fb->width, pitch);
10866 		return -EINVAL;
10867 	}
10868 
10869 	switch (pitch) {
10870 	case 64:
10871 	case 128:
10872 	case 256:
10873 		/* FB pitch is supported by cursor plane */
10874 		break;
10875 	default:
10876 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10877 		return -EINVAL;
10878 	}
10879 
10880 	/* Core DRM takes care of checking FB modifiers, so we only need to
10881 	 * check tiling flags when the FB doesn't have a modifier. */
10882 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10883 		if (adev->family < AMDGPU_FAMILY_AI) {
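			/*
			 * Pre-GFX9 (family < AI) parts describe tiling with
			 * ARRAY_MODE/MICRO_TILE_MODE flags; GFX9 and newer use
			 * a SWIZZLE_MODE field instead.
			 */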
10884 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10885 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10886 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10887 		} else {
10888 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10889 		}
10890 		if (!linear) {
10891 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10892 			return -EINVAL;
10893 		}
10894 	}
10895 
10896 	return 0;
10897 }
10898 
10899 static int dm_update_plane_state(struct dc *dc,
10900 				 struct drm_atomic_state *state,
10901 				 struct drm_plane *plane,
10902 				 struct drm_plane_state *old_plane_state,
10903 				 struct drm_plane_state *new_plane_state,
10904 				 bool enable,
10905 				 bool *lock_and_validation_needed)
10906 {
10907 
10908 	struct dm_atomic_state *dm_state = NULL;
10909 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10910 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10911 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10912 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10913 	struct amdgpu_crtc *new_acrtc;
10914 	bool needs_reset;
10915 	int ret = 0;
10916 
10917 
10918 	new_plane_crtc = new_plane_state->crtc;
10919 	old_plane_crtc = old_plane_state->crtc;
10920 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10921 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10922 
10923 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
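		/*
		 * Cursor planes are never added to the DC context here; only
		 * validate the position and framebuffer constraints, then
		 * bail out.
		 */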
10924 		if (!enable || !new_plane_crtc ||
10925 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10926 			return 0;
10927 
10928 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10929 
10930 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10931 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10932 			return -EINVAL;
10933 		}
10934 
10935 		if (new_plane_state->fb) {
10936 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10937 						 new_plane_state->fb);
10938 			if (ret)
10939 				return ret;
10940 		}
10941 
10942 		return 0;
10943 	}
10944 
10945 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10946 					 new_plane_state);
10947 
10948 	/* Remove any changed/removed planes */
10949 	if (!enable) {
10950 		if (!needs_reset)
10951 			return 0;
10952 
10953 		if (!old_plane_crtc)
10954 			return 0;
10955 
10956 		old_crtc_state = drm_atomic_get_old_crtc_state(
10957 				state, old_plane_crtc);
10958 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10959 
10960 		if (!dm_old_crtc_state->stream)
10961 			return 0;
10962 
10963 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10964 				plane->base.id, old_plane_crtc->base.id);
10965 
10966 		ret = dm_atomic_get_state(state, &dm_state);
10967 		if (ret)
10968 			return ret;
10969 
10970 		if (!dc_remove_plane_from_context(
10971 				dc,
10972 				dm_old_crtc_state->stream,
10973 				dm_old_plane_state->dc_state,
10974 				dm_state->context)) {
10975 
10976 			return -EINVAL;
10977 		}
10978 
10979 
10980 		dc_plane_state_release(dm_old_plane_state->dc_state);
10981 		dm_new_plane_state->dc_state = NULL;
10982 
10983 		*lock_and_validation_needed = true;
10984 
10985 	} else { /* Add new planes */
10986 		struct dc_plane_state *dc_new_plane_state;
10987 
10988 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10989 			return 0;
10990 
10991 		if (!new_plane_crtc)
10992 			return 0;
10993 
10994 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10995 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10996 
10997 		if (!dm_new_crtc_state->stream)
10998 			return 0;
10999 
11000 		if (!needs_reset)
11001 			return 0;
11002 
11003 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
11004 		if (ret)
11005 			return ret;
11006 
11007 		WARN_ON(dm_new_plane_state->dc_state);
11008 
11009 		dc_new_plane_state = dc_create_plane_state(dc);
11010 		if (!dc_new_plane_state)
11011 			return -ENOMEM;
11012 
11013 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
11014 				 plane->base.id, new_plane_crtc->base.id);
11015 
11016 		ret = fill_dc_plane_attributes(
11017 			drm_to_adev(new_plane_crtc->dev),
11018 			dc_new_plane_state,
11019 			new_plane_state,
11020 			new_crtc_state);
11021 		if (ret) {
11022 			dc_plane_state_release(dc_new_plane_state);
11023 			return ret;
11024 		}
11025 
11026 		ret = dm_atomic_get_state(state, &dm_state);
11027 		if (ret) {
11028 			dc_plane_state_release(dc_new_plane_state);
11029 			return ret;
11030 		}
11031 
11032 		/*
11033 		 * Any atomic check errors that occur after this will
11034 		 * not need a release. The plane state will be attached
11035 		 * to the stream, and therefore part of the atomic
11036 		 * state. It'll be released when the atomic state is
11037 		 * cleaned.
11038 		 */
11039 		if (!dc_add_plane_to_context(
11040 				dc,
11041 				dm_new_crtc_state->stream,
11042 				dc_new_plane_state,
11043 				dm_state->context)) {
11044 
11045 			dc_plane_state_release(dc_new_plane_state);
11046 			return -EINVAL;
11047 		}
11048 
11049 		dm_new_plane_state->dc_state = dc_new_plane_state;
11050 
11051 		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
11052 
11053 		/* Tell DC to do a full surface update every time there
11054 		 * is a plane change. Inefficient, but works for now.
11055 		 */
11056 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
11057 
11058 		*lock_and_validation_needed = true;
11059 	}
11060 
11061 
11062 	return ret;
11063 }
11064 
11065 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
11066 				       int *src_w, int *src_h)
11067 {
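	/*
	 * src_w/src_h are 16.16 fixed point; a 90/270 degree rotation swaps
	 * width and height in the scanout orientation.
	 */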
11068 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
11069 	case DRM_MODE_ROTATE_90:
11070 	case DRM_MODE_ROTATE_270:
11071 		*src_w = plane_state->src_h >> 16;
11072 		*src_h = plane_state->src_w >> 16;
11073 		break;
11074 	case DRM_MODE_ROTATE_0:
11075 	case DRM_MODE_ROTATE_180:
11076 	default:
11077 		*src_w = plane_state->src_w >> 16;
11078 		*src_h = plane_state->src_h >> 16;
11079 		break;
11080 	}
11081 }
11082 
11083 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
11084 				struct drm_crtc *crtc,
11085 				struct drm_crtc_state *new_crtc_state)
11086 {
11087 	struct drm_plane *cursor = crtc->cursor, *underlying;
11088 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
11089 	int i;
11090 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
11091 	int cursor_src_w, cursor_src_h;
11092 	int underlying_src_w, underlying_src_h;
11093 
11094 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
11095 	 * cursor per pipe, but it inherits the scaling and positioning from the
11096 	 * underlying pipe. Check that the cursor plane's scaling matches that of
11097 	 * the underlying planes. */
11098 
11099 	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
11100 	if (!new_cursor_state || !new_cursor_state->fb) {
11101 		return 0;
11102 	}
11103 
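	/*
	 * Scale factors are expressed in thousandths (crtc size * 1000 / src
	 * size) so cursor and underlying plane scaling can be compared using
	 * integer arithmetic.
	 */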
11104 	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
11105 	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
11106 	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
11107 
11108 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
11109 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
11110 		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
11111 			continue;
11112 
11113 		/* Ignore disabled planes */
11114 		if (!new_underlying_state->fb)
11115 			continue;
11116 
11117 		dm_get_oriented_plane_size(new_underlying_state,
11118 					   &underlying_src_w, &underlying_src_h);
11119 		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
11120 		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
11121 
11122 		if (cursor_scale_w != underlying_scale_w ||
11123 		    cursor_scale_h != underlying_scale_h) {
11124 			drm_dbg_atomic(crtc->dev,
11125 				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
11126 				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
11127 			return -EINVAL;
11128 		}
11129 
11130 		/* If this plane covers the whole CRTC, no need to check planes underneath */
11131 		if (new_underlying_state->crtc_x <= 0 &&
11132 		    new_underlying_state->crtc_y <= 0 &&
11133 		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
11134 		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
11135 			break;
11136 	}
11137 
11138 	return 0;
11139 }
11140 
11141 #if defined(CONFIG_DRM_AMD_DC_DCN)
11142 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
11143 {
11144 	struct drm_connector *connector;
11145 	struct drm_connector_state *conn_state, *old_conn_state;
11146 	struct amdgpu_dm_connector *aconnector = NULL;
11147 	int i;
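	/*
	 * Find the MST connector currently driving this CRTC, falling back to
	 * the old connector state when the new state has no CRTC attached.
	 */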
11148 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
11149 		if (!conn_state->crtc)
11150 			conn_state = old_conn_state;
11151 
11152 		if (conn_state->crtc != crtc)
11153 			continue;
11154 
11155 		aconnector = to_amdgpu_dm_connector(connector);
11156 		if (!aconnector->port || !aconnector->mst_port)
11157 			aconnector = NULL;
11158 		else
11159 			break;
11160 	}
11161 
11162 	if (!aconnector)
11163 		return 0;
11164 
11165 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
11166 }
11167 #endif
11168 
11169 /**
11170  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
11171  * @dev: The DRM device
11172  * @state: The atomic state to commit
11173  *
11174  * Validate that the given atomic state is programmable by DC into hardware.
11175  * This involves constructing a &struct dc_state reflecting the new hardware
11176  * state we wish to commit, then querying DC to see if it is programmable. It's
11177  * important not to modify the existing DC state. Otherwise, atomic_check
11178  * may unexpectedly commit hardware changes.
11179  *
11180  * When validating the DC state, it's important that the right locks are
11181  * acquired. For full updates case which removes/adds/updates streams on one
11182  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
11183  * that any such full update commit will wait for completion of any outstanding
11184  * flip using DRMs synchronization events.
11185  *
11186  * Note that DM adds the affected connectors for all CRTCs in state, when that
11187  * might not seem necessary. This is because DC stream creation requires the
11188  * DC sink, which is tied to the DRM connector state. Cleaning this up should
11189  * be possible but non-trivial - a possible TODO item.
11190  *
11191  * Return: 0 on success, or a negative error code if validation failed.
11192  */
11193 static int amdgpu_dm_atomic_check(struct drm_device *dev,
11194 				  struct drm_atomic_state *state)
11195 {
11196 	struct amdgpu_device *adev = drm_to_adev(dev);
11197 	struct dm_atomic_state *dm_state = NULL;
11198 	struct dc *dc = adev->dm.dc;
11199 	struct drm_connector *connector;
11200 	struct drm_connector_state *old_con_state, *new_con_state;
11201 	struct drm_crtc *crtc;
11202 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
11203 	struct drm_plane *plane;
11204 	struct drm_plane_state *old_plane_state, *new_plane_state;
11205 	enum dc_status status;
11206 	int ret, i;
11207 	bool lock_and_validation_needed = false;
11208 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
11209 #if defined(CONFIG_DRM_AMD_DC_DCN)
11210 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
11211 	struct drm_dp_mst_topology_state *mst_state;
11212 	struct drm_dp_mst_topology_mgr *mgr;
11213 #endif
11214 
11215 	trace_amdgpu_dm_atomic_check_begin(state);
11216 
11217 	ret = drm_atomic_helper_check_modeset(dev, state);
11218 	if (ret) {
11219 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
11220 		goto fail;
11221 	}
11222 
11223 	/* Check connector changes */
11224 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11225 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11226 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11227 
11228 		/* Skip connectors that are disabled or part of modeset already. */
11229 		if (!old_con_state->crtc && !new_con_state->crtc)
11230 			continue;
11231 
11232 		if (!new_con_state->crtc)
11233 			continue;
11234 
11235 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
11236 		if (IS_ERR(new_crtc_state)) {
11237 			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
11238 			ret = PTR_ERR(new_crtc_state);
11239 			goto fail;
11240 		}
11241 
11242 		if (dm_old_con_state->abm_level !=
11243 		    dm_new_con_state->abm_level)
11244 			new_crtc_state->connectors_changed = true;
11245 	}
11246 
11247 #if defined(CONFIG_DRM_AMD_DC_DCN)
11248 	if (dc_resource_is_dsc_encoding_supported(dc)) {
11249 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11250 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
11251 				ret = add_affected_mst_dsc_crtcs(state, crtc);
11252 				if (ret) {
11253 					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
11254 					goto fail;
11255 				}
11256 			}
11257 		}
11258 		if (!pre_validate_dsc(state, &dm_state, vars)) {
11259 			ret = -EINVAL;
11260 			goto fail;
11261 		}
11262 	}
11263 #endif
11264 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11265 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
11266 
11267 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
11268 		    !new_crtc_state->color_mgmt_changed &&
11269 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
11270 			dm_old_crtc_state->dsc_force_changed == false)
11271 			continue;
11272 
11273 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
11274 		if (ret) {
11275 			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11276 			goto fail;
11277 		}
11278 
11279 		if (!new_crtc_state->enable)
11280 			continue;
11281 
11282 		ret = drm_atomic_add_affected_connectors(state, crtc);
11283 		if (ret) {
11284 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11285 			goto fail;
11286 		}
11287 
11288 		ret = drm_atomic_add_affected_planes(state, crtc);
11289 		if (ret) {
11290 			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11291 			goto fail;
11292 		}
11293 
11294 		if (dm_old_crtc_state->dsc_force_changed)
11295 			new_crtc_state->mode_changed = true;
11296 	}
11297 
11298 	/*
11299 	 * Add all primary and overlay planes on the CRTC to the state
11300 	 * whenever a plane is enabled to maintain correct z-ordering
11301 	 * and to enable fast surface updates.
11302 	 */
11303 	drm_for_each_crtc(crtc, dev) {
11304 		bool modified = false;
11305 
11306 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11307 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11308 				continue;
11309 
11310 			if (new_plane_state->crtc == crtc ||
11311 			    old_plane_state->crtc == crtc) {
11312 				modified = true;
11313 				break;
11314 			}
11315 		}
11316 
11317 		if (!modified)
11318 			continue;
11319 
11320 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11321 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
11322 				continue;
11323 
11324 			new_plane_state =
11325 				drm_atomic_get_plane_state(state, plane);
11326 
11327 			if (IS_ERR(new_plane_state)) {
11328 				ret = PTR_ERR(new_plane_state);
11329 				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11330 				goto fail;
11331 			}
11332 		}
11333 	}
11334 
11335 	/* Remove existing planes if they are modified */
11336 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11337 		ret = dm_update_plane_state(dc, state, plane,
11338 					    old_plane_state,
11339 					    new_plane_state,
11340 					    false,
11341 					    &lock_and_validation_needed);
11342 		if (ret) {
11343 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11344 			goto fail;
11345 		}
11346 	}
11347 
11348 	/* Disable all crtcs which require disable */
11349 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11350 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11351 					   old_crtc_state,
11352 					   new_crtc_state,
11353 					   false,
11354 					   &lock_and_validation_needed);
11355 		if (ret) {
11356 			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11357 			goto fail;
11358 		}
11359 	}
11360 
11361 	/* Enable all crtcs which require enable */
11362 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11363 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
11364 					   old_crtc_state,
11365 					   new_crtc_state,
11366 					   true,
11367 					   &lock_and_validation_needed);
11368 		if (ret) {
11369 			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11370 			goto fail;
11371 		}
11372 	}
11373 
11374 	/* Add new/modified planes */
11375 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11376 		ret = dm_update_plane_state(dc, state, plane,
11377 					    old_plane_state,
11378 					    new_plane_state,
11379 					    true,
11380 					    &lock_and_validation_needed);
11381 		if (ret) {
11382 			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11383 			goto fail;
11384 		}
11385 	}
11386 
11387 	/* Run this here since we want to validate the streams we created */
11388 	ret = drm_atomic_helper_check_planes(dev, state);
11389 	if (ret) {
11390 		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11391 		goto fail;
11392 	}
11393 
11394 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11395 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11396 		if (dm_new_crtc_state->mpo_requested)
11397 			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11398 	}
11399 
11400 	/* Check cursor planes scaling */
11401 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11402 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11403 		if (ret) {
11404 			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11405 			goto fail;
11406 		}
11407 	}
11408 
11409 	if (state->legacy_cursor_update) {
11410 		/*
11411 		 * This is a fast cursor update coming from the plane update
11412 		 * helper, check if it can be done asynchronously for better
11413 		 * performance.
11414 		 */
11415 		state->async_update =
11416 			!drm_atomic_helper_async_check(dev, state);
11417 
11418 		/*
11419 		 * Skip the remaining global validation if this is an async
11420 		 * update. Cursor updates can be done without affecting
11421 		 * state or bandwidth calcs and this avoids the performance
11422 		 * penalty of locking the private state object and
11423 		 * allocating a new dc_state.
11424 		 */
11425 		if (state->async_update)
11426 			return 0;
11427 	}
11428 
11429 	/* Check scaling and underscan changes */
11430 	/* TODO: Scaling-change validation was removed because a new stream
11431 	 * cannot be committed into the context without causing a full reset.
11432 	 * Need to decide how to handle this.
11433 	 */
11434 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11435 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11436 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11437 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11438 
11439 		/* Skip any modesets/resets */
11440 		if (!acrtc || drm_atomic_crtc_needs_modeset(
11441 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11442 			continue;
11443 
11444 		/* Skip anything that is not a scale or underscan change */
11445 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11446 			continue;
11447 
11448 		lock_and_validation_needed = true;
11449 	}
11450 
11451 #if defined(CONFIG_DRM_AMD_DC_DCN)
11452 	/* set the slot info for each mst_state based on the link encoding format */
11453 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11454 		struct amdgpu_dm_connector *aconnector;
11455 		struct drm_connector *connector;
11456 		struct drm_connector_list_iter iter;
11457 		u8 link_coding_cap;
11458 
11459 		if (!mgr->mst_state)
11460 			continue;
11461 
11462 		drm_connector_list_iter_begin(dev, &iter);
11463 		drm_for_each_connector_iter(connector, &iter) {
11464 			int id = connector->index;
11465 
11466 			if (id == mst_state->mgr->conn_base_id) {
11467 				aconnector = to_amdgpu_dm_connector(connector);
11468 				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11469 				drm_dp_mst_update_slots(mst_state, link_coding_cap);
11470 
11471 				break;
11472 			}
11473 		}
11474 		drm_connector_list_iter_end(&iter);
11475 
11476 	}
11477 #endif
11478 	/**
11479 	 * Streams and planes are reset when there are changes that affect
11480 	 * bandwidth. Anything that affects bandwidth needs to go through
11481 	 * DC global validation to ensure that the configuration can be applied
11482 	 * to hardware.
11483 	 *
11484 	 * We currently have to stall out here in atomic_check for outstanding
11485 	 * commits to finish in this case because our IRQ handlers reference
11486 	 * DRM state directly - we can end up disabling interrupts too early
11487 	 * if we don't.
11488 	 *
11489 	 * TODO: Remove this stall and drop DM state private objects.
11490 	 */
11491 	if (lock_and_validation_needed) {
11492 		ret = dm_atomic_get_state(state, &dm_state);
11493 		if (ret) {
11494 			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11495 			goto fail;
11496 		}
11497 
11498 		ret = do_aquire_global_lock(dev, state);
11499 		if (ret) {
11500 			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11501 			goto fail;
11502 		}
11503 
11504 #if defined(CONFIG_DRM_AMD_DC_DCN)
11505 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11506 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11507 			ret = -EINVAL;
11508 			goto fail;
11509 		}
11510 
11511 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11512 		if (ret) {
11513 			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11514 			goto fail;
11515 		}
11516 #endif
11517 
11518 		/*
11519 		 * Perform validation of MST topology in the state:
11520 		 * We need to perform MST atomic check before calling
11521 		 * dc_validate_global_state(), otherwise there is a chance
11522 		 * of getting stuck in an infinite loop and hanging eventually.
11523 		 */
11524 		ret = drm_dp_mst_atomic_check(state);
11525 		if (ret) {
11526 			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11527 			goto fail;
11528 		}
11529 		status = dc_validate_global_state(dc, dm_state->context, true);
11530 		if (status != DC_OK) {
11531 			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11532 				       dc_status_to_str(status), status);
11533 			ret = -EINVAL;
11534 			goto fail;
11535 		}
11536 	} else {
11537 		/*
11538 		 * The commit is a fast update. Fast updates shouldn't change
11539 		 * the DC context or affect global validation, and they can have
11540 		 * their commit work done in parallel with other commits not
11541 		 * touching the same resource. If we have a new DC context as part
11542 		 * of the DM atomic state from validation, we need to free it and
11543 		 * retain the existing one instead.
11544 		 *
11545 		 * Furthermore, since the DM atomic state only contains the DC
11546 		 * context and can safely be annulled, we can free the state
11547 		 * and clear the associated private object now to free
11548 		 * some memory and avoid a possible use-after-free later.
11549 		 */
11550 
11551 		for (i = 0; i < state->num_private_objs; i++) {
11552 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11553 
11554 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11555 				int j = state->num_private_objs - 1;
11556 
11557 				dm_atomic_destroy_state(obj,
11558 						state->private_objs[i].state);
11559 
11560 				/* If i is not at the end of the array then the
11561 				 * last element needs to be moved to where i was
11562 				 * before the array can safely be truncated.
11563 				 */
11564 				if (i != j)
11565 					state->private_objs[i] =
11566 						state->private_objs[j];
11567 
11568 				state->private_objs[j].ptr = NULL;
11569 				state->private_objs[j].state = NULL;
11570 				state->private_objs[j].old_state = NULL;
11571 				state->private_objs[j].new_state = NULL;
11572 
11573 				state->num_private_objs = j;
11574 				break;
11575 			}
11576 		}
11577 	}
11578 
11579 	/* Store the overall update type for use later in atomic check. */
11580 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11581 		struct dm_crtc_state *dm_new_crtc_state =
11582 			to_dm_crtc_state(new_crtc_state);
11583 
11584 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11585 							 UPDATE_TYPE_FULL :
11586 							 UPDATE_TYPE_FAST;
11587 	}
11588 
11589 	/* Must be success */
11590 	WARN_ON(ret);
11591 
11592 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11593 
11594 	return ret;
11595 
11596 fail:
11597 	if (ret == -EDEADLK)
11598 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11599 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11600 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11601 	else
11602 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11603 
11604 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11605 
11606 	return ret;
11607 }
11608 
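/*
 * Check whether the sink sets MSA_TIMING_PAR_IGNORED in the
 * DOWN_STREAM_PORT_COUNT DPCD register, i.e. whether it can render the
 * stream without the MSA timing parameters. Used below to decide whether
 * the FreeSync EDID range check is required for DP/eDP sinks.
 */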
11609 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11610 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11611 {
11612 	uint8_t dpcd_data;
11613 	bool capable = false;
11614 
11615 	if (amdgpu_dm_connector->dc_link &&
11616 		dm_helpers_dp_read_dpcd(
11617 				NULL,
11618 				amdgpu_dm_connector->dc_link,
11619 				DP_DOWN_STREAM_PORT_COUNT,
11620 				&dpcd_data,
11621 				sizeof(dpcd_data))) {
11622 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
11623 	}
11624 
11625 	return capable;
11626 }
11627 
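/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to DMUB for parsing. The firmware replies either with
 * an ACK for the chunk or, once it has found the AMD VSDB, with the
 * parsed FreeSync range, which is copied into @vsdb.
 */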
11628 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11629 		unsigned int offset,
11630 		unsigned int total_length,
11631 		uint8_t *data,
11632 		unsigned int length,
11633 		struct amdgpu_hdmi_vsdb_info *vsdb)
11634 {
11635 	bool res;
11636 	union dmub_rb_cmd cmd;
11637 	struct dmub_cmd_send_edid_cea *input;
11638 	struct dmub_cmd_edid_cea_output *output;
11639 
11640 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11641 		return false;
11642 
11643 	memset(&cmd, 0, sizeof(cmd));
11644 
11645 	input = &cmd.edid_cea.data.input;
11646 
11647 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11648 	cmd.edid_cea.header.sub_type = 0;
11649 	cmd.edid_cea.header.payload_bytes =
11650 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11651 	input->offset = offset;
11652 	input->length = length;
11653 	input->cea_total_length = total_length;
11654 	memcpy(input->payload, data, length);
11655 
11656 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11657 	if (!res) {
11658 		DRM_ERROR("EDID CEA parser failed\n");
11659 		return false;
11660 	}
11661 
11662 	output = &cmd.edid_cea.data.output;
11663 
11664 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11665 		if (!output->ack.success) {
11666 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11667 					output->ack.offset);
11668 		}
11669 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11670 		if (!output->amd_vsdb.vsdb_found)
11671 			return false;
11672 
11673 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11674 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11675 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11676 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11677 	} else {
11678 		DRM_WARN("Unknown EDID CEA parser results\n");
11679 		return false;
11680 	}
11681 
11682 	return true;
11683 }
11684 
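/*
 * Stream a CEA extension block to the DMCU parser 8 bytes at a time and,
 * after the final chunk, retrieve the AMD VSDB (FreeSync range) result.
 */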
11685 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11686 		uint8_t *edid_ext, int len,
11687 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11688 {
11689 	int i;
11690 
11691 	/* send extension block to DMCU for parsing */
11692 	for (i = 0; i < len; i += 8) {
11693 		bool res;
11694 		int offset;
11695 
11696 		/* send 8 bytes at a time */
11697 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11698 			return false;
11699 
11700 		if (i + 8 == len) {
11701 			/* entire EDID block sent, expect the parse result */
11702 			int version, min_rate, max_rate;
11703 
11704 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11705 			if (res) {
11706 				/* amd vsdb found */
11707 				vsdb_info->freesync_supported = 1;
11708 				vsdb_info->amd_vsdb_version = version;
11709 				vsdb_info->min_refresh_rate_hz = min_rate;
11710 				vsdb_info->max_refresh_rate_hz = max_rate;
11711 				return true;
11712 			}
11713 			/* not amd vsdb */
11714 			return false;
11715 		}
11716 
11717 		/* check for ack */
11718 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11719 		if (!res)
11720 			return false;
11721 	}
11722 
11723 	return false;
11724 }
11725 
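/*
 * DMUB variant of parse_edid_cea_dmcu(): dm_edid_parser_send_cea() fills
 * @vsdb_info as the replies come back from the firmware.
 */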
11726 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11727 		uint8_t *edid_ext, int len,
11728 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11729 {
11730 	int i;
11731 
11732 	/* send extension block to DMUB for parsing */
11733 	for (i = 0; i < len; i += 8) {
11734 		/* send 8 bytes at a time */
11735 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11736 			return false;
11737 	}
11738 
11739 	return vsdb_info->freesync_supported;
11740 }
11741 
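/*
 * Route CEA extension parsing to the DMUB-based parser when a DMUB
 * service is available, otherwise fall back to the DMCU-based parser.
 */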
11742 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11743 		uint8_t *edid_ext, int len,
11744 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11745 {
11746 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11747 
11748 	if (adev->dm.dmub_srv)
11749 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11750 	else
11751 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11752 }
11753 
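/*
 * Locate the CEA extension block in @edid (mirroring drm_find_cea_extension())
 * and parse it for the HDMI AMD VSDB. Returns the extension index when a
 * valid VSDB is found, -ENODEV otherwise.
 */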
11754 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11755 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11756 {
11757 	uint8_t *edid_ext = NULL;
11758 	int i;
11759 	bool valid_vsdb_found = false;
11760 
11761 	/*----- drm_find_cea_extension() -----*/
11762 	/* No EDID or EDID extensions */
11763 	if (edid == NULL || edid->extensions == 0)
11764 		return -ENODEV;
11765 
11766 	/* Find CEA extension */
11767 	for (i = 0; i < edid->extensions; i++) {
11768 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11769 		if (edid_ext[0] == CEA_EXT)
11770 			break;
11771 	}
11772 
11773 	if (i == edid->extensions)
11774 		return -ENODEV;
11775 
11776 	/*----- cea_db_offsets() -----*/
11777 	if (edid_ext[0] != CEA_EXT)
11778 		return -ENODEV;
11779 
11780 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11781 
11782 	return valid_vsdb_found ? i : -ENODEV;
11783 }
11784 
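/*
 * Refresh the connector's FreeSync/VRR capability from @edid: parse the
 * monitor range descriptor for DP/eDP sinks (when the sink can ignore MSA
 * timing) or the AMD VSDB for HDMI sinks, then publish the result through
 * the DM connector state and the vrr_capable property.
 */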
11785 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11786 					struct edid *edid)
11787 {
11788 	int i = 0;
11789 	struct detailed_timing *timing;
11790 	struct detailed_non_pixel *data;
11791 	struct detailed_data_monitor_range *range;
11792 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11793 			to_amdgpu_dm_connector(connector);
11794 	struct dm_connector_state *dm_con_state = NULL;
11795 	struct dc_sink *sink;
11796 
11797 	struct drm_device *dev = connector->dev;
11798 	struct amdgpu_device *adev = drm_to_adev(dev);
11799 	bool freesync_capable = false;
11800 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11801 
11802 	if (!connector->state) {
11803 		DRM_ERROR("%s - Connector has no state", __func__);
11804 		goto update;
11805 	}
11806 
11807 	sink = amdgpu_dm_connector->dc_sink ?
11808 		amdgpu_dm_connector->dc_sink :
11809 		amdgpu_dm_connector->dc_em_sink;
11810 
11811 	if (!edid || !sink) {
11812 		dm_con_state = to_dm_connector_state(connector->state);
11813 
11814 		amdgpu_dm_connector->min_vfreq = 0;
11815 		amdgpu_dm_connector->max_vfreq = 0;
11816 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11817 		connector->display_info.monitor_range.min_vfreq = 0;
11818 		connector->display_info.monitor_range.max_vfreq = 0;
11819 		freesync_capable = false;
11820 
11821 		goto update;
11822 	}
11823 
11824 	dm_con_state = to_dm_connector_state(connector->state);
11825 
11826 	if (!adev->dm.freesync_module)
11827 		goto update;
11828 
11830 	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11831 		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
11832 		bool edid_check_required = false;
11833 
11834 		if (edid) {
11835 			edid_check_required = is_dp_capable_without_timing_msa(
11836 						adev->dm.dc,
11837 						amdgpu_dm_connector);
11838 		}
11839 
11840 		if (edid_check_required && (edid->version > 1 ||
11841 		   (edid->version == 1 && edid->revision > 1))) {
11842 			for (i = 0; i < 4; i++) {
11844 				timing	= &edid->detailed_timings[i];
11845 				data	= &timing->data.other_data;
11846 				range	= &data->data.range;
11847 				/*
11848 				 * Check if monitor has continuous frequency mode
11849 				 */
11850 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11851 					continue;
11852 				/*
11853 				 * Check for flag range limits only. If flag == 1 then
11854 				 * no additional timing information provided.
11855 				 * Default GTF, GTF Secondary curve and CVT are not
11856 				 * supported
11857 				 */
11858 				if (range->flags != 1)
11859 					continue;
11860 
11861 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11862 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11863 				amdgpu_dm_connector->pixel_clock_mhz =
11864 					range->pixel_clock_mhz * 10;
11865 
11866 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11867 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11868 
11869 				break;
11870 			}
11871 
11872 			if (amdgpu_dm_connector->max_vfreq -
11873 			    amdgpu_dm_connector->min_vfreq > 10)
11874 				freesync_capable = true;
11877 		}
11878 	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11879 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11880 		if (i >= 0 && vsdb_info.freesync_supported) {
11881 			timing  = &edid->detailed_timings[i];
11882 			data    = &timing->data.other_data;
11883 
11884 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11885 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11886 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11887 				freesync_capable = true;
11888 
11889 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11890 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11891 		}
11892 	}
11893 
11894 update:
11895 	if (dm_con_state)
11896 		dm_con_state->freesync_capable = freesync_capable;
11897 
11898 	if (connector->vrr_capable_property)
11899 		drm_connector_set_vrr_capable_property(connector,
11900 						       freesync_capable);
11901 }
11902 
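/*
 * Apply the current force_timing_sync setting to every stream in the
 * active DC state and retrigger CRTC synchronization.
 */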
11903 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11904 {
11905 	struct amdgpu_device *adev = drm_to_adev(dev);
11906 	struct dc *dc = adev->dm.dc;
11907 	int i;
11908 
11909 	mutex_lock(&adev->dm.dc_lock);
11910 	if (dc->current_state) {
11911 		for (i = 0; i < dc->current_state->stream_count; ++i)
11912 			dc->current_state->streams[i]
11913 				->triggered_crtc_reset.enabled =
11914 				adev->dm.force_timing_sync;
11915 
11916 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11917 		dc_trigger_sync(dc, dc->current_state);
11918 	}
11919 	mutex_unlock(&adev->dm.dc_lock);
11920 }
11921 
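/* Register write helper for DC: write through CGS and record a trace event. */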
11922 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11923 		       uint32_t value, const char *func_name)
11924 {
11925 #ifdef DM_CHECK_ADDR_0
11926 	if (address == 0) {
11927 		DC_ERR("invalid register write. address = 0");
11928 		return;
11929 	}
11930 #endif
11931 	cgs_write_register(ctx->cgs_device, address, value);
11932 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11933 }
11934 
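/*
 * Register read helper for DC: reads are rejected while a DMUB register
 * offload gather is in progress, otherwise read through CGS and record a
 * trace event.
 */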
11935 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11936 			  const char *func_name)
11937 {
11938 	uint32_t value;
11939 #ifdef DM_CHECK_ADDR_0
11940 	if (address == 0) {
11941 		DC_ERR("invalid register read; address = 0\n");
11942 		return 0;
11943 	}
11944 #endif
11945 
11946 	if (ctx->dmub_srv &&
11947 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11948 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11949 		ASSERT(false);
11950 		return 0;
11951 	}
11952 
11953 	value = cgs_read_register(ctx->cgs_device, address);
11954 
11955 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11956 
11957 	return value;
11958 }
11959 
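/*
 * Translate a DMUB async-to-sync completion status into a return value and
 * an *operation_result code, for both AUX and SET_CONFIG requests.
 */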
11960 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11961 						struct dc_context *ctx,
11962 						uint8_t status_type,
11963 						uint32_t *operation_result)
11964 {
11965 	struct amdgpu_device *adev = ctx->driver_context;
11966 	int return_status = -1;
11967 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
11968 
11969 	if (is_cmd_aux) {
11970 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11971 			return_status = p_notify->aux_reply.length;
11972 			*operation_result = p_notify->result;
11973 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11974 			*operation_result = AUX_RET_ERROR_TIMEOUT;
11975 		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11976 			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11977 		} else {
11978 			*operation_result = AUX_RET_ERROR_UNKNOWN;
11979 		}
11980 	} else {
11981 		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11982 			return_status = 0;
11983 			*operation_result = p_notify->sc_status;
11984 		} else {
11985 			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
11986 		}
11987 	}
11988 
11989 	return return_status;
11990 }
11991 
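/*
 * Issue an AUX or SET_CONFIG request to DMUB and wait (up to 10 seconds)
 * on dmub_aux_transfer_done for the reply; for successful AUX reads the
 * reply data is copied back into the payload before reporting the outcome
 * through *operation_result.
 */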
11992 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11993 	unsigned int link_index, void *cmd_payload, void *operation_result)
11994 {
11995 	struct amdgpu_device *adev = ctx->driver_context;
11996 	int ret = 0;
11997 
11998 	if (is_cmd_aux) {
11999 		dc_process_dmub_aux_transfer_async(ctx->dc,
12000 			link_index, (struct aux_payload *)cmd_payload);
12001 	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
12002 					(struct set_config_cmd_payload *)cmd_payload,
12003 					adev->dm.dmub_notify)) {
12004 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12005 					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12006 					(uint32_t *)operation_result);
12007 	}
12008 
12009 	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
12010 	if (ret == 0) {
12011 		DRM_ERROR("wait_for_completion_timeout timeout!");
12012 		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12013 				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
12014 				(uint32_t *)operation_result);
12015 	}
12016 
12017 	if (is_cmd_aux) {
12018 		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
12019 			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
12020 
12021 			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
12022 			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
12023 			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
12024 				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
12025 				       adev->dm.dmub_notify->aux_reply.length);
12026 			}
12027 		}
12028 	}
12029 
12030 	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
12031 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
12032 			(uint32_t *)operation_result);
12033 }
12034 
12035 /*
12036  * Check whether seamless boot is supported.
12037  *
12038  * So far we only support seamless boot on CHIP_VANGOGH.
12039  * If everything goes well, we may consider expanding
12040  * seamless boot to other ASICs.
12041  */
12042 bool check_seamless_boot_capability(struct amdgpu_device *adev)
12043 {
12044 	switch (adev->asic_type) {
12045 	case CHIP_VANGOGH:
12046 		if (!adev->mman.keep_stolen_vga_memory)
12047 			return true;
12048 		break;
12049 	default:
12050 		break;
12051 	}
12052 
12053 	return false;
12054 }
12055